-# Copyright 2011-2012 pooler@litecoinpool.org
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
+/*
+ * Copyright 2011-2012 pooler@litecoinpool.org
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
#if defined(__linux__) && defined(__ELF__)
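+	/* emit an empty .note.GNU-stack section so the stack is not made executable */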
.section .note.GNU-stack,"",%progbits
movl %r11d, \do+56(\dest)
.endm
-.macro gen_salsa8_core_doubleround
- movq 72(%rsp), %r15
+.macro salsa8_core_gen_doubleround
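+	/* one Salsa20 double round on the 16-word state kept in general-purpose
+	 * registers and the 48/72/88(%rsp) spill slots (the register assignment
+	 * is documented in salsa8_core_gen) */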
+ movq 72(%rsp), %r15
+
leaq (%r14, %rdx), %rbp
roll $7, %ebp
- xorq %rbp, %r9
+ xorl %ebp, %r9d
leaq (%rdi, %r15), %rbp
roll $7, %ebp
- xorq %rbp, %r10
+ xorl %ebp, %r10d
leaq (%rdx, %r9), %rbp
roll $9, %ebp
- xorq %rbp, %r11
+ xorl %ebp, %r11d
leaq (%r15, %r10), %rbp
roll $9, %ebp
- xorq %rbp, %r13
+ xorl %ebp, %r13d
+
leaq (%r9, %r11), %rbp
roll $13, %ebp
- xorq %rbp, %r14
+ xorl %ebp, %r14d
leaq (%r10, %r13), %rbp
roll $13, %ebp
- xorq %rbp, %rdi
+ xorl %ebp, %edi
leaq (%r11, %r14), %rbp
roll $18, %ebp
- xorq %rbp, %rdx
+ xorl %ebp, %edx
leaq (%r13, %rdi), %rbp
roll $18, %ebp
- xorq %rbp, %r15
-
+ xorl %ebp, %r15d
+
movq 48(%rsp), %rbp
movq %r15, 72(%rsp)
-
+
leaq (%rax, %rbp), %r15
roll $7, %r15d
- xorq %r15, %rbx
+ xorl %r15d, %ebx
leaq (%rbp, %rbx), %r15
roll $9, %r15d
- xorq %r15, %rcx
+ xorl %r15d, %ecx
leaq (%rbx, %rcx), %r15
roll $13, %r15d
- xorq %r15, %rax
+ xorl %r15d, %eax
leaq (%rcx, %rax), %r15
roll $18, %r15d
- xorq %r15, %rbp
-
+ xorl %r15d, %ebp
+
movq 88(%rsp), %r15
movq %rbp, 48(%rsp)
-
+
leaq (%r12, %r15), %rbp
roll $7, %ebp
- xorq %rbp, %rsi
+ xorl %ebp, %esi
leaq (%r15, %rsi), %rbp
roll $9, %ebp
- xorq %rbp, %r8
+ xorl %ebp, %r8d
leaq (%rsi, %r8), %rbp
roll $13, %ebp
- xorq %rbp, %r12
+ xorl %ebp, %r12d
leaq (%r8, %r12), %rbp
roll $18, %ebp
- xorq %rbp, %r15
-
+ xorl %ebp, %r15d
+
movq %r15, 88(%rsp)
movq 72(%rsp), %r15
-
+
leaq (%rsi, %rdx), %rbp
roll $7, %ebp
- xorq %rbp, %rdi
+ xorl %ebp, %edi
leaq (%r9, %r15), %rbp
roll $7, %ebp
- xorq %rbp, %rax
+ xorl %ebp, %eax
leaq (%rdx, %rdi), %rbp
roll $9, %ebp
- xorq %rbp, %rcx
+ xorl %ebp, %ecx
leaq (%r15, %rax), %rbp
roll $9, %ebp
- xorq %rbp, %r8
+ xorl %ebp, %r8d
+
leaq (%rdi, %rcx), %rbp
roll $13, %ebp
- xorq %rbp, %rsi
+ xorl %ebp, %esi
leaq (%rax, %r8), %rbp
roll $13, %ebp
- xorq %rbp, %r9
+ xorl %ebp, %r9d
leaq (%rcx, %rsi), %rbp
roll $18, %ebp
- xorq %rbp, %rdx
+ xorl %ebp, %edx
leaq (%r8, %r9), %rbp
roll $18, %ebp
- xorq %rbp, %r15
-
+ xorl %ebp, %r15d
+
movq 48(%rsp), %rbp
movq %r15, 72(%rsp)
-
+
leaq (%r10, %rbp), %r15
roll $7, %r15d
- xorq %r15, %r12
+ xorl %r15d, %r12d
leaq (%rbp, %r12), %r15
roll $9, %r15d
- xorq %r15, %r11
+ xorl %r15d, %r11d
leaq (%r12, %r11), %r15
roll $13, %r15d
- xorq %r15, %r10
+ xorl %r15d, %r10d
leaq (%r11, %r10), %r15
roll $18, %r15d
- xorq %r15, %rbp
-
+ xorl %r15d, %ebp
+
movq 88(%rsp), %r15
movq %rbp, 48(%rsp)
-
+
leaq (%rbx, %r15), %rbp
roll $7, %ebp
- xorq %rbp, %r14
+ xorl %ebp, %r14d
leaq (%r15, %r14), %rbp
roll $9, %ebp
- xorq %rbp, %r13
+ xorl %ebp, %r13d
leaq (%r14, %r13), %rbp
roll $13, %ebp
- xorq %rbp, %rbx
+ xorl %ebp, %ebx
leaq (%r13, %rbx), %rbp
roll $18, %ebp
- xorq %rbp, %r15
-
+ xorl %ebp, %r15d
+
movq %r15, 88(%rsp)
.endm
.text
- .align 32
-gen_salsa8_core:
- # 0: %rdx, %rdi, %rcx, %rsi
+ .p2align 6
+salsa8_core_gen:
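+	/* Salsa20/8 for the generic path: reads the 64-byte block the caller
+	 * stored at its 0(%rsp) (8(%rsp) from here, past the return address),
+	 * runs four double rounds, and returns the mixed words in %xmm0-%xmm3
+	 * for the caller to add back into the input */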
+ /* 0: %rdx, %rdi, %rcx, %rsi */
movq 8(%rsp), %rdi
movq %rdi, %rdx
shrq $32, %rdi
movq 16(%rsp), %rsi
movq %rsi, %rcx
shrq $32, %rsi
- # 1: %r9, 72(%rsp), %rax, %r8
+ /* 1: %r9, 72(%rsp), %rax, %r8 */
movq 24(%rsp), %r8
movq %r8, %r9
shrq $32, %r8
movq 32(%rsp), %r8
movq %r8, %rax
shrq $32, %r8
- # 2: %r11, %r10, 48(%rsp), %r12
+ /* 2: %r11, %r10, 48(%rsp), %r12 */
movq 40(%rsp), %r10
movq %r10, %r11
shrq $32, %r10
movq 48(%rsp), %r12
- #movq %r12, %r13
- #movq %r13, 48(%rsp)
+ /* movq %r12, %r13 */
+ /* movq %r13, 48(%rsp) */
shrq $32, %r12
- # 3: %r14, %r13, %rbx, 88(%rsp)
+ /* 3: %r14, %r13, %rbx, 88(%rsp) */
movq 56(%rsp), %r13
movq %r13, %r14
shrq $32, %r13
movq %r15, %rbx
shrq $32, %r15
movq %r15, 88(%rsp)
-
- gen_salsa8_core_doubleround
- gen_salsa8_core_doubleround
- gen_salsa8_core_doubleround
- gen_salsa8_core_doubleround
-
- movl %edx, %edx
+
+ salsa8_core_gen_doubleround
+ salsa8_core_gen_doubleround
+ salsa8_core_gen_doubleround
+ salsa8_core_gen_doubleround
+
shlq $32, %rdi
- addq %rdi, %rdx
- movd %rdx, %xmm0
-
- movl %ecx, %ecx
+ xorq %rdi, %rdx
+ movq %rdx, 24(%rsp)
+
shlq $32, %rsi
- addq %rsi, %rcx
- movd %rcx, %xmm4
-
- movq 72(%rsp), %rdi
- movl %r9d, %r9d
+ xorq %rsi, %rcx
+ movq %rcx, 32(%rsp)
+
+ movl 72(%rsp), %edi
shlq $32, %rdi
- addq %rdi, %r9
- movd %r9, %xmm1
-
- movl %eax, %eax
+ xorq %rdi, %r9
+ movq %r9, 40(%rsp)
+
+ movl 48(%rsp), %ebp
shlq $32, %r8
- addq %r8, %rax
- movd %rax, %xmm5
-
- movl %r11d, %r11d
+ xorq %r8, %rax
+ movq %rax, 48(%rsp)
+
shlq $32, %r10
- addq %r10, %r11
- movd %r11, %xmm2
-
- movl 48(%rsp), %r8d
+ xorq %r10, %r11
+ movq %r11, 56(%rsp)
+
shlq $32, %r12
- addq %r12, %r8
- movd %r8, %xmm6
-
- movl %r14d, %r14d
+ xorq %r12, %rbp
+ movq %rbp, 64(%rsp)
+
shlq $32, %r13
- addq %r13, %r14
- movd %r14, %xmm3
-
- movq 88(%rsp), %rdi
- movl %ebx, %ebx
- shlq $32, %rdi
- addq %rdi, %rbx
- movd %rbx, %xmm7
-
- punpcklqdq %xmm4, %xmm0
- punpcklqdq %xmm5, %xmm1
- punpcklqdq %xmm6, %xmm2
- punpcklqdq %xmm7, %xmm3
-
- #movq %rdx, 8(%rsp)
- #movq %rcx, 16(%rsp)
- #movq %r9, 24(%rsp)
- #movq %rax, 32(%rsp)
- #movq %r11, 40(%rsp)
- #movq %r8, 48(%rsp)
- #movq %r14, 56(%rsp)
- #movq %rbx, 64(%rsp)
-
+ xorq %r13, %r14
+ movq %r14, 72(%rsp)
+
+ movdqa 24(%rsp), %xmm0
+
+ shlq $32, %r15
+ xorq %r15, %rbx
+ movq %rbx, 80(%rsp)
+
+ movdqa 40(%rsp), %xmm1
+ movdqa 56(%rsp), %xmm2
+ movdqa 72(%rsp), %xmm3
+
ret
-
-
+
+
.text
- .align 32
+ .p2align 6
.globl scrypt_core
.globl _scrypt_core
scrypt_core:
popq %rbp
popq %rbx
.endm
-
- # GenuineIntel processors have fast SIMD
+
+ /* GenuineIntel processors have fast SIMD */
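+	/* cpuid leaf 0 returns the vendor string in %ebx:%edx:%ecx;
+	 * 0x756e6547/0x49656e69/0x6c65746e spell "GenuineIntel" */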
xorl %eax, %eax
cpuid
cmpl $0x6c65746e, %ecx
- jne gen_scrypt_core
+ jne scrypt_core_gen
cmpl $0x49656e69, %edx
- jne gen_scrypt_core
+ jne scrypt_core_gen
cmpl $0x756e6547, %ebx
- je xmm_scrypt_core
-
-gen_scrypt_core:
+ je scrypt_core_xmm
+
+ .p2align 6
+scrypt_core_gen:
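+	/* generic scrypt core, N = 1024, r = 1: the 128-byte block X is at
+	 * (%rdi) and the 128 KiB scratchpad V at (%rsi) */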
subq $136, %rsp
movdqa 0(%rdi), %xmm8
movdqa 16(%rdi), %xmm9
movdqa 80(%rdi), %xmm13
movdqa 96(%rdi), %xmm14
movdqa 112(%rdi), %xmm15
-
+
leaq 131072(%rsi), %rcx
movq %rdi, 104(%rsp)
movq %rsi, 112(%rsp)
movq %rcx, 120(%rsp)
-gen_scrypt_core_loop1:
+scrypt_core_gen_loop1:
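+	/* first loop: V[i] = X, then X = blockmix_salsa8(X) */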
movdqa %xmm8, 0(%rsi)
movdqa %xmm9, 16(%rsi)
movdqa %xmm10, 32(%rsi)
movdqa %xmm13, 80(%rsi)
movdqa %xmm14, 96(%rsi)
movdqa %xmm15, 112(%rsi)
-
+
pxor %xmm12, %xmm8
pxor %xmm13, %xmm9
pxor %xmm14, %xmm10
movdqa %xmm10, 32(%rsp)
movdqa %xmm11, 48(%rsp)
movq %rsi, 128(%rsp)
- call gen_salsa8_core
+ call salsa8_core_gen
paddd %xmm0, %xmm8
paddd %xmm1, %xmm9
paddd %xmm2, %xmm10
paddd %xmm3, %xmm11
-
+
pxor %xmm8, %xmm12
pxor %xmm9, %xmm13
pxor %xmm10, %xmm14
movdqa %xmm13, 16(%rsp)
movdqa %xmm14, 32(%rsp)
movdqa %xmm15, 48(%rsp)
- call gen_salsa8_core
+ call salsa8_core_gen
movq 128(%rsp), %rsi
paddd %xmm0, %xmm12
paddd %xmm1, %xmm13
paddd %xmm2, %xmm14
paddd %xmm3, %xmm15
-
+
addq $128, %rsi
movq 120(%rsp), %rcx
cmpq %rcx, %rsi
- jne gen_scrypt_core_loop1
-
+ jne scrypt_core_gen_loop1
+
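+	/* second loop: 1024 iterations; j = X[16] & (N-1) selects the V block
+	 * that is xored into X before each blockmix */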
movq $1024, %rcx
-gen_scrypt_core_loop2:
- movq 112(%rsp), %rsi
movd %xmm12, %edx
+scrypt_core_gen_loop2:
+ movq 112(%rsp), %rsi
andl $1023, %edx
shll $7, %edx
- movdqa 0(%rsi, %rdx), %xmm0
- movdqa 16(%rsi, %rdx), %xmm1
- movdqa 32(%rsi, %rdx), %xmm2
- movdqa 48(%rsi, %rdx), %xmm3
- movdqa 64(%rsi, %rdx), %xmm4
- movdqa 80(%rsi, %rdx), %xmm5
- movdqa 96(%rsi, %rdx), %xmm6
- movdqa 112(%rsi, %rdx), %xmm7
+ addq %rsi, %rdx
+ movdqa 0(%rdx), %xmm0
+ movdqa 16(%rdx), %xmm1
+ movdqa 32(%rdx), %xmm2
+ movdqa 48(%rdx), %xmm3
+ movdqa 64(%rdx), %xmm4
+ movdqa 80(%rdx), %xmm5
+ movdqa 96(%rdx), %xmm6
+ movdqa 112(%rdx), %xmm7
pxor %xmm0, %xmm8
pxor %xmm1, %xmm9
pxor %xmm2, %xmm10
pxor %xmm5, %xmm13
pxor %xmm6, %xmm14
pxor %xmm7, %xmm15
-
+
pxor %xmm12, %xmm8
pxor %xmm13, %xmm9
pxor %xmm14, %xmm10
movdqa %xmm10, 32(%rsp)
movdqa %xmm11, 48(%rsp)
movq %rcx, 128(%rsp)
- call gen_salsa8_core
+ call salsa8_core_gen
paddd %xmm0, %xmm8
paddd %xmm1, %xmm9
paddd %xmm2, %xmm10
paddd %xmm3, %xmm11
-
+
pxor %xmm8, %xmm12
pxor %xmm9, %xmm13
pxor %xmm10, %xmm14
movdqa %xmm13, 16(%rsp)
movdqa %xmm14, 32(%rsp)
movdqa %xmm15, 48(%rsp)
- call gen_salsa8_core
+ call salsa8_core_gen
movq 128(%rsp), %rcx
+ addl 0(%rsp), %edx
paddd %xmm0, %xmm12
paddd %xmm1, %xmm13
paddd %xmm2, %xmm14
paddd %xmm3, %xmm15
-
+
subq $1, %rcx
- ja gen_scrypt_core_loop2
-
+ ja scrypt_core_gen_loop2
+
movq 104(%rsp), %rdi
movdqa %xmm8, 0(%rdi)
movdqa %xmm9, 16(%rdi)
movdqa %xmm13, 80(%rdi)
movdqa %xmm14, 96(%rdi)
movdqa %xmm15, 112(%rdi)
-
+
addq $136, %rsp
scrypt_core_cleanup
ret
-.macro xmm_salsa8_core_doubleround
+.macro salsa8_core_xmm_doubleround
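+	/* one Salsa20 double round with the state held across %xmm0-%xmm3 so
+	 * that all four quarter-rounds of a round run in parallel;
+	 * %xmm4 and %xmm5 are scratch */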
movdqa %xmm1, %xmm4
paddd %xmm0, %xmm4
movdqa %xmm4, %xmm5
pslld $7, %xmm4
psrld $25, %xmm5
pxor %xmm4, %xmm3
- pxor %xmm5, %xmm3
movdqa %xmm0, %xmm4
-
+ pxor %xmm5, %xmm3
+
paddd %xmm3, %xmm4
movdqa %xmm4, %xmm5
pslld $9, %xmm4
psrld $23, %xmm5
pxor %xmm4, %xmm2
movdqa %xmm3, %xmm4
- pshufd $0x93, %xmm3, %xmm3
pxor %xmm5, %xmm2
-
+ pshufd $0x93, %xmm3, %xmm3
+
paddd %xmm2, %xmm4
movdqa %xmm4, %xmm5
pslld $13, %xmm4
psrld $19, %xmm5
pxor %xmm4, %xmm1
movdqa %xmm2, %xmm4
- pshufd $0x4e, %xmm2, %xmm2
pxor %xmm5, %xmm1
-
+ pshufd $0x4e, %xmm2, %xmm2
+
paddd %xmm1, %xmm4
movdqa %xmm4, %xmm5
pslld $18, %xmm4
psrld $14, %xmm5
pxor %xmm4, %xmm0
- pshufd $0x39, %xmm1, %xmm1
- pxor %xmm5, %xmm0
movdqa %xmm3, %xmm4
-
+ pxor %xmm5, %xmm0
+ pshufd $0x39, %xmm1, %xmm1
+
paddd %xmm0, %xmm4
movdqa %xmm4, %xmm5
pslld $7, %xmm4
psrld $25, %xmm5
pxor %xmm4, %xmm1
- pxor %xmm5, %xmm1
movdqa %xmm0, %xmm4
-
+ pxor %xmm5, %xmm1
+
paddd %xmm1, %xmm4
movdqa %xmm4, %xmm5
pslld $9, %xmm4
psrld $23, %xmm5
pxor %xmm4, %xmm2
movdqa %xmm1, %xmm4
- pshufd $0x93, %xmm1, %xmm1
pxor %xmm5, %xmm2
-
+ pshufd $0x93, %xmm1, %xmm1
+
paddd %xmm2, %xmm4
movdqa %xmm4, %xmm5
pslld $13, %xmm4
psrld $19, %xmm5
pxor %xmm4, %xmm3
movdqa %xmm2, %xmm4
- pshufd $0x4e, %xmm2, %xmm2
pxor %xmm5, %xmm3
-
+ pshufd $0x4e, %xmm2, %xmm2
+
paddd %xmm3, %xmm4
movdqa %xmm4, %xmm5
pslld $18, %xmm4
pxor %xmm5, %xmm0
.endm
-.macro xmm_salsa8_core
- xmm_salsa8_core_doubleround
- xmm_salsa8_core_doubleround
- xmm_salsa8_core_doubleround
- xmm_salsa8_core_doubleround
+.macro salsa8_core_xmm
+ salsa8_core_xmm_doubleround
+ salsa8_core_xmm_doubleround
+ salsa8_core_xmm_doubleround
+ salsa8_core_xmm_doubleround
.endm
-
- .align 32
-xmm_scrypt_core:
- # shuffle 1st block into %xmm8-%xmm11
- movl 60(%rdi), %edx
- movl 44(%rdi), %ecx
- movl 28(%rdi), %ebx
- movl 12(%rdi), %eax
- movd %edx, %xmm0
- movd %ecx, %xmm1
- movd %ebx, %xmm2
- movd %eax, %xmm3
- movl 40(%rdi), %ecx
- movl 24(%rdi), %ebx
- movl 8(%rdi), %eax
- movl 56(%rdi), %edx
- pshufd $0x93, %xmm0, %xmm0
- pshufd $0x93, %xmm1, %xmm1
- pshufd $0x93, %xmm2, %xmm2
- pshufd $0x93, %xmm3, %xmm3
- movd %ecx, %xmm4
- movd %ebx, %xmm5
- movd %eax, %xmm6
- movd %edx, %xmm7
- paddd %xmm4, %xmm0
- paddd %xmm5, %xmm1
- paddd %xmm6, %xmm2
- paddd %xmm7, %xmm3
- movl 20(%rdi), %ebx
- movl 4(%rdi), %eax
- movl 52(%rdi), %edx
- movl 36(%rdi), %ecx
- pshufd $0x93, %xmm0, %xmm0
- pshufd $0x93, %xmm1, %xmm1
- pshufd $0x93, %xmm2, %xmm2
- pshufd $0x93, %xmm3, %xmm3
- movd %ebx, %xmm4
- movd %eax, %xmm5
- movd %edx, %xmm6
- movd %ecx, %xmm7
- paddd %xmm4, %xmm0
- paddd %xmm5, %xmm1
- paddd %xmm6, %xmm2
- paddd %xmm7, %xmm3
- movl 0(%rdi), %eax
- movl 48(%rdi), %edx
- movl 32(%rdi), %ecx
- movl 16(%rdi), %ebx
- pshufd $0x93, %xmm0, %xmm0
- pshufd $0x93, %xmm1, %xmm1
- pshufd $0x93, %xmm2, %xmm2
- pshufd $0x93, %xmm3, %xmm3
- movd %eax, %xmm8
- movd %edx, %xmm9
- movd %ecx, %xmm10
- movd %ebx, %xmm11
- paddd %xmm0, %xmm8
- paddd %xmm1, %xmm9
- paddd %xmm2, %xmm10
- paddd %xmm3, %xmm11
-
- # shuffle 2nd block into %xmm12-%xmm15
- movl 124(%rdi), %edx
- movl 108(%rdi), %ecx
- movl 92(%rdi), %ebx
- movl 76(%rdi), %eax
- movd %edx, %xmm0
- movd %ecx, %xmm1
- movd %ebx, %xmm2
- movd %eax, %xmm3
- movl 104(%rdi), %ecx
- movl 88(%rdi), %ebx
- movl 72(%rdi), %eax
- movl 120(%rdi), %edx
- pshufd $0x93, %xmm0, %xmm0
- pshufd $0x93, %xmm1, %xmm1
- pshufd $0x93, %xmm2, %xmm2
- pshufd $0x93, %xmm3, %xmm3
- movd %ecx, %xmm4
- movd %ebx, %xmm5
- movd %eax, %xmm6
- movd %edx, %xmm7
- paddd %xmm4, %xmm0
- paddd %xmm5, %xmm1
- paddd %xmm6, %xmm2
- paddd %xmm7, %xmm3
- movl 84(%rdi), %ebx
- movl 68(%rdi), %eax
- movl 116(%rdi), %edx
- movl 100(%rdi), %ecx
- pshufd $0x93, %xmm0, %xmm0
- pshufd $0x93, %xmm1, %xmm1
- pshufd $0x93, %xmm2, %xmm2
- pshufd $0x93, %xmm3, %xmm3
- movd %ebx, %xmm4
- movd %eax, %xmm5
- movd %edx, %xmm6
- movd %ecx, %xmm7
- paddd %xmm4, %xmm0
- paddd %xmm5, %xmm1
- paddd %xmm6, %xmm2
- paddd %xmm7, %xmm3
- movl 64(%rdi), %eax
- movl 112(%rdi), %edx
- movl 96(%rdi), %ecx
- movl 80(%rdi), %ebx
- pshufd $0x93, %xmm0, %xmm0
- pshufd $0x93, %xmm1, %xmm1
- pshufd $0x93, %xmm2, %xmm2
- pshufd $0x93, %xmm3, %xmm3
- movd %eax, %xmm12
- movd %edx, %xmm13
- movd %ecx, %xmm14
- movd %ebx, %xmm15
- paddd %xmm0, %xmm12
- paddd %xmm1, %xmm13
- paddd %xmm2, %xmm14
- paddd %xmm3, %xmm15
-
+
+ .p2align 6
+scrypt_core_xmm:
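+	/* SSE2 scrypt core: X at (%rdi), V at (%rsi), as in scrypt_core_gen;
+	 * X is first shuffled into the word order used by salsa8_core_xmm
+	 * and shuffled back before returning */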
+ pcmpeqw %xmm1, %xmm1
+ psrlq $32, %xmm1
+
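+	/* shuffle 1st block into %xmm8-%xmm11 */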
+ movdqa 0(%rdi), %xmm8
+ movdqa 16(%rdi), %xmm11
+ movdqa 32(%rdi), %xmm10
+ movdqa 48(%rdi), %xmm9
+ movdqa %xmm8, %xmm0
+ pxor %xmm11, %xmm8
+ pand %xmm1, %xmm8
+ pxor %xmm11, %xmm8
+ pxor %xmm10, %xmm11
+ pand %xmm1, %xmm11
+ pxor %xmm10, %xmm11
+ pxor %xmm9, %xmm10
+ pand %xmm1, %xmm10
+ pxor %xmm9, %xmm10
+ pxor %xmm0, %xmm9
+ pand %xmm1, %xmm9
+ pxor %xmm0, %xmm9
+ movdqa %xmm8, %xmm0
+ pshufd $0x4e, %xmm10, %xmm10
+ punpcklqdq %xmm10, %xmm8
+ punpckhqdq %xmm0, %xmm10
+ movdqa %xmm11, %xmm0
+ pshufd $0x4e, %xmm9, %xmm9
+ punpcklqdq %xmm9, %xmm11
+ punpckhqdq %xmm0, %xmm9
+
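+	/* shuffle 2nd block into %xmm12-%xmm15 */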
+ movdqa 64(%rdi), %xmm12
+ movdqa 80(%rdi), %xmm15
+ movdqa 96(%rdi), %xmm14
+ movdqa 112(%rdi), %xmm13
+ movdqa %xmm12, %xmm0
+ pxor %xmm15, %xmm12
+ pand %xmm1, %xmm12
+ pxor %xmm15, %xmm12
+ pxor %xmm14, %xmm15
+ pand %xmm1, %xmm15
+ pxor %xmm14, %xmm15
+ pxor %xmm13, %xmm14
+ pand %xmm1, %xmm14
+ pxor %xmm13, %xmm14
+ pxor %xmm0, %xmm13
+ pand %xmm1, %xmm13
+ pxor %xmm0, %xmm13
+ movdqa %xmm12, %xmm0
+ pshufd $0x4e, %xmm14, %xmm14
+ punpcklqdq %xmm14, %xmm12
+ punpckhqdq %xmm0, %xmm14
+ movdqa %xmm15, %xmm0
+ pshufd $0x4e, %xmm13, %xmm13
+ punpcklqdq %xmm13, %xmm15
+ punpckhqdq %xmm0, %xmm13
+
movq %rsi, %rdx
leaq 131072(%rsi), %rcx
-xmm_scrypt_core_loop1:
+scrypt_core_xmm_loop1:
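+	/* first loop: the V entry is written with its first half already xored
+	 * with the second half, which is the form the second loop consumes */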
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
movdqa %xmm8, 0(%rdx)
movdqa %xmm9, 16(%rdx)
movdqa %xmm10, 32(%rdx)
movdqa %xmm13, 80(%rdx)
movdqa %xmm14, 96(%rdx)
movdqa %xmm15, 112(%rdx)
-
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
+
movdqa %xmm8, %xmm0
movdqa %xmm9, %xmm1
movdqa %xmm10, %xmm2
movdqa %xmm11, %xmm3
- xmm_salsa8_core
+ salsa8_core_xmm
paddd %xmm0, %xmm8
paddd %xmm1, %xmm9
paddd %xmm2, %xmm10
paddd %xmm3, %xmm11
-
+
pxor %xmm8, %xmm12
pxor %xmm9, %xmm13
pxor %xmm10, %xmm14
movdqa %xmm13, %xmm1
movdqa %xmm14, %xmm2
movdqa %xmm15, %xmm3
- xmm_salsa8_core
+ salsa8_core_xmm
paddd %xmm0, %xmm12
paddd %xmm1, %xmm13
paddd %xmm2, %xmm14
paddd %xmm3, %xmm15
-
+
addq $128, %rdx
cmpq %rcx, %rdx
- jne xmm_scrypt_core_loop1
-
+ jne scrypt_core_xmm_loop1
+
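+	/* second loop: j = X[16] & (N-1) selects the scratchpad block folded
+	 * back into X */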
movq $1024, %rcx
-xmm_scrypt_core_loop2:
+scrypt_core_xmm_loop2:
movd %xmm12, %edx
andl $1023, %edx
shll $7, %edx
- movdqa 0(%rsi, %rdx), %xmm0
- movdqa 16(%rsi, %rdx), %xmm1
- movdqa 32(%rsi, %rdx), %xmm2
- movdqa 48(%rsi, %rdx), %xmm3
- movdqa 64(%rsi, %rdx), %xmm4
- movdqa 80(%rsi, %rdx), %xmm5
- movdqa 96(%rsi, %rdx), %xmm6
- movdqa 112(%rsi, %rdx), %xmm7
- pxor %xmm0, %xmm8
- pxor %xmm1, %xmm9
- pxor %xmm2, %xmm10
- pxor %xmm3, %xmm11
- pxor %xmm4, %xmm12
- pxor %xmm5, %xmm13
- pxor %xmm6, %xmm14
- pxor %xmm7, %xmm15
-
+ pxor 0(%rsi, %rdx), %xmm8
+ pxor 16(%rsi, %rdx), %xmm9
+ pxor 32(%rsi, %rdx), %xmm10
+ pxor 48(%rsi, %rdx), %xmm11
+
pxor %xmm12, %xmm8
pxor %xmm13, %xmm9
pxor %xmm14, %xmm10
movdqa %xmm9, %xmm1
movdqa %xmm10, %xmm2
movdqa %xmm11, %xmm3
- xmm_salsa8_core
+ salsa8_core_xmm
paddd %xmm0, %xmm8
paddd %xmm1, %xmm9
paddd %xmm2, %xmm10
paddd %xmm3, %xmm11
-
+
+ pxor 64(%rsi, %rdx), %xmm12
+ pxor 80(%rsi, %rdx), %xmm13
+ pxor 96(%rsi, %rdx), %xmm14
+ pxor 112(%rsi, %rdx), %xmm15
pxor %xmm8, %xmm12
pxor %xmm9, %xmm13
pxor %xmm10, %xmm14
movdqa %xmm13, %xmm1
movdqa %xmm14, %xmm2
movdqa %xmm15, %xmm3
- xmm_salsa8_core
+ salsa8_core_xmm
paddd %xmm0, %xmm12
paddd %xmm1, %xmm13
paddd %xmm2, %xmm14
paddd %xmm3, %xmm15
-
- subq $1, %rcx
- ja xmm_scrypt_core_loop2
-
- # re-shuffle 1st block back
- movd %xmm8, %eax
- movd %xmm9, %edx
- movd %xmm10, %ecx
- movd %xmm11, %ebx
- pshufd $0x39, %xmm8, %xmm8
- pshufd $0x39, %xmm9, %xmm9
- pshufd $0x39, %xmm10, %xmm10
- pshufd $0x39, %xmm11, %xmm11
- movl %eax, 0(%rdi)
- movl %edx, 48(%rdi)
- movl %ecx, 32(%rdi)
- movl %ebx, 16(%rdi)
- movd %xmm8, %ebx
- movd %xmm9, %eax
- movd %xmm10, %edx
- movd %xmm11, %ecx
- pshufd $0x39, %xmm8, %xmm8
- pshufd $0x39, %xmm9, %xmm9
- pshufd $0x39, %xmm10, %xmm10
- pshufd $0x39, %xmm11, %xmm11
- movl %ebx, 20(%rdi)
- movl %eax, 4(%rdi)
- movl %edx, 52(%rdi)
- movl %ecx, 36(%rdi)
- movd %xmm8, %ecx
- movd %xmm9, %ebx
- movd %xmm10, %eax
- movd %xmm11, %edx
- pshufd $0x39, %xmm8, %xmm8
- pshufd $0x39, %xmm9, %xmm9
- pshufd $0x39, %xmm10, %xmm10
- pshufd $0x39, %xmm11, %xmm11
- movl %ecx, 40(%rdi)
- movl %ebx, 24(%rdi)
- movl %eax, 8(%rdi)
- movl %edx, 56(%rdi)
- movd %xmm8, %edx
- movd %xmm9, %ecx
- movd %xmm10, %ebx
- movd %xmm11, %eax
- movl %edx, 60(%rdi)
- movl %ecx, 44(%rdi)
- movl %ebx, 28(%rdi)
- movl %eax, 12(%rdi)
-
- # re-shuffle 2nd block back
- movd %xmm12, %eax
- movd %xmm13, %edx
- movd %xmm14, %ecx
- movd %xmm15, %ebx
- pshufd $0x39, %xmm12, %xmm12
- pshufd $0x39, %xmm13, %xmm13
- pshufd $0x39, %xmm14, %xmm14
- pshufd $0x39, %xmm15, %xmm15
- movl %eax, 64(%rdi)
- movl %edx, 112(%rdi)
- movl %ecx, 96(%rdi)
- movl %ebx, 80(%rdi)
- movd %xmm12, %ebx
- movd %xmm13, %eax
- movd %xmm14, %edx
- movd %xmm15, %ecx
- pshufd $0x39, %xmm12, %xmm12
- pshufd $0x39, %xmm13, %xmm13
- pshufd $0x39, %xmm14, %xmm14
- pshufd $0x39, %xmm15, %xmm15
- movl %ebx, 84(%rdi)
- movl %eax, 68(%rdi)
- movl %edx, 116(%rdi)
- movl %ecx, 100(%rdi)
- movd %xmm12, %ecx
- movd %xmm13, %ebx
- movd %xmm14, %eax
- movd %xmm15, %edx
- pshufd $0x39, %xmm12, %xmm12
- pshufd $0x39, %xmm13, %xmm13
- pshufd $0x39, %xmm14, %xmm14
- pshufd $0x39, %xmm15, %xmm15
- movl %ecx, 104(%rdi)
- movl %ebx, 88(%rdi)
- movl %eax, 72(%rdi)
- movl %edx, 120(%rdi)
- movd %xmm12, %edx
- movd %xmm13, %ecx
- movd %xmm14, %ebx
- movd %xmm15, %eax
- movl %edx, 124(%rdi)
- movl %ecx, 108(%rdi)
- movl %ebx, 92(%rdi)
- movl %eax, 76(%rdi)
-
- scrypt_core_cleanup
- ret
-
-
- .text
- .align 32
- .globl scrypt_best_throughput
- .globl _scrypt_best_throughput
-scrypt_best_throughput:
-_scrypt_best_throughput:
- pushq %rbx
- xorq %rax, %rax
- cpuid
- movl $3, %eax
- cmpl $0x444d4163, %ecx
- jne scrypt_best_throughput_exit
- cmpl $0x69746e65, %edx
- jne scrypt_best_throughput_exit
- cmpl $0x68747541, %ebx
- jne scrypt_best_throughput_exit
- movl $1, %eax
- cpuid
- andl $0x0ff00000, %eax
- movl $3, %eax
- jnz scrypt_best_throughput_exit
- movl $1, %eax
-scrypt_best_throughput_exit:
- popq %rbx
- ret
-
-
-.macro xmm_salsa8_core_2way_doubleround
- movdqa %xmm1, %xmm4
- movdqa %xmm9, %xmm6
- paddd %xmm0, %xmm4
- paddd %xmm8, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $7, %xmm4
- pslld $7, %xmm6
- psrld $25, %xmm5
- psrld $25, %xmm7
- pxor %xmm4, %xmm3
- pxor %xmm6, %xmm11
- pxor %xmm5, %xmm3
- pxor %xmm7, %xmm11
- movdqa %xmm0, %xmm4
- movdqa %xmm8, %xmm6
-
- paddd %xmm3, %xmm4
- paddd %xmm11, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $9, %xmm4
- pslld $9, %xmm6
- psrld $23, %xmm5
- psrld $23, %xmm7
- pxor %xmm4, %xmm2
- pxor %xmm6, %xmm10
- movdqa %xmm3, %xmm4
- movdqa %xmm11, %xmm6
- pshufd $0x93, %xmm3, %xmm3
- pshufd $0x93, %xmm11, %xmm11
- pxor %xmm5, %xmm2
- pxor %xmm7, %xmm10
-
- paddd %xmm2, %xmm4
- paddd %xmm10, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $13, %xmm4
- pslld $13, %xmm6
- psrld $19, %xmm5
- psrld $19, %xmm7
- pxor %xmm4, %xmm1
- pxor %xmm6, %xmm9
- movdqa %xmm2, %xmm4
- movdqa %xmm10, %xmm6
- pshufd $0x4e, %xmm2, %xmm2
- pshufd $0x4e, %xmm10, %xmm10
- pxor %xmm5, %xmm1
- pxor %xmm7, %xmm9
-
- paddd %xmm1, %xmm4
- paddd %xmm9, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $18, %xmm4
- pslld $18, %xmm6
- psrld $14, %xmm5
- psrld $14, %xmm7
- pxor %xmm4, %xmm0
- pxor %xmm6, %xmm8
- pshufd $0x39, %xmm1, %xmm1
- pshufd $0x39, %xmm9, %xmm9
- pxor %xmm5, %xmm0
- pxor %xmm7, %xmm8
- movdqa %xmm3, %xmm4
- movdqa %xmm11, %xmm6
-
- paddd %xmm0, %xmm4
- paddd %xmm8, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $7, %xmm4
- pslld $7, %xmm6
- psrld $25, %xmm5
- psrld $25, %xmm7
- pxor %xmm4, %xmm1
- pxor %xmm6, %xmm9
- pxor %xmm5, %xmm1
- pxor %xmm7, %xmm9
- movdqa %xmm0, %xmm4
- movdqa %xmm8, %xmm6
-
- paddd %xmm1, %xmm4
- paddd %xmm9, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $9, %xmm4
- pslld $9, %xmm6
- psrld $23, %xmm5
- psrld $23, %xmm7
- pxor %xmm4, %xmm2
- pxor %xmm6, %xmm10
- movdqa %xmm1, %xmm4
- movdqa %xmm9, %xmm6
- pshufd $0x93, %xmm1, %xmm1
- pshufd $0x93, %xmm9, %xmm9
- pxor %xmm5, %xmm2
- pxor %xmm7, %xmm10
-
- paddd %xmm2, %xmm4
- paddd %xmm10, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $13, %xmm4
- pslld $13, %xmm6
- psrld $19, %xmm5
- psrld $19, %xmm7
- pxor %xmm4, %xmm3
- pxor %xmm6, %xmm11
- movdqa %xmm2, %xmm4
- movdqa %xmm10, %xmm6
- pshufd $0x4e, %xmm2, %xmm2
- pshufd $0x4e, %xmm10, %xmm10
- pxor %xmm5, %xmm3
- pxor %xmm7, %xmm11
-
- paddd %xmm3, %xmm4
- paddd %xmm11, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $18, %xmm4
- pslld $18, %xmm6
- psrld $14, %xmm5
- psrld $14, %xmm7
- pxor %xmm4, %xmm0
- pxor %xmm6, %xmm8
- pshufd $0x39, %xmm3, %xmm3
- pshufd $0x39, %xmm11, %xmm11
- pxor %xmm5, %xmm0
- pxor %xmm7, %xmm8
-.endm
-
-.macro xmm_salsa8_core_2way
- xmm_salsa8_core_2way_doubleround
- xmm_salsa8_core_2way_doubleround
- xmm_salsa8_core_2way_doubleround
- xmm_salsa8_core_2way_doubleround
-.endm
-
-
- .text
- .align 32
- .globl scrypt_core_2way
- .globl _scrypt_core_2way
-scrypt_core_2way:
-_scrypt_core_2way:
- pushq %rbx
- pushq %rbp
-#if defined(WIN64)
- subq $176, %rsp
- movdqa %xmm6, 8(%rsp)
- movdqa %xmm7, 24(%rsp)
- movdqa %xmm8, 40(%rsp)
- movdqa %xmm9, 56(%rsp)
- movdqa %xmm10, 72(%rsp)
- movdqa %xmm11, 88(%rsp)
- movdqa %xmm12, 104(%rsp)
- movdqa %xmm13, 120(%rsp)
- movdqa %xmm14, 136(%rsp)
- movdqa %xmm15, 152(%rsp)
- pushq %rdi
- pushq %rsi
- movq %rcx, %rdi
- movq %rdx, %rsi
- movq %r8, %rdx
-#endif
- subq $264, %rsp
-
- scrypt_shuffle %rdi, 0, %rsp, 0
- scrypt_shuffle %rdi, 64, %rsp, 64
- scrypt_shuffle %rsi, 0, %rsp, 128
- scrypt_shuffle %rsi, 64, %rsp, 192
-
- movdqa 192(%rsp), %xmm12
- movdqa 208(%rsp), %xmm13
- movdqa 224(%rsp), %xmm14
- movdqa 240(%rsp), %xmm15
-
- movq %rdx, %rbp
- leaq 262144(%rdx), %rcx
-scrypt_core_2way_loop1:
- movdqa 0(%rsp), %xmm0
- movdqa 16(%rsp), %xmm1
- movdqa 32(%rsp), %xmm2
- movdqa 48(%rsp), %xmm3
- movdqa 64(%rsp), %xmm4
- movdqa 80(%rsp), %xmm5
- movdqa 96(%rsp), %xmm6
- movdqa 112(%rsp), %xmm7
- movdqa 128(%rsp), %xmm8
- movdqa 144(%rsp), %xmm9
- movdqa 160(%rsp), %xmm10
- movdqa 176(%rsp), %xmm11
- pxor %xmm4, %xmm0
- pxor %xmm5, %xmm1
- pxor %xmm6, %xmm2
- pxor %xmm7, %xmm3
- movdqa %xmm0, 0(%rbp)
- movdqa %xmm1, 16(%rbp)
- movdqa %xmm2, 32(%rbp)
- movdqa %xmm3, 48(%rbp)
- movdqa %xmm4, 64(%rbp)
- movdqa %xmm5, 80(%rbp)
- movdqa %xmm6, 96(%rbp)
- movdqa %xmm7, 112(%rbp)
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm8, 128(%rbp)
- movdqa %xmm9, 144(%rbp)
- movdqa %xmm10, 160(%rbp)
- movdqa %xmm11, 176(%rbp)
- movdqa %xmm12, 192(%rbp)
- movdqa %xmm13, 208(%rbp)
- movdqa %xmm14, 224(%rbp)
- movdqa %xmm15, 240(%rbp)
-
- xmm_salsa8_core_2way
- paddd 0(%rbp), %xmm0
- paddd 16(%rbp), %xmm1
- paddd 32(%rbp), %xmm2
- paddd 48(%rbp), %xmm3
- paddd 128(%rbp), %xmm8
- paddd 144(%rbp), %xmm9
- paddd 160(%rbp), %xmm10
- paddd 176(%rbp), %xmm11
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128(%rsp)
- movdqa %xmm9, 144(%rsp)
- movdqa %xmm10, 160(%rsp)
- movdqa %xmm11, 176(%rsp)
-
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, %xmm12
- movdqa %xmm9, %xmm13
- movdqa %xmm10, %xmm14
- movdqa %xmm11, %xmm15
- xmm_salsa8_core_2way
- paddd 64(%rsp), %xmm0
- paddd 80(%rsp), %xmm1
- paddd 96(%rsp), %xmm2
- paddd 112(%rsp), %xmm3
- paddd %xmm8, %xmm12
- paddd %xmm9, %xmm13
- paddd %xmm10, %xmm14
- paddd %xmm11, %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
-
- addq $256, %rbp
- cmpq %rcx, %rbp
- jne scrypt_core_2way_loop1
-
- movq $1024, %rcx
-scrypt_core_2way_loop2:
- movdqa 0(%rsp), %xmm0
- movdqa 16(%rsp), %xmm1
- movdqa 32(%rsp), %xmm2
- movdqa 48(%rsp), %xmm3
- movdqa 64(%rsp), %xmm4
- movdqa 80(%rsp), %xmm5
- movdqa 96(%rsp), %xmm6
- movdqa 112(%rsp), %xmm7
- movdqa 128(%rsp), %xmm8
- movdqa 144(%rsp), %xmm9
- movdqa 160(%rsp), %xmm10
- movdqa 176(%rsp), %xmm11
- movd %xmm4, %ebp
- andl $1023, %ebp
- shll $8, %ebp
- pxor 0(%rdx, %rbp), %xmm0
- pxor 16(%rdx, %rbp), %xmm1
- pxor 32(%rdx, %rbp), %xmm2
- pxor 48(%rdx, %rbp), %xmm3
- movd %xmm12, %ebx
- andl $1023, %ebx
- shll $8, %ebx
- addl $128, %ebx
- pxor 0(%rdx, %rbx), %xmm8
- pxor 16(%rdx, %rbx), %xmm9
- pxor 32(%rdx, %rbx), %xmm10
- pxor 48(%rdx, %rbx), %xmm11
-
- pxor %xmm4, %xmm0
- pxor %xmm5, %xmm1
- pxor %xmm6, %xmm2
- pxor %xmm7, %xmm3
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128(%rsp)
- movdqa %xmm9, 144(%rsp)
- movdqa %xmm10, 160(%rsp)
- movdqa %xmm11, 176(%rsp)
- xmm_salsa8_core_2way
- paddd 0(%rsp), %xmm0
- paddd 16(%rsp), %xmm1
- paddd 32(%rsp), %xmm2
- paddd 48(%rsp), %xmm3
- paddd 128(%rsp), %xmm8
- paddd 144(%rsp), %xmm9
- paddd 160(%rsp), %xmm10
- paddd 176(%rsp), %xmm11
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128(%rsp)
- movdqa %xmm9, 144(%rsp)
- movdqa %xmm10, 160(%rsp)
- movdqa %xmm11, 176(%rsp)
-
- pxor 64(%rdx, %rbp), %xmm0
- pxor 80(%rdx, %rbp), %xmm1
- pxor 96(%rdx, %rbp), %xmm2
- pxor 112(%rdx, %rbp), %xmm3
- pxor 64(%rdx, %rbx), %xmm8
- pxor 80(%rdx, %rbx), %xmm9
- pxor 96(%rdx, %rbx), %xmm10
- pxor 112(%rdx, %rbx), %xmm11
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, %xmm12
- movdqa %xmm9, %xmm13
- movdqa %xmm10, %xmm14
- movdqa %xmm11, %xmm15
- xmm_salsa8_core_2way
- paddd 64(%rsp), %xmm0
- paddd 80(%rsp), %xmm1
- paddd 96(%rsp), %xmm2
- paddd 112(%rsp), %xmm3
- paddd %xmm8, %xmm12
- paddd %xmm9, %xmm13
- paddd %xmm10, %xmm14
- paddd %xmm11, %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
-
+
subq $1, %rcx
- ja scrypt_core_2way_loop2
-
- movdqa %xmm12, 192(%rsp)
- movdqa %xmm13, 208(%rsp)
- movdqa %xmm14, 224(%rsp)
- movdqa %xmm15, 240(%rsp)
-
- scrypt_shuffle %rsp, 0, %rdi, 0
- scrypt_shuffle %rsp, 64, %rdi, 64
- scrypt_shuffle %rsp, 128, %rsi, 0
- scrypt_shuffle %rsp, 192, %rsi, 64
-
- addq $264, %rsp
-#if defined(WIN64)
- popq %rsi
- popq %rdi
- movdqa 8(%rsp), %xmm6
- movdqa 24(%rsp), %xmm7
- movdqa 40(%rsp), %xmm8
- movdqa 56(%rsp), %xmm9
- movdqa 72(%rsp), %xmm10
- movdqa 88(%rsp), %xmm11
- movdqa 104(%rsp), %xmm12
- movdqa 120(%rsp), %xmm13
- movdqa 136(%rsp), %xmm14
- movdqa 152(%rsp), %xmm15
- addq $176, %rsp
-#endif
- popq %rbp
- popq %rbx
- ret
-
-
-.macro xmm_salsa8_core_3way_doubleround
- movdqa %xmm1, %xmm4
- movdqa %xmm9, %xmm6
- movdqa %xmm13, %xmm7
- paddd %xmm0, %xmm4
- paddd %xmm8, %xmm6
- paddd %xmm12, %xmm7
- movdqa %xmm4, %xmm5
- pslld $7, %xmm4
- psrld $25, %xmm5
- pxor %xmm4, %xmm3
- pxor %xmm5, %xmm3
- movdqa %xmm0, %xmm4
- movdqa %xmm6, %xmm5
- pslld $7, %xmm6
- psrld $25, %xmm5
- pxor %xmm6, %xmm11
- pxor %xmm5, %xmm11
- movdqa %xmm8, %xmm6
- movdqa %xmm7, %xmm5
- pslld $7, %xmm7
- psrld $25, %xmm5
- pxor %xmm7, %xmm15
- pxor %xmm5, %xmm15
- movdqa %xmm12, %xmm7
-
- paddd %xmm3, %xmm4
- paddd %xmm11, %xmm6
- paddd %xmm15, %xmm7
- movdqa %xmm4, %xmm5
- pslld $9, %xmm4
- psrld $23, %xmm5
- pxor %xmm4, %xmm2
- movdqa %xmm3, %xmm4
- pshufd $0x93, %xmm3, %xmm3
- pxor %xmm5, %xmm2
- movdqa %xmm6, %xmm5
- pslld $9, %xmm6
- psrld $23, %xmm5
- pxor %xmm6, %xmm10
- movdqa %xmm11, %xmm6
- pshufd $0x93, %xmm11, %xmm11
- pxor %xmm5, %xmm10
- movdqa %xmm7, %xmm5
- pslld $9, %xmm7
- psrld $23, %xmm5
- pxor %xmm7, %xmm14
- movdqa %xmm15, %xmm7
- pshufd $0x93, %xmm15, %xmm15
- pxor %xmm5, %xmm14
-
- paddd %xmm2, %xmm4
- paddd %xmm10, %xmm6
- paddd %xmm14, %xmm7
- movdqa %xmm4, %xmm5
- pslld $13, %xmm4
- psrld $19, %xmm5
- pxor %xmm4, %xmm1
- movdqa %xmm2, %xmm4
- pshufd $0x4e, %xmm2, %xmm2
- pxor %xmm5, %xmm1
- movdqa %xmm6, %xmm5
- pslld $13, %xmm6
- psrld $19, %xmm5
- pxor %xmm6, %xmm9
- movdqa %xmm10, %xmm6
- pshufd $0x4e, %xmm10, %xmm10
- pxor %xmm5, %xmm9
- movdqa %xmm7, %xmm5
- pslld $13, %xmm7
- psrld $19, %xmm5
- pxor %xmm7, %xmm13
- movdqa %xmm14, %xmm7
- pshufd $0x4e, %xmm14, %xmm14
- pxor %xmm5, %xmm13
-
- paddd %xmm1, %xmm4
- paddd %xmm9, %xmm6
- paddd %xmm13, %xmm7
- movdqa %xmm4, %xmm5
- pslld $18, %xmm4
- psrld $14, %xmm5
- pxor %xmm4, %xmm0
- pshufd $0x39, %xmm1, %xmm1
- pxor %xmm5, %xmm0
- movdqa %xmm3, %xmm4
- movdqa %xmm6, %xmm5
- pslld $18, %xmm6
- psrld $14, %xmm5
- pxor %xmm6, %xmm8
- pshufd $0x39, %xmm9, %xmm9
- pxor %xmm5, %xmm8
- movdqa %xmm11, %xmm6
- movdqa %xmm7, %xmm5
- pslld $18, %xmm7
- psrld $14, %xmm5
- pxor %xmm7, %xmm12
- pshufd $0x39, %xmm13, %xmm13
- pxor %xmm5, %xmm12
- movdqa %xmm15, %xmm7
-
- paddd %xmm0, %xmm4
- paddd %xmm8, %xmm6
- paddd %xmm12, %xmm7
- movdqa %xmm4, %xmm5
- pslld $7, %xmm4
- psrld $25, %xmm5
- pxor %xmm4, %xmm1
- pxor %xmm5, %xmm1
- movdqa %xmm0, %xmm4
- movdqa %xmm6, %xmm5
- pslld $7, %xmm6
- psrld $25, %xmm5
- pxor %xmm6, %xmm9
- pxor %xmm5, %xmm9
- movdqa %xmm8, %xmm6
- movdqa %xmm7, %xmm5
- pslld $7, %xmm7
- psrld $25, %xmm5
- pxor %xmm7, %xmm13
- pxor %xmm5, %xmm13
- movdqa %xmm12, %xmm7
-
- paddd %xmm1, %xmm4
- paddd %xmm9, %xmm6
- paddd %xmm13, %xmm7
- movdqa %xmm4, %xmm5
- pslld $9, %xmm4
- psrld $23, %xmm5
- pxor %xmm4, %xmm2
- movdqa %xmm1, %xmm4
- pshufd $0x93, %xmm1, %xmm1
- pxor %xmm5, %xmm2
- movdqa %xmm6, %xmm5
- pslld $9, %xmm6
- psrld $23, %xmm5
- pxor %xmm6, %xmm10
- movdqa %xmm9, %xmm6
- pshufd $0x93, %xmm9, %xmm9
- pxor %xmm5, %xmm10
- movdqa %xmm7, %xmm5
- pslld $9, %xmm7
- psrld $23, %xmm5
- pxor %xmm7, %xmm14
- movdqa %xmm13, %xmm7
- pshufd $0x93, %xmm13, %xmm13
- pxor %xmm5, %xmm14
-
- paddd %xmm2, %xmm4
- paddd %xmm10, %xmm6
- paddd %xmm14, %xmm7
- movdqa %xmm4, %xmm5
- pslld $13, %xmm4
- psrld $19, %xmm5
- pxor %xmm4, %xmm3
- movdqa %xmm2, %xmm4
- pshufd $0x4e, %xmm2, %xmm2
- pxor %xmm5, %xmm3
- movdqa %xmm6, %xmm5
- pslld $13, %xmm6
- psrld $19, %xmm5
- pxor %xmm6, %xmm11
- movdqa %xmm10, %xmm6
+ ja scrypt_core_xmm_loop2
+
+ pcmpeqw %xmm1, %xmm1
+ psrlq $32, %xmm1
+
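+	/* re-shuffle 1st block back */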
+ movdqa %xmm8, %xmm0
+ pxor %xmm9, %xmm8
+ pand %xmm1, %xmm8
+ pxor %xmm9, %xmm8
+ pxor %xmm10, %xmm9
+ pand %xmm1, %xmm9
+ pxor %xmm10, %xmm9
+ pxor %xmm11, %xmm10
+ pand %xmm1, %xmm10
+ pxor %xmm11, %xmm10
+ pxor %xmm0, %xmm11
+ pand %xmm1, %xmm11
+ pxor %xmm0, %xmm11
+ movdqa %xmm8, %xmm0
pshufd $0x4e, %xmm10, %xmm10
- pxor %xmm5, %xmm11
- movdqa %xmm7, %xmm5
- pslld $13, %xmm7
- psrld $19, %xmm5
- pxor %xmm7, %xmm15
- movdqa %xmm14, %xmm7
+ punpcklqdq %xmm10, %xmm8
+ punpckhqdq %xmm0, %xmm10
+ movdqa %xmm9, %xmm0
+ pshufd $0x4e, %xmm11, %xmm11
+ punpcklqdq %xmm11, %xmm9
+ punpckhqdq %xmm0, %xmm11
+ movdqa %xmm8, 0(%rdi)
+ movdqa %xmm11, 16(%rdi)
+ movdqa %xmm10, 32(%rdi)
+ movdqa %xmm9, 48(%rdi)
+
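+	/* re-shuffle 2nd block back */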
+ movdqa %xmm12, %xmm0
+ pxor %xmm13, %xmm12
+ pand %xmm1, %xmm12
+ pxor %xmm13, %xmm12
+ pxor %xmm14, %xmm13
+ pand %xmm1, %xmm13
+ pxor %xmm14, %xmm13
+ pxor %xmm15, %xmm14
+ pand %xmm1, %xmm14
+ pxor %xmm15, %xmm14
+ pxor %xmm0, %xmm15
+ pand %xmm1, %xmm15
+ pxor %xmm0, %xmm15
+ movdqa %xmm12, %xmm0
pshufd $0x4e, %xmm14, %xmm14
- pxor %xmm5, %xmm15
-
- paddd %xmm3, %xmm4
- paddd %xmm11, %xmm6
- paddd %xmm15, %xmm7
- movdqa %xmm4, %xmm5
- pslld $18, %xmm4
- psrld $14, %xmm5
- pxor %xmm4, %xmm0
- pshufd $0x39, %xmm3, %xmm3
- pxor %xmm5, %xmm0
- movdqa %xmm6, %xmm5
- pslld $18, %xmm6
- psrld $14, %xmm5
- pxor %xmm6, %xmm8
- pshufd $0x39, %xmm11, %xmm11
- pxor %xmm5, %xmm8
- movdqa %xmm7, %xmm5
- pslld $18, %xmm7
- psrld $14, %xmm5
- pxor %xmm7, %xmm12
- pshufd $0x39, %xmm15, %xmm15
- pxor %xmm5, %xmm12
-.endm
-
-.macro xmm_salsa8_core_3way
- xmm_salsa8_core_3way_doubleround
- xmm_salsa8_core_3way_doubleround
- xmm_salsa8_core_3way_doubleround
- xmm_salsa8_core_3way_doubleround
-.endm
-
- .text
- .align 32
- .globl scrypt_core_3way
- .globl _scrypt_core_3way
-scrypt_core_3way:
-_scrypt_core_3way:
- pushq %rbx
- pushq %rbp
-#if defined(WIN64)
- subq $176, %rsp
- movdqa %xmm6, 8(%rsp)
- movdqa %xmm7, 24(%rsp)
- movdqa %xmm8, 40(%rsp)
- movdqa %xmm9, 56(%rsp)
- movdqa %xmm10, 72(%rsp)
- movdqa %xmm11, 88(%rsp)
- movdqa %xmm12, 104(%rsp)
- movdqa %xmm13, 120(%rsp)
- movdqa %xmm14, 136(%rsp)
- movdqa %xmm15, 152(%rsp)
- pushq %rdi
- pushq %rsi
- movq %rcx, %rdi
- movq %rdx, %rsi
- movq %r8, %rdx
- movq %r9, %rcx
-#endif
- subq $392, %rsp
-
- scrypt_shuffle %rdi, 0, %rsp, 0
- scrypt_shuffle %rdi, 64, %rsp, 64
- scrypt_shuffle %rsi, 0, %rsp, 128
- scrypt_shuffle %rsi, 64, %rsp, 192
- scrypt_shuffle %rdx, 0, %rsp, 256
- scrypt_shuffle %rdx, 64, %rsp, 320
-
- movdqa 128+64(%rsp), %xmm8
- movdqa 128+80(%rsp), %xmm9
- movdqa 128+96(%rsp), %xmm10
- movdqa 128+112(%rsp), %xmm11
-
- movq %rcx, %rbp
- leaq 3*131072(%rcx), %rax
-scrypt_core_3way_loop1:
- movdqa %xmm8, %xmm12
- movdqa %xmm9, %xmm13
- movdqa %xmm10, %xmm14
- movdqa %xmm11, %xmm15
- movdqa 0(%rsp), %xmm0
- movdqa 16(%rsp), %xmm1
- movdqa 32(%rsp), %xmm2
- movdqa 48(%rsp), %xmm3
- movdqa 64(%rsp), %xmm4
- movdqa 80(%rsp), %xmm5
- movdqa 96(%rsp), %xmm6
- movdqa 112(%rsp), %xmm7
- movdqa 128+0(%rsp), %xmm8
- movdqa 128+16(%rsp), %xmm9
- movdqa 128+32(%rsp), %xmm10
- movdqa 128+48(%rsp), %xmm11
- pxor %xmm4, %xmm0
- pxor %xmm5, %xmm1
- pxor %xmm6, %xmm2
- pxor %xmm7, %xmm3
- movdqa %xmm0, 0(%rbp)
- movdqa %xmm1, 16(%rbp)
- movdqa %xmm2, 32(%rbp)
- movdqa %xmm3, 48(%rbp)
- movdqa %xmm4, 64(%rbp)
- movdqa %xmm5, 80(%rbp)
- movdqa %xmm6, 96(%rbp)
- movdqa %xmm7, 112(%rbp)
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm8, 128+0(%rbp)
- movdqa %xmm9, 128+16(%rbp)
- movdqa %xmm10, 128+32(%rbp)
- movdqa %xmm11, 128+48(%rbp)
- movdqa %xmm12, 128+64(%rbp)
- movdqa %xmm13, 128+80(%rbp)
- movdqa %xmm14, 128+96(%rbp)
- movdqa %xmm15, 128+112(%rbp)
- movdqa 256+0(%rsp), %xmm12
- movdqa 256+16(%rsp), %xmm13
- movdqa 256+32(%rsp), %xmm14
- movdqa 256+48(%rsp), %xmm15
- movdqa 256+64(%rsp), %xmm4
- movdqa 256+80(%rsp), %xmm5
- movdqa 256+96(%rsp), %xmm6
- movdqa 256+112(%rsp), %xmm7
- pxor %xmm4, %xmm12
- pxor %xmm5, %xmm13
- pxor %xmm6, %xmm14
- pxor %xmm7, %xmm15
- movdqa %xmm12, 256+0(%rbp)
- movdqa %xmm13, 256+16(%rbp)
- movdqa %xmm14, 256+32(%rbp)
- movdqa %xmm15, 256+48(%rbp)
- movdqa %xmm4, 256+64(%rbp)
- movdqa %xmm5, 256+80(%rbp)
- movdqa %xmm6, 256+96(%rbp)
- movdqa %xmm7, 256+112(%rbp)
-
- xmm_salsa8_core_3way
- paddd 0(%rbp), %xmm0
- paddd 16(%rbp), %xmm1
- paddd 32(%rbp), %xmm2
- paddd 48(%rbp), %xmm3
- paddd 128+0(%rbp), %xmm8
- paddd 128+16(%rbp), %xmm9
- paddd 128+32(%rbp), %xmm10
- paddd 128+48(%rbp), %xmm11
- paddd 256+0(%rbp), %xmm12
- paddd 256+16(%rbp), %xmm13
- paddd 256+32(%rbp), %xmm14
- paddd 256+48(%rbp), %xmm15
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128+0(%rsp)
- movdqa %xmm9, 128+16(%rsp)
- movdqa %xmm10, 128+32(%rsp)
- movdqa %xmm11, 128+48(%rsp)
- movdqa %xmm12, 256+0(%rsp)
- movdqa %xmm13, 256+16(%rsp)
- movdqa %xmm14, 256+32(%rsp)
- movdqa %xmm15, 256+48(%rsp)
-
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor 128+64(%rsp), %xmm8
- pxor 128+80(%rsp), %xmm9
- pxor 128+96(%rsp), %xmm10
- pxor 128+112(%rsp), %xmm11
- pxor 256+64(%rsp), %xmm12
- pxor 256+80(%rsp), %xmm13
- pxor 256+96(%rsp), %xmm14
- pxor 256+112(%rsp), %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, 128+64(%rsp)
- movdqa %xmm9, 128+80(%rsp)
- movdqa %xmm10, 128+96(%rsp)
- movdqa %xmm11, 128+112(%rsp)
- movdqa %xmm12, 256+64(%rsp)
- movdqa %xmm13, 256+80(%rsp)
- movdqa %xmm14, 256+96(%rsp)
- movdqa %xmm15, 256+112(%rsp)
- xmm_salsa8_core_3way
- paddd 64(%rsp), %xmm0
- paddd 80(%rsp), %xmm1
- paddd 96(%rsp), %xmm2
- paddd 112(%rsp), %xmm3
- paddd 128+64(%rsp), %xmm8
- paddd 128+80(%rsp), %xmm9
- paddd 128+96(%rsp), %xmm10
- paddd 128+112(%rsp), %xmm11
- paddd 256+64(%rsp), %xmm12
- paddd 256+80(%rsp), %xmm13
- paddd 256+96(%rsp), %xmm14
- paddd 256+112(%rsp), %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, 128+64(%rsp)
- movdqa %xmm9, 128+80(%rsp)
- movdqa %xmm10, 128+96(%rsp)
- movdqa %xmm11, 128+112(%rsp)
- movdqa %xmm12, 256+64(%rsp)
- movdqa %xmm13, 256+80(%rsp)
- movdqa %xmm14, 256+96(%rsp)
- movdqa %xmm15, 256+112(%rsp)
-
- addq $3*128, %rbp
- cmpq %rax, %rbp
- jne scrypt_core_3way_loop1
-
- movq $1024, %rax
-scrypt_core_3way_loop2:
- movl 64(%rsp), %ebp
- andl $1023, %ebp
- leal (%ebp, %ebp, 2), %ebp
- shll $7, %ebp
- movl 128+64(%rsp), %ebx
- andl $1023, %ebx
- leal (%ebx, %ebx, 2), %ebx
- shll $7, %ebx
- addl $128, %ebx
- movl 256+64(%rsp), %r8d
- andl $1023, %r8d
- leal (%r8d, %r8d, 2), %r8d
- shll $7, %r8d
- addl $256, %r8d
- movdqa 0(%rsp), %xmm0
- movdqa 16(%rsp), %xmm1
- movdqa 32(%rsp), %xmm2
- movdqa 48(%rsp), %xmm3
- movdqa 128+0(%rsp), %xmm8
- movdqa 128+16(%rsp), %xmm9
- movdqa 128+32(%rsp), %xmm10
- movdqa 128+48(%rsp), %xmm11
- movdqa 256+0(%rsp), %xmm12
- movdqa 256+16(%rsp), %xmm13
- movdqa 256+32(%rsp), %xmm14
- movdqa 256+48(%rsp), %xmm15
- pxor 0(%rcx, %rbp), %xmm0
- pxor 16(%rcx, %rbp), %xmm1
- pxor 32(%rcx, %rbp), %xmm2
- pxor 48(%rcx, %rbp), %xmm3
- pxor 0(%rcx, %rbx), %xmm8
- pxor 16(%rcx, %rbx), %xmm9
- pxor 32(%rcx, %rbx), %xmm10
- pxor 48(%rcx, %rbx), %xmm11
- pxor 0(%rcx, %r8), %xmm12
- pxor 16(%rcx, %r8), %xmm13
- pxor 32(%rcx, %r8), %xmm14
- pxor 48(%rcx, %r8), %xmm15
-
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor 128+64(%rsp), %xmm8
- pxor 128+80(%rsp), %xmm9
- pxor 128+96(%rsp), %xmm10
- pxor 128+112(%rsp), %xmm11
- pxor 256+64(%rsp), %xmm12
- pxor 256+80(%rsp), %xmm13
- pxor 256+96(%rsp), %xmm14
- pxor 256+112(%rsp), %xmm15
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128+0(%rsp)
- movdqa %xmm9, 128+16(%rsp)
- movdqa %xmm10, 128+32(%rsp)
- movdqa %xmm11, 128+48(%rsp)
- movdqa %xmm12, 256+0(%rsp)
- movdqa %xmm13, 256+16(%rsp)
- movdqa %xmm14, 256+32(%rsp)
- movdqa %xmm15, 256+48(%rsp)
- xmm_salsa8_core_3way
- paddd 0(%rsp), %xmm0
- paddd 16(%rsp), %xmm1
- paddd 32(%rsp), %xmm2
- paddd 48(%rsp), %xmm3
- paddd 128+0(%rsp), %xmm8
- paddd 128+16(%rsp), %xmm9
- paddd 128+32(%rsp), %xmm10
- paddd 128+48(%rsp), %xmm11
- paddd 256+0(%rsp), %xmm12
- paddd 256+16(%rsp), %xmm13
- paddd 256+32(%rsp), %xmm14
- paddd 256+48(%rsp), %xmm15
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128+0(%rsp)
- movdqa %xmm9, 128+16(%rsp)
- movdqa %xmm10, 128+32(%rsp)
- movdqa %xmm11, 128+48(%rsp)
- movdqa %xmm12, 256+0(%rsp)
- movdqa %xmm13, 256+16(%rsp)
- movdqa %xmm14, 256+32(%rsp)
- movdqa %xmm15, 256+48(%rsp)
-
- pxor 64(%rcx, %rbp), %xmm0
- pxor 80(%rcx, %rbp), %xmm1
- pxor 96(%rcx, %rbp), %xmm2
- pxor 112(%rcx, %rbp), %xmm3
- pxor 64(%rcx, %rbx), %xmm8
- pxor 80(%rcx, %rbx), %xmm9
- pxor 96(%rcx, %rbx), %xmm10
- pxor 112(%rcx, %rbx), %xmm11
- pxor 64(%rcx, %r8), %xmm12
- pxor 80(%rcx, %r8), %xmm13
- pxor 96(%rcx, %r8), %xmm14
- pxor 112(%rcx, %r8), %xmm15
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor 128+64(%rsp), %xmm8
- pxor 128+80(%rsp), %xmm9
- pxor 128+96(%rsp), %xmm10
- pxor 128+112(%rsp), %xmm11
- pxor 256+64(%rsp), %xmm12
- pxor 256+80(%rsp), %xmm13
- pxor 256+96(%rsp), %xmm14
- pxor 256+112(%rsp), %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, 128+64(%rsp)
- movdqa %xmm9, 128+80(%rsp)
- movdqa %xmm10, 128+96(%rsp)
- movdqa %xmm11, 128+112(%rsp)
- movdqa %xmm12, 256+64(%rsp)
- movdqa %xmm13, 256+80(%rsp)
- movdqa %xmm14, 256+96(%rsp)
- movdqa %xmm15, 256+112(%rsp)
- xmm_salsa8_core_3way
- paddd 64(%rsp), %xmm0
- paddd 80(%rsp), %xmm1
- paddd 96(%rsp), %xmm2
- paddd 112(%rsp), %xmm3
- paddd 128+64(%rsp), %xmm8
- paddd 128+80(%rsp), %xmm9
- paddd 128+96(%rsp), %xmm10
- paddd 128+112(%rsp), %xmm11
- paddd 256+64(%rsp), %xmm12
- paddd 256+80(%rsp), %xmm13
- paddd 256+96(%rsp), %xmm14
- paddd 256+112(%rsp), %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, 128+64(%rsp)
- movdqa %xmm9, 128+80(%rsp)
- movdqa %xmm10, 128+96(%rsp)
- movdqa %xmm11, 128+112(%rsp)
- movdqa %xmm12, 256+64(%rsp)
- movdqa %xmm13, 256+80(%rsp)
- movdqa %xmm14, 256+96(%rsp)
- movdqa %xmm15, 256+112(%rsp)
-
- subq $1, %rax
- ja scrypt_core_3way_loop2
-
- scrypt_shuffle %rsp, 0, %rdi, 0
- scrypt_shuffle %rsp, 64, %rdi, 64
- scrypt_shuffle %rsp, 128, %rsi, 0
- scrypt_shuffle %rsp, 192, %rsi, 64
- scrypt_shuffle %rsp, 256, %rdx, 0
- scrypt_shuffle %rsp, 320, %rdx, 64
-
- addq $392, %rsp
-#if defined(WIN64)
- popq %rsi
- popq %rdi
- movdqa 8(%rsp), %xmm6
- movdqa 24(%rsp), %xmm7
- movdqa 40(%rsp), %xmm8
- movdqa 56(%rsp), %xmm9
- movdqa 72(%rsp), %xmm10
- movdqa 88(%rsp), %xmm11
- movdqa 104(%rsp), %xmm12
- movdqa 120(%rsp), %xmm13
- movdqa 136(%rsp), %xmm14
- movdqa 152(%rsp), %xmm15
- addq $176, %rsp
-#endif
- popq %rbp
- popq %rbx
+ punpcklqdq %xmm14, %xmm12
+ punpckhqdq %xmm0, %xmm14
+ movdqa %xmm13, %xmm0
+ pshufd $0x4e, %xmm15, %xmm15
+ punpcklqdq %xmm15, %xmm13
+ punpckhqdq %xmm0, %xmm15
+ movdqa %xmm12, 64(%rdi)
+ movdqa %xmm15, 80(%rdi)
+ movdqa %xmm14, 96(%rdi)
+ movdqa %xmm13, 112(%rdi)
+
+ scrypt_core_cleanup
ret
-#endif
\ No newline at end of file
+#endif