# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
+
+#if defined(OPTIMIZED_SALSA) && defined(__x86_64__)
+
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
-#if defined(__x86_64__)
-
-.macro scrypt_shuffle src, so, dest, do
- movl \so+60(\src), %r8d
- movl \so+44(\src), %r9d
- movl \so+28(\src), %r10d
- movl \so+12(\src), %r11d
- movl %r8d, \do+12(\dest)
- movl %r9d, \do+28(\dest)
- movl %r10d, \do+44(\dest)
- movl %r11d, \do+60(\dest)
- movl \so+40(\src), %r8d
- movl \so+8(\src), %r9d
- movl \so+48(\src), %r10d
- movl \so+16(\src), %r11d
- movl %r8d, \do+8(\dest)
- movl %r9d, \do+40(\dest)
- movl %r10d, \do+16(\dest)
- movl %r11d, \do+48(\dest)
- movl \so+20(\src), %r8d
- movl \so+4(\src), %r9d
- movl \so+52(\src), %r10d
- movl \so+36(\src), %r11d
- movl %r8d, \do+4(\dest)
- movl %r9d, \do+20(\dest)
- movl %r10d, \do+36(\dest)
- movl %r11d, \do+52(\dest)
- movl \so+0(\src), %r8d
- movl \so+24(\src), %r9d
- movl \so+32(\src), %r10d
- movl \so+56(\src), %r11d
- movl %r8d, \do+0(\dest)
- movl %r9d, \do+24(\dest)
- movl %r10d, \do+32(\dest)
- movl %r11d, \do+56(\dest)
-.endm
-
-.macro gen_salsa8_core_doubleround
- movq 72(%rsp), %r15
-
- leaq (%r14, %rdx), %rbp
- roll $7, %ebp
- xorq %rbp, %r9
- leaq (%rdi, %r15), %rbp
- roll $7, %ebp
- xorq %rbp, %r10
- leaq (%rdx, %r9), %rbp
- roll $9, %ebp
- xorq %rbp, %r11
- leaq (%r15, %r10), %rbp
- roll $9, %ebp
- xorq %rbp, %r13
- leaq (%r9, %r11), %rbp
- roll $13, %ebp
- xorq %rbp, %r14
- leaq (%r10, %r13), %rbp
- roll $13, %ebp
- xorq %rbp, %rdi
- leaq (%r11, %r14), %rbp
- roll $18, %ebp
- xorq %rbp, %rdx
- leaq (%r13, %rdi), %rbp
- roll $18, %ebp
- xorq %rbp, %r15
-
- movq 48(%rsp), %rbp
- movq %r15, 72(%rsp)
-
- leaq (%rax, %rbp), %r15
- roll $7, %r15d
- xorq %r15, %rbx
- leaq (%rbp, %rbx), %r15
- roll $9, %r15d
- xorq %r15, %rcx
- leaq (%rbx, %rcx), %r15
- roll $13, %r15d
- xorq %r15, %rax
- leaq (%rcx, %rax), %r15
- roll $18, %r15d
- xorq %r15, %rbp
-
- movq 88(%rsp), %r15
- movq %rbp, 48(%rsp)
-
- leaq (%r12, %r15), %rbp
- roll $7, %ebp
- xorq %rbp, %rsi
- leaq (%r15, %rsi), %rbp
- roll $9, %ebp
- xorq %rbp, %r8
- leaq (%rsi, %r8), %rbp
- roll $13, %ebp
- xorq %rbp, %r12
- leaq (%r8, %r12), %rbp
- roll $18, %ebp
- xorq %rbp, %r15
-
- movq %r15, 88(%rsp)
- movq 72(%rsp), %r15
-
- leaq (%rsi, %rdx), %rbp
- roll $7, %ebp
- xorq %rbp, %rdi
- leaq (%r9, %r15), %rbp
- roll $7, %ebp
- xorq %rbp, %rax
- leaq (%rdx, %rdi), %rbp
- roll $9, %ebp
- xorq %rbp, %rcx
- leaq (%r15, %rax), %rbp
- roll $9, %ebp
- xorq %rbp, %r8
- leaq (%rdi, %rcx), %rbp
- roll $13, %ebp
- xorq %rbp, %rsi
- leaq (%rax, %r8), %rbp
- roll $13, %ebp
- xorq %rbp, %r9
- leaq (%rcx, %rsi), %rbp
- roll $18, %ebp
- xorq %rbp, %rdx
- leaq (%r8, %r9), %rbp
- roll $18, %ebp
- xorq %rbp, %r15
-
- movq 48(%rsp), %rbp
- movq %r15, 72(%rsp)
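+# scrypt_shuffle copies a 64-byte block from so(src) to do(dest) while
+# permuting its sixteen 32-bit words: pairs (1,5), (2,10), (3,15),
+# (4,12), (7,11) and (9,13) are swapped; words 0, 6, 8 and 14 stay put.
+# The permutation is its own inverse, so the same macro converts between
+# the scalar word order and the order the SIMD code expects, both ways.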
+#define scrypt_shuffle(src, so, dest, do) \
+ movl so+60(src), %r8d; \
+ movl so+44(src), %r9d; \
+ movl so+28(src), %r10d; \
+ movl so+12(src), %r11d; \
+ movl %r8d, do+12(dest); \
+ movl %r9d, do+28(dest); \
+ movl %r10d, do+44(dest); \
+ movl %r11d, do+60(dest); \
+ movl so+40(src), %r8d; \
+ movl so+8(src), %r9d; \
+ movl so+48(src), %r10d; \
+ movl so+16(src), %r11d; \
+ movl %r8d, do+8(dest); \
+ movl %r9d, do+40(dest); \
+ movl %r10d, do+16(dest); \
+ movl %r11d, do+48(dest); \
+ movl so+20(src), %r8d; \
+ movl so+4(src), %r9d; \
+ movl so+52(src), %r10d; \
+ movl so+36(src), %r11d; \
+ movl %r8d, do+4(dest); \
+ movl %r9d, do+20(dest); \
+ movl %r10d, do+36(dest); \
+ movl %r11d, do+52(dest); \
+ movl so+0(src), %r8d; \
+ movl so+24(src), %r9d; \
+ movl so+32(src), %r10d; \
+ movl so+56(src), %r11d; \
+ movl %r8d, do+0(dest); \
+ movl %r9d, do+24(dest); \
+ movl %r10d, do+32(dest); \
+	movl %r11d, do+56(dest)
+
+
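+# One Salsa20 double round on a 16-word state kept in general-purpose
+# registers and the 48/72/88(%rsp) spill slots. Each step adds two words
+# (leaq), rotates the low 32 bits (roll), and xors the result into a
+# third word. For orientation, a C-like sketch of one column quarter-round
+# (rotl is a 32-bit rotate; the register assignment below differs):
+#	x4 ^= rotl(x0 + x12, 7);
+#	x8 ^= rotl(x4 + x0, 9);
+#	x12 ^= rotl(x8 + x4, 13);
+#	x0 ^= rotl(x12 + x8, 18);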
+#define gen_salsa8_core_doubleround() \
+ movq 72(%rsp), %r15; \
+ leaq (%r14, %rdx), %rbp; \
+ roll $7, %ebp; \
+ xorq %rbp, %r9; \
+ leaq (%rdi, %r15), %rbp; \
+ roll $7, %ebp; \
+ xorq %rbp, %r10; \
+ leaq (%rdx, %r9), %rbp; \
+ roll $9, %ebp; \
+ xorq %rbp, %r11; \
+ leaq (%r15, %r10), %rbp; \
+ roll $9, %ebp; \
+ xorq %rbp, %r13; \
+ leaq (%r9, %r11), %rbp; \
+ roll $13, %ebp; \
+ xorq %rbp, %r14; \
+ leaq (%r10, %r13), %rbp; \
+ roll $13, %ebp; \
+ xorq %rbp, %rdi; \
+ leaq (%r11, %r14), %rbp; \
+ roll $18, %ebp; \
+ xorq %rbp, %rdx; \
+ leaq (%r13, %rdi), %rbp; \
+ roll $18, %ebp; \
+ xorq %rbp, %r15; \
+ movq 48(%rsp), %rbp; \
+ movq %r15, 72(%rsp); \
+ leaq (%rax, %rbp), %r15; \
+ roll $7, %r15d; \
+ xorq %r15, %rbx; \
+ leaq (%rbp, %rbx), %r15; \
+ roll $9, %r15d; \
+ xorq %r15, %rcx; \
+ leaq (%rbx, %rcx), %r15; \
+ roll $13, %r15d; \
+ xorq %r15, %rax; \
+ leaq (%rcx, %rax), %r15; \
+ roll $18, %r15d; \
+ xorq %r15, %rbp; \
+ movq 88(%rsp), %r15; \
+ movq %rbp, 48(%rsp); \
+ leaq (%r12, %r15), %rbp; \
+ roll $7, %ebp; \
+ xorq %rbp, %rsi; \
+ leaq (%r15, %rsi), %rbp; \
+ roll $9, %ebp; \
+ xorq %rbp, %r8; \
+ leaq (%rsi, %r8), %rbp; \
+ roll $13, %ebp; \
+ xorq %rbp, %r12; \
+ leaq (%r8, %r12), %rbp; \
+ roll $18, %ebp; \
+ xorq %rbp, %r15; \
+ movq %r15, 88(%rsp); \
+ movq 72(%rsp), %r15; \
+ leaq (%rsi, %rdx), %rbp; \
+ roll $7, %ebp; \
+ xorq %rbp, %rdi; \
+ leaq (%r9, %r15), %rbp; \
+ roll $7, %ebp; \
+ xorq %rbp, %rax; \
+ leaq (%rdx, %rdi), %rbp; \
+ roll $9, %ebp; \
+ xorq %rbp, %rcx; \
+ leaq (%r15, %rax), %rbp; \
+ roll $9, %ebp; \
+ xorq %rbp, %r8; \
+ leaq (%rdi, %rcx), %rbp; \
+ roll $13, %ebp; \
+ xorq %rbp, %rsi; \
+ leaq (%rax, %r8), %rbp; \
+ roll $13, %ebp; \
+ xorq %rbp, %r9; \
+ leaq (%rcx, %rsi), %rbp; \
+ roll $18, %ebp; \
+ xorq %rbp, %rdx; \
+ leaq (%r8, %r9), %rbp; \
+ roll $18, %ebp; \
+ xorq %rbp, %r15; \
+ movq 48(%rsp), %rbp; \
+ movq %r15, 72(%rsp); \
+ leaq (%r10, %rbp), %r15; \
+ roll $7, %r15d; \
+ xorq %r15, %r12; \
+ leaq (%rbp, %r12), %r15; \
+ roll $9, %r15d; \
+ xorq %r15, %r11; \
+ leaq (%r12, %r11), %r15; \
+ roll $13, %r15d; \
+ xorq %r15, %r10; \
+ leaq (%r11, %r10), %r15; \
+ roll $18, %r15d; \
+ xorq %r15, %rbp; \
+ movq 88(%rsp), %r15; \
+ movq %rbp, 48(%rsp); \
+ leaq (%rbx, %r15), %rbp; \
+ roll $7, %ebp; \
+ xorq %rbp, %r14; \
+ leaq (%r15, %r14), %rbp; \
+ roll $9, %ebp; \
+ xorq %rbp, %r13; \
+ leaq (%r14, %r13), %rbp; \
+ roll $13, %ebp; \
+ xorq %rbp, %rbx; \
+ leaq (%r13, %rbx), %rbp; \
+ roll $18, %ebp; \
+ xorq %rbp, %r15; \
+	movq %r15, 88(%rsp)
- leaq (%r10, %rbp), %r15
- roll $7, %r15d
- xorq %r15, %r12
- leaq (%rbp, %r12), %r15
- roll $9, %r15d
- xorq %r15, %r11
- leaq (%r12, %r11), %r15
- roll $13, %r15d
- xorq %r15, %r10
- leaq (%r11, %r10), %r15
- roll $18, %r15d
- xorq %r15, %rbp
-
- movq 88(%rsp), %r15
- movq %rbp, 48(%rsp)
-
- leaq (%rbx, %r15), %rbp
- roll $7, %ebp
- xorq %rbp, %r14
- leaq (%r15, %r14), %rbp
- roll $9, %ebp
- xorq %rbp, %r13
- leaq (%r14, %r13), %rbp
- roll $13, %ebp
- xorq %rbp, %rbx
- leaq (%r13, %rbx), %rbp
- roll $18, %ebp
- xorq %rbp, %r15
-
- movq %r15, 88(%rsp)
-.endm
.text
.align 32
shrq $32, %r15
movq %r15, 88(%rsp)
- gen_salsa8_core_doubleround
- gen_salsa8_core_doubleround
- gen_salsa8_core_doubleround
- gen_salsa8_core_doubleround
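+	# four double rounds = eight Salsa20 rounds (Salsa20/8)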
+ gen_salsa8_core_doubleround()
+ gen_salsa8_core_doubleround()
+ gen_salsa8_core_doubleround()
+ gen_salsa8_core_doubleround()
movl %edx, %edx
shlq $32, %rdi
movq %rdx, %rsi
#endif
-.macro scrypt_core_cleanup
-#if defined(WIN64)
- popq %rsi
- popq %rdi
- movdqa 8(%rsp), %xmm6
- movdqa 24(%rsp), %xmm7
- movdqa 40(%rsp), %xmm8
- movdqa 56(%rsp), %xmm9
- movdqa 72(%rsp), %xmm10
- movdqa 88(%rsp), %xmm11
- movdqa 104(%rsp), %xmm12
- movdqa 120(%rsp), %xmm13
- movdqa 136(%rsp), %xmm14
- movdqa 152(%rsp), %xmm15
- addq $176, %rsp
-#endif
- popq %r15
- popq %r14
- popq %r13
- popq %r12
- popq %rbp
- popq %rbx
-.endm
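+# Shared epilogue: restore the callee-saved registers pushed by the
+# matching prologue.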
+#define scrypt_core_cleanup() \
+ popq %r15; \
+ popq %r14; \
+ popq %r13; \
+ popq %r12; \
+ popq %rbp; \
+	popq %rbx
+
# GenuineIntel processors have fast SIMD
xorl %eax, %eax
movdqa %xmm15, 112(%rdi)
addq $136, %rsp
- scrypt_core_cleanup
+ scrypt_core_cleanup()
ret
-.macro xmm_salsa8_core_doubleround
- movdqa %xmm1, %xmm4
- paddd %xmm0, %xmm4
- movdqa %xmm4, %xmm5
- pslld $7, %xmm4
- psrld $25, %xmm5
- pxor %xmm4, %xmm3
- pxor %xmm5, %xmm3
- movdqa %xmm0, %xmm4
-
- paddd %xmm3, %xmm4
- movdqa %xmm4, %xmm5
- pslld $9, %xmm4
- psrld $23, %xmm5
- pxor %xmm4, %xmm2
- movdqa %xmm3, %xmm4
- pshufd $0x93, %xmm3, %xmm3
- pxor %xmm5, %xmm2
-
- paddd %xmm2, %xmm4
- movdqa %xmm4, %xmm5
- pslld $13, %xmm4
- psrld $19, %xmm5
- pxor %xmm4, %xmm1
- movdqa %xmm2, %xmm4
- pshufd $0x4e, %xmm2, %xmm2
- pxor %xmm5, %xmm1
-
- paddd %xmm1, %xmm4
- movdqa %xmm4, %xmm5
- pslld $18, %xmm4
- psrld $14, %xmm5
- pxor %xmm4, %xmm0
- pshufd $0x39, %xmm1, %xmm1
- pxor %xmm5, %xmm0
- movdqa %xmm3, %xmm4
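+# One Salsa20 double round with four state words per xmm register
+# (%xmm0-%xmm3, scratch in %xmm4/%xmm5). A rotate left by n is built
+# from pslld $n, psrld $(32-n) and two pxors into the target; pshufd
+# realigns the words within each register between the two half-rounds.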
+#define xmm_salsa8_core_doubleround() \
+ movdqa %xmm1, %xmm4; \
+ paddd %xmm0, %xmm4; \
+ movdqa %xmm4, %xmm5; \
+ pslld $7, %xmm4; \
+ psrld $25, %xmm5; \
+ pxor %xmm4, %xmm3; \
+ pxor %xmm5, %xmm3; \
+ movdqa %xmm0, %xmm4; \
+ paddd %xmm3, %xmm4; \
+ movdqa %xmm4, %xmm5; \
+ pslld $9, %xmm4; \
+ psrld $23, %xmm5; \
+ pxor %xmm4, %xmm2; \
+ movdqa %xmm3, %xmm4; \
+ pshufd $0x93, %xmm3, %xmm3; \
+ pxor %xmm5, %xmm2; \
+ paddd %xmm2, %xmm4; \
+ movdqa %xmm4, %xmm5; \
+ pslld $13, %xmm4; \
+ psrld $19, %xmm5; \
+ pxor %xmm4, %xmm1; \
+ movdqa %xmm2, %xmm4; \
+ pshufd $0x4e, %xmm2, %xmm2; \
+ pxor %xmm5, %xmm1; \
+ paddd %xmm1, %xmm4; \
+ movdqa %xmm4, %xmm5; \
+ pslld $18, %xmm4; \
+ psrld $14, %xmm5; \
+ pxor %xmm4, %xmm0; \
+ pshufd $0x39, %xmm1, %xmm1; \
+ pxor %xmm5, %xmm0; \
+ movdqa %xmm3, %xmm4; \
+ paddd %xmm0, %xmm4; \
+ movdqa %xmm4, %xmm5; \
+ pslld $7, %xmm4; \
+ psrld $25, %xmm5; \
+ pxor %xmm4, %xmm1; \
+ pxor %xmm5, %xmm1; \
+ movdqa %xmm0, %xmm4; \
+ paddd %xmm1, %xmm4; \
+ movdqa %xmm4, %xmm5; \
+ pslld $9, %xmm4; \
+ psrld $23, %xmm5; \
+ pxor %xmm4, %xmm2; \
+ movdqa %xmm1, %xmm4; \
+ pshufd $0x93, %xmm1, %xmm1; \
+ pxor %xmm5, %xmm2; \
+ paddd %xmm2, %xmm4; \
+ movdqa %xmm4, %xmm5; \
+ pslld $13, %xmm4; \
+ psrld $19, %xmm5; \
+ pxor %xmm4, %xmm3; \
+ movdqa %xmm2, %xmm4; \
+ pshufd $0x4e, %xmm2, %xmm2; \
+ pxor %xmm5, %xmm3; \
+ paddd %xmm3, %xmm4; \
+ movdqa %xmm4, %xmm5; \
+ pslld $18, %xmm4; \
+ psrld $14, %xmm5; \
+ pxor %xmm4, %xmm0; \
+ pshufd $0x39, %xmm3, %xmm3; \
+	pxor %xmm5, %xmm0
+
+
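+# Salsa20/8 core: four double rounds over the xmm-resident state.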
+#define xmm_salsa8_core() \
+ xmm_salsa8_core_doubleround(); \
+ xmm_salsa8_core_doubleround(); \
+ xmm_salsa8_core_doubleround(); \
+	xmm_salsa8_core_doubleround()
- paddd %xmm0, %xmm4
- movdqa %xmm4, %xmm5
- pslld $7, %xmm4
- psrld $25, %xmm5
- pxor %xmm4, %xmm1
- pxor %xmm5, %xmm1
- movdqa %xmm0, %xmm4
-
- paddd %xmm1, %xmm4
- movdqa %xmm4, %xmm5
- pslld $9, %xmm4
- psrld $23, %xmm5
- pxor %xmm4, %xmm2
- movdqa %xmm1, %xmm4
- pshufd $0x93, %xmm1, %xmm1
- pxor %xmm5, %xmm2
-
- paddd %xmm2, %xmm4
- movdqa %xmm4, %xmm5
- pslld $13, %xmm4
- psrld $19, %xmm5
- pxor %xmm4, %xmm3
- movdqa %xmm2, %xmm4
- pshufd $0x4e, %xmm2, %xmm2
- pxor %xmm5, %xmm3
-
- paddd %xmm3, %xmm4
- movdqa %xmm4, %xmm5
- pslld $18, %xmm4
- psrld $14, %xmm5
- pxor %xmm4, %xmm0
- pshufd $0x39, %xmm3, %xmm3
- pxor %xmm5, %xmm0
-.endm
-
-.macro xmm_salsa8_core
- xmm_salsa8_core_doubleround
- xmm_salsa8_core_doubleround
- xmm_salsa8_core_doubleround
- xmm_salsa8_core_doubleround
-.endm
.align 32
xmm_scrypt_core:
movdqa %xmm9, %xmm1
movdqa %xmm10, %xmm2
movdqa %xmm11, %xmm3
- xmm_salsa8_core
+ xmm_salsa8_core()
paddd %xmm0, %xmm8
paddd %xmm1, %xmm9
paddd %xmm2, %xmm10
movdqa %xmm13, %xmm1
movdqa %xmm14, %xmm2
movdqa %xmm15, %xmm3
- xmm_salsa8_core
+ xmm_salsa8_core()
paddd %xmm0, %xmm12
paddd %xmm1, %xmm13
paddd %xmm2, %xmm14
movdqa %xmm9, %xmm1
movdqa %xmm10, %xmm2
movdqa %xmm11, %xmm3
- xmm_salsa8_core
+ xmm_salsa8_core()
paddd %xmm0, %xmm8
paddd %xmm1, %xmm9
paddd %xmm2, %xmm10
movdqa %xmm13, %xmm1
movdqa %xmm14, %xmm2
movdqa %xmm15, %xmm3
- xmm_salsa8_core
+ xmm_salsa8_core()
paddd %xmm0, %xmm12
paddd %xmm1, %xmm13
paddd %xmm2, %xmm14
movl %ebx, 92(%rdi)
movl %eax, 76(%rdi)
- scrypt_core_cleanup
+ scrypt_core_cleanup()
ret
-
- .text
- .align 32
- .globl scrypt_best_throughput
- .globl _scrypt_best_throughput
-scrypt_best_throughput:
-_scrypt_best_throughput:
- pushq %rbx
- xorq %rax, %rax
- cpuid
- movl $3, %eax
- cmpl $0x444d4163, %ecx
- jne scrypt_best_throughput_exit
- cmpl $0x69746e65, %edx
- jne scrypt_best_throughput_exit
- cmpl $0x68747541, %ebx
- jne scrypt_best_throughput_exit
- movl $1, %eax
- cpuid
- andl $0x0ff00000, %eax
- movl $3, %eax
- jnz scrypt_best_throughput_exit
- movl $1, %eax
-scrypt_best_throughput_exit:
- popq %rbx
- ret
-
-
-.macro xmm_salsa8_core_2way_doubleround
- movdqa %xmm1, %xmm4
- movdqa %xmm9, %xmm6
- paddd %xmm0, %xmm4
- paddd %xmm8, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $7, %xmm4
- pslld $7, %xmm6
- psrld $25, %xmm5
- psrld $25, %xmm7
- pxor %xmm4, %xmm3
- pxor %xmm6, %xmm11
- pxor %xmm5, %xmm3
- pxor %xmm7, %xmm11
- movdqa %xmm0, %xmm4
- movdqa %xmm8, %xmm6
-
- paddd %xmm3, %xmm4
- paddd %xmm11, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $9, %xmm4
- pslld $9, %xmm6
- psrld $23, %xmm5
- psrld $23, %xmm7
- pxor %xmm4, %xmm2
- pxor %xmm6, %xmm10
- movdqa %xmm3, %xmm4
- movdqa %xmm11, %xmm6
- pshufd $0x93, %xmm3, %xmm3
- pshufd $0x93, %xmm11, %xmm11
- pxor %xmm5, %xmm2
- pxor %xmm7, %xmm10
-
- paddd %xmm2, %xmm4
- paddd %xmm10, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $13, %xmm4
- pslld $13, %xmm6
- psrld $19, %xmm5
- psrld $19, %xmm7
- pxor %xmm4, %xmm1
- pxor %xmm6, %xmm9
- movdqa %xmm2, %xmm4
- movdqa %xmm10, %xmm6
- pshufd $0x4e, %xmm2, %xmm2
- pshufd $0x4e, %xmm10, %xmm10
- pxor %xmm5, %xmm1
- pxor %xmm7, %xmm9
-
- paddd %xmm1, %xmm4
- paddd %xmm9, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $18, %xmm4
- pslld $18, %xmm6
- psrld $14, %xmm5
- psrld $14, %xmm7
- pxor %xmm4, %xmm0
- pxor %xmm6, %xmm8
- pshufd $0x39, %xmm1, %xmm1
- pshufd $0x39, %xmm9, %xmm9
- pxor %xmm5, %xmm0
- pxor %xmm7, %xmm8
- movdqa %xmm3, %xmm4
- movdqa %xmm11, %xmm6
-
- paddd %xmm0, %xmm4
- paddd %xmm8, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $7, %xmm4
- pslld $7, %xmm6
- psrld $25, %xmm5
- psrld $25, %xmm7
- pxor %xmm4, %xmm1
- pxor %xmm6, %xmm9
- pxor %xmm5, %xmm1
- pxor %xmm7, %xmm9
- movdqa %xmm0, %xmm4
- movdqa %xmm8, %xmm6
-
- paddd %xmm1, %xmm4
- paddd %xmm9, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $9, %xmm4
- pslld $9, %xmm6
- psrld $23, %xmm5
- psrld $23, %xmm7
- pxor %xmm4, %xmm2
- pxor %xmm6, %xmm10
- movdqa %xmm1, %xmm4
- movdqa %xmm9, %xmm6
- pshufd $0x93, %xmm1, %xmm1
- pshufd $0x93, %xmm9, %xmm9
- pxor %xmm5, %xmm2
- pxor %xmm7, %xmm10
-
- paddd %xmm2, %xmm4
- paddd %xmm10, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $13, %xmm4
- pslld $13, %xmm6
- psrld $19, %xmm5
- psrld $19, %xmm7
- pxor %xmm4, %xmm3
- pxor %xmm6, %xmm11
- movdqa %xmm2, %xmm4
- movdqa %xmm10, %xmm6
- pshufd $0x4e, %xmm2, %xmm2
- pshufd $0x4e, %xmm10, %xmm10
- pxor %xmm5, %xmm3
- pxor %xmm7, %xmm11
-
- paddd %xmm3, %xmm4
- paddd %xmm11, %xmm6
- movdqa %xmm4, %xmm5
- movdqa %xmm6, %xmm7
- pslld $18, %xmm4
- pslld $18, %xmm6
- psrld $14, %xmm5
- psrld $14, %xmm7
- pxor %xmm4, %xmm0
- pxor %xmm6, %xmm8
- pshufd $0x39, %xmm3, %xmm3
- pshufd $0x39, %xmm11, %xmm11
- pxor %xmm5, %xmm0
- pxor %xmm7, %xmm8
-.endm
-
-.macro xmm_salsa8_core_2way
- xmm_salsa8_core_2way_doubleround
- xmm_salsa8_core_2way_doubleround
- xmm_salsa8_core_2way_doubleround
- xmm_salsa8_core_2way_doubleround
-.endm
-
-
- .text
- .align 32
- .globl scrypt_core_2way
- .globl _scrypt_core_2way
-scrypt_core_2way:
-_scrypt_core_2way:
- pushq %rbx
- pushq %rbp
-#if defined(WIN64)
- subq $176, %rsp
- movdqa %xmm6, 8(%rsp)
- movdqa %xmm7, 24(%rsp)
- movdqa %xmm8, 40(%rsp)
- movdqa %xmm9, 56(%rsp)
- movdqa %xmm10, 72(%rsp)
- movdqa %xmm11, 88(%rsp)
- movdqa %xmm12, 104(%rsp)
- movdqa %xmm13, 120(%rsp)
- movdqa %xmm14, 136(%rsp)
- movdqa %xmm15, 152(%rsp)
- pushq %rdi
- pushq %rsi
- movq %rcx, %rdi
- movq %rdx, %rsi
- movq %r8, %rdx
-#endif
- subq $264, %rsp
-
- scrypt_shuffle %rdi, 0, %rsp, 0
- scrypt_shuffle %rdi, 64, %rsp, 64
- scrypt_shuffle %rsi, 0, %rsp, 128
- scrypt_shuffle %rsi, 64, %rsp, 192
-
- movdqa 192(%rsp), %xmm12
- movdqa 208(%rsp), %xmm13
- movdqa 224(%rsp), %xmm14
- movdqa 240(%rsp), %xmm15
-
- movq %rdx, %rbp
- leaq 262144(%rdx), %rcx
-scrypt_core_2way_loop1:
- movdqa 0(%rsp), %xmm0
- movdqa 16(%rsp), %xmm1
- movdqa 32(%rsp), %xmm2
- movdqa 48(%rsp), %xmm3
- movdqa 64(%rsp), %xmm4
- movdqa 80(%rsp), %xmm5
- movdqa 96(%rsp), %xmm6
- movdqa 112(%rsp), %xmm7
- movdqa 128(%rsp), %xmm8
- movdqa 144(%rsp), %xmm9
- movdqa 160(%rsp), %xmm10
- movdqa 176(%rsp), %xmm11
- pxor %xmm4, %xmm0
- pxor %xmm5, %xmm1
- pxor %xmm6, %xmm2
- pxor %xmm7, %xmm3
- movdqa %xmm0, 0(%rbp)
- movdqa %xmm1, 16(%rbp)
- movdqa %xmm2, 32(%rbp)
- movdqa %xmm3, 48(%rbp)
- movdqa %xmm4, 64(%rbp)
- movdqa %xmm5, 80(%rbp)
- movdqa %xmm6, 96(%rbp)
- movdqa %xmm7, 112(%rbp)
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm8, 128(%rbp)
- movdqa %xmm9, 144(%rbp)
- movdqa %xmm10, 160(%rbp)
- movdqa %xmm11, 176(%rbp)
- movdqa %xmm12, 192(%rbp)
- movdqa %xmm13, 208(%rbp)
- movdqa %xmm14, 224(%rbp)
- movdqa %xmm15, 240(%rbp)
-
- xmm_salsa8_core_2way
- paddd 0(%rbp), %xmm0
- paddd 16(%rbp), %xmm1
- paddd 32(%rbp), %xmm2
- paddd 48(%rbp), %xmm3
- paddd 128(%rbp), %xmm8
- paddd 144(%rbp), %xmm9
- paddd 160(%rbp), %xmm10
- paddd 176(%rbp), %xmm11
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128(%rsp)
- movdqa %xmm9, 144(%rsp)
- movdqa %xmm10, 160(%rsp)
- movdqa %xmm11, 176(%rsp)
-
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, %xmm12
- movdqa %xmm9, %xmm13
- movdqa %xmm10, %xmm14
- movdqa %xmm11, %xmm15
- xmm_salsa8_core_2way
- paddd 64(%rsp), %xmm0
- paddd 80(%rsp), %xmm1
- paddd 96(%rsp), %xmm2
- paddd 112(%rsp), %xmm3
- paddd %xmm8, %xmm12
- paddd %xmm9, %xmm13
- paddd %xmm10, %xmm14
- paddd %xmm11, %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
-
- addq $256, %rbp
- cmpq %rcx, %rbp
- jne scrypt_core_2way_loop1
-
- movq $1024, %rcx
-scrypt_core_2way_loop2:
- movdqa 0(%rsp), %xmm0
- movdqa 16(%rsp), %xmm1
- movdqa 32(%rsp), %xmm2
- movdqa 48(%rsp), %xmm3
- movdqa 64(%rsp), %xmm4
- movdqa 80(%rsp), %xmm5
- movdqa 96(%rsp), %xmm6
- movdqa 112(%rsp), %xmm7
- movdqa 128(%rsp), %xmm8
- movdqa 144(%rsp), %xmm9
- movdqa 160(%rsp), %xmm10
- movdqa 176(%rsp), %xmm11
- movd %xmm4, %ebp
- andl $1023, %ebp
- shll $8, %ebp
- pxor 0(%rdx, %rbp), %xmm0
- pxor 16(%rdx, %rbp), %xmm1
- pxor 32(%rdx, %rbp), %xmm2
- pxor 48(%rdx, %rbp), %xmm3
- movd %xmm12, %ebx
- andl $1023, %ebx
- shll $8, %ebx
- addl $128, %ebx
- pxor 0(%rdx, %rbx), %xmm8
- pxor 16(%rdx, %rbx), %xmm9
- pxor 32(%rdx, %rbx), %xmm10
- pxor 48(%rdx, %rbx), %xmm11
-
- pxor %xmm4, %xmm0
- pxor %xmm5, %xmm1
- pxor %xmm6, %xmm2
- pxor %xmm7, %xmm3
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128(%rsp)
- movdqa %xmm9, 144(%rsp)
- movdqa %xmm10, 160(%rsp)
- movdqa %xmm11, 176(%rsp)
- xmm_salsa8_core_2way
- paddd 0(%rsp), %xmm0
- paddd 16(%rsp), %xmm1
- paddd 32(%rsp), %xmm2
- paddd 48(%rsp), %xmm3
- paddd 128(%rsp), %xmm8
- paddd 144(%rsp), %xmm9
- paddd 160(%rsp), %xmm10
- paddd 176(%rsp), %xmm11
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128(%rsp)
- movdqa %xmm9, 144(%rsp)
- movdqa %xmm10, 160(%rsp)
- movdqa %xmm11, 176(%rsp)
-
- pxor 64(%rdx, %rbp), %xmm0
- pxor 80(%rdx, %rbp), %xmm1
- pxor 96(%rdx, %rbp), %xmm2
- pxor 112(%rdx, %rbp), %xmm3
- pxor 64(%rdx, %rbx), %xmm8
- pxor 80(%rdx, %rbx), %xmm9
- pxor 96(%rdx, %rbx), %xmm10
- pxor 112(%rdx, %rbx), %xmm11
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, %xmm12
- movdqa %xmm9, %xmm13
- movdqa %xmm10, %xmm14
- movdqa %xmm11, %xmm15
- xmm_salsa8_core_2way
- paddd 64(%rsp), %xmm0
- paddd 80(%rsp), %xmm1
- paddd 96(%rsp), %xmm2
- paddd 112(%rsp), %xmm3
- paddd %xmm8, %xmm12
- paddd %xmm9, %xmm13
- paddd %xmm10, %xmm14
- paddd %xmm11, %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
-
- subq $1, %rcx
- ja scrypt_core_2way_loop2
-
- movdqa %xmm12, 192(%rsp)
- movdqa %xmm13, 208(%rsp)
- movdqa %xmm14, 224(%rsp)
- movdqa %xmm15, 240(%rsp)
-
- scrypt_shuffle %rsp, 0, %rdi, 0
- scrypt_shuffle %rsp, 64, %rdi, 64
- scrypt_shuffle %rsp, 128, %rsi, 0
- scrypt_shuffle %rsp, 192, %rsi, 64
-
- addq $264, %rsp
-#if defined(WIN64)
- popq %rsi
- popq %rdi
- movdqa 8(%rsp), %xmm6
- movdqa 24(%rsp), %xmm7
- movdqa 40(%rsp), %xmm8
- movdqa 56(%rsp), %xmm9
- movdqa 72(%rsp), %xmm10
- movdqa 88(%rsp), %xmm11
- movdqa 104(%rsp), %xmm12
- movdqa 120(%rsp), %xmm13
- movdqa 136(%rsp), %xmm14
- movdqa 152(%rsp), %xmm15
- addq $176, %rsp
-#endif
- popq %rbp
- popq %rbx
- ret
-
-
-.macro xmm_salsa8_core_3way_doubleround
- movdqa %xmm1, %xmm4
- movdqa %xmm9, %xmm6
- movdqa %xmm13, %xmm7
- paddd %xmm0, %xmm4
- paddd %xmm8, %xmm6
- paddd %xmm12, %xmm7
- movdqa %xmm4, %xmm5
- pslld $7, %xmm4
- psrld $25, %xmm5
- pxor %xmm4, %xmm3
- pxor %xmm5, %xmm3
- movdqa %xmm0, %xmm4
- movdqa %xmm6, %xmm5
- pslld $7, %xmm6
- psrld $25, %xmm5
- pxor %xmm6, %xmm11
- pxor %xmm5, %xmm11
- movdqa %xmm8, %xmm6
- movdqa %xmm7, %xmm5
- pslld $7, %xmm7
- psrld $25, %xmm5
- pxor %xmm7, %xmm15
- pxor %xmm5, %xmm15
- movdqa %xmm12, %xmm7
-
- paddd %xmm3, %xmm4
- paddd %xmm11, %xmm6
- paddd %xmm15, %xmm7
- movdqa %xmm4, %xmm5
- pslld $9, %xmm4
- psrld $23, %xmm5
- pxor %xmm4, %xmm2
- movdqa %xmm3, %xmm4
- pshufd $0x93, %xmm3, %xmm3
- pxor %xmm5, %xmm2
- movdqa %xmm6, %xmm5
- pslld $9, %xmm6
- psrld $23, %xmm5
- pxor %xmm6, %xmm10
- movdqa %xmm11, %xmm6
- pshufd $0x93, %xmm11, %xmm11
- pxor %xmm5, %xmm10
- movdqa %xmm7, %xmm5
- pslld $9, %xmm7
- psrld $23, %xmm5
- pxor %xmm7, %xmm14
- movdqa %xmm15, %xmm7
- pshufd $0x93, %xmm15, %xmm15
- pxor %xmm5, %xmm14
-
- paddd %xmm2, %xmm4
- paddd %xmm10, %xmm6
- paddd %xmm14, %xmm7
- movdqa %xmm4, %xmm5
- pslld $13, %xmm4
- psrld $19, %xmm5
- pxor %xmm4, %xmm1
- movdqa %xmm2, %xmm4
- pshufd $0x4e, %xmm2, %xmm2
- pxor %xmm5, %xmm1
- movdqa %xmm6, %xmm5
- pslld $13, %xmm6
- psrld $19, %xmm5
- pxor %xmm6, %xmm9
- movdqa %xmm10, %xmm6
- pshufd $0x4e, %xmm10, %xmm10
- pxor %xmm5, %xmm9
- movdqa %xmm7, %xmm5
- pslld $13, %xmm7
- psrld $19, %xmm5
- pxor %xmm7, %xmm13
- movdqa %xmm14, %xmm7
- pshufd $0x4e, %xmm14, %xmm14
- pxor %xmm5, %xmm13
-
- paddd %xmm1, %xmm4
- paddd %xmm9, %xmm6
- paddd %xmm13, %xmm7
- movdqa %xmm4, %xmm5
- pslld $18, %xmm4
- psrld $14, %xmm5
- pxor %xmm4, %xmm0
- pshufd $0x39, %xmm1, %xmm1
- pxor %xmm5, %xmm0
- movdqa %xmm3, %xmm4
- movdqa %xmm6, %xmm5
- pslld $18, %xmm6
- psrld $14, %xmm5
- pxor %xmm6, %xmm8
- pshufd $0x39, %xmm9, %xmm9
- pxor %xmm5, %xmm8
- movdqa %xmm11, %xmm6
- movdqa %xmm7, %xmm5
- pslld $18, %xmm7
- psrld $14, %xmm5
- pxor %xmm7, %xmm12
- pshufd $0x39, %xmm13, %xmm13
- pxor %xmm5, %xmm12
- movdqa %xmm15, %xmm7
-
- paddd %xmm0, %xmm4
- paddd %xmm8, %xmm6
- paddd %xmm12, %xmm7
- movdqa %xmm4, %xmm5
- pslld $7, %xmm4
- psrld $25, %xmm5
- pxor %xmm4, %xmm1
- pxor %xmm5, %xmm1
- movdqa %xmm0, %xmm4
- movdqa %xmm6, %xmm5
- pslld $7, %xmm6
- psrld $25, %xmm5
- pxor %xmm6, %xmm9
- pxor %xmm5, %xmm9
- movdqa %xmm8, %xmm6
- movdqa %xmm7, %xmm5
- pslld $7, %xmm7
- psrld $25, %xmm5
- pxor %xmm7, %xmm13
- pxor %xmm5, %xmm13
- movdqa %xmm12, %xmm7
-
- paddd %xmm1, %xmm4
- paddd %xmm9, %xmm6
- paddd %xmm13, %xmm7
- movdqa %xmm4, %xmm5
- pslld $9, %xmm4
- psrld $23, %xmm5
- pxor %xmm4, %xmm2
- movdqa %xmm1, %xmm4
- pshufd $0x93, %xmm1, %xmm1
- pxor %xmm5, %xmm2
- movdqa %xmm6, %xmm5
- pslld $9, %xmm6
- psrld $23, %xmm5
- pxor %xmm6, %xmm10
- movdqa %xmm9, %xmm6
- pshufd $0x93, %xmm9, %xmm9
- pxor %xmm5, %xmm10
- movdqa %xmm7, %xmm5
- pslld $9, %xmm7
- psrld $23, %xmm5
- pxor %xmm7, %xmm14
- movdqa %xmm13, %xmm7
- pshufd $0x93, %xmm13, %xmm13
- pxor %xmm5, %xmm14
-
- paddd %xmm2, %xmm4
- paddd %xmm10, %xmm6
- paddd %xmm14, %xmm7
- movdqa %xmm4, %xmm5
- pslld $13, %xmm4
- psrld $19, %xmm5
- pxor %xmm4, %xmm3
- movdqa %xmm2, %xmm4
- pshufd $0x4e, %xmm2, %xmm2
- pxor %xmm5, %xmm3
- movdqa %xmm6, %xmm5
- pslld $13, %xmm6
- psrld $19, %xmm5
- pxor %xmm6, %xmm11
- movdqa %xmm10, %xmm6
- pshufd $0x4e, %xmm10, %xmm10
- pxor %xmm5, %xmm11
- movdqa %xmm7, %xmm5
- pslld $13, %xmm7
- psrld $19, %xmm5
- pxor %xmm7, %xmm15
- movdqa %xmm14, %xmm7
- pshufd $0x4e, %xmm14, %xmm14
- pxor %xmm5, %xmm15
-
- paddd %xmm3, %xmm4
- paddd %xmm11, %xmm6
- paddd %xmm15, %xmm7
- movdqa %xmm4, %xmm5
- pslld $18, %xmm4
- psrld $14, %xmm5
- pxor %xmm4, %xmm0
- pshufd $0x39, %xmm3, %xmm3
- pxor %xmm5, %xmm0
- movdqa %xmm6, %xmm5
- pslld $18, %xmm6
- psrld $14, %xmm5
- pxor %xmm6, %xmm8
- pshufd $0x39, %xmm11, %xmm11
- pxor %xmm5, %xmm8
- movdqa %xmm7, %xmm5
- pslld $18, %xmm7
- psrld $14, %xmm5
- pxor %xmm7, %xmm12
- pshufd $0x39, %xmm15, %xmm15
- pxor %xmm5, %xmm12
-.endm
-
-.macro xmm_salsa8_core_3way
- xmm_salsa8_core_3way_doubleround
- xmm_salsa8_core_3way_doubleround
- xmm_salsa8_core_3way_doubleround
- xmm_salsa8_core_3way_doubleround
-.endm
-
- .text
- .align 32
- .globl scrypt_core_3way
- .globl _scrypt_core_3way
-scrypt_core_3way:
-_scrypt_core_3way:
- pushq %rbx
- pushq %rbp
-#if defined(WIN64)
- subq $176, %rsp
- movdqa %xmm6, 8(%rsp)
- movdqa %xmm7, 24(%rsp)
- movdqa %xmm8, 40(%rsp)
- movdqa %xmm9, 56(%rsp)
- movdqa %xmm10, 72(%rsp)
- movdqa %xmm11, 88(%rsp)
- movdqa %xmm12, 104(%rsp)
- movdqa %xmm13, 120(%rsp)
- movdqa %xmm14, 136(%rsp)
- movdqa %xmm15, 152(%rsp)
- pushq %rdi
- pushq %rsi
- movq %rcx, %rdi
- movq %rdx, %rsi
- movq %r8, %rdx
- movq %r9, %rcx
-#endif
- subq $392, %rsp
-
- scrypt_shuffle %rdi, 0, %rsp, 0
- scrypt_shuffle %rdi, 64, %rsp, 64
- scrypt_shuffle %rsi, 0, %rsp, 128
- scrypt_shuffle %rsi, 64, %rsp, 192
- scrypt_shuffle %rdx, 0, %rsp, 256
- scrypt_shuffle %rdx, 64, %rsp, 320
-
- movdqa 128+64(%rsp), %xmm8
- movdqa 128+80(%rsp), %xmm9
- movdqa 128+96(%rsp), %xmm10
- movdqa 128+112(%rsp), %xmm11
-
- movq %rcx, %rbp
- leaq 3*131072(%rcx), %rax
-scrypt_core_3way_loop1:
- movdqa %xmm8, %xmm12
- movdqa %xmm9, %xmm13
- movdqa %xmm10, %xmm14
- movdqa %xmm11, %xmm15
- movdqa 0(%rsp), %xmm0
- movdqa 16(%rsp), %xmm1
- movdqa 32(%rsp), %xmm2
- movdqa 48(%rsp), %xmm3
- movdqa 64(%rsp), %xmm4
- movdqa 80(%rsp), %xmm5
- movdqa 96(%rsp), %xmm6
- movdqa 112(%rsp), %xmm7
- movdqa 128+0(%rsp), %xmm8
- movdqa 128+16(%rsp), %xmm9
- movdqa 128+32(%rsp), %xmm10
- movdqa 128+48(%rsp), %xmm11
- pxor %xmm4, %xmm0
- pxor %xmm5, %xmm1
- pxor %xmm6, %xmm2
- pxor %xmm7, %xmm3
- movdqa %xmm0, 0(%rbp)
- movdqa %xmm1, 16(%rbp)
- movdqa %xmm2, 32(%rbp)
- movdqa %xmm3, 48(%rbp)
- movdqa %xmm4, 64(%rbp)
- movdqa %xmm5, 80(%rbp)
- movdqa %xmm6, 96(%rbp)
- movdqa %xmm7, 112(%rbp)
- pxor %xmm12, %xmm8
- pxor %xmm13, %xmm9
- pxor %xmm14, %xmm10
- pxor %xmm15, %xmm11
- movdqa %xmm8, 128+0(%rbp)
- movdqa %xmm9, 128+16(%rbp)
- movdqa %xmm10, 128+32(%rbp)
- movdqa %xmm11, 128+48(%rbp)
- movdqa %xmm12, 128+64(%rbp)
- movdqa %xmm13, 128+80(%rbp)
- movdqa %xmm14, 128+96(%rbp)
- movdqa %xmm15, 128+112(%rbp)
- movdqa 256+0(%rsp), %xmm12
- movdqa 256+16(%rsp), %xmm13
- movdqa 256+32(%rsp), %xmm14
- movdqa 256+48(%rsp), %xmm15
- movdqa 256+64(%rsp), %xmm4
- movdqa 256+80(%rsp), %xmm5
- movdqa 256+96(%rsp), %xmm6
- movdqa 256+112(%rsp), %xmm7
- pxor %xmm4, %xmm12
- pxor %xmm5, %xmm13
- pxor %xmm6, %xmm14
- pxor %xmm7, %xmm15
- movdqa %xmm12, 256+0(%rbp)
- movdqa %xmm13, 256+16(%rbp)
- movdqa %xmm14, 256+32(%rbp)
- movdqa %xmm15, 256+48(%rbp)
- movdqa %xmm4, 256+64(%rbp)
- movdqa %xmm5, 256+80(%rbp)
- movdqa %xmm6, 256+96(%rbp)
- movdqa %xmm7, 256+112(%rbp)
-
- xmm_salsa8_core_3way
- paddd 0(%rbp), %xmm0
- paddd 16(%rbp), %xmm1
- paddd 32(%rbp), %xmm2
- paddd 48(%rbp), %xmm3
- paddd 128+0(%rbp), %xmm8
- paddd 128+16(%rbp), %xmm9
- paddd 128+32(%rbp), %xmm10
- paddd 128+48(%rbp), %xmm11
- paddd 256+0(%rbp), %xmm12
- paddd 256+16(%rbp), %xmm13
- paddd 256+32(%rbp), %xmm14
- paddd 256+48(%rbp), %xmm15
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128+0(%rsp)
- movdqa %xmm9, 128+16(%rsp)
- movdqa %xmm10, 128+32(%rsp)
- movdqa %xmm11, 128+48(%rsp)
- movdqa %xmm12, 256+0(%rsp)
- movdqa %xmm13, 256+16(%rsp)
- movdqa %xmm14, 256+32(%rsp)
- movdqa %xmm15, 256+48(%rsp)
-
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor 128+64(%rsp), %xmm8
- pxor 128+80(%rsp), %xmm9
- pxor 128+96(%rsp), %xmm10
- pxor 128+112(%rsp), %xmm11
- pxor 256+64(%rsp), %xmm12
- pxor 256+80(%rsp), %xmm13
- pxor 256+96(%rsp), %xmm14
- pxor 256+112(%rsp), %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, 128+64(%rsp)
- movdqa %xmm9, 128+80(%rsp)
- movdqa %xmm10, 128+96(%rsp)
- movdqa %xmm11, 128+112(%rsp)
- movdqa %xmm12, 256+64(%rsp)
- movdqa %xmm13, 256+80(%rsp)
- movdqa %xmm14, 256+96(%rsp)
- movdqa %xmm15, 256+112(%rsp)
- xmm_salsa8_core_3way
- paddd 64(%rsp), %xmm0
- paddd 80(%rsp), %xmm1
- paddd 96(%rsp), %xmm2
- paddd 112(%rsp), %xmm3
- paddd 128+64(%rsp), %xmm8
- paddd 128+80(%rsp), %xmm9
- paddd 128+96(%rsp), %xmm10
- paddd 128+112(%rsp), %xmm11
- paddd 256+64(%rsp), %xmm12
- paddd 256+80(%rsp), %xmm13
- paddd 256+96(%rsp), %xmm14
- paddd 256+112(%rsp), %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, 128+64(%rsp)
- movdqa %xmm9, 128+80(%rsp)
- movdqa %xmm10, 128+96(%rsp)
- movdqa %xmm11, 128+112(%rsp)
- movdqa %xmm12, 256+64(%rsp)
- movdqa %xmm13, 256+80(%rsp)
- movdqa %xmm14, 256+96(%rsp)
- movdqa %xmm15, 256+112(%rsp)
-
- addq $3*128, %rbp
- cmpq %rax, %rbp
- jne scrypt_core_3way_loop1
-
- movq $1024, %rax
-scrypt_core_3way_loop2:
- movl 64(%rsp), %ebp
- andl $1023, %ebp
- leal (%ebp, %ebp, 2), %ebp
- shll $7, %ebp
- movl 128+64(%rsp), %ebx
- andl $1023, %ebx
- leal (%ebx, %ebx, 2), %ebx
- shll $7, %ebx
- addl $128, %ebx
- movl 256+64(%rsp), %r8d
- andl $1023, %r8d
- leal (%r8d, %r8d, 2), %r8d
- shll $7, %r8d
- addl $256, %r8d
- movdqa 0(%rsp), %xmm0
- movdqa 16(%rsp), %xmm1
- movdqa 32(%rsp), %xmm2
- movdqa 48(%rsp), %xmm3
- movdqa 128+0(%rsp), %xmm8
- movdqa 128+16(%rsp), %xmm9
- movdqa 128+32(%rsp), %xmm10
- movdqa 128+48(%rsp), %xmm11
- movdqa 256+0(%rsp), %xmm12
- movdqa 256+16(%rsp), %xmm13
- movdqa 256+32(%rsp), %xmm14
- movdqa 256+48(%rsp), %xmm15
- pxor 0(%rcx, %rbp), %xmm0
- pxor 16(%rcx, %rbp), %xmm1
- pxor 32(%rcx, %rbp), %xmm2
- pxor 48(%rcx, %rbp), %xmm3
- pxor 0(%rcx, %rbx), %xmm8
- pxor 16(%rcx, %rbx), %xmm9
- pxor 32(%rcx, %rbx), %xmm10
- pxor 48(%rcx, %rbx), %xmm11
- pxor 0(%rcx, %r8), %xmm12
- pxor 16(%rcx, %r8), %xmm13
- pxor 32(%rcx, %r8), %xmm14
- pxor 48(%rcx, %r8), %xmm15
-
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor 128+64(%rsp), %xmm8
- pxor 128+80(%rsp), %xmm9
- pxor 128+96(%rsp), %xmm10
- pxor 128+112(%rsp), %xmm11
- pxor 256+64(%rsp), %xmm12
- pxor 256+80(%rsp), %xmm13
- pxor 256+96(%rsp), %xmm14
- pxor 256+112(%rsp), %xmm15
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128+0(%rsp)
- movdqa %xmm9, 128+16(%rsp)
- movdqa %xmm10, 128+32(%rsp)
- movdqa %xmm11, 128+48(%rsp)
- movdqa %xmm12, 256+0(%rsp)
- movdqa %xmm13, 256+16(%rsp)
- movdqa %xmm14, 256+32(%rsp)
- movdqa %xmm15, 256+48(%rsp)
- xmm_salsa8_core_3way
- paddd 0(%rsp), %xmm0
- paddd 16(%rsp), %xmm1
- paddd 32(%rsp), %xmm2
- paddd 48(%rsp), %xmm3
- paddd 128+0(%rsp), %xmm8
- paddd 128+16(%rsp), %xmm9
- paddd 128+32(%rsp), %xmm10
- paddd 128+48(%rsp), %xmm11
- paddd 256+0(%rsp), %xmm12
- paddd 256+16(%rsp), %xmm13
- paddd 256+32(%rsp), %xmm14
- paddd 256+48(%rsp), %xmm15
- movdqa %xmm0, 0(%rsp)
- movdqa %xmm1, 16(%rsp)
- movdqa %xmm2, 32(%rsp)
- movdqa %xmm3, 48(%rsp)
- movdqa %xmm8, 128+0(%rsp)
- movdqa %xmm9, 128+16(%rsp)
- movdqa %xmm10, 128+32(%rsp)
- movdqa %xmm11, 128+48(%rsp)
- movdqa %xmm12, 256+0(%rsp)
- movdqa %xmm13, 256+16(%rsp)
- movdqa %xmm14, 256+32(%rsp)
- movdqa %xmm15, 256+48(%rsp)
-
- pxor 64(%rcx, %rbp), %xmm0
- pxor 80(%rcx, %rbp), %xmm1
- pxor 96(%rcx, %rbp), %xmm2
- pxor 112(%rcx, %rbp), %xmm3
- pxor 64(%rcx, %rbx), %xmm8
- pxor 80(%rcx, %rbx), %xmm9
- pxor 96(%rcx, %rbx), %xmm10
- pxor 112(%rcx, %rbx), %xmm11
- pxor 64(%rcx, %r8), %xmm12
- pxor 80(%rcx, %r8), %xmm13
- pxor 96(%rcx, %r8), %xmm14
- pxor 112(%rcx, %r8), %xmm15
- pxor 64(%rsp), %xmm0
- pxor 80(%rsp), %xmm1
- pxor 96(%rsp), %xmm2
- pxor 112(%rsp), %xmm3
- pxor 128+64(%rsp), %xmm8
- pxor 128+80(%rsp), %xmm9
- pxor 128+96(%rsp), %xmm10
- pxor 128+112(%rsp), %xmm11
- pxor 256+64(%rsp), %xmm12
- pxor 256+80(%rsp), %xmm13
- pxor 256+96(%rsp), %xmm14
- pxor 256+112(%rsp), %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, 128+64(%rsp)
- movdqa %xmm9, 128+80(%rsp)
- movdqa %xmm10, 128+96(%rsp)
- movdqa %xmm11, 128+112(%rsp)
- movdqa %xmm12, 256+64(%rsp)
- movdqa %xmm13, 256+80(%rsp)
- movdqa %xmm14, 256+96(%rsp)
- movdqa %xmm15, 256+112(%rsp)
- xmm_salsa8_core_3way
- paddd 64(%rsp), %xmm0
- paddd 80(%rsp), %xmm1
- paddd 96(%rsp), %xmm2
- paddd 112(%rsp), %xmm3
- paddd 128+64(%rsp), %xmm8
- paddd 128+80(%rsp), %xmm9
- paddd 128+96(%rsp), %xmm10
- paddd 128+112(%rsp), %xmm11
- paddd 256+64(%rsp), %xmm12
- paddd 256+80(%rsp), %xmm13
- paddd 256+96(%rsp), %xmm14
- paddd 256+112(%rsp), %xmm15
- movdqa %xmm0, 64(%rsp)
- movdqa %xmm1, 80(%rsp)
- movdqa %xmm2, 96(%rsp)
- movdqa %xmm3, 112(%rsp)
- movdqa %xmm8, 128+64(%rsp)
- movdqa %xmm9, 128+80(%rsp)
- movdqa %xmm10, 128+96(%rsp)
- movdqa %xmm11, 128+112(%rsp)
- movdqa %xmm12, 256+64(%rsp)
- movdqa %xmm13, 256+80(%rsp)
- movdqa %xmm14, 256+96(%rsp)
- movdqa %xmm15, 256+112(%rsp)
-
- subq $1, %rax
- ja scrypt_core_3way_loop2
-
- scrypt_shuffle %rsp, 0, %rdi, 0
- scrypt_shuffle %rsp, 64, %rdi, 64
- scrypt_shuffle %rsp, 128, %rsi, 0
- scrypt_shuffle %rsp, 192, %rsi, 64
- scrypt_shuffle %rsp, 256, %rdx, 0
- scrypt_shuffle %rsp, 320, %rdx, 64
-
addq $392, %rsp
#if defined(WIN64)
popq %rsi