-# Copyright 2011-2012 pooler@litecoinpool.org\r
-# All rights reserved.\r
-#\r
-# Redistribution and use in source and binary forms, with or without\r
-# modification, are permitted provided that the following conditions\r
-# are met:\r
-# 1. Redistributions of source code must retain the above copyright\r
-# notice, this list of conditions and the following disclaimer.\r
-# 2. Redistributions in binary form must reproduce the above copyright\r
-# notice, this list of conditions and the following disclaimer in the\r
-# documentation and/or other materials provided with the distribution.\r
-#\r
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\r
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\r
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\r
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\r
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\r
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\r
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\r
-# SUCH DAMAGE.\r
-\r
-#if defined(__linux__) && defined(__ELF__)\r
- .section .note.GNU-stack,"",%progbits\r
-#endif\r
-\r
-#if defined(__x86_64__)\r
-\r
-.macro scrypt_shuffle src, so, dest, do\r
- movl \so+60(\src), %r8d\r
- movl \so+44(\src), %r9d\r
- movl \so+28(\src), %r10d\r
- movl \so+12(\src), %r11d\r
- movl %r8d, \do+12(\dest)\r
- movl %r9d, \do+28(\dest)\r
- movl %r10d, \do+44(\dest)\r
- movl %r11d, \do+60(\dest)\r
- movl \so+40(\src), %r8d\r
- movl \so+8(\src), %r9d\r
- movl \so+48(\src), %r10d\r
- movl \so+16(\src), %r11d\r
- movl %r8d, \do+8(\dest)\r
- movl %r9d, \do+40(\dest)\r
- movl %r10d, \do+16(\dest)\r
- movl %r11d, \do+48(\dest)\r
- movl \so+20(\src), %r8d\r
- movl \so+4(\src), %r9d\r
- movl \so+52(\src), %r10d\r
- movl \so+36(\src), %r11d\r
- movl %r8d, \do+4(\dest)\r
- movl %r9d, \do+20(\dest)\r
- movl %r10d, \do+36(\dest)\r
- movl %r11d, \do+52(\dest)\r
- movl \so+0(\src), %r8d\r
- movl \so+24(\src), %r9d\r
- movl \so+32(\src), %r10d\r
- movl \so+56(\src), %r11d\r
- movl %r8d, \do+0(\dest)\r
- movl %r9d, \do+24(\dest)\r
- movl %r10d, \do+32(\dest)\r
- movl %r11d, \do+56(\dest)\r
-.endm\r
-\r
-.macro gen_salsa8_core_doubleround\r
- movq 72(%rsp), %r15\r
-\r
- leaq (%r14, %rdx), %rbp\r
- roll $7, %ebp\r
- xorq %rbp, %r9\r
- leaq (%rdi, %r15), %rbp\r
- roll $7, %ebp\r
- xorq %rbp, %r10\r
- leaq (%rdx, %r9), %rbp\r
- roll $9, %ebp\r
- xorq %rbp, %r11\r
- leaq (%r15, %r10), %rbp\r
- roll $9, %ebp\r
- xorq %rbp, %r13\r
- leaq (%r9, %r11), %rbp\r
- roll $13, %ebp\r
- xorq %rbp, %r14\r
- leaq (%r10, %r13), %rbp\r
- roll $13, %ebp\r
- xorq %rbp, %rdi\r
- leaq (%r11, %r14), %rbp\r
- roll $18, %ebp\r
- xorq %rbp, %rdx\r
- leaq (%r13, %rdi), %rbp\r
- roll $18, %ebp\r
- xorq %rbp, %r15\r
-\r
- movq 48(%rsp), %rbp\r
- movq %r15, 72(%rsp)\r
-\r
- leaq (%rax, %rbp), %r15\r
- roll $7, %r15d\r
- xorq %r15, %rbx\r
- leaq (%rbp, %rbx), %r15\r
- roll $9, %r15d\r
- xorq %r15, %rcx\r
- leaq (%rbx, %rcx), %r15\r
- roll $13, %r15d\r
- xorq %r15, %rax\r
- leaq (%rcx, %rax), %r15\r
- roll $18, %r15d\r
- xorq %r15, %rbp\r
-\r
- movq 88(%rsp), %r15\r
- movq %rbp, 48(%rsp)\r
-\r
- leaq (%r12, %r15), %rbp\r
- roll $7, %ebp\r
- xorq %rbp, %rsi\r
- leaq (%r15, %rsi), %rbp\r
- roll $9, %ebp\r
- xorq %rbp, %r8\r
- leaq (%rsi, %r8), %rbp\r
- roll $13, %ebp\r
- xorq %rbp, %r12\r
- leaq (%r8, %r12), %rbp\r
- roll $18, %ebp\r
- xorq %rbp, %r15\r
-\r
- movq %r15, 88(%rsp)\r
- movq 72(%rsp), %r15\r
-\r
- leaq (%rsi, %rdx), %rbp\r
- roll $7, %ebp\r
- xorq %rbp, %rdi\r
- leaq (%r9, %r15), %rbp\r
- roll $7, %ebp\r
- xorq %rbp, %rax\r
- leaq (%rdx, %rdi), %rbp\r
- roll $9, %ebp\r
- xorq %rbp, %rcx\r
- leaq (%r15, %rax), %rbp\r
- roll $9, %ebp\r
- xorq %rbp, %r8\r
- leaq (%rdi, %rcx), %rbp\r
- roll $13, %ebp\r
- xorq %rbp, %rsi\r
- leaq (%rax, %r8), %rbp\r
- roll $13, %ebp\r
- xorq %rbp, %r9\r
- leaq (%rcx, %rsi), %rbp\r
- roll $18, %ebp\r
- xorq %rbp, %rdx\r
- leaq (%r8, %r9), %rbp\r
- roll $18, %ebp\r
- xorq %rbp, %r15\r
-\r
- movq 48(%rsp), %rbp\r
- movq %r15, 72(%rsp)\r
-\r
- leaq (%r10, %rbp), %r15\r
- roll $7, %r15d\r
- xorq %r15, %r12\r
- leaq (%rbp, %r12), %r15\r
- roll $9, %r15d\r
- xorq %r15, %r11\r
- leaq (%r12, %r11), %r15\r
- roll $13, %r15d\r
- xorq %r15, %r10\r
- leaq (%r11, %r10), %r15\r
- roll $18, %r15d\r
- xorq %r15, %rbp\r
-\r
- movq 88(%rsp), %r15\r
- movq %rbp, 48(%rsp)\r
-\r
- leaq (%rbx, %r15), %rbp\r
- roll $7, %ebp\r
- xorq %rbp, %r14\r
- leaq (%r15, %r14), %rbp\r
- roll $9, %ebp\r
- xorq %rbp, %r13\r
- leaq (%r14, %r13), %rbp\r
- roll $13, %ebp\r
- xorq %rbp, %rbx\r
- leaq (%r13, %rbx), %rbp\r
- roll $18, %ebp\r
- xorq %rbp, %r15\r
-\r
- movq %r15, 88(%rsp)\r
-.endm\r
-\r
- .text\r
- .align 32\r
-gen_salsa8_core:\r
- # 0: %rdx, %rdi, %rcx, %rsi\r
- movq 8(%rsp), %rdi\r
- movq %rdi, %rdx\r
- shrq $32, %rdi\r
- movq 16(%rsp), %rsi\r
- movq %rsi, %rcx\r
- shrq $32, %rsi\r
- # 1: %r9, 72(%rsp), %rax, %r8\r
- movq 24(%rsp), %r8\r
- movq %r8, %r9\r
- shrq $32, %r8\r
- movq %r8, 72(%rsp)\r
- movq 32(%rsp), %r8\r
- movq %r8, %rax\r
- shrq $32, %r8\r
- # 2: %r11, %r10, 48(%rsp), %r12\r
- movq 40(%rsp), %r10\r
- movq %r10, %r11\r
- shrq $32, %r10\r
- movq 48(%rsp), %r12\r
- #movq %r12, %r13\r
- #movq %r13, 48(%rsp)\r
- shrq $32, %r12\r
- # 3: %r14, %r13, %rbx, 88(%rsp)\r
- movq 56(%rsp), %r13\r
- movq %r13, %r14\r
- shrq $32, %r13\r
- movq 64(%rsp), %r15\r
- movq %r15, %rbx\r
- shrq $32, %r15\r
- movq %r15, 88(%rsp)\r
-\r
- gen_salsa8_core_doubleround\r
- gen_salsa8_core_doubleround\r
- gen_salsa8_core_doubleround\r
- gen_salsa8_core_doubleround\r
-\r
- movl %edx, %edx\r
- shlq $32, %rdi\r
- addq %rdi, %rdx\r
- movd %rdx, %xmm0\r
-\r
- movl %ecx, %ecx\r
- shlq $32, %rsi\r
- addq %rsi, %rcx\r
- movd %rcx, %xmm4\r
-\r
- movq 72(%rsp), %rdi\r
- movl %r9d, %r9d\r
- shlq $32, %rdi\r
- addq %rdi, %r9\r
- movd %r9, %xmm1\r
-\r
- movl %eax, %eax\r
- shlq $32, %r8\r
- addq %r8, %rax\r
- movd %rax, %xmm5\r
-\r
- movl %r11d, %r11d\r
- shlq $32, %r10\r
- addq %r10, %r11\r
- movd %r11, %xmm2\r
-\r
- movl 48(%rsp), %r8d\r
- shlq $32, %r12\r
- addq %r12, %r8\r
- movd %r8, %xmm6\r
-\r
- movl %r14d, %r14d\r
- shlq $32, %r13\r
- addq %r13, %r14\r
- movd %r14, %xmm3\r
-\r
- movq 88(%rsp), %rdi\r
- movl %ebx, %ebx\r
- shlq $32, %rdi\r
- addq %rdi, %rbx\r
- movd %rbx, %xmm7\r
-\r
- punpcklqdq %xmm4, %xmm0\r
- punpcklqdq %xmm5, %xmm1\r
- punpcklqdq %xmm6, %xmm2\r
- punpcklqdq %xmm7, %xmm3\r
-\r
- #movq %rdx, 8(%rsp)\r
- #movq %rcx, 16(%rsp)\r
- #movq %r9, 24(%rsp)\r
- #movq %rax, 32(%rsp)\r
- #movq %r11, 40(%rsp)\r
- #movq %r8, 48(%rsp)\r
- #movq %r14, 56(%rsp)\r
- #movq %rbx, 64(%rsp)\r
-\r
- ret\r
-\r
-\r
- .text\r
- .align 32\r
- .globl scrypt_core\r
- .globl _scrypt_core\r
-scrypt_core:\r
-_scrypt_core:\r
- pushq %rbx\r
- pushq %rbp\r
- pushq %r12\r
- pushq %r13\r
- pushq %r14\r
- pushq %r15\r
-#if defined(WIN64)\r
- subq $176, %rsp\r
- movdqa %xmm6, 8(%rsp)\r
- movdqa %xmm7, 24(%rsp)\r
- movdqa %xmm8, 40(%rsp)\r
- movdqa %xmm9, 56(%rsp)\r
- movdqa %xmm10, 72(%rsp)\r
- movdqa %xmm11, 88(%rsp)\r
- movdqa %xmm12, 104(%rsp)\r
- movdqa %xmm13, 120(%rsp)\r
- movdqa %xmm14, 136(%rsp)\r
- movdqa %xmm15, 152(%rsp)\r
- pushq %rdi\r
- pushq %rsi\r
- movq %rcx, %rdi\r
- movq %rdx, %rsi\r
-#endif\r
-\r
-.macro scrypt_core_cleanup\r
-#if defined(WIN64)\r
- popq %rsi\r
- popq %rdi\r
- movdqa 8(%rsp), %xmm6\r
- movdqa 24(%rsp), %xmm7\r
- movdqa 40(%rsp), %xmm8\r
- movdqa 56(%rsp), %xmm9\r
- movdqa 72(%rsp), %xmm10\r
- movdqa 88(%rsp), %xmm11\r
- movdqa 104(%rsp), %xmm12\r
- movdqa 120(%rsp), %xmm13\r
- movdqa 136(%rsp), %xmm14\r
- movdqa 152(%rsp), %xmm15\r
- addq $176, %rsp\r
-#endif\r
- popq %r15\r
- popq %r14\r
- popq %r13\r
- popq %r12\r
- popq %rbp\r
- popq %rbx\r
-.endm\r
-\r
- # GenuineIntel processors have fast SIMD\r
- xorl %eax, %eax\r
- cpuid\r
- cmpl $0x6c65746e, %ecx\r
- jne gen_scrypt_core\r
- cmpl $0x49656e69, %edx\r
- jne gen_scrypt_core\r
- cmpl $0x756e6547, %ebx\r
- je xmm_scrypt_core\r
-\r
-gen_scrypt_core:\r
- subq $136, %rsp\r
- movdqa 0(%rdi), %xmm8\r
- movdqa 16(%rdi), %xmm9\r
- movdqa 32(%rdi), %xmm10\r
- movdqa 48(%rdi), %xmm11\r
- movdqa 64(%rdi), %xmm12\r
- movdqa 80(%rdi), %xmm13\r
- movdqa 96(%rdi), %xmm14\r
- movdqa 112(%rdi), %xmm15\r
-\r
- leaq 131072(%rsi), %rcx\r
- movq %rdi, 104(%rsp)\r
- movq %rsi, 112(%rsp)\r
- movq %rcx, 120(%rsp)\r
-gen_scrypt_core_loop1:\r
- movdqa %xmm8, 0(%rsi)\r
- movdqa %xmm9, 16(%rsi)\r
- movdqa %xmm10, 32(%rsi)\r
- movdqa %xmm11, 48(%rsi)\r
- movdqa %xmm12, 64(%rsi)\r
- movdqa %xmm13, 80(%rsi)\r
- movdqa %xmm14, 96(%rsi)\r
- movdqa %xmm15, 112(%rsi)\r
-\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm8, 0(%rsp)\r
- movdqa %xmm9, 16(%rsp)\r
- movdqa %xmm10, 32(%rsp)\r
- movdqa %xmm11, 48(%rsp)\r
- movq %rsi, 128(%rsp)\r
- call gen_salsa8_core\r
- paddd %xmm0, %xmm8\r
- paddd %xmm1, %xmm9\r
- paddd %xmm2, %xmm10\r
- paddd %xmm3, %xmm11\r
-\r
- pxor %xmm8, %xmm12\r
- pxor %xmm9, %xmm13\r
- pxor %xmm10, %xmm14\r
- pxor %xmm11, %xmm15\r
- movdqa %xmm12, 0(%rsp)\r
- movdqa %xmm13, 16(%rsp)\r
- movdqa %xmm14, 32(%rsp)\r
- movdqa %xmm15, 48(%rsp)\r
- call gen_salsa8_core\r
- movq 128(%rsp), %rsi\r
- paddd %xmm0, %xmm12\r
- paddd %xmm1, %xmm13\r
- paddd %xmm2, %xmm14\r
- paddd %xmm3, %xmm15\r
-\r
- addq $128, %rsi\r
- movq 120(%rsp), %rcx\r
- cmpq %rcx, %rsi\r
- jne gen_scrypt_core_loop1\r
-\r
- movq $1024, %rcx\r
-gen_scrypt_core_loop2:\r
- movq 112(%rsp), %rsi\r
- movd %xmm12, %edx\r
- andl $1023, %edx\r
- shll $7, %edx\r
- movdqa 0(%rsi, %rdx), %xmm0\r
- movdqa 16(%rsi, %rdx), %xmm1\r
- movdqa 32(%rsi, %rdx), %xmm2\r
- movdqa 48(%rsi, %rdx), %xmm3\r
- movdqa 64(%rsi, %rdx), %xmm4\r
- movdqa 80(%rsi, %rdx), %xmm5\r
- movdqa 96(%rsi, %rdx), %xmm6\r
- movdqa 112(%rsi, %rdx), %xmm7\r
- pxor %xmm0, %xmm8\r
- pxor %xmm1, %xmm9\r
- pxor %xmm2, %xmm10\r
- pxor %xmm3, %xmm11\r
- pxor %xmm4, %xmm12\r
- pxor %xmm5, %xmm13\r
- pxor %xmm6, %xmm14\r
- pxor %xmm7, %xmm15\r
-\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm8, 0(%rsp)\r
- movdqa %xmm9, 16(%rsp)\r
- movdqa %xmm10, 32(%rsp)\r
- movdqa %xmm11, 48(%rsp)\r
- movq %rcx, 128(%rsp)\r
- call gen_salsa8_core\r
- paddd %xmm0, %xmm8\r
- paddd %xmm1, %xmm9\r
- paddd %xmm2, %xmm10\r
- paddd %xmm3, %xmm11\r
-\r
- pxor %xmm8, %xmm12\r
- pxor %xmm9, %xmm13\r
- pxor %xmm10, %xmm14\r
- pxor %xmm11, %xmm15\r
- movdqa %xmm12, 0(%rsp)\r
- movdqa %xmm13, 16(%rsp)\r
- movdqa %xmm14, 32(%rsp)\r
- movdqa %xmm15, 48(%rsp)\r
- call gen_salsa8_core\r
- movq 128(%rsp), %rcx\r
- paddd %xmm0, %xmm12\r
- paddd %xmm1, %xmm13\r
- paddd %xmm2, %xmm14\r
- paddd %xmm3, %xmm15\r
-\r
- subq $1, %rcx\r
- ja gen_scrypt_core_loop2\r
-\r
- movq 104(%rsp), %rdi\r
- movdqa %xmm8, 0(%rdi)\r
- movdqa %xmm9, 16(%rdi)\r
- movdqa %xmm10, 32(%rdi)\r
- movdqa %xmm11, 48(%rdi)\r
- movdqa %xmm12, 64(%rdi)\r
- movdqa %xmm13, 80(%rdi)\r
- movdqa %xmm14, 96(%rdi)\r
- movdqa %xmm15, 112(%rdi)\r
-\r
- addq $136, %rsp\r
- scrypt_core_cleanup\r
- ret\r
-\r
-\r
-.macro xmm_salsa8_core_doubleround\r
- movdqa %xmm1, %xmm4\r
- paddd %xmm0, %xmm4\r
- movdqa %xmm4, %xmm5\r
- pslld $7, %xmm4\r
- psrld $25, %xmm5\r
- pxor %xmm4, %xmm3\r
- pxor %xmm5, %xmm3\r
- movdqa %xmm0, %xmm4\r
-\r
- paddd %xmm3, %xmm4\r
- movdqa %xmm4, %xmm5\r
- pslld $9, %xmm4\r
- psrld $23, %xmm5\r
- pxor %xmm4, %xmm2\r
- movdqa %xmm3, %xmm4\r
- pshufd $0x93, %xmm3, %xmm3\r
- pxor %xmm5, %xmm2\r
-\r
- paddd %xmm2, %xmm4\r
- movdqa %xmm4, %xmm5\r
- pslld $13, %xmm4\r
- psrld $19, %xmm5\r
- pxor %xmm4, %xmm1\r
- movdqa %xmm2, %xmm4\r
- pshufd $0x4e, %xmm2, %xmm2\r
- pxor %xmm5, %xmm1\r
-\r
- paddd %xmm1, %xmm4\r
- movdqa %xmm4, %xmm5\r
- pslld $18, %xmm4\r
- psrld $14, %xmm5\r
- pxor %xmm4, %xmm0\r
- pshufd $0x39, %xmm1, %xmm1\r
- pxor %xmm5, %xmm0\r
- movdqa %xmm3, %xmm4\r
-\r
- paddd %xmm0, %xmm4\r
- movdqa %xmm4, %xmm5\r
- pslld $7, %xmm4\r
- psrld $25, %xmm5\r
- pxor %xmm4, %xmm1\r
- pxor %xmm5, %xmm1\r
- movdqa %xmm0, %xmm4\r
-\r
- paddd %xmm1, %xmm4\r
- movdqa %xmm4, %xmm5\r
- pslld $9, %xmm4\r
- psrld $23, %xmm5\r
- pxor %xmm4, %xmm2\r
- movdqa %xmm1, %xmm4\r
- pshufd $0x93, %xmm1, %xmm1\r
- pxor %xmm5, %xmm2\r
-\r
- paddd %xmm2, %xmm4\r
- movdqa %xmm4, %xmm5\r
- pslld $13, %xmm4\r
- psrld $19, %xmm5\r
- pxor %xmm4, %xmm3\r
- movdqa %xmm2, %xmm4\r
- pshufd $0x4e, %xmm2, %xmm2\r
- pxor %xmm5, %xmm3\r
-\r
- paddd %xmm3, %xmm4\r
- movdqa %xmm4, %xmm5\r
- pslld $18, %xmm4\r
- psrld $14, %xmm5\r
- pxor %xmm4, %xmm0\r
- pshufd $0x39, %xmm3, %xmm3\r
- pxor %xmm5, %xmm0\r
-.endm\r
-\r
-.macro xmm_salsa8_core\r
- xmm_salsa8_core_doubleround\r
- xmm_salsa8_core_doubleround\r
- xmm_salsa8_core_doubleround\r
- xmm_salsa8_core_doubleround\r
-.endm\r
-\r
- .align 32\r
-xmm_scrypt_core:\r
- # shuffle 1st block into %xmm8-%xmm11\r
- movl 60(%rdi), %edx\r
- movl 44(%rdi), %ecx\r
- movl 28(%rdi), %ebx\r
- movl 12(%rdi), %eax\r
- movd %edx, %xmm0\r
- movd %ecx, %xmm1\r
- movd %ebx, %xmm2\r
- movd %eax, %xmm3\r
- movl 40(%rdi), %ecx\r
- movl 24(%rdi), %ebx\r
- movl 8(%rdi), %eax\r
- movl 56(%rdi), %edx\r
- pshufd $0x93, %xmm0, %xmm0\r
- pshufd $0x93, %xmm1, %xmm1\r
- pshufd $0x93, %xmm2, %xmm2\r
- pshufd $0x93, %xmm3, %xmm3\r
- movd %ecx, %xmm4\r
- movd %ebx, %xmm5\r
- movd %eax, %xmm6\r
- movd %edx, %xmm7\r
- paddd %xmm4, %xmm0\r
- paddd %xmm5, %xmm1\r
- paddd %xmm6, %xmm2\r
- paddd %xmm7, %xmm3\r
- movl 20(%rdi), %ebx\r
- movl 4(%rdi), %eax\r
- movl 52(%rdi), %edx\r
- movl 36(%rdi), %ecx\r
- pshufd $0x93, %xmm0, %xmm0\r
- pshufd $0x93, %xmm1, %xmm1\r
- pshufd $0x93, %xmm2, %xmm2\r
- pshufd $0x93, %xmm3, %xmm3\r
- movd %ebx, %xmm4\r
- movd %eax, %xmm5\r
- movd %edx, %xmm6\r
- movd %ecx, %xmm7\r
- paddd %xmm4, %xmm0\r
- paddd %xmm5, %xmm1\r
- paddd %xmm6, %xmm2\r
- paddd %xmm7, %xmm3\r
- movl 0(%rdi), %eax\r
- movl 48(%rdi), %edx\r
- movl 32(%rdi), %ecx\r
- movl 16(%rdi), %ebx\r
- pshufd $0x93, %xmm0, %xmm0\r
- pshufd $0x93, %xmm1, %xmm1\r
- pshufd $0x93, %xmm2, %xmm2\r
- pshufd $0x93, %xmm3, %xmm3\r
- movd %eax, %xmm8\r
- movd %edx, %xmm9\r
- movd %ecx, %xmm10\r
- movd %ebx, %xmm11\r
- paddd %xmm0, %xmm8\r
- paddd %xmm1, %xmm9\r
- paddd %xmm2, %xmm10\r
- paddd %xmm3, %xmm11\r
-\r
- # shuffle 2nd block into %xmm12-%xmm15\r
- movl 124(%rdi), %edx\r
- movl 108(%rdi), %ecx\r
- movl 92(%rdi), %ebx\r
- movl 76(%rdi), %eax\r
- movd %edx, %xmm0\r
- movd %ecx, %xmm1\r
- movd %ebx, %xmm2\r
- movd %eax, %xmm3\r
- movl 104(%rdi), %ecx\r
- movl 88(%rdi), %ebx\r
- movl 72(%rdi), %eax\r
- movl 120(%rdi), %edx\r
- pshufd $0x93, %xmm0, %xmm0\r
- pshufd $0x93, %xmm1, %xmm1\r
- pshufd $0x93, %xmm2, %xmm2\r
- pshufd $0x93, %xmm3, %xmm3\r
- movd %ecx, %xmm4\r
- movd %ebx, %xmm5\r
- movd %eax, %xmm6\r
- movd %edx, %xmm7\r
- paddd %xmm4, %xmm0\r
- paddd %xmm5, %xmm1\r
- paddd %xmm6, %xmm2\r
- paddd %xmm7, %xmm3\r
- movl 84(%rdi), %ebx\r
- movl 68(%rdi), %eax\r
- movl 116(%rdi), %edx\r
- movl 100(%rdi), %ecx\r
- pshufd $0x93, %xmm0, %xmm0\r
- pshufd $0x93, %xmm1, %xmm1\r
- pshufd $0x93, %xmm2, %xmm2\r
- pshufd $0x93, %xmm3, %xmm3\r
- movd %ebx, %xmm4\r
- movd %eax, %xmm5\r
- movd %edx, %xmm6\r
- movd %ecx, %xmm7\r
- paddd %xmm4, %xmm0\r
- paddd %xmm5, %xmm1\r
- paddd %xmm6, %xmm2\r
- paddd %xmm7, %xmm3\r
- movl 64(%rdi), %eax\r
- movl 112(%rdi), %edx\r
- movl 96(%rdi), %ecx\r
- movl 80(%rdi), %ebx\r
- pshufd $0x93, %xmm0, %xmm0\r
- pshufd $0x93, %xmm1, %xmm1\r
- pshufd $0x93, %xmm2, %xmm2\r
- pshufd $0x93, %xmm3, %xmm3\r
- movd %eax, %xmm12\r
- movd %edx, %xmm13\r
- movd %ecx, %xmm14\r
- movd %ebx, %xmm15\r
- paddd %xmm0, %xmm12\r
- paddd %xmm1, %xmm13\r
- paddd %xmm2, %xmm14\r
- paddd %xmm3, %xmm15\r
-\r
- movq %rsi, %rdx\r
- leaq 131072(%rsi), %rcx\r
-xmm_scrypt_core_loop1:\r
- movdqa %xmm8, 0(%rdx)\r
- movdqa %xmm9, 16(%rdx)\r
- movdqa %xmm10, 32(%rdx)\r
- movdqa %xmm11, 48(%rdx)\r
- movdqa %xmm12, 64(%rdx)\r
- movdqa %xmm13, 80(%rdx)\r
- movdqa %xmm14, 96(%rdx)\r
- movdqa %xmm15, 112(%rdx)\r
-\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm8, %xmm0\r
- movdqa %xmm9, %xmm1\r
- movdqa %xmm10, %xmm2\r
- movdqa %xmm11, %xmm3\r
- xmm_salsa8_core\r
- paddd %xmm0, %xmm8\r
- paddd %xmm1, %xmm9\r
- paddd %xmm2, %xmm10\r
- paddd %xmm3, %xmm11\r
-\r
- pxor %xmm8, %xmm12\r
- pxor %xmm9, %xmm13\r
- pxor %xmm10, %xmm14\r
- pxor %xmm11, %xmm15\r
- movdqa %xmm12, %xmm0\r
- movdqa %xmm13, %xmm1\r
- movdqa %xmm14, %xmm2\r
- movdqa %xmm15, %xmm3\r
- xmm_salsa8_core\r
- paddd %xmm0, %xmm12\r
- paddd %xmm1, %xmm13\r
- paddd %xmm2, %xmm14\r
- paddd %xmm3, %xmm15\r
-\r
- addq $128, %rdx\r
- cmpq %rcx, %rdx\r
- jne xmm_scrypt_core_loop1\r
-\r
- movq $1024, %rcx\r
-xmm_scrypt_core_loop2:\r
- movd %xmm12, %edx\r
- andl $1023, %edx\r
- shll $7, %edx\r
- movdqa 0(%rsi, %rdx), %xmm0\r
- movdqa 16(%rsi, %rdx), %xmm1\r
- movdqa 32(%rsi, %rdx), %xmm2\r
- movdqa 48(%rsi, %rdx), %xmm3\r
- movdqa 64(%rsi, %rdx), %xmm4\r
- movdqa 80(%rsi, %rdx), %xmm5\r
- movdqa 96(%rsi, %rdx), %xmm6\r
- movdqa 112(%rsi, %rdx), %xmm7\r
- pxor %xmm0, %xmm8\r
- pxor %xmm1, %xmm9\r
- pxor %xmm2, %xmm10\r
- pxor %xmm3, %xmm11\r
- pxor %xmm4, %xmm12\r
- pxor %xmm5, %xmm13\r
- pxor %xmm6, %xmm14\r
- pxor %xmm7, %xmm15\r
-\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm8, %xmm0\r
- movdqa %xmm9, %xmm1\r
- movdqa %xmm10, %xmm2\r
- movdqa %xmm11, %xmm3\r
- xmm_salsa8_core\r
- paddd %xmm0, %xmm8\r
- paddd %xmm1, %xmm9\r
- paddd %xmm2, %xmm10\r
- paddd %xmm3, %xmm11\r
-\r
- pxor %xmm8, %xmm12\r
- pxor %xmm9, %xmm13\r
- pxor %xmm10, %xmm14\r
- pxor %xmm11, %xmm15\r
- movdqa %xmm12, %xmm0\r
- movdqa %xmm13, %xmm1\r
- movdqa %xmm14, %xmm2\r
- movdqa %xmm15, %xmm3\r
- xmm_salsa8_core\r
- paddd %xmm0, %xmm12\r
- paddd %xmm1, %xmm13\r
- paddd %xmm2, %xmm14\r
- paddd %xmm3, %xmm15\r
-\r
- subq $1, %rcx\r
- ja xmm_scrypt_core_loop2\r
-\r
- # re-shuffle 1st block back\r
- movd %xmm8, %eax\r
- movd %xmm9, %edx\r
- movd %xmm10, %ecx\r
- movd %xmm11, %ebx\r
- pshufd $0x39, %xmm8, %xmm8\r
- pshufd $0x39, %xmm9, %xmm9\r
- pshufd $0x39, %xmm10, %xmm10\r
- pshufd $0x39, %xmm11, %xmm11\r
- movl %eax, 0(%rdi)\r
- movl %edx, 48(%rdi)\r
- movl %ecx, 32(%rdi)\r
- movl %ebx, 16(%rdi)\r
- movd %xmm8, %ebx\r
- movd %xmm9, %eax\r
- movd %xmm10, %edx\r
- movd %xmm11, %ecx\r
- pshufd $0x39, %xmm8, %xmm8\r
- pshufd $0x39, %xmm9, %xmm9\r
- pshufd $0x39, %xmm10, %xmm10\r
- pshufd $0x39, %xmm11, %xmm11\r
- movl %ebx, 20(%rdi)\r
- movl %eax, 4(%rdi)\r
- movl %edx, 52(%rdi)\r
- movl %ecx, 36(%rdi)\r
- movd %xmm8, %ecx\r
- movd %xmm9, %ebx\r
- movd %xmm10, %eax\r
- movd %xmm11, %edx\r
- pshufd $0x39, %xmm8, %xmm8\r
- pshufd $0x39, %xmm9, %xmm9\r
- pshufd $0x39, %xmm10, %xmm10\r
- pshufd $0x39, %xmm11, %xmm11\r
- movl %ecx, 40(%rdi)\r
- movl %ebx, 24(%rdi)\r
- movl %eax, 8(%rdi)\r
- movl %edx, 56(%rdi)\r
- movd %xmm8, %edx\r
- movd %xmm9, %ecx\r
- movd %xmm10, %ebx\r
- movd %xmm11, %eax\r
- movl %edx, 60(%rdi)\r
- movl %ecx, 44(%rdi)\r
- movl %ebx, 28(%rdi)\r
- movl %eax, 12(%rdi)\r
-\r
- # re-shuffle 2nd block back\r
- movd %xmm12, %eax\r
- movd %xmm13, %edx\r
- movd %xmm14, %ecx\r
- movd %xmm15, %ebx\r
- pshufd $0x39, %xmm12, %xmm12\r
- pshufd $0x39, %xmm13, %xmm13\r
- pshufd $0x39, %xmm14, %xmm14\r
- pshufd $0x39, %xmm15, %xmm15\r
- movl %eax, 64(%rdi)\r
- movl %edx, 112(%rdi)\r
- movl %ecx, 96(%rdi)\r
- movl %ebx, 80(%rdi)\r
- movd %xmm12, %ebx\r
- movd %xmm13, %eax\r
- movd %xmm14, %edx\r
- movd %xmm15, %ecx\r
- pshufd $0x39, %xmm12, %xmm12\r
- pshufd $0x39, %xmm13, %xmm13\r
- pshufd $0x39, %xmm14, %xmm14\r
- pshufd $0x39, %xmm15, %xmm15\r
- movl %ebx, 84(%rdi)\r
- movl %eax, 68(%rdi)\r
- movl %edx, 116(%rdi)\r
- movl %ecx, 100(%rdi)\r
- movd %xmm12, %ecx\r
- movd %xmm13, %ebx\r
- movd %xmm14, %eax\r
- movd %xmm15, %edx\r
- pshufd $0x39, %xmm12, %xmm12\r
- pshufd $0x39, %xmm13, %xmm13\r
- pshufd $0x39, %xmm14, %xmm14\r
- pshufd $0x39, %xmm15, %xmm15\r
- movl %ecx, 104(%rdi)\r
- movl %ebx, 88(%rdi)\r
- movl %eax, 72(%rdi)\r
- movl %edx, 120(%rdi)\r
- movd %xmm12, %edx\r
- movd %xmm13, %ecx\r
- movd %xmm14, %ebx\r
- movd %xmm15, %eax\r
- movl %edx, 124(%rdi)\r
- movl %ecx, 108(%rdi)\r
- movl %ebx, 92(%rdi)\r
- movl %eax, 76(%rdi)\r
-\r
- scrypt_core_cleanup\r
- ret\r
-\r
-\r
- .text\r
- .align 32\r
- .globl scrypt_best_throughput\r
- .globl _scrypt_best_throughput\r
-scrypt_best_throughput:\r
-_scrypt_best_throughput:\r
- pushq %rbx\r
- xorq %rax, %rax\r
- cpuid\r
- movl $3, %eax\r
- cmpl $0x444d4163, %ecx\r
- jne scrypt_best_throughput_exit\r
- cmpl $0x69746e65, %edx\r
- jne scrypt_best_throughput_exit\r
- cmpl $0x68747541, %ebx\r
- jne scrypt_best_throughput_exit\r
- movl $1, %eax\r
- cpuid\r
- andl $0x0ff00000, %eax\r
- movl $3, %eax\r
- jnz scrypt_best_throughput_exit\r
- movl $1, %eax\r
-scrypt_best_throughput_exit:\r
- popq %rbx\r
- ret\r
-\r
-\r
-.macro xmm_salsa8_core_2way_doubleround\r
- movdqa %xmm1, %xmm4\r
- movdqa %xmm9, %xmm6\r
- paddd %xmm0, %xmm4\r
- paddd %xmm8, %xmm6\r
- movdqa %xmm4, %xmm5\r
- movdqa %xmm6, %xmm7\r
- pslld $7, %xmm4\r
- pslld $7, %xmm6\r
- psrld $25, %xmm5\r
- psrld $25, %xmm7\r
- pxor %xmm4, %xmm3\r
- pxor %xmm6, %xmm11\r
- pxor %xmm5, %xmm3\r
- pxor %xmm7, %xmm11\r
- movdqa %xmm0, %xmm4\r
- movdqa %xmm8, %xmm6\r
-\r
- paddd %xmm3, %xmm4\r
- paddd %xmm11, %xmm6\r
- movdqa %xmm4, %xmm5\r
- movdqa %xmm6, %xmm7\r
- pslld $9, %xmm4\r
- pslld $9, %xmm6\r
- psrld $23, %xmm5\r
- psrld $23, %xmm7\r
- pxor %xmm4, %xmm2\r
- pxor %xmm6, %xmm10\r
- movdqa %xmm3, %xmm4\r
- movdqa %xmm11, %xmm6\r
- pshufd $0x93, %xmm3, %xmm3\r
- pshufd $0x93, %xmm11, %xmm11\r
- pxor %xmm5, %xmm2\r
- pxor %xmm7, %xmm10\r
-\r
- paddd %xmm2, %xmm4\r
- paddd %xmm10, %xmm6\r
- movdqa %xmm4, %xmm5\r
- movdqa %xmm6, %xmm7\r
- pslld $13, %xmm4\r
- pslld $13, %xmm6\r
- psrld $19, %xmm5\r
- psrld $19, %xmm7\r
- pxor %xmm4, %xmm1\r
- pxor %xmm6, %xmm9\r
- movdqa %xmm2, %xmm4\r
- movdqa %xmm10, %xmm6\r
- pshufd $0x4e, %xmm2, %xmm2\r
- pshufd $0x4e, %xmm10, %xmm10\r
- pxor %xmm5, %xmm1\r
- pxor %xmm7, %xmm9\r
-\r
- paddd %xmm1, %xmm4\r
- paddd %xmm9, %xmm6\r
- movdqa %xmm4, %xmm5\r
- movdqa %xmm6, %xmm7\r
- pslld $18, %xmm4\r
- pslld $18, %xmm6\r
- psrld $14, %xmm5\r
- psrld $14, %xmm7\r
- pxor %xmm4, %xmm0\r
- pxor %xmm6, %xmm8\r
- pshufd $0x39, %xmm1, %xmm1\r
- pshufd $0x39, %xmm9, %xmm9\r
- pxor %xmm5, %xmm0\r
- pxor %xmm7, %xmm8\r
- movdqa %xmm3, %xmm4\r
- movdqa %xmm11, %xmm6\r
-\r
- paddd %xmm0, %xmm4\r
- paddd %xmm8, %xmm6\r
- movdqa %xmm4, %xmm5\r
- movdqa %xmm6, %xmm7\r
- pslld $7, %xmm4\r
- pslld $7, %xmm6\r
- psrld $25, %xmm5\r
- psrld $25, %xmm7\r
- pxor %xmm4, %xmm1\r
- pxor %xmm6, %xmm9\r
- pxor %xmm5, %xmm1\r
- pxor %xmm7, %xmm9\r
- movdqa %xmm0, %xmm4\r
- movdqa %xmm8, %xmm6\r
-\r
- paddd %xmm1, %xmm4\r
- paddd %xmm9, %xmm6\r
- movdqa %xmm4, %xmm5\r
- movdqa %xmm6, %xmm7\r
- pslld $9, %xmm4\r
- pslld $9, %xmm6\r
- psrld $23, %xmm5\r
- psrld $23, %xmm7\r
- pxor %xmm4, %xmm2\r
- pxor %xmm6, %xmm10\r
- movdqa %xmm1, %xmm4\r
- movdqa %xmm9, %xmm6\r
- pshufd $0x93, %xmm1, %xmm1\r
- pshufd $0x93, %xmm9, %xmm9\r
- pxor %xmm5, %xmm2\r
- pxor %xmm7, %xmm10\r
-\r
- paddd %xmm2, %xmm4\r
- paddd %xmm10, %xmm6\r
- movdqa %xmm4, %xmm5\r
- movdqa %xmm6, %xmm7\r
- pslld $13, %xmm4\r
- pslld $13, %xmm6\r
- psrld $19, %xmm5\r
- psrld $19, %xmm7\r
- pxor %xmm4, %xmm3\r
- pxor %xmm6, %xmm11\r
- movdqa %xmm2, %xmm4\r
- movdqa %xmm10, %xmm6\r
- pshufd $0x4e, %xmm2, %xmm2\r
- pshufd $0x4e, %xmm10, %xmm10\r
- pxor %xmm5, %xmm3\r
- pxor %xmm7, %xmm11\r
-\r
- paddd %xmm3, %xmm4\r
- paddd %xmm11, %xmm6\r
- movdqa %xmm4, %xmm5\r
- movdqa %xmm6, %xmm7\r
- pslld $18, %xmm4\r
- pslld $18, %xmm6\r
- psrld $14, %xmm5\r
- psrld $14, %xmm7\r
- pxor %xmm4, %xmm0\r
- pxor %xmm6, %xmm8\r
- pshufd $0x39, %xmm3, %xmm3\r
- pshufd $0x39, %xmm11, %xmm11\r
- pxor %xmm5, %xmm0\r
- pxor %xmm7, %xmm8\r
-.endm\r
-\r
-.macro xmm_salsa8_core_2way\r
- xmm_salsa8_core_2way_doubleround\r
- xmm_salsa8_core_2way_doubleround\r
- xmm_salsa8_core_2way_doubleround\r
- xmm_salsa8_core_2way_doubleround\r
-.endm\r
-\r
-\r
- .text\r
- .align 32\r
- .globl scrypt_core_2way\r
- .globl _scrypt_core_2way\r
-scrypt_core_2way:\r
-_scrypt_core_2way:\r
- pushq %rbx\r
- pushq %rbp\r
-#if defined(WIN64)\r
- subq $176, %rsp\r
- movdqa %xmm6, 8(%rsp)\r
- movdqa %xmm7, 24(%rsp)\r
- movdqa %xmm8, 40(%rsp)\r
- movdqa %xmm9, 56(%rsp)\r
- movdqa %xmm10, 72(%rsp)\r
- movdqa %xmm11, 88(%rsp)\r
- movdqa %xmm12, 104(%rsp)\r
- movdqa %xmm13, 120(%rsp)\r
- movdqa %xmm14, 136(%rsp)\r
- movdqa %xmm15, 152(%rsp)\r
- pushq %rdi\r
- pushq %rsi\r
- movq %rcx, %rdi\r
- movq %rdx, %rsi\r
- movq %r8, %rdx\r
-#endif\r
- subq $264, %rsp\r
-\r
- scrypt_shuffle %rdi, 0, %rsp, 0\r
- scrypt_shuffle %rdi, 64, %rsp, 64\r
- scrypt_shuffle %rsi, 0, %rsp, 128\r
- scrypt_shuffle %rsi, 64, %rsp, 192\r
-\r
- movdqa 192(%rsp), %xmm12\r
- movdqa 208(%rsp), %xmm13\r
- movdqa 224(%rsp), %xmm14\r
- movdqa 240(%rsp), %xmm15\r
-\r
- movq %rdx, %rbp\r
- leaq 262144(%rdx), %rcx\r
-scrypt_core_2way_loop1:\r
- movdqa 0(%rsp), %xmm0\r
- movdqa 16(%rsp), %xmm1\r
- movdqa 32(%rsp), %xmm2\r
- movdqa 48(%rsp), %xmm3\r
- movdqa 64(%rsp), %xmm4\r
- movdqa 80(%rsp), %xmm5\r
- movdqa 96(%rsp), %xmm6\r
- movdqa 112(%rsp), %xmm7\r
- movdqa 128(%rsp), %xmm8\r
- movdqa 144(%rsp), %xmm9\r
- movdqa 160(%rsp), %xmm10\r
- movdqa 176(%rsp), %xmm11\r
- pxor %xmm4, %xmm0\r
- pxor %xmm5, %xmm1\r
- pxor %xmm6, %xmm2\r
- pxor %xmm7, %xmm3\r
- movdqa %xmm0, 0(%rbp)\r
- movdqa %xmm1, 16(%rbp)\r
- movdqa %xmm2, 32(%rbp)\r
- movdqa %xmm3, 48(%rbp)\r
- movdqa %xmm4, 64(%rbp)\r
- movdqa %xmm5, 80(%rbp)\r
- movdqa %xmm6, 96(%rbp)\r
- movdqa %xmm7, 112(%rbp)\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm8, 128(%rbp)\r
- movdqa %xmm9, 144(%rbp)\r
- movdqa %xmm10, 160(%rbp)\r
- movdqa %xmm11, 176(%rbp)\r
- movdqa %xmm12, 192(%rbp)\r
- movdqa %xmm13, 208(%rbp)\r
- movdqa %xmm14, 224(%rbp)\r
- movdqa %xmm15, 240(%rbp)\r
-\r
- xmm_salsa8_core_2way\r
- paddd 0(%rbp), %xmm0\r
- paddd 16(%rbp), %xmm1\r
- paddd 32(%rbp), %xmm2\r
- paddd 48(%rbp), %xmm3\r
- paddd 128(%rbp), %xmm8\r
- paddd 144(%rbp), %xmm9\r
- paddd 160(%rbp), %xmm10\r
- paddd 176(%rbp), %xmm11\r
- movdqa %xmm0, 0(%rsp)\r
- movdqa %xmm1, 16(%rsp)\r
- movdqa %xmm2, 32(%rsp)\r
- movdqa %xmm3, 48(%rsp)\r
- movdqa %xmm8, 128(%rsp)\r
- movdqa %xmm9, 144(%rsp)\r
- movdqa %xmm10, 160(%rsp)\r
- movdqa %xmm11, 176(%rsp)\r
-\r
- pxor 64(%rsp), %xmm0\r
- pxor 80(%rsp), %xmm1\r
- pxor 96(%rsp), %xmm2\r
- pxor 112(%rsp), %xmm3\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm0, 64(%rsp)\r
- movdqa %xmm1, 80(%rsp)\r
- movdqa %xmm2, 96(%rsp)\r
- movdqa %xmm3, 112(%rsp)\r
- movdqa %xmm8, %xmm12\r
- movdqa %xmm9, %xmm13\r
- movdqa %xmm10, %xmm14\r
- movdqa %xmm11, %xmm15\r
- xmm_salsa8_core_2way\r
- paddd 64(%rsp), %xmm0\r
- paddd 80(%rsp), %xmm1\r
- paddd 96(%rsp), %xmm2\r
- paddd 112(%rsp), %xmm3\r
- paddd %xmm8, %xmm12\r
- paddd %xmm9, %xmm13\r
- paddd %xmm10, %xmm14\r
- paddd %xmm11, %xmm15\r
- movdqa %xmm0, 64(%rsp)\r
- movdqa %xmm1, 80(%rsp)\r
- movdqa %xmm2, 96(%rsp)\r
- movdqa %xmm3, 112(%rsp)\r
-\r
- addq $256, %rbp\r
- cmpq %rcx, %rbp\r
- jne scrypt_core_2way_loop1\r
-\r
- movq $1024, %rcx\r
-scrypt_core_2way_loop2:\r
- movdqa 0(%rsp), %xmm0\r
- movdqa 16(%rsp), %xmm1\r
- movdqa 32(%rsp), %xmm2\r
- movdqa 48(%rsp), %xmm3\r
- movdqa 64(%rsp), %xmm4\r
- movdqa 80(%rsp), %xmm5\r
- movdqa 96(%rsp), %xmm6\r
- movdqa 112(%rsp), %xmm7\r
- movdqa 128(%rsp), %xmm8\r
- movdqa 144(%rsp), %xmm9\r
- movdqa 160(%rsp), %xmm10\r
- movdqa 176(%rsp), %xmm11\r
- movd %xmm4, %ebp\r
- andl $1023, %ebp\r
- shll $8, %ebp\r
- pxor 0(%rdx, %rbp), %xmm0\r
- pxor 16(%rdx, %rbp), %xmm1\r
- pxor 32(%rdx, %rbp), %xmm2\r
- pxor 48(%rdx, %rbp), %xmm3\r
- movd %xmm12, %ebx\r
- andl $1023, %ebx\r
- shll $8, %ebx\r
- addl $128, %ebx\r
- pxor 0(%rdx, %rbx), %xmm8\r
- pxor 16(%rdx, %rbx), %xmm9\r
- pxor 32(%rdx, %rbx), %xmm10\r
- pxor 48(%rdx, %rbx), %xmm11\r
-\r
- pxor %xmm4, %xmm0\r
- pxor %xmm5, %xmm1\r
- pxor %xmm6, %xmm2\r
- pxor %xmm7, %xmm3\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm0, 0(%rsp)\r
- movdqa %xmm1, 16(%rsp)\r
- movdqa %xmm2, 32(%rsp)\r
- movdqa %xmm3, 48(%rsp)\r
- movdqa %xmm8, 128(%rsp)\r
- movdqa %xmm9, 144(%rsp)\r
- movdqa %xmm10, 160(%rsp)\r
- movdqa %xmm11, 176(%rsp)\r
- xmm_salsa8_core_2way\r
- paddd 0(%rsp), %xmm0\r
- paddd 16(%rsp), %xmm1\r
- paddd 32(%rsp), %xmm2\r
- paddd 48(%rsp), %xmm3\r
- paddd 128(%rsp), %xmm8\r
- paddd 144(%rsp), %xmm9\r
- paddd 160(%rsp), %xmm10\r
- paddd 176(%rsp), %xmm11\r
- movdqa %xmm0, 0(%rsp)\r
- movdqa %xmm1, 16(%rsp)\r
- movdqa %xmm2, 32(%rsp)\r
- movdqa %xmm3, 48(%rsp)\r
- movdqa %xmm8, 128(%rsp)\r
- movdqa %xmm9, 144(%rsp)\r
- movdqa %xmm10, 160(%rsp)\r
- movdqa %xmm11, 176(%rsp)\r
-\r
- pxor 64(%rdx, %rbp), %xmm0\r
- pxor 80(%rdx, %rbp), %xmm1\r
- pxor 96(%rdx, %rbp), %xmm2\r
- pxor 112(%rdx, %rbp), %xmm3\r
- pxor 64(%rdx, %rbx), %xmm8\r
- pxor 80(%rdx, %rbx), %xmm9\r
- pxor 96(%rdx, %rbx), %xmm10\r
- pxor 112(%rdx, %rbx), %xmm11\r
- pxor 64(%rsp), %xmm0\r
- pxor 80(%rsp), %xmm1\r
- pxor 96(%rsp), %xmm2\r
- pxor 112(%rsp), %xmm3\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm0, 64(%rsp)\r
- movdqa %xmm1, 80(%rsp)\r
- movdqa %xmm2, 96(%rsp)\r
- movdqa %xmm3, 112(%rsp)\r
- movdqa %xmm8, %xmm12\r
- movdqa %xmm9, %xmm13\r
- movdqa %xmm10, %xmm14\r
- movdqa %xmm11, %xmm15\r
- xmm_salsa8_core_2way\r
- paddd 64(%rsp), %xmm0\r
- paddd 80(%rsp), %xmm1\r
- paddd 96(%rsp), %xmm2\r
- paddd 112(%rsp), %xmm3\r
- paddd %xmm8, %xmm12\r
- paddd %xmm9, %xmm13\r
- paddd %xmm10, %xmm14\r
- paddd %xmm11, %xmm15\r
- movdqa %xmm0, 64(%rsp)\r
- movdqa %xmm1, 80(%rsp)\r
- movdqa %xmm2, 96(%rsp)\r
- movdqa %xmm3, 112(%rsp)\r
-\r
- subq $1, %rcx\r
- ja scrypt_core_2way_loop2\r
-\r
- movdqa %xmm12, 192(%rsp)\r
- movdqa %xmm13, 208(%rsp)\r
- movdqa %xmm14, 224(%rsp)\r
- movdqa %xmm15, 240(%rsp)\r
-\r
- scrypt_shuffle %rsp, 0, %rdi, 0\r
- scrypt_shuffle %rsp, 64, %rdi, 64\r
- scrypt_shuffle %rsp, 128, %rsi, 0\r
- scrypt_shuffle %rsp, 192, %rsi, 64\r
-\r
- addq $264, %rsp\r
-#if defined(WIN64)\r
- popq %rsi\r
- popq %rdi\r
- movdqa 8(%rsp), %xmm6\r
- movdqa 24(%rsp), %xmm7\r
- movdqa 40(%rsp), %xmm8\r
- movdqa 56(%rsp), %xmm9\r
- movdqa 72(%rsp), %xmm10\r
- movdqa 88(%rsp), %xmm11\r
- movdqa 104(%rsp), %xmm12\r
- movdqa 120(%rsp), %xmm13\r
- movdqa 136(%rsp), %xmm14\r
- movdqa 152(%rsp), %xmm15\r
- addq $176, %rsp\r
-#endif\r
- popq %rbp\r
- popq %rbx\r
- ret\r
-\r
-\r
-.macro xmm_salsa8_core_3way_doubleround\r
- movdqa %xmm1, %xmm4\r
- movdqa %xmm9, %xmm6\r
- movdqa %xmm13, %xmm7\r
- paddd %xmm0, %xmm4\r
- paddd %xmm8, %xmm6\r
- paddd %xmm12, %xmm7\r
- movdqa %xmm4, %xmm5\r
- pslld $7, %xmm4\r
- psrld $25, %xmm5\r
- pxor %xmm4, %xmm3\r
- pxor %xmm5, %xmm3\r
- movdqa %xmm0, %xmm4\r
- movdqa %xmm6, %xmm5\r
- pslld $7, %xmm6\r
- psrld $25, %xmm5\r
- pxor %xmm6, %xmm11\r
- pxor %xmm5, %xmm11\r
- movdqa %xmm8, %xmm6\r
- movdqa %xmm7, %xmm5\r
- pslld $7, %xmm7\r
- psrld $25, %xmm5\r
- pxor %xmm7, %xmm15\r
- pxor %xmm5, %xmm15\r
- movdqa %xmm12, %xmm7\r
-\r
- paddd %xmm3, %xmm4\r
- paddd %xmm11, %xmm6\r
- paddd %xmm15, %xmm7\r
- movdqa %xmm4, %xmm5\r
- pslld $9, %xmm4\r
- psrld $23, %xmm5\r
- pxor %xmm4, %xmm2\r
- movdqa %xmm3, %xmm4\r
- pshufd $0x93, %xmm3, %xmm3\r
- pxor %xmm5, %xmm2\r
- movdqa %xmm6, %xmm5\r
- pslld $9, %xmm6\r
- psrld $23, %xmm5\r
- pxor %xmm6, %xmm10\r
- movdqa %xmm11, %xmm6\r
- pshufd $0x93, %xmm11, %xmm11\r
- pxor %xmm5, %xmm10\r
- movdqa %xmm7, %xmm5\r
- pslld $9, %xmm7\r
- psrld $23, %xmm5\r
- pxor %xmm7, %xmm14\r
- movdqa %xmm15, %xmm7\r
- pshufd $0x93, %xmm15, %xmm15\r
- pxor %xmm5, %xmm14\r
-\r
- paddd %xmm2, %xmm4\r
- paddd %xmm10, %xmm6\r
- paddd %xmm14, %xmm7\r
- movdqa %xmm4, %xmm5\r
- pslld $13, %xmm4\r
- psrld $19, %xmm5\r
- pxor %xmm4, %xmm1\r
- movdqa %xmm2, %xmm4\r
- pshufd $0x4e, %xmm2, %xmm2\r
- pxor %xmm5, %xmm1\r
- movdqa %xmm6, %xmm5\r
- pslld $13, %xmm6\r
- psrld $19, %xmm5\r
- pxor %xmm6, %xmm9\r
- movdqa %xmm10, %xmm6\r
- pshufd $0x4e, %xmm10, %xmm10\r
- pxor %xmm5, %xmm9\r
- movdqa %xmm7, %xmm5\r
- pslld $13, %xmm7\r
- psrld $19, %xmm5\r
- pxor %xmm7, %xmm13\r
- movdqa %xmm14, %xmm7\r
- pshufd $0x4e, %xmm14, %xmm14\r
- pxor %xmm5, %xmm13\r
-\r
- paddd %xmm1, %xmm4\r
- paddd %xmm9, %xmm6\r
- paddd %xmm13, %xmm7\r
- movdqa %xmm4, %xmm5\r
- pslld $18, %xmm4\r
- psrld $14, %xmm5\r
- pxor %xmm4, %xmm0\r
- pshufd $0x39, %xmm1, %xmm1\r
- pxor %xmm5, %xmm0\r
- movdqa %xmm3, %xmm4\r
- movdqa %xmm6, %xmm5\r
- pslld $18, %xmm6\r
- psrld $14, %xmm5\r
- pxor %xmm6, %xmm8\r
- pshufd $0x39, %xmm9, %xmm9\r
- pxor %xmm5, %xmm8\r
- movdqa %xmm11, %xmm6\r
- movdqa %xmm7, %xmm5\r
- pslld $18, %xmm7\r
- psrld $14, %xmm5\r
- pxor %xmm7, %xmm12\r
- pshufd $0x39, %xmm13, %xmm13\r
- pxor %xmm5, %xmm12\r
- movdqa %xmm15, %xmm7\r
-\r
- paddd %xmm0, %xmm4\r
- paddd %xmm8, %xmm6\r
- paddd %xmm12, %xmm7\r
- movdqa %xmm4, %xmm5\r
- pslld $7, %xmm4\r
- psrld $25, %xmm5\r
- pxor %xmm4, %xmm1\r
- pxor %xmm5, %xmm1\r
- movdqa %xmm0, %xmm4\r
- movdqa %xmm6, %xmm5\r
- pslld $7, %xmm6\r
- psrld $25, %xmm5\r
- pxor %xmm6, %xmm9\r
- pxor %xmm5, %xmm9\r
- movdqa %xmm8, %xmm6\r
- movdqa %xmm7, %xmm5\r
- pslld $7, %xmm7\r
- psrld $25, %xmm5\r
- pxor %xmm7, %xmm13\r
- pxor %xmm5, %xmm13\r
- movdqa %xmm12, %xmm7\r
-\r
- paddd %xmm1, %xmm4\r
- paddd %xmm9, %xmm6\r
- paddd %xmm13, %xmm7\r
- movdqa %xmm4, %xmm5\r
- pslld $9, %xmm4\r
- psrld $23, %xmm5\r
- pxor %xmm4, %xmm2\r
- movdqa %xmm1, %xmm4\r
- pshufd $0x93, %xmm1, %xmm1\r
- pxor %xmm5, %xmm2\r
- movdqa %xmm6, %xmm5\r
- pslld $9, %xmm6\r
- psrld $23, %xmm5\r
- pxor %xmm6, %xmm10\r
- movdqa %xmm9, %xmm6\r
- pshufd $0x93, %xmm9, %xmm9\r
- pxor %xmm5, %xmm10\r
- movdqa %xmm7, %xmm5\r
- pslld $9, %xmm7\r
- psrld $23, %xmm5\r
- pxor %xmm7, %xmm14\r
- movdqa %xmm13, %xmm7\r
- pshufd $0x93, %xmm13, %xmm13\r
- pxor %xmm5, %xmm14\r
-\r
- paddd %xmm2, %xmm4\r
- paddd %xmm10, %xmm6\r
- paddd %xmm14, %xmm7\r
- movdqa %xmm4, %xmm5\r
- pslld $13, %xmm4\r
- psrld $19, %xmm5\r
- pxor %xmm4, %xmm3\r
- movdqa %xmm2, %xmm4\r
- pshufd $0x4e, %xmm2, %xmm2\r
- pxor %xmm5, %xmm3\r
- movdqa %xmm6, %xmm5\r
- pslld $13, %xmm6\r
- psrld $19, %xmm5\r
- pxor %xmm6, %xmm11\r
- movdqa %xmm10, %xmm6\r
- pshufd $0x4e, %xmm10, %xmm10\r
- pxor %xmm5, %xmm11\r
- movdqa %xmm7, %xmm5\r
- pslld $13, %xmm7\r
- psrld $19, %xmm5\r
- pxor %xmm7, %xmm15\r
- movdqa %xmm14, %xmm7\r
- pshufd $0x4e, %xmm14, %xmm14\r
- pxor %xmm5, %xmm15\r
-\r
- paddd %xmm3, %xmm4\r
- paddd %xmm11, %xmm6\r
- paddd %xmm15, %xmm7\r
- movdqa %xmm4, %xmm5\r
- pslld $18, %xmm4\r
- psrld $14, %xmm5\r
- pxor %xmm4, %xmm0\r
- pshufd $0x39, %xmm3, %xmm3\r
- pxor %xmm5, %xmm0\r
- movdqa %xmm6, %xmm5\r
- pslld $18, %xmm6\r
- psrld $14, %xmm5\r
- pxor %xmm6, %xmm8\r
- pshufd $0x39, %xmm11, %xmm11\r
- pxor %xmm5, %xmm8\r
- movdqa %xmm7, %xmm5\r
- pslld $18, %xmm7\r
- psrld $14, %xmm5\r
- pxor %xmm7, %xmm12\r
- pshufd $0x39, %xmm15, %xmm15\r
- pxor %xmm5, %xmm12\r
-.endm\r
-\r
-.macro xmm_salsa8_core_3way\r
- xmm_salsa8_core_3way_doubleround\r
- xmm_salsa8_core_3way_doubleround\r
- xmm_salsa8_core_3way_doubleround\r
- xmm_salsa8_core_3way_doubleround\r
-.endm\r
-\r
- .text\r
- .align 32\r
- .globl scrypt_core_3way\r
- .globl _scrypt_core_3way\r
-scrypt_core_3way:\r
-_scrypt_core_3way:\r
- pushq %rbx\r
- pushq %rbp\r
-#if defined(WIN64)\r
- subq $176, %rsp\r
- movdqa %xmm6, 8(%rsp)\r
- movdqa %xmm7, 24(%rsp)\r
- movdqa %xmm8, 40(%rsp)\r
- movdqa %xmm9, 56(%rsp)\r
- movdqa %xmm10, 72(%rsp)\r
- movdqa %xmm11, 88(%rsp)\r
- movdqa %xmm12, 104(%rsp)\r
- movdqa %xmm13, 120(%rsp)\r
- movdqa %xmm14, 136(%rsp)\r
- movdqa %xmm15, 152(%rsp)\r
- pushq %rdi\r
- pushq %rsi\r
- movq %rcx, %rdi\r
- movq %rdx, %rsi\r
- movq %r8, %rdx\r
- movq %r9, %rcx\r
-#endif\r
- subq $392, %rsp\r
-\r
- scrypt_shuffle %rdi, 0, %rsp, 0\r
- scrypt_shuffle %rdi, 64, %rsp, 64\r
- scrypt_shuffle %rsi, 0, %rsp, 128\r
- scrypt_shuffle %rsi, 64, %rsp, 192\r
- scrypt_shuffle %rdx, 0, %rsp, 256\r
- scrypt_shuffle %rdx, 64, %rsp, 320\r
-\r
- movdqa 128+64(%rsp), %xmm8\r
- movdqa 128+80(%rsp), %xmm9\r
- movdqa 128+96(%rsp), %xmm10\r
- movdqa 128+112(%rsp), %xmm11\r
-\r
- movq %rcx, %rbp\r
- leaq 3*131072(%rcx), %rax\r
-scrypt_core_3way_loop1:\r
- movdqa %xmm8, %xmm12\r
- movdqa %xmm9, %xmm13\r
- movdqa %xmm10, %xmm14\r
- movdqa %xmm11, %xmm15\r
- movdqa 0(%rsp), %xmm0\r
- movdqa 16(%rsp), %xmm1\r
- movdqa 32(%rsp), %xmm2\r
- movdqa 48(%rsp), %xmm3\r
- movdqa 64(%rsp), %xmm4\r
- movdqa 80(%rsp), %xmm5\r
- movdqa 96(%rsp), %xmm6\r
- movdqa 112(%rsp), %xmm7\r
- movdqa 128+0(%rsp), %xmm8\r
- movdqa 128+16(%rsp), %xmm9\r
- movdqa 128+32(%rsp), %xmm10\r
- movdqa 128+48(%rsp), %xmm11\r
- pxor %xmm4, %xmm0\r
- pxor %xmm5, %xmm1\r
- pxor %xmm6, %xmm2\r
- pxor %xmm7, %xmm3\r
- movdqa %xmm0, 0(%rbp)\r
- movdqa %xmm1, 16(%rbp)\r
- movdqa %xmm2, 32(%rbp)\r
- movdqa %xmm3, 48(%rbp)\r
- movdqa %xmm4, 64(%rbp)\r
- movdqa %xmm5, 80(%rbp)\r
- movdqa %xmm6, 96(%rbp)\r
- movdqa %xmm7, 112(%rbp)\r
- pxor %xmm12, %xmm8\r
- pxor %xmm13, %xmm9\r
- pxor %xmm14, %xmm10\r
- pxor %xmm15, %xmm11\r
- movdqa %xmm8, 128+0(%rbp)\r
- movdqa %xmm9, 128+16(%rbp)\r
- movdqa %xmm10, 128+32(%rbp)\r
- movdqa %xmm11, 128+48(%rbp)\r
- movdqa %xmm12, 128+64(%rbp)\r
- movdqa %xmm13, 128+80(%rbp)\r
- movdqa %xmm14, 128+96(%rbp)\r
- movdqa %xmm15, 128+112(%rbp)\r
- movdqa 256+0(%rsp), %xmm12\r
- movdqa 256+16(%rsp), %xmm13\r
- movdqa 256+32(%rsp), %xmm14\r
- movdqa 256+48(%rsp), %xmm15\r
- movdqa 256+64(%rsp), %xmm4\r
- movdqa 256+80(%rsp), %xmm5\r
- movdqa 256+96(%rsp), %xmm6\r
- movdqa 256+112(%rsp), %xmm7\r
- pxor %xmm4, %xmm12\r
- pxor %xmm5, %xmm13\r
- pxor %xmm6, %xmm14\r
- pxor %xmm7, %xmm15\r
- movdqa %xmm12, 256+0(%rbp)\r
- movdqa %xmm13, 256+16(%rbp)\r
- movdqa %xmm14, 256+32(%rbp)\r
- movdqa %xmm15, 256+48(%rbp)\r
- movdqa %xmm4, 256+64(%rbp)\r
- movdqa %xmm5, 256+80(%rbp)\r
- movdqa %xmm6, 256+96(%rbp)\r
- movdqa %xmm7, 256+112(%rbp)\r
-\r
- xmm_salsa8_core_3way\r
- paddd 0(%rbp), %xmm0\r
- paddd 16(%rbp), %xmm1\r
- paddd 32(%rbp), %xmm2\r
- paddd 48(%rbp), %xmm3\r
- paddd 128+0(%rbp), %xmm8\r
- paddd 128+16(%rbp), %xmm9\r
- paddd 128+32(%rbp), %xmm10\r
- paddd 128+48(%rbp), %xmm11\r
- paddd 256+0(%rbp), %xmm12\r
- paddd 256+16(%rbp), %xmm13\r
- paddd 256+32(%rbp), %xmm14\r
- paddd 256+48(%rbp), %xmm15\r
- movdqa %xmm0, 0(%rsp)\r
- movdqa %xmm1, 16(%rsp)\r
- movdqa %xmm2, 32(%rsp)\r
- movdqa %xmm3, 48(%rsp)\r
- movdqa %xmm8, 128+0(%rsp)\r
- movdqa %xmm9, 128+16(%rsp)\r
- movdqa %xmm10, 128+32(%rsp)\r
- movdqa %xmm11, 128+48(%rsp)\r
- movdqa %xmm12, 256+0(%rsp)\r
- movdqa %xmm13, 256+16(%rsp)\r
- movdqa %xmm14, 256+32(%rsp)\r
- movdqa %xmm15, 256+48(%rsp)\r
-\r
- pxor 64(%rsp), %xmm0\r
- pxor 80(%rsp), %xmm1\r
- pxor 96(%rsp), %xmm2\r
- pxor 112(%rsp), %xmm3\r
- pxor 128+64(%rsp), %xmm8\r
- pxor 128+80(%rsp), %xmm9\r
- pxor 128+96(%rsp), %xmm10\r
- pxor 128+112(%rsp), %xmm11\r
- pxor 256+64(%rsp), %xmm12\r
- pxor 256+80(%rsp), %xmm13\r
- pxor 256+96(%rsp), %xmm14\r
- pxor 256+112(%rsp), %xmm15\r
- movdqa %xmm0, 64(%rsp)\r
- movdqa %xmm1, 80(%rsp)\r
- movdqa %xmm2, 96(%rsp)\r
- movdqa %xmm3, 112(%rsp)\r
- movdqa %xmm8, 128+64(%rsp)\r
- movdqa %xmm9, 128+80(%rsp)\r
- movdqa %xmm10, 128+96(%rsp)\r
- movdqa %xmm11, 128+112(%rsp)\r
- movdqa %xmm12, 256+64(%rsp)\r
- movdqa %xmm13, 256+80(%rsp)\r
- movdqa %xmm14, 256+96(%rsp)\r
- movdqa %xmm15, 256+112(%rsp)\r
- xmm_salsa8_core_3way\r
- paddd 64(%rsp), %xmm0\r
- paddd 80(%rsp), %xmm1\r
- paddd 96(%rsp), %xmm2\r
- paddd 112(%rsp), %xmm3\r
- paddd 128+64(%rsp), %xmm8\r
- paddd 128+80(%rsp), %xmm9\r
- paddd 128+96(%rsp), %xmm10\r
- paddd 128+112(%rsp), %xmm11\r
- paddd 256+64(%rsp), %xmm12\r
- paddd 256+80(%rsp), %xmm13\r
- paddd 256+96(%rsp), %xmm14\r
- paddd 256+112(%rsp), %xmm15\r
- movdqa %xmm0, 64(%rsp)\r
- movdqa %xmm1, 80(%rsp)\r
- movdqa %xmm2, 96(%rsp)\r
- movdqa %xmm3, 112(%rsp)\r
- movdqa %xmm8, 128+64(%rsp)\r
- movdqa %xmm9, 128+80(%rsp)\r
- movdqa %xmm10, 128+96(%rsp)\r
- movdqa %xmm11, 128+112(%rsp)\r
- movdqa %xmm12, 256+64(%rsp)\r
- movdqa %xmm13, 256+80(%rsp)\r
- movdqa %xmm14, 256+96(%rsp)\r
- movdqa %xmm15, 256+112(%rsp)\r
-\r
- addq $3*128, %rbp\r
- cmpq %rax, %rbp\r
- jne scrypt_core_3way_loop1\r
-\r
- movq $1024, %rax\r
-scrypt_core_3way_loop2:\r
- movl 64(%rsp), %ebp\r
- andl $1023, %ebp\r
- leal (%ebp, %ebp, 2), %ebp\r
- shll $7, %ebp\r
- movl 128+64(%rsp), %ebx\r
- andl $1023, %ebx\r
- leal (%ebx, %ebx, 2), %ebx\r
- shll $7, %ebx\r
- addl $128, %ebx\r
- movl 256+64(%rsp), %r8d\r
- andl $1023, %r8d\r
- leal (%r8d, %r8d, 2), %r8d\r
- shll $7, %r8d\r
- addl $256, %r8d\r
- movdqa 0(%rsp), %xmm0\r
- movdqa 16(%rsp), %xmm1\r
- movdqa 32(%rsp), %xmm2\r
- movdqa 48(%rsp), %xmm3\r
- movdqa 128+0(%rsp), %xmm8\r
- movdqa 128+16(%rsp), %xmm9\r
- movdqa 128+32(%rsp), %xmm10\r
- movdqa 128+48(%rsp), %xmm11\r
- movdqa 256+0(%rsp), %xmm12\r
- movdqa 256+16(%rsp), %xmm13\r
- movdqa 256+32(%rsp), %xmm14\r
- movdqa 256+48(%rsp), %xmm15\r
- pxor 0(%rcx, %rbp), %xmm0\r
- pxor 16(%rcx, %rbp), %xmm1\r
- pxor 32(%rcx, %rbp), %xmm2\r
- pxor 48(%rcx, %rbp), %xmm3\r
- pxor 0(%rcx, %rbx), %xmm8\r
- pxor 16(%rcx, %rbx), %xmm9\r
- pxor 32(%rcx, %rbx), %xmm10\r
- pxor 48(%rcx, %rbx), %xmm11\r
- pxor 0(%rcx, %r8), %xmm12\r
- pxor 16(%rcx, %r8), %xmm13\r
- pxor 32(%rcx, %r8), %xmm14\r
- pxor 48(%rcx, %r8), %xmm15\r
-\r
- pxor 64(%rsp), %xmm0\r
- pxor 80(%rsp), %xmm1\r
- pxor 96(%rsp), %xmm2\r
- pxor 112(%rsp), %xmm3\r
- pxor 128+64(%rsp), %xmm8\r
- pxor 128+80(%rsp), %xmm9\r
- pxor 128+96(%rsp), %xmm10\r
- pxor 128+112(%rsp), %xmm11\r
- pxor 256+64(%rsp), %xmm12\r
- pxor 256+80(%rsp), %xmm13\r
- pxor 256+96(%rsp), %xmm14\r
- pxor 256+112(%rsp), %xmm15\r
- movdqa %xmm0, 0(%rsp)\r
- movdqa %xmm1, 16(%rsp)\r
- movdqa %xmm2, 32(%rsp)\r
- movdqa %xmm3, 48(%rsp)\r
- movdqa %xmm8, 128+0(%rsp)\r
- movdqa %xmm9, 128+16(%rsp)\r
- movdqa %xmm10, 128+32(%rsp)\r
- movdqa %xmm11, 128+48(%rsp)\r
- movdqa %xmm12, 256+0(%rsp)\r
- movdqa %xmm13, 256+16(%rsp)\r
- movdqa %xmm14, 256+32(%rsp)\r
- movdqa %xmm15, 256+48(%rsp)\r
- xmm_salsa8_core_3way\r
- paddd 0(%rsp), %xmm0\r
- paddd 16(%rsp), %xmm1\r
- paddd 32(%rsp), %xmm2\r
- paddd 48(%rsp), %xmm3\r
- paddd 128+0(%rsp), %xmm8\r
- paddd 128+16(%rsp), %xmm9\r
- paddd 128+32(%rsp), %xmm10\r
- paddd 128+48(%rsp), %xmm11\r
- paddd 256+0(%rsp), %xmm12\r
- paddd 256+16(%rsp), %xmm13\r
- paddd 256+32(%rsp), %xmm14\r
- paddd 256+48(%rsp), %xmm15\r
- movdqa %xmm0, 0(%rsp)\r
- movdqa %xmm1, 16(%rsp)\r
- movdqa %xmm2, 32(%rsp)\r
- movdqa %xmm3, 48(%rsp)\r
- movdqa %xmm8, 128+0(%rsp)\r
- movdqa %xmm9, 128+16(%rsp)\r
- movdqa %xmm10, 128+32(%rsp)\r
- movdqa %xmm11, 128+48(%rsp)\r
- movdqa %xmm12, 256+0(%rsp)\r
- movdqa %xmm13, 256+16(%rsp)\r
- movdqa %xmm14, 256+32(%rsp)\r
- movdqa %xmm15, 256+48(%rsp)\r
-\r
- pxor 64(%rcx, %rbp), %xmm0\r
- pxor 80(%rcx, %rbp), %xmm1\r
- pxor 96(%rcx, %rbp), %xmm2\r
- pxor 112(%rcx, %rbp), %xmm3\r
- pxor 64(%rcx, %rbx), %xmm8\r
- pxor 80(%rcx, %rbx), %xmm9\r
- pxor 96(%rcx, %rbx), %xmm10\r
- pxor 112(%rcx, %rbx), %xmm11\r
- pxor 64(%rcx, %r8), %xmm12\r
- pxor 80(%rcx, %r8), %xmm13\r
- pxor 96(%rcx, %r8), %xmm14\r
- pxor 112(%rcx, %r8), %xmm15\r
- pxor 64(%rsp), %xmm0\r
- pxor 80(%rsp), %xmm1\r
- pxor 96(%rsp), %xmm2\r
- pxor 112(%rsp), %xmm3\r
- pxor 128+64(%rsp), %xmm8\r
- pxor 128+80(%rsp), %xmm9\r
- pxor 128+96(%rsp), %xmm10\r
- pxor 128+112(%rsp), %xmm11\r
- pxor 256+64(%rsp), %xmm12\r
- pxor 256+80(%rsp), %xmm13\r
- pxor 256+96(%rsp), %xmm14\r
- pxor 256+112(%rsp), %xmm15\r
- movdqa %xmm0, 64(%rsp)\r
- movdqa %xmm1, 80(%rsp)\r
- movdqa %xmm2, 96(%rsp)\r
- movdqa %xmm3, 112(%rsp)\r
- movdqa %xmm8, 128+64(%rsp)\r
- movdqa %xmm9, 128+80(%rsp)\r
- movdqa %xmm10, 128+96(%rsp)\r
- movdqa %xmm11, 128+112(%rsp)\r
- movdqa %xmm12, 256+64(%rsp)\r
- movdqa %xmm13, 256+80(%rsp)\r
- movdqa %xmm14, 256+96(%rsp)\r
- movdqa %xmm15, 256+112(%rsp)\r
- xmm_salsa8_core_3way\r
- paddd 64(%rsp), %xmm0\r
- paddd 80(%rsp), %xmm1\r
- paddd 96(%rsp), %xmm2\r
- paddd 112(%rsp), %xmm3\r
- paddd 128+64(%rsp), %xmm8\r
- paddd 128+80(%rsp), %xmm9\r
- paddd 128+96(%rsp), %xmm10\r
- paddd 128+112(%rsp), %xmm11\r
- paddd 256+64(%rsp), %xmm12\r
- paddd 256+80(%rsp), %xmm13\r
- paddd 256+96(%rsp), %xmm14\r
- paddd 256+112(%rsp), %xmm15\r
- movdqa %xmm0, 64(%rsp)\r
- movdqa %xmm1, 80(%rsp)\r
- movdqa %xmm2, 96(%rsp)\r
- movdqa %xmm3, 112(%rsp)\r
- movdqa %xmm8, 128+64(%rsp)\r
- movdqa %xmm9, 128+80(%rsp)\r
- movdqa %xmm10, 128+96(%rsp)\r
- movdqa %xmm11, 128+112(%rsp)\r
- movdqa %xmm12, 256+64(%rsp)\r
- movdqa %xmm13, 256+80(%rsp)\r
- movdqa %xmm14, 256+96(%rsp)\r
- movdqa %xmm15, 256+112(%rsp)\r
-\r
- subq $1, %rax\r
- ja scrypt_core_3way_loop2\r
-\r
- scrypt_shuffle %rsp, 0, %rdi, 0\r
- scrypt_shuffle %rsp, 64, %rdi, 64\r
- scrypt_shuffle %rsp, 128, %rsi, 0\r
- scrypt_shuffle %rsp, 192, %rsi, 64\r
- scrypt_shuffle %rsp, 256, %rdx, 0\r
- scrypt_shuffle %rsp, 320, %rdx, 64\r
-\r
- addq $392, %rsp\r
-#if defined(WIN64)\r
- popq %rsi\r
- popq %rdi\r
- movdqa 8(%rsp), %xmm6\r
- movdqa 24(%rsp), %xmm7\r
- movdqa 40(%rsp), %xmm8\r
- movdqa 56(%rsp), %xmm9\r
- movdqa 72(%rsp), %xmm10\r
- movdqa 88(%rsp), %xmm11\r
- movdqa 104(%rsp), %xmm12\r
- movdqa 120(%rsp), %xmm13\r
- movdqa 136(%rsp), %xmm14\r
- movdqa 152(%rsp), %xmm15\r
- addq $176, %rsp\r
-#endif\r
- popq %rbp\r
- popq %rbx\r
- ret\r
-\r
+# Copyright 2011-2012 pooler@litecoinpool.org
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+#if defined(__linux__) && defined(__ELF__)
+ .section .note.GNU-stack,"",%progbits
+#endif
+
+#if defined(__x86_64__)
+
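+	# scrypt_shuffle: copy one 64-byte block from \so(\src) to \do(\dest),
+	# permuting its sixteen 32-bit words into the order used by the SSE2
+	# Salsa20/8 cores below.  The permutation is its own inverse, so the
+	# same macro also converts a block back to standard order.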
+.macro scrypt_shuffle src, so, dest, do
+ movl \so+60(\src), %r8d
+ movl \so+44(\src), %r9d
+ movl \so+28(\src), %r10d
+ movl \so+12(\src), %r11d
+ movl %r8d, \do+12(\dest)
+ movl %r9d, \do+28(\dest)
+ movl %r10d, \do+44(\dest)
+ movl %r11d, \do+60(\dest)
+ movl \so+40(\src), %r8d
+ movl \so+8(\src), %r9d
+ movl \so+48(\src), %r10d
+ movl \so+16(\src), %r11d
+ movl %r8d, \do+8(\dest)
+ movl %r9d, \do+40(\dest)
+ movl %r10d, \do+16(\dest)
+ movl %r11d, \do+48(\dest)
+ movl \so+20(\src), %r8d
+ movl \so+4(\src), %r9d
+ movl \so+52(\src), %r10d
+ movl \so+36(\src), %r11d
+ movl %r8d, \do+4(\dest)
+ movl %r9d, \do+20(\dest)
+ movl %r10d, \do+36(\dest)
+ movl %r11d, \do+52(\dest)
+ movl \so+0(\src), %r8d
+ movl \so+24(\src), %r9d
+ movl \so+32(\src), %r10d
+ movl \so+56(\src), %r11d
+ movl %r8d, \do+0(\dest)
+ movl %r9d, \do+24(\dest)
+ movl %r10d, \do+32(\dest)
+ movl %r11d, \do+56(\dest)
+.endm
+
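+	# One Salsa20 double round computed in general-purpose registers.
+	# Thirteen of the sixteen state words live in registers; the other
+	# three are kept in the stack slots 48(%rsp), 72(%rsp) and 88(%rsp).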
+.macro gen_salsa8_core_doubleround
+ movq 72(%rsp), %r15
+
+ leaq (%r14, %rdx), %rbp
+ roll $7, %ebp
+ xorq %rbp, %r9
+ leaq (%rdi, %r15), %rbp
+ roll $7, %ebp
+ xorq %rbp, %r10
+ leaq (%rdx, %r9), %rbp
+ roll $9, %ebp
+ xorq %rbp, %r11
+ leaq (%r15, %r10), %rbp
+ roll $9, %ebp
+ xorq %rbp, %r13
+ leaq (%r9, %r11), %rbp
+ roll $13, %ebp
+ xorq %rbp, %r14
+ leaq (%r10, %r13), %rbp
+ roll $13, %ebp
+ xorq %rbp, %rdi
+ leaq (%r11, %r14), %rbp
+ roll $18, %ebp
+ xorq %rbp, %rdx
+ leaq (%r13, %rdi), %rbp
+ roll $18, %ebp
+ xorq %rbp, %r15
+
+ movq 48(%rsp), %rbp
+ movq %r15, 72(%rsp)
+
+ leaq (%rax, %rbp), %r15
+ roll $7, %r15d
+ xorq %r15, %rbx
+ leaq (%rbp, %rbx), %r15
+ roll $9, %r15d
+ xorq %r15, %rcx
+ leaq (%rbx, %rcx), %r15
+ roll $13, %r15d
+ xorq %r15, %rax
+ leaq (%rcx, %rax), %r15
+ roll $18, %r15d
+ xorq %r15, %rbp
+
+ movq 88(%rsp), %r15
+ movq %rbp, 48(%rsp)
+
+ leaq (%r12, %r15), %rbp
+ roll $7, %ebp
+ xorq %rbp, %rsi
+ leaq (%r15, %rsi), %rbp
+ roll $9, %ebp
+ xorq %rbp, %r8
+ leaq (%rsi, %r8), %rbp
+ roll $13, %ebp
+ xorq %rbp, %r12
+ leaq (%r8, %r12), %rbp
+ roll $18, %ebp
+ xorq %rbp, %r15
+
+ movq %r15, 88(%rsp)
+ movq 72(%rsp), %r15
+
+ leaq (%rsi, %rdx), %rbp
+ roll $7, %ebp
+ xorq %rbp, %rdi
+ leaq (%r9, %r15), %rbp
+ roll $7, %ebp
+ xorq %rbp, %rax
+ leaq (%rdx, %rdi), %rbp
+ roll $9, %ebp
+ xorq %rbp, %rcx
+ leaq (%r15, %rax), %rbp
+ roll $9, %ebp
+ xorq %rbp, %r8
+ leaq (%rdi, %rcx), %rbp
+ roll $13, %ebp
+ xorq %rbp, %rsi
+ leaq (%rax, %r8), %rbp
+ roll $13, %ebp
+ xorq %rbp, %r9
+ leaq (%rcx, %rsi), %rbp
+ roll $18, %ebp
+ xorq %rbp, %rdx
+ leaq (%r8, %r9), %rbp
+ roll $18, %ebp
+ xorq %rbp, %r15
+
+ movq 48(%rsp), %rbp
+ movq %r15, 72(%rsp)
+
+ leaq (%r10, %rbp), %r15
+ roll $7, %r15d
+ xorq %r15, %r12
+ leaq (%rbp, %r12), %r15
+ roll $9, %r15d
+ xorq %r15, %r11
+ leaq (%r12, %r11), %r15
+ roll $13, %r15d
+ xorq %r15, %r10
+ leaq (%r11, %r10), %r15
+ roll $18, %r15d
+ xorq %r15, %rbp
+
+ movq 88(%rsp), %r15
+ movq %rbp, 48(%rsp)
+
+ leaq (%rbx, %r15), %rbp
+ roll $7, %ebp
+ xorq %rbp, %r14
+ leaq (%r15, %r14), %rbp
+ roll $9, %ebp
+ xorq %rbp, %r13
+ leaq (%r14, %r13), %rbp
+ roll $13, %ebp
+ xorq %rbp, %rbx
+ leaq (%r13, %rbx), %rbp
+ roll $18, %ebp
+ xorq %rbp, %r15
+
+ movq %r15, 88(%rsp)
+.endm
+
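+	# gen_salsa8_core: Salsa20/8 of the 64-byte block the caller stored at
+	# its 0(%rsp)..63(%rsp) (addressed here starting at 8(%rsp), past the
+	# return address).  The output of the four double rounds is returned
+	# in %xmm0-%xmm3; the caller adds it back to the original input.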
+ .text
+ .align 32
+gen_salsa8_core:
+ # 0: %rdx, %rdi, %rcx, %rsi
+ movq 8(%rsp), %rdi
+ movq %rdi, %rdx
+ shrq $32, %rdi
+ movq 16(%rsp), %rsi
+ movq %rsi, %rcx
+ shrq $32, %rsi
+ # 1: %r9, 72(%rsp), %rax, %r8
+ movq 24(%rsp), %r8
+ movq %r8, %r9
+ shrq $32, %r8
+ movq %r8, 72(%rsp)
+ movq 32(%rsp), %r8
+ movq %r8, %rax
+ shrq $32, %r8
+ # 2: %r11, %r10, 48(%rsp), %r12
+ movq 40(%rsp), %r10
+ movq %r10, %r11
+ shrq $32, %r10
+ movq 48(%rsp), %r12
+ #movq %r12, %r13
+ #movq %r13, 48(%rsp)
+ shrq $32, %r12
+ # 3: %r14, %r13, %rbx, 88(%rsp)
+ movq 56(%rsp), %r13
+ movq %r13, %r14
+ shrq $32, %r13
+ movq 64(%rsp), %r15
+ movq %r15, %rbx
+ shrq $32, %r15
+ movq %r15, 88(%rsp)
+
+ gen_salsa8_core_doubleround
+ gen_salsa8_core_doubleround
+ gen_salsa8_core_doubleround
+ gen_salsa8_core_doubleround
+
+ movl %edx, %edx
+ shlq $32, %rdi
+ addq %rdi, %rdx
+ movd %rdx, %xmm0
+
+ movl %ecx, %ecx
+ shlq $32, %rsi
+ addq %rsi, %rcx
+ movd %rcx, %xmm4
+
+ movq 72(%rsp), %rdi
+ movl %r9d, %r9d
+ shlq $32, %rdi
+ addq %rdi, %r9
+ movd %r9, %xmm1
+
+ movl %eax, %eax
+ shlq $32, %r8
+ addq %r8, %rax
+ movd %rax, %xmm5
+
+ movl %r11d, %r11d
+ shlq $32, %r10
+ addq %r10, %r11
+ movd %r11, %xmm2
+
+ movl 48(%rsp), %r8d
+ shlq $32, %r12
+ addq %r12, %r8
+ movd %r8, %xmm6
+
+ movl %r14d, %r14d
+ shlq $32, %r13
+ addq %r13, %r14
+ movd %r14, %xmm3
+
+ movq 88(%rsp), %rdi
+ movl %ebx, %ebx
+ shlq $32, %rdi
+ addq %rdi, %rbx
+ movd %rbx, %xmm7
+
+ punpcklqdq %xmm4, %xmm0
+ punpcklqdq %xmm5, %xmm1
+ punpcklqdq %xmm6, %xmm2
+ punpcklqdq %xmm7, %xmm3
+
+ #movq %rdx, 8(%rsp)
+ #movq %rcx, 16(%rsp)
+ #movq %r9, 24(%rsp)
+ #movq %rax, 32(%rsp)
+ #movq %r11, 40(%rsp)
+ #movq %r8, 48(%rsp)
+ #movq %r14, 56(%rsp)
+ #movq %rbx, 64(%rsp)
+
+ ret
+
+
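+	# scrypt_core: scrypt ROMix with N = 1024, r = 1.  On entry %rdi points
+	# to the 128-byte working block X and %rsi to the 128 KiB scratchpad V
+	# (the Win64 block below remaps %rcx/%rdx accordingly).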
+ .text
+ .align 32
+ .globl scrypt_core
+ .globl _scrypt_core
+scrypt_core:
+_scrypt_core:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+#if defined(WIN64)
+ subq $176, %rsp
+ movdqa %xmm6, 8(%rsp)
+ movdqa %xmm7, 24(%rsp)
+ movdqa %xmm8, 40(%rsp)
+ movdqa %xmm9, 56(%rsp)
+ movdqa %xmm10, 72(%rsp)
+ movdqa %xmm11, 88(%rsp)
+ movdqa %xmm12, 104(%rsp)
+ movdqa %xmm13, 120(%rsp)
+ movdqa %xmm14, 136(%rsp)
+ movdqa %xmm15, 152(%rsp)
+ pushq %rdi
+ pushq %rsi
+ movq %rcx, %rdi
+ movq %rdx, %rsi
+#endif
+
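+	# scrypt_core_cleanup: undo the prologue above; on Win64 restore
+	# %rsi/%rdi and the saved %xmm6-%xmm15, then pop the callee-saved
+	# general-purpose registers.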
+.macro scrypt_core_cleanup
+#if defined(WIN64)
+ popq %rsi
+ popq %rdi
+ movdqa 8(%rsp), %xmm6
+ movdqa 24(%rsp), %xmm7
+ movdqa 40(%rsp), %xmm8
+ movdqa 56(%rsp), %xmm9
+ movdqa 72(%rsp), %xmm10
+ movdqa 88(%rsp), %xmm11
+ movdqa 104(%rsp), %xmm12
+ movdqa 120(%rsp), %xmm13
+ movdqa 136(%rsp), %xmm14
+ movdqa 152(%rsp), %xmm15
+ addq $176, %rsp
+#endif
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbp
+ popq %rbx
+.endm
+
+ # GenuineIntel processors have fast SIMD
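+	# After CPUID leaf 0, (%ebx, %edx, %ecx) spell "Genu", "ineI", "ntel";
+	# any other vendor falls through to the generic integer path.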
+ xorl %eax, %eax
+ cpuid
+ cmpl $0x6c65746e, %ecx
+ jne gen_scrypt_core
+ cmpl $0x49656e69, %edx
+ jne gen_scrypt_core
+ cmpl $0x756e6547, %ebx
+ je xmm_scrypt_core
+
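+	# Generic path: the 128-byte block is held in %xmm8-%xmm15 between
+	# iterations, but the Salsa20/8 rounds themselves run in integer
+	# registers via gen_salsa8_core.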
+gen_scrypt_core:
+ subq $136, %rsp
+ movdqa 0(%rdi), %xmm8
+ movdqa 16(%rdi), %xmm9
+ movdqa 32(%rdi), %xmm10
+ movdqa 48(%rdi), %xmm11
+ movdqa 64(%rdi), %xmm12
+ movdqa 80(%rdi), %xmm13
+ movdqa 96(%rdi), %xmm14
+ movdqa 112(%rdi), %xmm15
+
+ leaq 131072(%rsi), %rcx
+ movq %rdi, 104(%rsp)
+ movq %rsi, 112(%rsp)
+ movq %rcx, 120(%rsp)
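+	# First loop: for i = 0..1023, store X into V[i], then X = BlockMix(X)
+	# (with r = 1, two chained Salsa20/8 calls).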
+gen_scrypt_core_loop1:
+ movdqa %xmm8, 0(%rsi)
+ movdqa %xmm9, 16(%rsi)
+ movdqa %xmm10, 32(%rsi)
+ movdqa %xmm11, 48(%rsi)
+ movdqa %xmm12, 64(%rsi)
+ movdqa %xmm13, 80(%rsi)
+ movdqa %xmm14, 96(%rsi)
+ movdqa %xmm15, 112(%rsi)
+
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm8, 0(%rsp)
+ movdqa %xmm9, 16(%rsp)
+ movdqa %xmm10, 32(%rsp)
+ movdqa %xmm11, 48(%rsp)
+ movq %rsi, 128(%rsp)
+ call gen_salsa8_core
+ paddd %xmm0, %xmm8
+ paddd %xmm1, %xmm9
+ paddd %xmm2, %xmm10
+ paddd %xmm3, %xmm11
+
+ pxor %xmm8, %xmm12
+ pxor %xmm9, %xmm13
+ pxor %xmm10, %xmm14
+ pxor %xmm11, %xmm15
+ movdqa %xmm12, 0(%rsp)
+ movdqa %xmm13, 16(%rsp)
+ movdqa %xmm14, 32(%rsp)
+ movdqa %xmm15, 48(%rsp)
+ call gen_salsa8_core
+ movq 128(%rsp), %rsi
+ paddd %xmm0, %xmm12
+ paddd %xmm1, %xmm13
+ paddd %xmm2, %xmm14
+ paddd %xmm3, %xmm15
+
+ addq $128, %rsi
+ movq 120(%rsp), %rcx
+ cmpq %rcx, %rsi
+ jne gen_scrypt_core_loop1
+
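+	# Second loop: 1024 times, j = X[16] & 1023 picks a 128-byte scratchpad
+	# entry, X ^= V[j], then X = BlockMix(X).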
+ movq $1024, %rcx
+gen_scrypt_core_loop2:
+ movq 112(%rsp), %rsi
+ movd %xmm12, %edx
+ andl $1023, %edx
+ shll $7, %edx
+ movdqa 0(%rsi, %rdx), %xmm0
+ movdqa 16(%rsi, %rdx), %xmm1
+ movdqa 32(%rsi, %rdx), %xmm2
+ movdqa 48(%rsi, %rdx), %xmm3
+ movdqa 64(%rsi, %rdx), %xmm4
+ movdqa 80(%rsi, %rdx), %xmm5
+ movdqa 96(%rsi, %rdx), %xmm6
+ movdqa 112(%rsi, %rdx), %xmm7
+ pxor %xmm0, %xmm8
+ pxor %xmm1, %xmm9
+ pxor %xmm2, %xmm10
+ pxor %xmm3, %xmm11
+ pxor %xmm4, %xmm12
+ pxor %xmm5, %xmm13
+ pxor %xmm6, %xmm14
+ pxor %xmm7, %xmm15
+
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm8, 0(%rsp)
+ movdqa %xmm9, 16(%rsp)
+ movdqa %xmm10, 32(%rsp)
+ movdqa %xmm11, 48(%rsp)
+ movq %rcx, 128(%rsp)
+ call gen_salsa8_core
+ paddd %xmm0, %xmm8
+ paddd %xmm1, %xmm9
+ paddd %xmm2, %xmm10
+ paddd %xmm3, %xmm11
+
+ pxor %xmm8, %xmm12
+ pxor %xmm9, %xmm13
+ pxor %xmm10, %xmm14
+ pxor %xmm11, %xmm15
+ movdqa %xmm12, 0(%rsp)
+ movdqa %xmm13, 16(%rsp)
+ movdqa %xmm14, 32(%rsp)
+ movdqa %xmm15, 48(%rsp)
+ call gen_salsa8_core
+ movq 128(%rsp), %rcx
+ paddd %xmm0, %xmm12
+ paddd %xmm1, %xmm13
+ paddd %xmm2, %xmm14
+ paddd %xmm3, %xmm15
+
+ subq $1, %rcx
+ ja gen_scrypt_core_loop2
+
+ movq 104(%rsp), %rdi
+ movdqa %xmm8, 0(%rdi)
+ movdqa %xmm9, 16(%rdi)
+ movdqa %xmm10, 32(%rdi)
+ movdqa %xmm11, 48(%rdi)
+ movdqa %xmm12, 64(%rdi)
+ movdqa %xmm13, 80(%rdi)
+ movdqa %xmm14, 96(%rdi)
+ movdqa %xmm15, 112(%rdi)
+
+ addq $136, %rsp
+ scrypt_core_cleanup
+ ret
+
+
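+	# SSE2 Salsa20 double round on the shuffled state in %xmm0-%xmm3: each
+	# add/rotate/XOR step processes four words at once, and the pshufd
+	# instructions realign the registers between steps.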
+.macro xmm_salsa8_core_doubleround
+ movdqa %xmm1, %xmm4
+ paddd %xmm0, %xmm4
+ movdqa %xmm4, %xmm5
+ pslld $7, %xmm4
+ psrld $25, %xmm5
+ pxor %xmm4, %xmm3
+ pxor %xmm5, %xmm3
+ movdqa %xmm0, %xmm4
+
+ paddd %xmm3, %xmm4
+ movdqa %xmm4, %xmm5
+ pslld $9, %xmm4
+ psrld $23, %xmm5
+ pxor %xmm4, %xmm2
+ movdqa %xmm3, %xmm4
+ pshufd $0x93, %xmm3, %xmm3
+ pxor %xmm5, %xmm2
+
+ paddd %xmm2, %xmm4
+ movdqa %xmm4, %xmm5
+ pslld $13, %xmm4
+ psrld $19, %xmm5
+ pxor %xmm4, %xmm1
+ movdqa %xmm2, %xmm4
+ pshufd $0x4e, %xmm2, %xmm2
+ pxor %xmm5, %xmm1
+
+ paddd %xmm1, %xmm4
+ movdqa %xmm4, %xmm5
+ pslld $18, %xmm4
+ psrld $14, %xmm5
+ pxor %xmm4, %xmm0
+ pshufd $0x39, %xmm1, %xmm1
+ pxor %xmm5, %xmm0
+ movdqa %xmm3, %xmm4
+
+ paddd %xmm0, %xmm4
+ movdqa %xmm4, %xmm5
+ pslld $7, %xmm4
+ psrld $25, %xmm5
+ pxor %xmm4, %xmm1
+ pxor %xmm5, %xmm1
+ movdqa %xmm0, %xmm4
+
+ paddd %xmm1, %xmm4
+ movdqa %xmm4, %xmm5
+ pslld $9, %xmm4
+ psrld $23, %xmm5
+ pxor %xmm4, %xmm2
+ movdqa %xmm1, %xmm4
+ pshufd $0x93, %xmm1, %xmm1
+ pxor %xmm5, %xmm2
+
+ paddd %xmm2, %xmm4
+ movdqa %xmm4, %xmm5
+ pslld $13, %xmm4
+ psrld $19, %xmm5
+ pxor %xmm4, %xmm3
+ movdqa %xmm2, %xmm4
+ pshufd $0x4e, %xmm2, %xmm2
+ pxor %xmm5, %xmm3
+
+ paddd %xmm3, %xmm4
+ movdqa %xmm4, %xmm5
+ pslld $18, %xmm4
+ psrld $14, %xmm5
+ pxor %xmm4, %xmm0
+ pshufd $0x39, %xmm3, %xmm3
+ pxor %xmm5, %xmm0
+.endm
+
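+# salsa20/8 core: four double rounds (eight rounds)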
+.macro xmm_salsa8_core
+ xmm_salsa8_core_doubleround
+ xmm_salsa8_core_doubleround
+ xmm_salsa8_core_doubleround
+ xmm_salsa8_core_doubleround
+.endm
+
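+# SSE2 scrypt core for a single state: %rdi points to the 128-byte state,
+# %rsi to the 128 KiB scratchpad; the state is kept in %xmm8-%xmm15 in the
+# shuffled (diagonal) word order expected by xmm_salsa8_core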
+ .align 32
+xmm_scrypt_core:
+ # shuffle 1st block into %xmm8-%xmm11
+ movl 60(%rdi), %edx
+ movl 44(%rdi), %ecx
+ movl 28(%rdi), %ebx
+ movl 12(%rdi), %eax
+ movd %edx, %xmm0
+ movd %ecx, %xmm1
+ movd %ebx, %xmm2
+ movd %eax, %xmm3
+ movl 40(%rdi), %ecx
+ movl 24(%rdi), %ebx
+ movl 8(%rdi), %eax
+ movl 56(%rdi), %edx
+ pshufd $0x93, %xmm0, %xmm0
+ pshufd $0x93, %xmm1, %xmm1
+ pshufd $0x93, %xmm2, %xmm2
+ pshufd $0x93, %xmm3, %xmm3
+ movd %ecx, %xmm4
+ movd %ebx, %xmm5
+ movd %eax, %xmm6
+ movd %edx, %xmm7
+ paddd %xmm4, %xmm0
+ paddd %xmm5, %xmm1
+ paddd %xmm6, %xmm2
+ paddd %xmm7, %xmm3
+ movl 20(%rdi), %ebx
+ movl 4(%rdi), %eax
+ movl 52(%rdi), %edx
+ movl 36(%rdi), %ecx
+ pshufd $0x93, %xmm0, %xmm0
+ pshufd $0x93, %xmm1, %xmm1
+ pshufd $0x93, %xmm2, %xmm2
+ pshufd $0x93, %xmm3, %xmm3
+ movd %ebx, %xmm4
+ movd %eax, %xmm5
+ movd %edx, %xmm6
+ movd %ecx, %xmm7
+ paddd %xmm4, %xmm0
+ paddd %xmm5, %xmm1
+ paddd %xmm6, %xmm2
+ paddd %xmm7, %xmm3
+ movl 0(%rdi), %eax
+ movl 48(%rdi), %edx
+ movl 32(%rdi), %ecx
+ movl 16(%rdi), %ebx
+ pshufd $0x93, %xmm0, %xmm0
+ pshufd $0x93, %xmm1, %xmm1
+ pshufd $0x93, %xmm2, %xmm2
+ pshufd $0x93, %xmm3, %xmm3
+ movd %eax, %xmm8
+ movd %edx, %xmm9
+ movd %ecx, %xmm10
+ movd %ebx, %xmm11
+ paddd %xmm0, %xmm8
+ paddd %xmm1, %xmm9
+ paddd %xmm2, %xmm10
+ paddd %xmm3, %xmm11
+
+ # shuffle 2nd block into %xmm12-%xmm15
+ movl 124(%rdi), %edx
+ movl 108(%rdi), %ecx
+ movl 92(%rdi), %ebx
+ movl 76(%rdi), %eax
+ movd %edx, %xmm0
+ movd %ecx, %xmm1
+ movd %ebx, %xmm2
+ movd %eax, %xmm3
+ movl 104(%rdi), %ecx
+ movl 88(%rdi), %ebx
+ movl 72(%rdi), %eax
+ movl 120(%rdi), %edx
+ pshufd $0x93, %xmm0, %xmm0
+ pshufd $0x93, %xmm1, %xmm1
+ pshufd $0x93, %xmm2, %xmm2
+ pshufd $0x93, %xmm3, %xmm3
+ movd %ecx, %xmm4
+ movd %ebx, %xmm5
+ movd %eax, %xmm6
+ movd %edx, %xmm7
+ paddd %xmm4, %xmm0
+ paddd %xmm5, %xmm1
+ paddd %xmm6, %xmm2
+ paddd %xmm7, %xmm3
+ movl 84(%rdi), %ebx
+ movl 68(%rdi), %eax
+ movl 116(%rdi), %edx
+ movl 100(%rdi), %ecx
+ pshufd $0x93, %xmm0, %xmm0
+ pshufd $0x93, %xmm1, %xmm1
+ pshufd $0x93, %xmm2, %xmm2
+ pshufd $0x93, %xmm3, %xmm3
+ movd %ebx, %xmm4
+ movd %eax, %xmm5
+ movd %edx, %xmm6
+ movd %ecx, %xmm7
+ paddd %xmm4, %xmm0
+ paddd %xmm5, %xmm1
+ paddd %xmm6, %xmm2
+ paddd %xmm7, %xmm3
+ movl 64(%rdi), %eax
+ movl 112(%rdi), %edx
+ movl 96(%rdi), %ecx
+ movl 80(%rdi), %ebx
+ pshufd $0x93, %xmm0, %xmm0
+ pshufd $0x93, %xmm1, %xmm1
+ pshufd $0x93, %xmm2, %xmm2
+ pshufd $0x93, %xmm3, %xmm3
+ movd %eax, %xmm12
+ movd %edx, %xmm13
+ movd %ecx, %xmm14
+ movd %ebx, %xmm15
+ paddd %xmm0, %xmm12
+ paddd %xmm1, %xmm13
+ paddd %xmm2, %xmm14
+ paddd %xmm3, %xmm15
+
+ movq %rsi, %rdx
+ leaq 131072(%rsi), %rcx
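+	# loop1: V[i] = X, then one BlockMix: X0 = salsa20/8(X0 ^ X1), X1 = salsa20/8(X1 ^ X0)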
+xmm_scrypt_core_loop1:
+ movdqa %xmm8, 0(%rdx)
+ movdqa %xmm9, 16(%rdx)
+ movdqa %xmm10, 32(%rdx)
+ movdqa %xmm11, 48(%rdx)
+ movdqa %xmm12, 64(%rdx)
+ movdqa %xmm13, 80(%rdx)
+ movdqa %xmm14, 96(%rdx)
+ movdqa %xmm15, 112(%rdx)
+
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm8, %xmm0
+ movdqa %xmm9, %xmm1
+ movdqa %xmm10, %xmm2
+ movdqa %xmm11, %xmm3
+ xmm_salsa8_core
+ paddd %xmm0, %xmm8
+ paddd %xmm1, %xmm9
+ paddd %xmm2, %xmm10
+ paddd %xmm3, %xmm11
+
+ pxor %xmm8, %xmm12
+ pxor %xmm9, %xmm13
+ pxor %xmm10, %xmm14
+ pxor %xmm11, %xmm15
+ movdqa %xmm12, %xmm0
+ movdqa %xmm13, %xmm1
+ movdqa %xmm14, %xmm2
+ movdqa %xmm15, %xmm3
+ xmm_salsa8_core
+ paddd %xmm0, %xmm12
+ paddd %xmm1, %xmm13
+ paddd %xmm2, %xmm14
+ paddd %xmm3, %xmm15
+
+ addq $128, %rdx
+ cmpq %rcx, %rdx
+ jne xmm_scrypt_core_loop1
+
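+	# loop2: 1024 iterations; j = (lane 0 of %xmm12) & 1023, X ^= V[j], then another BlockMix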
+ movq $1024, %rcx
+xmm_scrypt_core_loop2:
+ movd %xmm12, %edx
+ andl $1023, %edx
+ shll $7, %edx
+ movdqa 0(%rsi, %rdx), %xmm0
+ movdqa 16(%rsi, %rdx), %xmm1
+ movdqa 32(%rsi, %rdx), %xmm2
+ movdqa 48(%rsi, %rdx), %xmm3
+ movdqa 64(%rsi, %rdx), %xmm4
+ movdqa 80(%rsi, %rdx), %xmm5
+ movdqa 96(%rsi, %rdx), %xmm6
+ movdqa 112(%rsi, %rdx), %xmm7
+ pxor %xmm0, %xmm8
+ pxor %xmm1, %xmm9
+ pxor %xmm2, %xmm10
+ pxor %xmm3, %xmm11
+ pxor %xmm4, %xmm12
+ pxor %xmm5, %xmm13
+ pxor %xmm6, %xmm14
+ pxor %xmm7, %xmm15
+
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm8, %xmm0
+ movdqa %xmm9, %xmm1
+ movdqa %xmm10, %xmm2
+ movdqa %xmm11, %xmm3
+ xmm_salsa8_core
+ paddd %xmm0, %xmm8
+ paddd %xmm1, %xmm9
+ paddd %xmm2, %xmm10
+ paddd %xmm3, %xmm11
+
+ pxor %xmm8, %xmm12
+ pxor %xmm9, %xmm13
+ pxor %xmm10, %xmm14
+ pxor %xmm11, %xmm15
+ movdqa %xmm12, %xmm0
+ movdqa %xmm13, %xmm1
+ movdqa %xmm14, %xmm2
+ movdqa %xmm15, %xmm3
+ xmm_salsa8_core
+ paddd %xmm0, %xmm12
+ paddd %xmm1, %xmm13
+ paddd %xmm2, %xmm14
+ paddd %xmm3, %xmm15
+
+ subq $1, %rcx
+ ja xmm_scrypt_core_loop2
+
+ # re-shuffle 1st block back
+ movd %xmm8, %eax
+ movd %xmm9, %edx
+ movd %xmm10, %ecx
+ movd %xmm11, %ebx
+ pshufd $0x39, %xmm8, %xmm8
+ pshufd $0x39, %xmm9, %xmm9
+ pshufd $0x39, %xmm10, %xmm10
+ pshufd $0x39, %xmm11, %xmm11
+ movl %eax, 0(%rdi)
+ movl %edx, 48(%rdi)
+ movl %ecx, 32(%rdi)
+ movl %ebx, 16(%rdi)
+ movd %xmm8, %ebx
+ movd %xmm9, %eax
+ movd %xmm10, %edx
+ movd %xmm11, %ecx
+ pshufd $0x39, %xmm8, %xmm8
+ pshufd $0x39, %xmm9, %xmm9
+ pshufd $0x39, %xmm10, %xmm10
+ pshufd $0x39, %xmm11, %xmm11
+ movl %ebx, 20(%rdi)
+ movl %eax, 4(%rdi)
+ movl %edx, 52(%rdi)
+ movl %ecx, 36(%rdi)
+ movd %xmm8, %ecx
+ movd %xmm9, %ebx
+ movd %xmm10, %eax
+ movd %xmm11, %edx
+ pshufd $0x39, %xmm8, %xmm8
+ pshufd $0x39, %xmm9, %xmm9
+ pshufd $0x39, %xmm10, %xmm10
+ pshufd $0x39, %xmm11, %xmm11
+ movl %ecx, 40(%rdi)
+ movl %ebx, 24(%rdi)
+ movl %eax, 8(%rdi)
+ movl %edx, 56(%rdi)
+ movd %xmm8, %edx
+ movd %xmm9, %ecx
+ movd %xmm10, %ebx
+ movd %xmm11, %eax
+ movl %edx, 60(%rdi)
+ movl %ecx, 44(%rdi)
+ movl %ebx, 28(%rdi)
+ movl %eax, 12(%rdi)
+
+ # re-shuffle 2nd block back
+ movd %xmm12, %eax
+ movd %xmm13, %edx
+ movd %xmm14, %ecx
+ movd %xmm15, %ebx
+ pshufd $0x39, %xmm12, %xmm12
+ pshufd $0x39, %xmm13, %xmm13
+ pshufd $0x39, %xmm14, %xmm14
+ pshufd $0x39, %xmm15, %xmm15
+ movl %eax, 64(%rdi)
+ movl %edx, 112(%rdi)
+ movl %ecx, 96(%rdi)
+ movl %ebx, 80(%rdi)
+ movd %xmm12, %ebx
+ movd %xmm13, %eax
+ movd %xmm14, %edx
+ movd %xmm15, %ecx
+ pshufd $0x39, %xmm12, %xmm12
+ pshufd $0x39, %xmm13, %xmm13
+ pshufd $0x39, %xmm14, %xmm14
+ pshufd $0x39, %xmm15, %xmm15
+ movl %ebx, 84(%rdi)
+ movl %eax, 68(%rdi)
+ movl %edx, 116(%rdi)
+ movl %ecx, 100(%rdi)
+ movd %xmm12, %ecx
+ movd %xmm13, %ebx
+ movd %xmm14, %eax
+ movd %xmm15, %edx
+ pshufd $0x39, %xmm12, %xmm12
+ pshufd $0x39, %xmm13, %xmm13
+ pshufd $0x39, %xmm14, %xmm14
+ pshufd $0x39, %xmm15, %xmm15
+ movl %ecx, 104(%rdi)
+ movl %ebx, 88(%rdi)
+ movl %eax, 72(%rdi)
+ movl %edx, 120(%rdi)
+ movd %xmm12, %edx
+ movd %xmm13, %ecx
+ movd %xmm14, %ebx
+ movd %xmm15, %eax
+ movl %edx, 124(%rdi)
+ movl %ecx, 108(%rdi)
+ movl %ebx, 92(%rdi)
+ movl %eax, 76(%rdi)
+
+ scrypt_core_cleanup
+ ret
+
+
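+# scrypt_best_throughput: returns 3, or 1 on AMD CPUs whose CPUID extended
+# family is zero (family 0xF and below)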
+ .text
+ .align 32
+ .globl scrypt_best_throughput
+ .globl _scrypt_best_throughput
+scrypt_best_throughput:
+_scrypt_best_throughput:
+ pushq %rbx
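+	# CPUID leaf 0 returns the vendor string in %ebx:%edx:%ecx; the constants
+	# below spell "AuthenticAMD"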
+ xorq %rax, %rax
+ cpuid
+ movl $3, %eax
+ cmpl $0x444d4163, %ecx
+ jne scrypt_best_throughput_exit
+ cmpl $0x69746e65, %edx
+ jne scrypt_best_throughput_exit
+ cmpl $0x68747541, %ebx
+ jne scrypt_best_throughput_exit
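+	# CPUID leaf 1: andl tests the extended family field (bits 27-20 of %eax);
+	# movl does not modify flags, so jnz still acts on the andl result:
+	# non-zero extended family returns 3, otherwise fall through to return 1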
+ movl $1, %eax
+ cpuid
+ andl $0x0ff00000, %eax
+ movl $3, %eax
+ jnz scrypt_best_throughput_exit
+ movl $1, %eax
+scrypt_best_throughput_exit:
+ popq %rbx
+ ret
+
+
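+# two salsa20 double rounds interleaved: one state in %xmm0-%xmm3, the
+# other in %xmm8-%xmm11, with %xmm4-%xmm7 as scratch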
+.macro xmm_salsa8_core_2way_doubleround
+ movdqa %xmm1, %xmm4
+ movdqa %xmm9, %xmm6
+ paddd %xmm0, %xmm4
+ paddd %xmm8, %xmm6
+ movdqa %xmm4, %xmm5
+ movdqa %xmm6, %xmm7
+ pslld $7, %xmm4
+ pslld $7, %xmm6
+ psrld $25, %xmm5
+ psrld $25, %xmm7
+ pxor %xmm4, %xmm3
+ pxor %xmm6, %xmm11
+ pxor %xmm5, %xmm3
+ pxor %xmm7, %xmm11
+ movdqa %xmm0, %xmm4
+ movdqa %xmm8, %xmm6
+
+ paddd %xmm3, %xmm4
+ paddd %xmm11, %xmm6
+ movdqa %xmm4, %xmm5
+ movdqa %xmm6, %xmm7
+ pslld $9, %xmm4
+ pslld $9, %xmm6
+ psrld $23, %xmm5
+ psrld $23, %xmm7
+ pxor %xmm4, %xmm2
+ pxor %xmm6, %xmm10
+ movdqa %xmm3, %xmm4
+ movdqa %xmm11, %xmm6
+ pshufd $0x93, %xmm3, %xmm3
+ pshufd $0x93, %xmm11, %xmm11
+ pxor %xmm5, %xmm2
+ pxor %xmm7, %xmm10
+
+ paddd %xmm2, %xmm4
+ paddd %xmm10, %xmm6
+ movdqa %xmm4, %xmm5
+ movdqa %xmm6, %xmm7
+ pslld $13, %xmm4
+ pslld $13, %xmm6
+ psrld $19, %xmm5
+ psrld $19, %xmm7
+ pxor %xmm4, %xmm1
+ pxor %xmm6, %xmm9
+ movdqa %xmm2, %xmm4
+ movdqa %xmm10, %xmm6
+ pshufd $0x4e, %xmm2, %xmm2
+ pshufd $0x4e, %xmm10, %xmm10
+ pxor %xmm5, %xmm1
+ pxor %xmm7, %xmm9
+
+ paddd %xmm1, %xmm4
+ paddd %xmm9, %xmm6
+ movdqa %xmm4, %xmm5
+ movdqa %xmm6, %xmm7
+ pslld $18, %xmm4
+ pslld $18, %xmm6
+ psrld $14, %xmm5
+ psrld $14, %xmm7
+ pxor %xmm4, %xmm0
+ pxor %xmm6, %xmm8
+ pshufd $0x39, %xmm1, %xmm1
+ pshufd $0x39, %xmm9, %xmm9
+ pxor %xmm5, %xmm0
+ pxor %xmm7, %xmm8
+ movdqa %xmm3, %xmm4
+ movdqa %xmm11, %xmm6
+
+ paddd %xmm0, %xmm4
+ paddd %xmm8, %xmm6
+ movdqa %xmm4, %xmm5
+ movdqa %xmm6, %xmm7
+ pslld $7, %xmm4
+ pslld $7, %xmm6
+ psrld $25, %xmm5
+ psrld $25, %xmm7
+ pxor %xmm4, %xmm1
+ pxor %xmm6, %xmm9
+ pxor %xmm5, %xmm1
+ pxor %xmm7, %xmm9
+ movdqa %xmm0, %xmm4
+ movdqa %xmm8, %xmm6
+
+ paddd %xmm1, %xmm4
+ paddd %xmm9, %xmm6
+ movdqa %xmm4, %xmm5
+ movdqa %xmm6, %xmm7
+ pslld $9, %xmm4
+ pslld $9, %xmm6
+ psrld $23, %xmm5
+ psrld $23, %xmm7
+ pxor %xmm4, %xmm2
+ pxor %xmm6, %xmm10
+ movdqa %xmm1, %xmm4
+ movdqa %xmm9, %xmm6
+ pshufd $0x93, %xmm1, %xmm1
+ pshufd $0x93, %xmm9, %xmm9
+ pxor %xmm5, %xmm2
+ pxor %xmm7, %xmm10
+
+ paddd %xmm2, %xmm4
+ paddd %xmm10, %xmm6
+ movdqa %xmm4, %xmm5
+ movdqa %xmm6, %xmm7
+ pslld $13, %xmm4
+ pslld $13, %xmm6
+ psrld $19, %xmm5
+ psrld $19, %xmm7
+ pxor %xmm4, %xmm3
+ pxor %xmm6, %xmm11
+ movdqa %xmm2, %xmm4
+ movdqa %xmm10, %xmm6
+ pshufd $0x4e, %xmm2, %xmm2
+ pshufd $0x4e, %xmm10, %xmm10
+ pxor %xmm5, %xmm3
+ pxor %xmm7, %xmm11
+
+ paddd %xmm3, %xmm4
+ paddd %xmm11, %xmm6
+ movdqa %xmm4, %xmm5
+ movdqa %xmm6, %xmm7
+ pslld $18, %xmm4
+ pslld $18, %xmm6
+ psrld $14, %xmm5
+ psrld $14, %xmm7
+ pxor %xmm4, %xmm0
+ pxor %xmm6, %xmm8
+ pshufd $0x39, %xmm3, %xmm3
+ pshufd $0x39, %xmm11, %xmm11
+ pxor %xmm5, %xmm0
+ pxor %xmm7, %xmm8
+.endm
+
+.macro xmm_salsa8_core_2way
+ xmm_salsa8_core_2way_doubleround
+ xmm_salsa8_core_2way_doubleround
+ xmm_salsa8_core_2way_doubleround
+ xmm_salsa8_core_2way_doubleround
+.endm
+
+
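+# scrypt_core_2way: scrypt core on two independent states in lockstep;
+# %rdi and %rsi each point to a 128-byte state, %rdx to a 256 KiB
+# scratchpad whose 256-byte entries interleave the two states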
+ .text
+ .align 32
+ .globl scrypt_core_2way
+ .globl _scrypt_core_2way
+scrypt_core_2way:
+_scrypt_core_2way:
+ pushq %rbx
+ pushq %rbp
+#if defined(WIN64)
+ subq $176, %rsp
+ movdqa %xmm6, 8(%rsp)
+ movdqa %xmm7, 24(%rsp)
+ movdqa %xmm8, 40(%rsp)
+ movdqa %xmm9, 56(%rsp)
+ movdqa %xmm10, 72(%rsp)
+ movdqa %xmm11, 88(%rsp)
+ movdqa %xmm12, 104(%rsp)
+ movdqa %xmm13, 120(%rsp)
+ movdqa %xmm14, 136(%rsp)
+ movdqa %xmm15, 152(%rsp)
+ pushq %rdi
+ pushq %rsi
+ movq %rcx, %rdi
+ movq %rdx, %rsi
+ movq %r8, %rdx
+#endif
+ subq $264, %rsp
+
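+	# shuffle both states into SIMD word order on the stack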
+ scrypt_shuffle %rdi, 0, %rsp, 0
+ scrypt_shuffle %rdi, 64, %rsp, 64
+ scrypt_shuffle %rsi, 0, %rsp, 128
+ scrypt_shuffle %rsi, 64, %rsp, 192
+
+ movdqa 192(%rsp), %xmm12
+ movdqa 208(%rsp), %xmm13
+ movdqa 224(%rsp), %xmm14
+ movdqa 240(%rsp), %xmm15
+
+ movq %rdx, %rbp
+ leaq 262144(%rdx), %rcx
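+	# loop1: store both states to the scratchpad and advance each by one
+	# BlockMix; each first half is stored already XORed with its second half,
+	# so loop2 can fold in V[j] with a single XOR per half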
+scrypt_core_2way_loop1:
+ movdqa 0(%rsp), %xmm0
+ movdqa 16(%rsp), %xmm1
+ movdqa 32(%rsp), %xmm2
+ movdqa 48(%rsp), %xmm3
+ movdqa 64(%rsp), %xmm4
+ movdqa 80(%rsp), %xmm5
+ movdqa 96(%rsp), %xmm6
+ movdqa 112(%rsp), %xmm7
+ movdqa 128(%rsp), %xmm8
+ movdqa 144(%rsp), %xmm9
+ movdqa 160(%rsp), %xmm10
+ movdqa 176(%rsp), %xmm11
+ pxor %xmm4, %xmm0
+ pxor %xmm5, %xmm1
+ pxor %xmm6, %xmm2
+ pxor %xmm7, %xmm3
+ movdqa %xmm0, 0(%rbp)
+ movdqa %xmm1, 16(%rbp)
+ movdqa %xmm2, 32(%rbp)
+ movdqa %xmm3, 48(%rbp)
+ movdqa %xmm4, 64(%rbp)
+ movdqa %xmm5, 80(%rbp)
+ movdqa %xmm6, 96(%rbp)
+ movdqa %xmm7, 112(%rbp)
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm8, 128(%rbp)
+ movdqa %xmm9, 144(%rbp)
+ movdqa %xmm10, 160(%rbp)
+ movdqa %xmm11, 176(%rbp)
+ movdqa %xmm12, 192(%rbp)
+ movdqa %xmm13, 208(%rbp)
+ movdqa %xmm14, 224(%rbp)
+ movdqa %xmm15, 240(%rbp)
+
+ xmm_salsa8_core_2way
+ paddd 0(%rbp), %xmm0
+ paddd 16(%rbp), %xmm1
+ paddd 32(%rbp), %xmm2
+ paddd 48(%rbp), %xmm3
+ paddd 128(%rbp), %xmm8
+ paddd 144(%rbp), %xmm9
+ paddd 160(%rbp), %xmm10
+ paddd 176(%rbp), %xmm11
+ movdqa %xmm0, 0(%rsp)
+ movdqa %xmm1, 16(%rsp)
+ movdqa %xmm2, 32(%rsp)
+ movdqa %xmm3, 48(%rsp)
+ movdqa %xmm8, 128(%rsp)
+ movdqa %xmm9, 144(%rsp)
+ movdqa %xmm10, 160(%rsp)
+ movdqa %xmm11, 176(%rsp)
+
+ pxor 64(%rsp), %xmm0
+ pxor 80(%rsp), %xmm1
+ pxor 96(%rsp), %xmm2
+ pxor 112(%rsp), %xmm3
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm1, 80(%rsp)
+ movdqa %xmm2, 96(%rsp)
+ movdqa %xmm3, 112(%rsp)
+ movdqa %xmm8, %xmm12
+ movdqa %xmm9, %xmm13
+ movdqa %xmm10, %xmm14
+ movdqa %xmm11, %xmm15
+ xmm_salsa8_core_2way
+ paddd 64(%rsp), %xmm0
+ paddd 80(%rsp), %xmm1
+ paddd 96(%rsp), %xmm2
+ paddd 112(%rsp), %xmm3
+ paddd %xmm8, %xmm12
+ paddd %xmm9, %xmm13
+ paddd %xmm10, %xmm14
+ paddd %xmm11, %xmm15
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm1, 80(%rsp)
+ movdqa %xmm2, 96(%rsp)
+ movdqa %xmm3, 112(%rsp)
+
+ addq $256, %rbp
+ cmpq %rcx, %rbp
+ jne scrypt_core_2way_loop1
+
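+	# loop2: per state, j = (first word of its second half) & 1023; the entry
+	# at j*256 (plus 128 for the second state) is XORed in before the BlockMix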
+ movq $1024, %rcx
+scrypt_core_2way_loop2:
+ movdqa 0(%rsp), %xmm0
+ movdqa 16(%rsp), %xmm1
+ movdqa 32(%rsp), %xmm2
+ movdqa 48(%rsp), %xmm3
+ movdqa 64(%rsp), %xmm4
+ movdqa 80(%rsp), %xmm5
+ movdqa 96(%rsp), %xmm6
+ movdqa 112(%rsp), %xmm7
+ movdqa 128(%rsp), %xmm8
+ movdqa 144(%rsp), %xmm9
+ movdqa 160(%rsp), %xmm10
+ movdqa 176(%rsp), %xmm11
+ movd %xmm4, %ebp
+ andl $1023, %ebp
+ shll $8, %ebp
+ pxor 0(%rdx, %rbp), %xmm0
+ pxor 16(%rdx, %rbp), %xmm1
+ pxor 32(%rdx, %rbp), %xmm2
+ pxor 48(%rdx, %rbp), %xmm3
+ movd %xmm12, %ebx
+ andl $1023, %ebx
+ shll $8, %ebx
+ addl $128, %ebx
+ pxor 0(%rdx, %rbx), %xmm8
+ pxor 16(%rdx, %rbx), %xmm9
+ pxor 32(%rdx, %rbx), %xmm10
+ pxor 48(%rdx, %rbx), %xmm11
+
+ pxor %xmm4, %xmm0
+ pxor %xmm5, %xmm1
+ pxor %xmm6, %xmm2
+ pxor %xmm7, %xmm3
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm0, 0(%rsp)
+ movdqa %xmm1, 16(%rsp)
+ movdqa %xmm2, 32(%rsp)
+ movdqa %xmm3, 48(%rsp)
+ movdqa %xmm8, 128(%rsp)
+ movdqa %xmm9, 144(%rsp)
+ movdqa %xmm10, 160(%rsp)
+ movdqa %xmm11, 176(%rsp)
+ xmm_salsa8_core_2way
+ paddd 0(%rsp), %xmm0
+ paddd 16(%rsp), %xmm1
+ paddd 32(%rsp), %xmm2
+ paddd 48(%rsp), %xmm3
+ paddd 128(%rsp), %xmm8
+ paddd 144(%rsp), %xmm9
+ paddd 160(%rsp), %xmm10
+ paddd 176(%rsp), %xmm11
+ movdqa %xmm0, 0(%rsp)
+ movdqa %xmm1, 16(%rsp)
+ movdqa %xmm2, 32(%rsp)
+ movdqa %xmm3, 48(%rsp)
+ movdqa %xmm8, 128(%rsp)
+ movdqa %xmm9, 144(%rsp)
+ movdqa %xmm10, 160(%rsp)
+ movdqa %xmm11, 176(%rsp)
+
+ pxor 64(%rdx, %rbp), %xmm0
+ pxor 80(%rdx, %rbp), %xmm1
+ pxor 96(%rdx, %rbp), %xmm2
+ pxor 112(%rdx, %rbp), %xmm3
+ pxor 64(%rdx, %rbx), %xmm8
+ pxor 80(%rdx, %rbx), %xmm9
+ pxor 96(%rdx, %rbx), %xmm10
+ pxor 112(%rdx, %rbx), %xmm11
+ pxor 64(%rsp), %xmm0
+ pxor 80(%rsp), %xmm1
+ pxor 96(%rsp), %xmm2
+ pxor 112(%rsp), %xmm3
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm1, 80(%rsp)
+ movdqa %xmm2, 96(%rsp)
+ movdqa %xmm3, 112(%rsp)
+ movdqa %xmm8, %xmm12
+ movdqa %xmm9, %xmm13
+ movdqa %xmm10, %xmm14
+ movdqa %xmm11, %xmm15
+ xmm_salsa8_core_2way
+ paddd 64(%rsp), %xmm0
+ paddd 80(%rsp), %xmm1
+ paddd 96(%rsp), %xmm2
+ paddd 112(%rsp), %xmm3
+ paddd %xmm8, %xmm12
+ paddd %xmm9, %xmm13
+ paddd %xmm10, %xmm14
+ paddd %xmm11, %xmm15
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm1, 80(%rsp)
+ movdqa %xmm2, 96(%rsp)
+ movdqa %xmm3, 112(%rsp)
+
+ subq $1, %rcx
+ ja scrypt_core_2way_loop2
+
+ movdqa %xmm12, 192(%rsp)
+ movdqa %xmm13, 208(%rsp)
+ movdqa %xmm14, 224(%rsp)
+ movdqa %xmm15, 240(%rsp)
+
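+	# un-shuffle both states back into the caller's buffers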
+ scrypt_shuffle %rsp, 0, %rdi, 0
+ scrypt_shuffle %rsp, 64, %rdi, 64
+ scrypt_shuffle %rsp, 128, %rsi, 0
+ scrypt_shuffle %rsp, 192, %rsi, 64
+
+ addq $264, %rsp
+#if defined(WIN64)
+ popq %rsi
+ popq %rdi
+ movdqa 8(%rsp), %xmm6
+ movdqa 24(%rsp), %xmm7
+ movdqa 40(%rsp), %xmm8
+ movdqa 56(%rsp), %xmm9
+ movdqa 72(%rsp), %xmm10
+ movdqa 88(%rsp), %xmm11
+ movdqa 104(%rsp), %xmm12
+ movdqa 120(%rsp), %xmm13
+ movdqa 136(%rsp), %xmm14
+ movdqa 152(%rsp), %xmm15
+ addq $176, %rsp
+#endif
+ popq %rbp
+ popq %rbx
+ ret
+
+
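+# three salsa20 double rounds interleaved: states in %xmm0-%xmm3,
+# %xmm8-%xmm11 and %xmm12-%xmm15, with %xmm4-%xmm7 as scratch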
+.macro xmm_salsa8_core_3way_doubleround
+ movdqa %xmm1, %xmm4
+ movdqa %xmm9, %xmm6
+ movdqa %xmm13, %xmm7
+ paddd %xmm0, %xmm4
+ paddd %xmm8, %xmm6
+ paddd %xmm12, %xmm7
+ movdqa %xmm4, %xmm5
+ pslld $7, %xmm4
+ psrld $25, %xmm5
+ pxor %xmm4, %xmm3
+ pxor %xmm5, %xmm3
+ movdqa %xmm0, %xmm4
+ movdqa %xmm6, %xmm5
+ pslld $7, %xmm6
+ psrld $25, %xmm5
+ pxor %xmm6, %xmm11
+ pxor %xmm5, %xmm11
+ movdqa %xmm8, %xmm6
+ movdqa %xmm7, %xmm5
+ pslld $7, %xmm7
+ psrld $25, %xmm5
+ pxor %xmm7, %xmm15
+ pxor %xmm5, %xmm15
+ movdqa %xmm12, %xmm7
+
+ paddd %xmm3, %xmm4
+ paddd %xmm11, %xmm6
+ paddd %xmm15, %xmm7
+ movdqa %xmm4, %xmm5
+ pslld $9, %xmm4
+ psrld $23, %xmm5
+ pxor %xmm4, %xmm2
+ movdqa %xmm3, %xmm4
+ pshufd $0x93, %xmm3, %xmm3
+ pxor %xmm5, %xmm2
+ movdqa %xmm6, %xmm5
+ pslld $9, %xmm6
+ psrld $23, %xmm5
+ pxor %xmm6, %xmm10
+ movdqa %xmm11, %xmm6
+ pshufd $0x93, %xmm11, %xmm11
+ pxor %xmm5, %xmm10
+ movdqa %xmm7, %xmm5
+ pslld $9, %xmm7
+ psrld $23, %xmm5
+ pxor %xmm7, %xmm14
+ movdqa %xmm15, %xmm7
+ pshufd $0x93, %xmm15, %xmm15
+ pxor %xmm5, %xmm14
+
+ paddd %xmm2, %xmm4
+ paddd %xmm10, %xmm6
+ paddd %xmm14, %xmm7
+ movdqa %xmm4, %xmm5
+ pslld $13, %xmm4
+ psrld $19, %xmm5
+ pxor %xmm4, %xmm1
+ movdqa %xmm2, %xmm4
+ pshufd $0x4e, %xmm2, %xmm2
+ pxor %xmm5, %xmm1
+ movdqa %xmm6, %xmm5
+ pslld $13, %xmm6
+ psrld $19, %xmm5
+ pxor %xmm6, %xmm9
+ movdqa %xmm10, %xmm6
+ pshufd $0x4e, %xmm10, %xmm10
+ pxor %xmm5, %xmm9
+ movdqa %xmm7, %xmm5
+ pslld $13, %xmm7
+ psrld $19, %xmm5
+ pxor %xmm7, %xmm13
+ movdqa %xmm14, %xmm7
+ pshufd $0x4e, %xmm14, %xmm14
+ pxor %xmm5, %xmm13
+
+ paddd %xmm1, %xmm4
+ paddd %xmm9, %xmm6
+ paddd %xmm13, %xmm7
+ movdqa %xmm4, %xmm5
+ pslld $18, %xmm4
+ psrld $14, %xmm5
+ pxor %xmm4, %xmm0
+ pshufd $0x39, %xmm1, %xmm1
+ pxor %xmm5, %xmm0
+ movdqa %xmm3, %xmm4
+ movdqa %xmm6, %xmm5
+ pslld $18, %xmm6
+ psrld $14, %xmm5
+ pxor %xmm6, %xmm8
+ pshufd $0x39, %xmm9, %xmm9
+ pxor %xmm5, %xmm8
+ movdqa %xmm11, %xmm6
+ movdqa %xmm7, %xmm5
+ pslld $18, %xmm7
+ psrld $14, %xmm5
+ pxor %xmm7, %xmm12
+ pshufd $0x39, %xmm13, %xmm13
+ pxor %xmm5, %xmm12
+ movdqa %xmm15, %xmm7
+
+ paddd %xmm0, %xmm4
+ paddd %xmm8, %xmm6
+ paddd %xmm12, %xmm7
+ movdqa %xmm4, %xmm5
+ pslld $7, %xmm4
+ psrld $25, %xmm5
+ pxor %xmm4, %xmm1
+ pxor %xmm5, %xmm1
+ movdqa %xmm0, %xmm4
+ movdqa %xmm6, %xmm5
+ pslld $7, %xmm6
+ psrld $25, %xmm5
+ pxor %xmm6, %xmm9
+ pxor %xmm5, %xmm9
+ movdqa %xmm8, %xmm6
+ movdqa %xmm7, %xmm5
+ pslld $7, %xmm7
+ psrld $25, %xmm5
+ pxor %xmm7, %xmm13
+ pxor %xmm5, %xmm13
+ movdqa %xmm12, %xmm7
+
+ paddd %xmm1, %xmm4
+ paddd %xmm9, %xmm6
+ paddd %xmm13, %xmm7
+ movdqa %xmm4, %xmm5
+ pslld $9, %xmm4
+ psrld $23, %xmm5
+ pxor %xmm4, %xmm2
+ movdqa %xmm1, %xmm4
+ pshufd $0x93, %xmm1, %xmm1
+ pxor %xmm5, %xmm2
+ movdqa %xmm6, %xmm5
+ pslld $9, %xmm6
+ psrld $23, %xmm5
+ pxor %xmm6, %xmm10
+ movdqa %xmm9, %xmm6
+ pshufd $0x93, %xmm9, %xmm9
+ pxor %xmm5, %xmm10
+ movdqa %xmm7, %xmm5
+ pslld $9, %xmm7
+ psrld $23, %xmm5
+ pxor %xmm7, %xmm14
+ movdqa %xmm13, %xmm7
+ pshufd $0x93, %xmm13, %xmm13
+ pxor %xmm5, %xmm14
+
+ paddd %xmm2, %xmm4
+ paddd %xmm10, %xmm6
+ paddd %xmm14, %xmm7
+ movdqa %xmm4, %xmm5
+ pslld $13, %xmm4
+ psrld $19, %xmm5
+ pxor %xmm4, %xmm3
+ movdqa %xmm2, %xmm4
+ pshufd $0x4e, %xmm2, %xmm2
+ pxor %xmm5, %xmm3
+ movdqa %xmm6, %xmm5
+ pslld $13, %xmm6
+ psrld $19, %xmm5
+ pxor %xmm6, %xmm11
+ movdqa %xmm10, %xmm6
+ pshufd $0x4e, %xmm10, %xmm10
+ pxor %xmm5, %xmm11
+ movdqa %xmm7, %xmm5
+ pslld $13, %xmm7
+ psrld $19, %xmm5
+ pxor %xmm7, %xmm15
+ movdqa %xmm14, %xmm7
+ pshufd $0x4e, %xmm14, %xmm14
+ pxor %xmm5, %xmm15
+
+ paddd %xmm3, %xmm4
+ paddd %xmm11, %xmm6
+ paddd %xmm15, %xmm7
+ movdqa %xmm4, %xmm5
+ pslld $18, %xmm4
+ psrld $14, %xmm5
+ pxor %xmm4, %xmm0
+ pshufd $0x39, %xmm3, %xmm3
+ pxor %xmm5, %xmm0
+ movdqa %xmm6, %xmm5
+ pslld $18, %xmm6
+ psrld $14, %xmm5
+ pxor %xmm6, %xmm8
+ pshufd $0x39, %xmm11, %xmm11
+ pxor %xmm5, %xmm8
+ movdqa %xmm7, %xmm5
+ pslld $18, %xmm7
+ psrld $14, %xmm5
+ pxor %xmm7, %xmm12
+ pshufd $0x39, %xmm15, %xmm15
+ pxor %xmm5, %xmm12
+.endm
+
+.macro xmm_salsa8_core_3way
+ xmm_salsa8_core_3way_doubleround
+ xmm_salsa8_core_3way_doubleround
+ xmm_salsa8_core_3way_doubleround
+ xmm_salsa8_core_3way_doubleround
+.endm
+
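+# scrypt_core_3way: scrypt core on three independent states in lockstep;
+# %rdi, %rsi and %rdx each point to a 128-byte state, %rcx to a 384 KiB
+# scratchpad whose 384-byte entries interleave the three states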
+ .text
+ .align 32
+ .globl scrypt_core_3way
+ .globl _scrypt_core_3way
+scrypt_core_3way:
+_scrypt_core_3way:
+ pushq %rbx
+ pushq %rbp
+#if defined(WIN64)
+ subq $176, %rsp
+ movdqa %xmm6, 8(%rsp)
+ movdqa %xmm7, 24(%rsp)
+ movdqa %xmm8, 40(%rsp)
+ movdqa %xmm9, 56(%rsp)
+ movdqa %xmm10, 72(%rsp)
+ movdqa %xmm11, 88(%rsp)
+ movdqa %xmm12, 104(%rsp)
+ movdqa %xmm13, 120(%rsp)
+ movdqa %xmm14, 136(%rsp)
+ movdqa %xmm15, 152(%rsp)
+ pushq %rdi
+ pushq %rsi
+ movq %rcx, %rdi
+ movq %rdx, %rsi
+ movq %r8, %rdx
+ movq %r9, %rcx
+#endif
+ subq $392, %rsp
+
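+	# shuffle the three states into SIMD word order on the stack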
+ scrypt_shuffle %rdi, 0, %rsp, 0
+ scrypt_shuffle %rdi, 64, %rsp, 64
+ scrypt_shuffle %rsi, 0, %rsp, 128
+ scrypt_shuffle %rsi, 64, %rsp, 192
+ scrypt_shuffle %rdx, 0, %rsp, 256
+ scrypt_shuffle %rdx, 64, %rsp, 320
+
+ movdqa 128+64(%rsp), %xmm8
+ movdqa 128+80(%rsp), %xmm9
+ movdqa 128+96(%rsp), %xmm10
+ movdqa 128+112(%rsp), %xmm11
+
+ movq %rcx, %rbp
+ leaq 3*131072(%rcx), %rax
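+	# loop1: store the three states to the scratchpad (first halves pre-XORed
+	# with second halves, as in the 2-way version) and advance each by one BlockMix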
+scrypt_core_3way_loop1:
+ movdqa %xmm8, %xmm12
+ movdqa %xmm9, %xmm13
+ movdqa %xmm10, %xmm14
+ movdqa %xmm11, %xmm15
+ movdqa 0(%rsp), %xmm0
+ movdqa 16(%rsp), %xmm1
+ movdqa 32(%rsp), %xmm2
+ movdqa 48(%rsp), %xmm3
+ movdqa 64(%rsp), %xmm4
+ movdqa 80(%rsp), %xmm5
+ movdqa 96(%rsp), %xmm6
+ movdqa 112(%rsp), %xmm7
+ movdqa 128+0(%rsp), %xmm8
+ movdqa 128+16(%rsp), %xmm9
+ movdqa 128+32(%rsp), %xmm10
+ movdqa 128+48(%rsp), %xmm11
+ pxor %xmm4, %xmm0
+ pxor %xmm5, %xmm1
+ pxor %xmm6, %xmm2
+ pxor %xmm7, %xmm3
+ movdqa %xmm0, 0(%rbp)
+ movdqa %xmm1, 16(%rbp)
+ movdqa %xmm2, 32(%rbp)
+ movdqa %xmm3, 48(%rbp)
+ movdqa %xmm4, 64(%rbp)
+ movdqa %xmm5, 80(%rbp)
+ movdqa %xmm6, 96(%rbp)
+ movdqa %xmm7, 112(%rbp)
+ pxor %xmm12, %xmm8
+ pxor %xmm13, %xmm9
+ pxor %xmm14, %xmm10
+ pxor %xmm15, %xmm11
+ movdqa %xmm8, 128+0(%rbp)
+ movdqa %xmm9, 128+16(%rbp)
+ movdqa %xmm10, 128+32(%rbp)
+ movdqa %xmm11, 128+48(%rbp)
+ movdqa %xmm12, 128+64(%rbp)
+ movdqa %xmm13, 128+80(%rbp)
+ movdqa %xmm14, 128+96(%rbp)
+ movdqa %xmm15, 128+112(%rbp)
+ movdqa 256+0(%rsp), %xmm12
+ movdqa 256+16(%rsp), %xmm13
+ movdqa 256+32(%rsp), %xmm14
+ movdqa 256+48(%rsp), %xmm15
+ movdqa 256+64(%rsp), %xmm4
+ movdqa 256+80(%rsp), %xmm5
+ movdqa 256+96(%rsp), %xmm6
+ movdqa 256+112(%rsp), %xmm7
+ pxor %xmm4, %xmm12
+ pxor %xmm5, %xmm13
+ pxor %xmm6, %xmm14
+ pxor %xmm7, %xmm15
+ movdqa %xmm12, 256+0(%rbp)
+ movdqa %xmm13, 256+16(%rbp)
+ movdqa %xmm14, 256+32(%rbp)
+ movdqa %xmm15, 256+48(%rbp)
+ movdqa %xmm4, 256+64(%rbp)
+ movdqa %xmm5, 256+80(%rbp)
+ movdqa %xmm6, 256+96(%rbp)
+ movdqa %xmm7, 256+112(%rbp)
+
+ xmm_salsa8_core_3way
+ paddd 0(%rbp), %xmm0
+ paddd 16(%rbp), %xmm1
+ paddd 32(%rbp), %xmm2
+ paddd 48(%rbp), %xmm3
+ paddd 128+0(%rbp), %xmm8
+ paddd 128+16(%rbp), %xmm9
+ paddd 128+32(%rbp), %xmm10
+ paddd 128+48(%rbp), %xmm11
+ paddd 256+0(%rbp), %xmm12
+ paddd 256+16(%rbp), %xmm13
+ paddd 256+32(%rbp), %xmm14
+ paddd 256+48(%rbp), %xmm15
+ movdqa %xmm0, 0(%rsp)
+ movdqa %xmm1, 16(%rsp)
+ movdqa %xmm2, 32(%rsp)
+ movdqa %xmm3, 48(%rsp)
+ movdqa %xmm8, 128+0(%rsp)
+ movdqa %xmm9, 128+16(%rsp)
+ movdqa %xmm10, 128+32(%rsp)
+ movdqa %xmm11, 128+48(%rsp)
+ movdqa %xmm12, 256+0(%rsp)
+ movdqa %xmm13, 256+16(%rsp)
+ movdqa %xmm14, 256+32(%rsp)
+ movdqa %xmm15, 256+48(%rsp)
+
+ pxor 64(%rsp), %xmm0
+ pxor 80(%rsp), %xmm1
+ pxor 96(%rsp), %xmm2
+ pxor 112(%rsp), %xmm3
+ pxor 128+64(%rsp), %xmm8
+ pxor 128+80(%rsp), %xmm9
+ pxor 128+96(%rsp), %xmm10
+ pxor 128+112(%rsp), %xmm11
+ pxor 256+64(%rsp), %xmm12
+ pxor 256+80(%rsp), %xmm13
+ pxor 256+96(%rsp), %xmm14
+ pxor 256+112(%rsp), %xmm15
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm1, 80(%rsp)
+ movdqa %xmm2, 96(%rsp)
+ movdqa %xmm3, 112(%rsp)
+ movdqa %xmm8, 128+64(%rsp)
+ movdqa %xmm9, 128+80(%rsp)
+ movdqa %xmm10, 128+96(%rsp)
+ movdqa %xmm11, 128+112(%rsp)
+ movdqa %xmm12, 256+64(%rsp)
+ movdqa %xmm13, 256+80(%rsp)
+ movdqa %xmm14, 256+96(%rsp)
+ movdqa %xmm15, 256+112(%rsp)
+ xmm_salsa8_core_3way
+ paddd 64(%rsp), %xmm0
+ paddd 80(%rsp), %xmm1
+ paddd 96(%rsp), %xmm2
+ paddd 112(%rsp), %xmm3
+ paddd 128+64(%rsp), %xmm8
+ paddd 128+80(%rsp), %xmm9
+ paddd 128+96(%rsp), %xmm10
+ paddd 128+112(%rsp), %xmm11
+ paddd 256+64(%rsp), %xmm12
+ paddd 256+80(%rsp), %xmm13
+ paddd 256+96(%rsp), %xmm14
+ paddd 256+112(%rsp), %xmm15
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm1, 80(%rsp)
+ movdqa %xmm2, 96(%rsp)
+ movdqa %xmm3, 112(%rsp)
+ movdqa %xmm8, 128+64(%rsp)
+ movdqa %xmm9, 128+80(%rsp)
+ movdqa %xmm10, 128+96(%rsp)
+ movdqa %xmm11, 128+112(%rsp)
+ movdqa %xmm12, 256+64(%rsp)
+ movdqa %xmm13, 256+80(%rsp)
+ movdqa %xmm14, 256+96(%rsp)
+ movdqa %xmm15, 256+112(%rsp)
+
+ addq $3*128, %rbp
+ cmpq %rax, %rbp
+ jne scrypt_core_3way_loop1
+
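+	# loop2: per state, j = (first word of its second half) & 1023, scaled by
+	# 384 (3*128) plus 0, 128 or 256 to address its slice of the interleaved
+	# entry, which is XORed in before the BlockMix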
+ movq $1024, %rax
+scrypt_core_3way_loop2:
+ movl 64(%rsp), %ebp
+ andl $1023, %ebp
+ leal (%ebp, %ebp, 2), %ebp
+ shll $7, %ebp
+ movl 128+64(%rsp), %ebx
+ andl $1023, %ebx
+ leal (%ebx, %ebx, 2), %ebx
+ shll $7, %ebx
+ addl $128, %ebx
+ movl 256+64(%rsp), %r8d
+ andl $1023, %r8d
+ leal (%r8d, %r8d, 2), %r8d
+ shll $7, %r8d
+ addl $256, %r8d
+ movdqa 0(%rsp), %xmm0
+ movdqa 16(%rsp), %xmm1
+ movdqa 32(%rsp), %xmm2
+ movdqa 48(%rsp), %xmm3
+ movdqa 128+0(%rsp), %xmm8
+ movdqa 128+16(%rsp), %xmm9
+ movdqa 128+32(%rsp), %xmm10
+ movdqa 128+48(%rsp), %xmm11
+ movdqa 256+0(%rsp), %xmm12
+ movdqa 256+16(%rsp), %xmm13
+ movdqa 256+32(%rsp), %xmm14
+ movdqa 256+48(%rsp), %xmm15
+ pxor 0(%rcx, %rbp), %xmm0
+ pxor 16(%rcx, %rbp), %xmm1
+ pxor 32(%rcx, %rbp), %xmm2
+ pxor 48(%rcx, %rbp), %xmm3
+ pxor 0(%rcx, %rbx), %xmm8
+ pxor 16(%rcx, %rbx), %xmm9
+ pxor 32(%rcx, %rbx), %xmm10
+ pxor 48(%rcx, %rbx), %xmm11
+ pxor 0(%rcx, %r8), %xmm12
+ pxor 16(%rcx, %r8), %xmm13
+ pxor 32(%rcx, %r8), %xmm14
+ pxor 48(%rcx, %r8), %xmm15
+
+ pxor 64(%rsp), %xmm0
+ pxor 80(%rsp), %xmm1
+ pxor 96(%rsp), %xmm2
+ pxor 112(%rsp), %xmm3
+ pxor 128+64(%rsp), %xmm8
+ pxor 128+80(%rsp), %xmm9
+ pxor 128+96(%rsp), %xmm10
+ pxor 128+112(%rsp), %xmm11
+ pxor 256+64(%rsp), %xmm12
+ pxor 256+80(%rsp), %xmm13
+ pxor 256+96(%rsp), %xmm14
+ pxor 256+112(%rsp), %xmm15
+ movdqa %xmm0, 0(%rsp)
+ movdqa %xmm1, 16(%rsp)
+ movdqa %xmm2, 32(%rsp)
+ movdqa %xmm3, 48(%rsp)
+ movdqa %xmm8, 128+0(%rsp)
+ movdqa %xmm9, 128+16(%rsp)
+ movdqa %xmm10, 128+32(%rsp)
+ movdqa %xmm11, 128+48(%rsp)
+ movdqa %xmm12, 256+0(%rsp)
+ movdqa %xmm13, 256+16(%rsp)
+ movdqa %xmm14, 256+32(%rsp)
+ movdqa %xmm15, 256+48(%rsp)
+ xmm_salsa8_core_3way
+ paddd 0(%rsp), %xmm0
+ paddd 16(%rsp), %xmm1
+ paddd 32(%rsp), %xmm2
+ paddd 48(%rsp), %xmm3
+ paddd 128+0(%rsp), %xmm8
+ paddd 128+16(%rsp), %xmm9
+ paddd 128+32(%rsp), %xmm10
+ paddd 128+48(%rsp), %xmm11
+ paddd 256+0(%rsp), %xmm12
+ paddd 256+16(%rsp), %xmm13
+ paddd 256+32(%rsp), %xmm14
+ paddd 256+48(%rsp), %xmm15
+ movdqa %xmm0, 0(%rsp)
+ movdqa %xmm1, 16(%rsp)
+ movdqa %xmm2, 32(%rsp)
+ movdqa %xmm3, 48(%rsp)
+ movdqa %xmm8, 128+0(%rsp)
+ movdqa %xmm9, 128+16(%rsp)
+ movdqa %xmm10, 128+32(%rsp)
+ movdqa %xmm11, 128+48(%rsp)
+ movdqa %xmm12, 256+0(%rsp)
+ movdqa %xmm13, 256+16(%rsp)
+ movdqa %xmm14, 256+32(%rsp)
+ movdqa %xmm15, 256+48(%rsp)
+
+ pxor 64(%rcx, %rbp), %xmm0
+ pxor 80(%rcx, %rbp), %xmm1
+ pxor 96(%rcx, %rbp), %xmm2
+ pxor 112(%rcx, %rbp), %xmm3
+ pxor 64(%rcx, %rbx), %xmm8
+ pxor 80(%rcx, %rbx), %xmm9
+ pxor 96(%rcx, %rbx), %xmm10
+ pxor 112(%rcx, %rbx), %xmm11
+ pxor 64(%rcx, %r8), %xmm12
+ pxor 80(%rcx, %r8), %xmm13
+ pxor 96(%rcx, %r8), %xmm14
+ pxor 112(%rcx, %r8), %xmm15
+ pxor 64(%rsp), %xmm0
+ pxor 80(%rsp), %xmm1
+ pxor 96(%rsp), %xmm2
+ pxor 112(%rsp), %xmm3
+ pxor 128+64(%rsp), %xmm8
+ pxor 128+80(%rsp), %xmm9
+ pxor 128+96(%rsp), %xmm10
+ pxor 128+112(%rsp), %xmm11
+ pxor 256+64(%rsp), %xmm12
+ pxor 256+80(%rsp), %xmm13
+ pxor 256+96(%rsp), %xmm14
+ pxor 256+112(%rsp), %xmm15
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm1, 80(%rsp)
+ movdqa %xmm2, 96(%rsp)
+ movdqa %xmm3, 112(%rsp)
+ movdqa %xmm8, 128+64(%rsp)
+ movdqa %xmm9, 128+80(%rsp)
+ movdqa %xmm10, 128+96(%rsp)
+ movdqa %xmm11, 128+112(%rsp)
+ movdqa %xmm12, 256+64(%rsp)
+ movdqa %xmm13, 256+80(%rsp)
+ movdqa %xmm14, 256+96(%rsp)
+ movdqa %xmm15, 256+112(%rsp)
+ xmm_salsa8_core_3way
+ paddd 64(%rsp), %xmm0
+ paddd 80(%rsp), %xmm1
+ paddd 96(%rsp), %xmm2
+ paddd 112(%rsp), %xmm3
+ paddd 128+64(%rsp), %xmm8
+ paddd 128+80(%rsp), %xmm9
+ paddd 128+96(%rsp), %xmm10
+ paddd 128+112(%rsp), %xmm11
+ paddd 256+64(%rsp), %xmm12
+ paddd 256+80(%rsp), %xmm13
+ paddd 256+96(%rsp), %xmm14
+ paddd 256+112(%rsp), %xmm15
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm1, 80(%rsp)
+ movdqa %xmm2, 96(%rsp)
+ movdqa %xmm3, 112(%rsp)
+ movdqa %xmm8, 128+64(%rsp)
+ movdqa %xmm9, 128+80(%rsp)
+ movdqa %xmm10, 128+96(%rsp)
+ movdqa %xmm11, 128+112(%rsp)
+ movdqa %xmm12, 256+64(%rsp)
+ movdqa %xmm13, 256+80(%rsp)
+ movdqa %xmm14, 256+96(%rsp)
+ movdqa %xmm15, 256+112(%rsp)
+
+ subq $1, %rax
+ ja scrypt_core_3way_loop2
+
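+	# un-shuffle the three states back into the caller's buffers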
+ scrypt_shuffle %rsp, 0, %rdi, 0
+ scrypt_shuffle %rsp, 64, %rdi, 64
+ scrypt_shuffle %rsp, 128, %rsi, 0
+ scrypt_shuffle %rsp, 192, %rsi, 64
+ scrypt_shuffle %rsp, 256, %rdx, 0
+ scrypt_shuffle %rsp, 320, %rdx, 64
+
+ addq $392, %rsp
+#if defined(WIN64)
+ popq %rsi
+ popq %rdi
+ movdqa 8(%rsp), %xmm6
+ movdqa 24(%rsp), %xmm7
+ movdqa 40(%rsp), %xmm8
+ movdqa 56(%rsp), %xmm9
+ movdqa 72(%rsp), %xmm10
+ movdqa 88(%rsp), %xmm11
+ movdqa 104(%rsp), %xmm12
+ movdqa 120(%rsp), %xmm13
+ movdqa 136(%rsp), %xmm14
+ movdqa 152(%rsp), %xmm15
+ addq $176, %rsp
+#endif
+ popq %rbp
+ popq %rbx
+ ret
+
#endif
\ No newline at end of file