--- /dev/null
+# Copyright 2011 pooler@litecoinpool.org\r
+# All rights reserved.\r
+#\r
+# Redistribution and use in source and binary forms, with or without\r
+# modification, are permitted provided that the following conditions\r
+# are met:\r
+# 1. Redistributions of source code must retain the above copyright\r
+# notice, this list of conditions and the following disclaimer.\r
+# 2. Redistributions in binary form must reproduce the above copyright\r
+# notice, this list of conditions and the following disclaimer in the\r
+# documentation and/or other materials provided with the distribution.\r
+#\r
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\r
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\r
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\r
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\r
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\r
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\r
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\r
+# SUCH DAMAGE.\r
+\r
+#if defined(__i386__)\r
+\r
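+# gen_salsa8_core_quadround: four Salsa20 rounds (two double rounds) on the
+# 16-word state kept in the caller's stack frame.  gen_salsa8_core is entered
+# via call, so the state words live at 4(%esp)..64(%esp) here (0(%esp) holds
+# the return address); rotations are done with roll, and partial results are
+# carried between rounds in the general-purpose registers.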
+.macro gen_salsa8_core_quadround\r
+ movl 52(%esp), %ecx\r
+ movl 4(%esp), %edx\r
+ movl 20(%esp), %ebx\r
+ movl 8(%esp), %esi\r
+ leal (%ecx, %edx), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl %ebx, 4(%esp)\r
+ movl 36(%esp), %edi\r
+ leal (%edx, %ebx), %ebp\r
+ roll $9, %ebp\r
+ xorl %ebp, %edi\r
+ movl 24(%esp), %ebp\r
+ movl %edi, 8(%esp)\r
+ addl %edi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %ecx\r
+ movl 40(%esp), %ebx\r
+ movl %ecx, 20(%esp)\r
+ addl %edi, %ecx\r
+ roll $18, %ecx\r
+ leal (%esi, %ebp), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl %ebx, 24(%esp)\r
+ movl 56(%esp), %edi\r
+ xorl %ecx, %edx\r
+ leal (%ebp, %ebx), %ecx\r
+ roll $9, %ecx\r
+ xorl %ecx, %edi\r
+ movl %edi, 36(%esp)\r
+ movl 28(%esp), %ecx\r
+ movl %edx, 28(%esp)\r
+ movl 44(%esp), %edx\r
+ addl %edi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %esi\r
+ movl 60(%esp), %ebx\r
+ movl %esi, 40(%esp)\r
+ addl %edi, %esi\r
+ roll $18, %esi\r
+ leal (%ecx, %edx), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl %ebx, 44(%esp)\r
+ movl 12(%esp), %edi\r
+ xorl %esi, %ebp\r
+ leal (%edx, %ebx), %esi\r
+ roll $9, %esi\r
+ xorl %esi, %edi\r
+ movl %edi, 12(%esp)\r
+ movl 48(%esp), %esi\r
+ movl %ebp, 48(%esp)\r
+ movl 64(%esp), %ebp\r
+ addl %edi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %ecx\r
+ movl 16(%esp), %ebx\r
+ movl %ecx, 16(%esp)\r
+ addl %edi, %ecx\r
+ roll $18, %ecx\r
+ leal (%esi, %ebp), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl 32(%esp), %edi\r
+ xorl %ecx, %edx\r
+ leal (%ebp, %ebx), %ecx\r
+ roll $9, %ecx\r
+ xorl %ecx, %edi\r
+ movl %edi, 32(%esp)\r
+ movl %ebx, %ecx\r
+ movl %edx, 52(%esp)\r
+ movl 28(%esp), %edx\r
+ addl %edi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %esi\r
+ movl 40(%esp), %ebx\r
+ movl %esi, 28(%esp)\r
+ addl %edi, %esi\r
+ roll $18, %esi\r
+ leal (%ecx, %edx), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl %ebx, 40(%esp)\r
+ movl 12(%esp), %edi\r
+ xorl %esi, %ebp\r
+ leal (%edx, %ebx), %esi\r
+ roll $9, %esi\r
+ xorl %esi, %edi\r
+ movl %edi, 12(%esp)\r
+ movl 4(%esp), %esi\r
+ movl %ebp, 4(%esp)\r
+ movl 48(%esp), %ebp\r
+ addl %edi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %ecx\r
+ movl 16(%esp), %ebx\r
+ movl %ecx, 16(%esp)\r
+ addl %edi, %ecx\r
+ roll $18, %ecx\r
+ leal (%esi, %ebp), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl %ebx, 48(%esp)\r
+ movl 32(%esp), %edi\r
+ xorl %ecx, %edx\r
+ leal (%ebp, %ebx), %ecx\r
+ roll $9, %ecx\r
+ xorl %ecx, %edi\r
+ movl %edi, 32(%esp)\r
+ movl 24(%esp), %ecx\r
+ movl %edx, 24(%esp)\r
+ movl 52(%esp), %edx\r
+ addl %edi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %esi\r
+ movl 28(%esp), %ebx\r
+ movl %esi, 28(%esp)\r
+ addl %edi, %esi\r
+ roll $18, %esi\r
+ leal (%ecx, %edx), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl %ebx, 52(%esp)\r
+ movl 8(%esp), %edi\r
+ xorl %esi, %ebp\r
+ leal (%edx, %ebx), %esi\r
+ roll $9, %esi\r
+ xorl %esi, %edi\r
+ movl %edi, 8(%esp)\r
+ movl 44(%esp), %esi\r
+ movl %ebp, 44(%esp)\r
+ movl 4(%esp), %ebp\r
+ addl %edi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %ecx\r
+ movl 20(%esp), %ebx\r
+ movl %ecx, 4(%esp)\r
+ addl %edi, %ecx\r
+ roll $18, %ecx\r
+ leal (%esi, %ebp), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl 36(%esp), %edi\r
+ xorl %ecx, %edx\r
+ leal (%ebp, %ebx), %ecx\r
+ roll $9, %ecx\r
+ xorl %ecx, %edi\r
+ movl %edi, 20(%esp)\r
+ movl %ebx, %ecx\r
+ movl %edx, 36(%esp)\r
+ movl 24(%esp), %edx\r
+ addl %edi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %esi\r
+ movl 28(%esp), %ebx\r
+ movl %esi, 24(%esp)\r
+ addl %edi, %esi\r
+ roll $18, %esi\r
+ leal (%ecx, %edx), %edi\r
+ roll $7, %edi\r
+ xorl %edi, %ebx\r
+ movl %ebx, 28(%esp)\r
+ xorl %esi, %ebp\r
+ movl 8(%esp), %esi\r
+ leal (%edx, %ebx), %edi\r
+ roll $9, %edi\r
+ xorl %edi, %esi\r
+ movl 40(%esp), %edi\r
+ movl %ebp, 8(%esp)\r
+ movl 44(%esp), %ebp\r
+ movl %esi, 40(%esp)\r
+ addl %esi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %ecx\r
+ movl 4(%esp), %ebx\r
+ movl %ecx, 44(%esp)\r
+ addl %esi, %ecx\r
+ roll $18, %ecx\r
+ leal (%edi, %ebp), %esi\r
+ roll $7, %esi\r
+ xorl %esi, %ebx\r
+ movl %ebx, 4(%esp)\r
+ movl 20(%esp), %esi\r
+ xorl %ecx, %edx\r
+ leal (%ebp, %ebx), %ecx\r
+ roll $9, %ecx\r
+ xorl %ecx, %esi\r
+ movl %esi, 56(%esp)\r
+ movl 48(%esp), %ecx\r
+ movl %edx, 20(%esp)\r
+ movl 36(%esp), %edx\r
+ addl %esi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %edi\r
+ movl 24(%esp), %ebx\r
+ movl %edi, 24(%esp)\r
+ addl %esi, %edi\r
+ roll $18, %edi\r
+ leal (%ecx, %edx), %esi\r
+ roll $7, %esi\r
+ xorl %esi, %ebx\r
+ movl %ebx, 60(%esp)\r
+ movl 12(%esp), %esi\r
+ xorl %edi, %ebp\r
+ leal (%edx, %ebx), %edi\r
+ roll $9, %edi\r
+ xorl %edi, %esi\r
+ movl %esi, 12(%esp)\r
+ movl 52(%esp), %edi\r
+ movl %ebp, 36(%esp)\r
+ movl 8(%esp), %ebp\r
+ addl %esi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %ecx\r
+ movl 16(%esp), %ebx\r
+ movl %ecx, 16(%esp)\r
+ addl %esi, %ecx\r
+ roll $18, %ecx\r
+ leal (%edi, %ebp), %esi\r
+ roll $7, %esi\r
+ xorl %esi, %ebx\r
+ movl 32(%esp), %esi\r
+ xorl %ecx, %edx\r
+ leal (%ebp, %ebx), %ecx\r
+ roll $9, %ecx\r
+ xorl %ecx, %esi\r
+ movl %esi, 32(%esp)\r
+ movl %ebx, %ecx\r
+ movl %edx, 48(%esp)\r
+ movl 20(%esp), %edx\r
+ addl %esi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %edi\r
+ movl 24(%esp), %ebx\r
+ movl %edi, 20(%esp)\r
+ addl %esi, %edi\r
+ roll $18, %edi\r
+ leal (%ecx, %edx), %esi\r
+ roll $7, %esi\r
+ xorl %esi, %ebx\r
+ movl %ebx, 8(%esp)\r
+ movl 12(%esp), %esi\r
+ xorl %edi, %ebp\r
+ leal (%edx, %ebx), %edi\r
+ roll $9, %edi\r
+ xorl %edi, %esi\r
+ movl %esi, 12(%esp)\r
+ movl 28(%esp), %edi\r
+ movl %ebp, 52(%esp)\r
+ movl 36(%esp), %ebp\r
+ addl %esi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %ecx\r
+ movl 16(%esp), %ebx\r
+ movl %ecx, 16(%esp)\r
+ addl %esi, %ecx\r
+ roll $18, %ecx\r
+ leal (%edi, %ebp), %esi\r
+ roll $7, %esi\r
+ xorl %esi, %ebx\r
+ movl %ebx, 28(%esp)\r
+ movl 32(%esp), %esi\r
+ xorl %ecx, %edx\r
+ leal (%ebp, %ebx), %ecx\r
+ roll $9, %ecx\r
+ xorl %ecx, %esi\r
+ movl %esi, 32(%esp)\r
+ movl 4(%esp), %ecx\r
+ movl %edx, 4(%esp)\r
+ movl 48(%esp), %edx\r
+ addl %esi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %edi\r
+ movl 20(%esp), %ebx\r
+ movl %edi, 20(%esp)\r
+ addl %esi, %edi\r
+ roll $18, %edi\r
+ leal (%ecx, %edx), %esi\r
+ roll $7, %esi\r
+ xorl %esi, %ebx\r
+ movl %ebx, 48(%esp)\r
+ movl 40(%esp), %esi\r
+ xorl %edi, %ebp\r
+ leal (%edx, %ebx), %edi\r
+ roll $9, %edi\r
+ xorl %edi, %esi\r
+ movl %esi, 36(%esp)\r
+ movl 60(%esp), %edi\r
+ movl %ebp, 24(%esp)\r
+ movl 52(%esp), %ebp\r
+ addl %esi, %ebx\r
+ roll $13, %ebx\r
+ xorl %ebx, %ecx\r
+ movl 44(%esp), %ebx\r
+ movl %ecx, 40(%esp)\r
+ addl %esi, %ecx\r
+ roll $18, %ecx\r
+ leal (%edi, %ebp), %esi\r
+ roll $7, %esi\r
+ xorl %esi, %ebx\r
+ movl %ebx, 52(%esp)\r
+ movl 56(%esp), %esi\r
+ xorl %ecx, %edx\r
+ leal (%ebp, %ebx), %ecx\r
+ roll $9, %ecx\r
+ xorl %ecx, %esi\r
+ movl %esi, 56(%esp)\r
+ addl %esi, %ebx\r
+ movl %edx, 44(%esp)\r
+ roll $13, %ebx\r
+ xorl %ebx, %edi\r
+ movl %edi, 60(%esp)\r
+ addl %esi, %edi\r
+ roll $18, %edi\r
+ xorl %edi, %ebp\r
+ movl %ebp, 64(%esp)\r
+.endm\r
+\r
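+# gen_salsa8_core: generic (non-SSE2) Salsa20/8 core - two quadrounds, i.e.
+# eight rounds, over the state prepared on the stack by the scrypt_core
+# macros below.  The Salsa20 feed-forward addition of the input is left to
+# the caller (scrypt_core_macro2/macro3).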
+ .text\r
+ .align 32\r
+gen_salsa8_core:\r
+ gen_salsa8_core_quadround\r
+ gen_salsa8_core_quadround\r
+ ret\r
+ \r
+ \r
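+# scrypt_core: the scrypt memory-hard loop with the parameters hard-coded
+# below (N = 1024, r = 1).  The first argument points to the 32-word block X,
+# the second to the 128 KiB scratchpad V.  In C-like pseudocode (identifier
+# names here are descriptive only, not taken from the C source):
+#
+#   for (i = 0; i < 1024; i++) { V[i] = X; X = blockmix_salsa8(X); }
+#   for (i = 0; i < 1024; i++) { j = X[16] & 1023; X ^= V[j]; X = blockmix_salsa8(X); }
+#
+# A generic x86 path and an SSE2 path are provided; SSE2 is detected at run time.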
+ .text\r
+ .align 32\r
+ .globl scrypt_core\r
+ .globl _scrypt_core\r
+scrypt_core:\r
+_scrypt_core:\r
+ pushl %ebx\r
+ pushl %ebp\r
+ pushl %edi\r
+ pushl %esi\r
+ \r
+	# Check for SSE2 availability (CPUID leaf 1: EDX bit 26)
+ movl $1, %eax\r
+ cpuid\r
+ andl $0x04000000, %edx\r
+ jnz xmm_scrypt_core\r
+ \r
+gen_scrypt_core:\r
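+	# %edi = X (1st argument, 20(%esp) after the four pushes),
+	# %esi = V (2nd argument, 24(%esp)); the 72-byte frame holds the 16
+	# salsa input words at 0(%esp)..60(%esp) and two scratch slots at
+	# 64(%esp)/68(%esp)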
+ movl 20(%esp), %edi\r
+ movl 24(%esp), %esi\r
+ subl $72, %esp\r
+ \r
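+# scrypt_core_macro1a: first loop - copy one word from each half of X into
+# the current scratchpad block at (%esi), then store the XOR of the two words
+# back into the first half of X and onto the stack as salsa8 input.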
+.macro scrypt_core_macro1a p, q\r
+ movl \p(%edi), %eax\r
+ movl \q(%edi), %edx\r
+ movl %eax, \p(%esi)\r
+ movl %edx, \q(%esi)\r
+ xorl %edx, %eax\r
+ movl %eax, \p(%edi)\r
+ movl %eax, \p(%esp)\r
+.endm\r
+ \r
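+# scrypt_core_macro1b: second loop - like macro1a, but instead of copying X
+# out, both halves of X are first XORed with scratchpad block V[j]
+# ((%esi,%edx), with %edx holding j's byte offset).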
+.macro scrypt_core_macro1b p, q\r
+ movl \p(%edi), %eax\r
+ xorl \p(%esi, %edx), %eax\r
+ movl \q(%edi), %ebx\r
+ xorl \q(%esi, %edx), %ebx\r
+ movl %ebx, \q(%edi)\r
+ xorl %ebx, %eax\r
+ movl %eax, \p(%edi)\r
+ movl %eax, \p(%esp)\r
+.endm\r
+ \r
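+# scrypt_core_macro2: Salsa20 feed-forward - add the core output (on the
+# stack) to its input (in the first half of X), then XOR the sum into the
+# second half of X and stash it on the stack as input for the second salsa8.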
+.macro scrypt_core_macro2 p, q\r
+ movl \p(%esp), %eax\r
+ addl \p(%edi), %eax\r
+ movl %eax, \p(%edi)\r
+ xorl \q(%edi), %eax\r
+ movl %eax, \q(%edi)\r
+ movl %eax, \p(%esp)\r
+.endm\r
+ \r
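+# scrypt_core_macro3: feed-forward for the second salsa8; writes the final
+# second-half words of X back to memory.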
+.macro scrypt_core_macro3 p, q\r
+ movl \p(%esp), %eax\r
+ addl \q(%edi), %eax\r
+ movl %eax, \q(%edi)\r
+.endm\r
+ \r
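+	# first loop: %ecx marks the end of the scratchpad
+	# (131072 = 1024 blocks of 128 bytes)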
+ leal 131072(%esi), %ecx\r
+gen_scrypt_core_loop1:\r
+ movl %esi, 64(%esp)\r
+ movl %ecx, 68(%esp)\r
+ \r
+ scrypt_core_macro1a 0, 64\r
+ scrypt_core_macro1a 4, 68\r
+ scrypt_core_macro1a 8, 72\r
+ scrypt_core_macro1a 12, 76\r
+ scrypt_core_macro1a 16, 80\r
+ scrypt_core_macro1a 20, 84\r
+ scrypt_core_macro1a 24, 88\r
+ scrypt_core_macro1a 28, 92\r
+ scrypt_core_macro1a 32, 96\r
+ scrypt_core_macro1a 36, 100\r
+ scrypt_core_macro1a 40, 104\r
+ scrypt_core_macro1a 44, 108\r
+ scrypt_core_macro1a 48, 112\r
+ scrypt_core_macro1a 52, 116\r
+ scrypt_core_macro1a 56, 120\r
+ scrypt_core_macro1a 60, 124\r
+ \r
+ call gen_salsa8_core\r
+ \r
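+	# 92(%esp) = 20(%esp) + the 72-byte frame, i.e. the X pointer again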
+ movl 92(%esp), %edi\r
+ scrypt_core_macro2 0, 64\r
+ scrypt_core_macro2 4, 68\r
+ scrypt_core_macro2 8, 72\r
+ scrypt_core_macro2 12, 76\r
+ scrypt_core_macro2 16, 80\r
+ scrypt_core_macro2 20, 84\r
+ scrypt_core_macro2 24, 88\r
+ scrypt_core_macro2 28, 92\r
+ scrypt_core_macro2 32, 96\r
+ scrypt_core_macro2 36, 100\r
+ scrypt_core_macro2 40, 104\r
+ scrypt_core_macro2 44, 108\r
+ scrypt_core_macro2 48, 112\r
+ scrypt_core_macro2 52, 116\r
+ scrypt_core_macro2 56, 120\r
+ scrypt_core_macro2 60, 124\r
+ \r
+ call gen_salsa8_core\r
+ \r
+ movl 92(%esp), %edi\r
+ scrypt_core_macro3 0, 64\r
+ scrypt_core_macro3 4, 68\r
+ scrypt_core_macro3 8, 72\r
+ scrypt_core_macro3 12, 76\r
+ scrypt_core_macro3 16, 80\r
+ scrypt_core_macro3 20, 84\r
+ scrypt_core_macro3 24, 88\r
+ scrypt_core_macro3 28, 92\r
+ scrypt_core_macro3 32, 96\r
+ scrypt_core_macro3 36, 100\r
+ scrypt_core_macro3 40, 104\r
+ scrypt_core_macro3 44, 108\r
+ scrypt_core_macro3 48, 112\r
+ scrypt_core_macro3 52, 116\r
+ scrypt_core_macro3 56, 120\r
+ scrypt_core_macro3 60, 124\r
+ \r
+ movl 64(%esp), %esi\r
+ movl 68(%esp), %ecx\r
+ addl $128, %esi\r
+ cmpl %ecx, %esi\r
+ jne gen_scrypt_core_loop1\r
+\r
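+	# second loop: 96(%esp) is the V pointer (2nd argument); 1024 iterations
+	# of X ^= V[j] followed by a blockmix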
+ movl 96(%esp), %esi\r
+ movl $1024, %ecx\r
+gen_scrypt_core_loop2:\r
+ movl %ecx, 68(%esp)\r
+ \r
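+	# j = X[16] & (N-1); shift left by 7 to get the byte offset of V[j]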
+ movl 64(%edi), %edx\r
+ andl $1023, %edx\r
+ shll $7, %edx\r
+ \r
+ scrypt_core_macro1b 0, 64\r
+ scrypt_core_macro1b 4, 68\r
+ scrypt_core_macro1b 8, 72\r
+ scrypt_core_macro1b 12, 76\r
+ scrypt_core_macro1b 16, 80\r
+ scrypt_core_macro1b 20, 84\r
+ scrypt_core_macro1b 24, 88\r
+ scrypt_core_macro1b 28, 92\r
+ scrypt_core_macro1b 32, 96\r
+ scrypt_core_macro1b 36, 100\r
+ scrypt_core_macro1b 40, 104\r
+ scrypt_core_macro1b 44, 108\r
+ scrypt_core_macro1b 48, 112\r
+ scrypt_core_macro1b 52, 116\r
+ scrypt_core_macro1b 56, 120\r
+ scrypt_core_macro1b 60, 124\r
+ \r
+ call gen_salsa8_core\r
+ \r
+ movl 92(%esp), %edi\r
+ scrypt_core_macro2 0, 64\r
+ scrypt_core_macro2 4, 68\r
+ scrypt_core_macro2 8, 72\r
+ scrypt_core_macro2 12, 76\r
+ scrypt_core_macro2 16, 80\r
+ scrypt_core_macro2 20, 84\r
+ scrypt_core_macro2 24, 88\r
+ scrypt_core_macro2 28, 92\r
+ scrypt_core_macro2 32, 96\r
+ scrypt_core_macro2 36, 100\r
+ scrypt_core_macro2 40, 104\r
+ scrypt_core_macro2 44, 108\r
+ scrypt_core_macro2 48, 112\r
+ scrypt_core_macro2 52, 116\r
+ scrypt_core_macro2 56, 120\r
+ scrypt_core_macro2 60, 124\r
+ \r
+ call gen_salsa8_core\r
+ \r
+ movl 92(%esp), %edi\r
+ movl 96(%esp), %esi\r
+ scrypt_core_macro3 0, 64\r
+ scrypt_core_macro3 4, 68\r
+ scrypt_core_macro3 8, 72\r
+ scrypt_core_macro3 12, 76\r
+ scrypt_core_macro3 16, 80\r
+ scrypt_core_macro3 20, 84\r
+ scrypt_core_macro3 24, 88\r
+ scrypt_core_macro3 28, 92\r
+ scrypt_core_macro3 32, 96\r
+ scrypt_core_macro3 36, 100\r
+ scrypt_core_macro3 40, 104\r
+ scrypt_core_macro3 44, 108\r
+ scrypt_core_macro3 48, 112\r
+ scrypt_core_macro3 52, 116\r
+ scrypt_core_macro3 56, 120\r
+ scrypt_core_macro3 60, 124\r
+ \r
+ movl 68(%esp), %ecx\r
+ subl $1, %ecx\r
+ ja gen_scrypt_core_loop2\r
+ \r
+ addl $72, %esp\r
+ popl %esi\r
+ popl %edi\r
+ popl %ebp\r
+ popl %ebx\r
+ ret\r
+\r
+\r
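+# xmm_salsa8_core_doubleround: one Salsa20 double round with each xmm register
+# holding one diagonal of the 4x4 state.  A left-rotate by n is built from
+# pslld $n / psrld $(32-n) plus two pxor; the pshufd instructions re-align the
+# diagonals between half-rounds.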
+.macro xmm_salsa8_core_doubleround\r
+ movdqa %xmm1, %xmm4\r
+ paddd %xmm0, %xmm4\r
+ movdqa %xmm4, %xmm5\r
+ pslld $7, %xmm4\r
+ psrld $25, %xmm5\r
+ pxor %xmm4, %xmm3\r
+ pxor %xmm5, %xmm3\r
+ movdqa %xmm0, %xmm4\r
+ \r
+ paddd %xmm3, %xmm4\r
+ movdqa %xmm4, %xmm5\r
+ pslld $9, %xmm4\r
+ psrld $23, %xmm5\r
+ pxor %xmm4, %xmm2\r
+ movdqa %xmm3, %xmm4\r
+ pshufd $0x93, %xmm3, %xmm3\r
+ pxor %xmm5, %xmm2\r
+ \r
+ paddd %xmm2, %xmm4\r
+ movdqa %xmm4, %xmm5\r
+ pslld $13, %xmm4\r
+ psrld $19, %xmm5\r
+ pxor %xmm4, %xmm1\r
+ movdqa %xmm2, %xmm4\r
+ pshufd $0x4e, %xmm2, %xmm2\r
+ pxor %xmm5, %xmm1\r
+ \r
+ paddd %xmm1, %xmm4\r
+ movdqa %xmm4, %xmm5\r
+ pslld $18, %xmm4\r
+ psrld $14, %xmm5\r
+ pxor %xmm4, %xmm0\r
+ pshufd $0x39, %xmm1, %xmm1\r
+ pxor %xmm5, %xmm0\r
+ movdqa %xmm3, %xmm4\r
+ \r
+ paddd %xmm0, %xmm4\r
+ movdqa %xmm4, %xmm5\r
+ pslld $7, %xmm4\r
+ psrld $25, %xmm5\r
+ pxor %xmm4, %xmm1\r
+ pxor %xmm5, %xmm1\r
+ movdqa %xmm0, %xmm4\r
+ \r
+ paddd %xmm1, %xmm4\r
+ movdqa %xmm4, %xmm5\r
+ pslld $9, %xmm4\r
+ psrld $23, %xmm5\r
+ pxor %xmm4, %xmm2\r
+ movdqa %xmm1, %xmm4\r
+ pshufd $0x93, %xmm1, %xmm1\r
+ pxor %xmm5, %xmm2\r
+ \r
+ paddd %xmm2, %xmm4\r
+ movdqa %xmm4, %xmm5\r
+ pslld $13, %xmm4\r
+ psrld $19, %xmm5\r
+ pxor %xmm4, %xmm3\r
+ movdqa %xmm2, %xmm4\r
+ pshufd $0x4e, %xmm2, %xmm2\r
+ pxor %xmm5, %xmm3\r
+ \r
+ paddd %xmm3, %xmm4\r
+ movdqa %xmm4, %xmm5\r
+ pslld $18, %xmm4\r
+ psrld $14, %xmm5\r
+ pxor %xmm4, %xmm0\r
+ pshufd $0x39, %xmm3, %xmm3\r
+ pxor %xmm5, %xmm0\r
+.endm\r
+\r
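+# xmm_salsa8_core: four double rounds = the Salsa20/8 core; the feed-forward
+# addition of the input is done by the caller with paddd.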
+.macro xmm_salsa8_core\r
+ xmm_salsa8_core_doubleround\r
+ xmm_salsa8_core_doubleround\r
+ xmm_salsa8_core_doubleround\r
+ xmm_salsa8_core_doubleround\r
+.endm\r
+ \r
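+# xmm_scrypt_core: SSE2 implementation.  %ebp saves the original stack
+# pointer; the frame is then aligned to 16 bytes so movdqa can be used on the
+# two shuffled 64-byte halves of X kept at 0(%esp) and 64(%esp).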
+ .align 32\r
+xmm_scrypt_core:\r
+ movl 20(%esp), %edi\r
+ movl 24(%esp), %esi\r
+ movl %esp, %ebp\r
+ subl $128, %esp\r
+ andl $-16, %esp\r
+ \r
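+	# The 16 words of each 64-byte block are permuted so that each xmm
+	# register loads one diagonal of the Salsa20 state; the scratchpad is
+	# filled in this shuffled layout, and the blocks are shuffled back
+	# before returning.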
+ # shuffle 1st block to (%esp)\r
+ movl 60(%edi), %edx\r
+ movl 44(%edi), %ecx\r
+ movl 28(%edi), %ebx\r
+ movl 12(%edi), %eax\r
+ movl %edx, 12(%esp)\r
+ movl %ecx, 28(%esp)\r
+ movl %ebx, 44(%esp)\r
+ movl %eax, 60(%esp)\r
+ movl 40(%edi), %ecx\r
+ movl 24(%edi), %ebx\r
+ movl 8(%edi), %eax\r
+ movl 56(%edi), %edx\r
+ movl %ecx, 8(%esp)\r
+ movl %ebx, 24(%esp)\r
+ movl %eax, 40(%esp)\r
+ movl %edx, 56(%esp)\r
+ movl 20(%edi), %ebx\r
+ movl 4(%edi), %eax\r
+ movl 52(%edi), %edx\r
+ movl 36(%edi), %ecx\r
+ movl %ebx, 4(%esp)\r
+ movl %eax, 20(%esp)\r
+ movl %edx, 36(%esp)\r
+ movl %ecx, 52(%esp)\r
+ movl 0(%edi), %eax\r
+ movl 48(%edi), %edx\r
+ movl 32(%edi), %ecx\r
+ movl 16(%edi), %ebx\r
+ movl %eax, 0(%esp)\r
+ movl %edx, 16(%esp)\r
+ movl %ecx, 32(%esp)\r
+ movl %ebx, 48(%esp)\r
+ \r
+ # shuffle 2nd block to 64(%esp)\r
+ movl 124(%edi), %edx\r
+ movl 108(%edi), %ecx\r
+ movl 92(%edi), %ebx\r
+ movl 76(%edi), %eax\r
+ movl %edx, 76(%esp)\r
+ movl %ecx, 92(%esp)\r
+ movl %ebx, 108(%esp)\r
+ movl %eax, 124(%esp)\r
+ movl 104(%edi), %ecx\r
+ movl 88(%edi), %ebx\r
+ movl 72(%edi), %eax\r
+ movl 120(%edi), %edx\r
+ movl %ecx, 72(%esp)\r
+ movl %ebx, 88(%esp)\r
+ movl %eax, 104(%esp)\r
+ movl %edx, 120(%esp)\r
+ movl 84(%edi), %ebx\r
+ movl 68(%edi), %eax\r
+ movl 116(%edi), %edx\r
+ movl 100(%edi), %ecx\r
+ movl %ebx, 68(%esp)\r
+ movl %eax, 84(%esp)\r
+ movl %edx, 100(%esp)\r
+ movl %ecx, 116(%esp)\r
+ movl 64(%edi), %eax\r
+ movl 112(%edi), %edx\r
+ movl 96(%edi), %ecx\r
+ movl 80(%edi), %ebx\r
+ movl %eax, 64(%esp)\r
+ movl %edx, 80(%esp)\r
+ movl %ecx, 96(%esp)\r
+ movl %ebx, 112(%esp)\r
+ \r
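+	# first loop: store the shuffled X into each of the 1024 scratchpad
+	# blocks in turn, mixing X after each store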
+ movl %esi, %edx\r
+ leal 131072(%esi), %ecx\r
+xmm_scrypt_core_loop1:\r
+ movdqa 0(%esp), %xmm0\r
+ movdqa 16(%esp), %xmm1\r
+ movdqa 32(%esp), %xmm2\r
+ movdqa 48(%esp), %xmm3\r
+ movdqa 64(%esp), %xmm4\r
+ movdqa 80(%esp), %xmm5\r
+ movdqa 96(%esp), %xmm6\r
+ movdqa 112(%esp), %xmm7\r
+ movdqa %xmm0, 0(%edx)\r
+ movdqa %xmm1, 16(%edx)\r
+ movdqa %xmm2, 32(%edx)\r
+ movdqa %xmm3, 48(%edx)\r
+ movdqa %xmm4, 64(%edx)\r
+ movdqa %xmm5, 80(%edx)\r
+ movdqa %xmm6, 96(%edx)\r
+ movdqa %xmm7, 112(%edx)\r
+ \r
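+	# first half ^= second half, then Salsa20/8 (feed-forward via the paddd below)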
+ pxor %xmm4, %xmm0\r
+ pxor %xmm5, %xmm1\r
+ pxor %xmm6, %xmm2\r
+ pxor %xmm7, %xmm3\r
+ movdqa %xmm0, 0(%esp)\r
+ movdqa %xmm1, 16(%esp)\r
+ movdqa %xmm2, 32(%esp)\r
+ movdqa %xmm3, 48(%esp)\r
+ xmm_salsa8_core\r
+ paddd 0(%esp), %xmm0\r
+ paddd 16(%esp), %xmm1\r
+ paddd 32(%esp), %xmm2\r
+ paddd 48(%esp), %xmm3\r
+ movdqa %xmm0, 0(%esp)\r
+ movdqa %xmm1, 16(%esp)\r
+ movdqa %xmm2, 32(%esp)\r
+ movdqa %xmm3, 48(%esp)\r
+ \r
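+	# second half ^= updated first half, then Salsa20/8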
+ pxor 64(%esp), %xmm0\r
+ pxor 80(%esp), %xmm1\r
+ pxor 96(%esp), %xmm2\r
+ pxor 112(%esp), %xmm3\r
+ movdqa %xmm0, 64(%esp)\r
+ movdqa %xmm1, 80(%esp)\r
+ movdqa %xmm2, 96(%esp)\r
+ movdqa %xmm3, 112(%esp)\r
+ xmm_salsa8_core\r
+ paddd 64(%esp), %xmm0\r
+ paddd 80(%esp), %xmm1\r
+ paddd 96(%esp), %xmm2\r
+ paddd 112(%esp), %xmm3\r
+ movdqa %xmm0, 64(%esp)\r
+ movdqa %xmm1, 80(%esp)\r
+ movdqa %xmm2, 96(%esp)\r
+ movdqa %xmm3, 112(%esp)\r
+ \r
+ addl $128, %edx\r
+ cmpl %ecx, %edx\r
+ jne xmm_scrypt_core_loop1\r
+ \r
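+	# second loop: 1024 iterations of X ^= V[j]; blockmix, entirely in the
+	# shuffled layout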
+ movl $1024, %ecx\r
+xmm_scrypt_core_loop2:\r
+ movdqa 0(%esp), %xmm0\r
+ movdqa 16(%esp), %xmm1\r
+ movdqa 32(%esp), %xmm2\r
+ movdqa 48(%esp), %xmm3\r
+ movdqa 64(%esp), %xmm4\r
+ movdqa 80(%esp), %xmm5\r
+ movdqa 96(%esp), %xmm6\r
+ movdqa 112(%esp), %xmm7\r
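+	# j = X[16] & (N-1): the low dword of %xmm4 (word 0 of the shuffled 2nd
+	# block); shift left by 7 for the byte offset of V[j]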
+ movd %xmm4, %edx\r
+ andl $1023, %edx\r
+ shll $7, %edx\r
+ pxor 0(%esi, %edx), %xmm0\r
+ pxor 16(%esi, %edx), %xmm1\r
+ pxor 32(%esi, %edx), %xmm2\r
+ pxor 48(%esi, %edx), %xmm3\r
+ pxor 64(%esi, %edx), %xmm4\r
+ pxor 80(%esi, %edx), %xmm5\r
+ pxor 96(%esi, %edx), %xmm6\r
+ pxor 112(%esi, %edx), %xmm7\r
+ movdqa %xmm4, 64(%esp)\r
+ movdqa %xmm5, 80(%esp)\r
+ movdqa %xmm6, 96(%esp)\r
+ movdqa %xmm7, 112(%esp)\r
+ \r
+ pxor %xmm4, %xmm0\r
+ pxor %xmm5, %xmm1\r
+ pxor %xmm6, %xmm2\r
+ pxor %xmm7, %xmm3\r
+ movdqa %xmm0, 0(%esp)\r
+ movdqa %xmm1, 16(%esp)\r
+ movdqa %xmm2, 32(%esp)\r
+ movdqa %xmm3, 48(%esp)\r
+ xmm_salsa8_core\r
+ paddd 0(%esp), %xmm0\r
+ paddd 16(%esp), %xmm1\r
+ paddd 32(%esp), %xmm2\r
+ paddd 48(%esp), %xmm3\r
+ movdqa %xmm0, 0(%esp)\r
+ movdqa %xmm1, 16(%esp)\r
+ movdqa %xmm2, 32(%esp)\r
+ movdqa %xmm3, 48(%esp)\r
+ \r
+ pxor 64(%esp), %xmm0\r
+ pxor 80(%esp), %xmm1\r
+ pxor 96(%esp), %xmm2\r
+ pxor 112(%esp), %xmm3\r
+ movdqa %xmm0, 64(%esp)\r
+ movdqa %xmm1, 80(%esp)\r
+ movdqa %xmm2, 96(%esp)\r
+ movdqa %xmm3, 112(%esp)\r
+ xmm_salsa8_core\r
+ paddd 64(%esp), %xmm0\r
+ paddd 80(%esp), %xmm1\r
+ paddd 96(%esp), %xmm2\r
+ paddd 112(%esp), %xmm3\r
+ movdqa %xmm0, 64(%esp)\r
+ movdqa %xmm1, 80(%esp)\r
+ movdqa %xmm2, 96(%esp)\r
+ movdqa %xmm3, 112(%esp)\r
+ \r
+ subl $1, %ecx\r
+ ja xmm_scrypt_core_loop2\r
+ \r
+ # re-shuffle 1st block back\r
+ movl 60(%esp), %edx\r
+ movl 44(%esp), %ecx\r
+ movl 28(%esp), %ebx\r
+ movl 12(%esp), %eax\r
+ movl %edx, 12(%edi)\r
+ movl %ecx, 28(%edi)\r
+ movl %ebx, 44(%edi)\r
+ movl %eax, 60(%edi)\r
+ movl 40(%esp), %ecx\r
+ movl 24(%esp), %ebx\r
+ movl 8(%esp), %eax\r
+ movl 56(%esp), %edx\r
+ movl %ecx, 8(%edi)\r
+ movl %ebx, 24(%edi)\r
+ movl %eax, 40(%edi)\r
+ movl %edx, 56(%edi)\r
+ movl 20(%esp), %ebx\r
+ movl 4(%esp), %eax\r
+ movl 52(%esp), %edx\r
+ movl 36(%esp), %ecx\r
+ movl %ebx, 4(%edi)\r
+ movl %eax, 20(%edi)\r
+ movl %edx, 36(%edi)\r
+ movl %ecx, 52(%edi)\r
+ movl 0(%esp), %eax\r
+ movl 48(%esp), %edx\r
+ movl 32(%esp), %ecx\r
+ movl 16(%esp), %ebx\r
+ movl %eax, 0(%edi)\r
+ movl %edx, 16(%edi)\r
+ movl %ecx, 32(%edi)\r
+ movl %ebx, 48(%edi)\r
+ \r
+ # re-shuffle 2nd block back\r
+ movl 124(%esp), %edx\r
+ movl 108(%esp), %ecx\r
+ movl 92(%esp), %ebx\r
+ movl 76(%esp), %eax\r
+ movl %edx, 76(%edi)\r
+ movl %ecx, 92(%edi)\r
+ movl %ebx, 108(%edi)\r
+ movl %eax, 124(%edi)\r
+ movl 104(%esp), %ecx\r
+ movl 88(%esp), %ebx\r
+ movl 72(%esp), %eax\r
+ movl 120(%esp), %edx\r
+ movl %ecx, 72(%edi)\r
+ movl %ebx, 88(%edi)\r
+ movl %eax, 104(%edi)\r
+ movl %edx, 120(%edi)\r
+ movl 84(%esp), %ebx\r
+ movl 68(%esp), %eax\r
+ movl 116(%esp), %edx\r
+ movl 100(%esp), %ecx\r
+ movl %ebx, 68(%edi)\r
+ movl %eax, 84(%edi)\r
+ movl %edx, 100(%edi)\r
+ movl %ecx, 116(%edi)\r
+ movl 64(%esp), %eax\r
+ movl 112(%esp), %edx\r
+ movl 96(%esp), %ecx\r
+ movl 80(%esp), %ebx\r
+ movl %eax, 64(%edi)\r
+ movl %edx, 80(%edi)\r
+ movl %ecx, 96(%edi)\r
+ movl %ebx, 112(%edi)\r
+ \r
+ movl %ebp, %esp\r
+ popl %esi\r
+ popl %edi\r
+ popl %ebp\r
+ popl %ebx\r
+ ret\r
+\r
+#endif\r