.type __rsaz_512_reduce,\@abi-omnipotent
.align 32
__rsaz_512_reduce:
+.cfi_startproc
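# Montgomery reduction of the product on the stack; %rbp points at the
# modulus and 128+8(%rsp) caches the Montgomery constant $n0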
movq %r8, %rbx
imulq 128+8(%rsp), %rbx
movq 0(%rbp), %rax
jne .Lreduction_loop
ret
+.cfi_endproc
.size __rsaz_512_reduce,.-__rsaz_512_reduce
___
}
.type __rsaz_512_reducex,\@abi-omnipotent
.align 32
__rsaz_512_reducex:
+.cfi_startproc
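# ADCX/ADOX (MULX) flavour of the reduction; the caller preloads $n0 in %rdx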
#movq 128+8(%rsp), %rdx # pull $n0
imulq %r8, %rdx
xorq %rsi, %rsi # cf=0,of=0
jne .Lreduction_loopx
ret
+.cfi_endproc
.size __rsaz_512_reducex,.-__rsaz_512_reducex
___
}
.type __rsaz_512_subtract,\@abi-omnipotent
.align 32
__rsaz_512_subtract:
+.cfi_startproc
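# %r8..%r15 carry the 512-bit result; store it to $out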
movq %r8, ($out)
movq %r9, 8($out)
movq %r10, 16($out)
movq %r15, 56($out)
ret
+.cfi_endproc
.size __rsaz_512_subtract,.-__rsaz_512_subtract
___
}
.type __rsaz_512_mul,\@abi-omnipotent
.align 32
__rsaz_512_mul:
+.cfi_startproc
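# 512x512-bit multiply; the product is accumulated on the stack at 8(%rsp)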
leaq 8(%rsp), %rdi
movq ($ap), %rax
movq %r15, 56(%rdi)
ret
+.cfi_endproc
.size __rsaz_512_mul,.-__rsaz_512_mul
___
}
.type __rsaz_512_mulx,\@abi-omnipotent
.align 32
__rsaz_512_mulx:
+.cfi_startproc
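# MULX-based counterpart of __rsaz_512_mul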
mulx ($ap), %rbx, %r8 # initial %rdx preloaded by caller
mov \$-6, %rcx
mov %r15, 8+64+56(%rsp)
ret
+.cfi_endproc
.size __rsaz_512_mulx,.-__rsaz_512_mulx
___
}
.type rsaz_512_scatter4,\@abi-omnipotent
.align 16
rsaz_512_scatter4:
+.cfi_startproc
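# scatter the 8 qwords of a 512-bit value into table slot $power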
leaq ($out,$power,8), $out
movl \$8, %r9d
jmp .Loop_scatter
decl %r9d
jnz .Loop_scatter
ret
+.cfi_endproc
.size rsaz_512_scatter4,.-rsaz_512_scatter4
.globl rsaz_512_gather4
.type rsaz_512_gather4,\@abi-omnipotent
.align 16
rsaz_512_gather4:
+.cfi_startproc
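# gather a 512-bit value back from table slot $power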
___
$code.=<<___ if ($win64);
.LSEH_begin_rsaz_512_gather4:
___
$code.=<<___;
ret
+.cfi_endproc
.LSEH_end_rsaz_512_gather4:
.size rsaz_512_gather4,.-rsaz_512_gather4
.align 64