3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
11 # AES-NI-CTR+GHASH stitch.
15 # OpenSSL GCM implementation is organized in such way that its
16 # performance is rather close to the sum of its streamed components,
17 # in the context parallelized AES-NI CTR and modulo-scheduled
18 # PCLMULQDQ-enabled GHASH. Unfortunately, as no stitch implementation
19 # was observed to perform significantly better than the sum of the
20 # components on contemporary CPUs, the effort was deemed impossible to
21 # justify. This module is based on combination of Intel submissions,
22 # [1] and [2], with MOVBE twist suggested by Ilya Albrekht and Max
23 # Locktyukhin of Intel Corp. who verified that it reduces shuffles
24 # pressure with notable relative improvement, achieving 1.0 cycle per
25 # byte processed with 128-bit key on Haswell processor.
27 # [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
28 # [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf
32 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
34 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
36 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
37 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
38 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
39 die "can't locate x86_64-xlate.pl";
41 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
42 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
43 $avx = ($1>=2.19) + ($1>=2.22);
46 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
47 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
48 $avx = ($1>=2.09) + ($1>=2.10);
51 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
52 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
53 $avx = ($1>=10) + ($1>=11);
56 if (!$avx && `$ENV{CC} -v 2>&1` =~ /LLVM ([3-9]\.[0-9]+)/) {
57 $avx = ($1>=3.0) + ($1>=3.1);
60 open OUT,"| \"$^X\" $xlate $flavour $output";
65 ($inp,$out,$len,$key,$ivp,$Xip)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
68 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));
70 ($inout0,$inout1,$inout2,$inout3,$inout4,$inout5,$rndkey) = map("%xmm$_",(9..15));
72 ($counter,$rounds,$ret,$const,$in0,$end0)=("%ebx","%ebp","%r10","%r11","%r14","%r15");
77 .type _aesni_ctr32_ghash_6x,\@abi-omnipotent
79 _aesni_ctr32_ghash_6x:
80 vmovdqu 0x20($const),$T2 # borrow $T2, .Lone_msb
82 vpxor $Z0,$Z0,$Z0 # $Z0 = 0
83 vmovdqu 0x00-0x80($key),$rndkey
84 vpaddb $T2,$T1,$inout1
85 vpaddb $T2,$inout1,$inout2
86 vpaddb $T2,$inout2,$inout3
87 vpaddb $T2,$inout3,$inout4
88 vpaddb $T2,$inout4,$inout5
89 vpxor $rndkey,$T1,$inout0
90 vmovdqu $Z0,16+8(%rsp) # "$Z3" = 0
96 jc .Lhandle_ctr32 # discard $inout[1-5]?
97 vmovdqu 0x00-0x20($Xip),$Hkey # $Hkey^1
98 vpaddb $T2,$inout5,$T1 # next counter value
99 vpxor $rndkey,$inout1,$inout1
100 vpxor $rndkey,$inout2,$inout2
103 vmovdqu $T1,($ivp) # save next counter value
104 vpclmulqdq \$0x10,$Hkey,$Z3,$Z1
105 vpxor $rndkey,$inout3,$inout3
106 vmovups 0x10-0x80($key),$T2 # borrow $T2 for $rndkey
107 vpclmulqdq \$0x01,$Hkey,$Z3,$Z2
111 vaesenc $T2,$inout0,$inout0
112 vmovdqu 0x30+8(%rsp),$Ii # I[4]
113 vpxor $rndkey,$inout4,$inout4
114 vpclmulqdq \$0x00,$Hkey,$Z3,$T1
115 vaesenc $T2,$inout1,$inout1
116 vpxor $rndkey,$inout5,$inout5
118 vpclmulqdq \$0x11,$Hkey,$Z3,$Z3
119 vaesenc $T2,$inout2,$inout2
120 vmovdqu 0x10-0x20($Xip),$Hkey # $Hkey^2
122 vaesenc $T2,$inout3,$inout3
124 vpclmulqdq \$0x00,$Hkey,$Ii,$Z1
125 vpxor $Z0,$Xi,$Xi # modulo-scheduled
126 vaesenc $T2,$inout4,$inout4
129 vmovups 0x20-0x80($key),$rndkey
130 vpclmulqdq \$0x10,$Hkey,$Ii,$T1
131 vaesenc $T2,$inout5,$inout5
133 vpclmulqdq \$0x01,$Hkey,$Ii,$T2
135 vaesenc $rndkey,$inout0,$inout0
136 vpxor 16+8(%rsp),$Xi,$Xi # modulo-scheduled [vpxor $Z3,$Xi,$Xi]
137 vpclmulqdq \$0x11,$Hkey,$Ii,$Hkey
138 vmovdqu 0x40+8(%rsp),$Ii # I[3]
139 vaesenc $rndkey,$inout1,$inout1
140 movbe 0x58($in0),%r13
141 vaesenc $rndkey,$inout2,$inout2
142 movbe 0x50($in0),%r12
143 vaesenc $rndkey,$inout3,$inout3
144 mov %r13,0x20+8(%rsp)
145 vaesenc $rndkey,$inout4,$inout4
146 mov %r12,0x28+8(%rsp)
147 vmovdqu 0x30-0x20($Xip),$Z1 # borrow $Z1 for $Hkey^3
148 vaesenc $rndkey,$inout5,$inout5
150 vmovups 0x30-0x80($key),$rndkey
152 vpclmulqdq \$0x00,$Z1,$Ii,$T1
153 vaesenc $rndkey,$inout0,$inout0
155 vpclmulqdq \$0x10,$Z1,$Ii,$T2
156 vaesenc $rndkey,$inout1,$inout1
158 vpclmulqdq \$0x01,$Z1,$Ii,$Hkey
159 vaesenc $rndkey,$inout2,$inout2
160 vpclmulqdq \$0x11,$Z1,$Ii,$Z1
161 vmovdqu 0x50+8(%rsp),$Ii # I[2]
162 vaesenc $rndkey,$inout3,$inout3
163 vaesenc $rndkey,$inout4,$inout4
165 vmovdqu 0x40-0x20($Xip),$T1 # borrow $T1 for $Hkey^4
166 vaesenc $rndkey,$inout5,$inout5
168 vmovups 0x40-0x80($key),$rndkey
170 vpclmulqdq \$0x00,$T1,$Ii,$T2
171 vaesenc $rndkey,$inout0,$inout0
173 vpclmulqdq \$0x10,$T1,$Ii,$Hkey
174 vaesenc $rndkey,$inout1,$inout1
175 movbe 0x48($in0),%r13
177 vpclmulqdq \$0x01,$T1,$Ii,$Z1
178 vaesenc $rndkey,$inout2,$inout2
179 movbe 0x40($in0),%r12
180 vpclmulqdq \$0x11,$T1,$Ii,$T1
181 vmovdqu 0x60+8(%rsp),$Ii # I[1]
182 vaesenc $rndkey,$inout3,$inout3
183 mov %r13,0x30+8(%rsp)
184 vaesenc $rndkey,$inout4,$inout4
185 mov %r12,0x38+8(%rsp)
187 vmovdqu 0x60-0x20($Xip),$T2 # borrow $T2 for $Hkey^5
188 vaesenc $rndkey,$inout5,$inout5
190 vmovups 0x50-0x80($key),$rndkey
192 vpclmulqdq \$0x00,$T2,$Ii,$Hkey
193 vaesenc $rndkey,$inout0,$inout0
195 vpclmulqdq \$0x10,$T2,$Ii,$Z1
196 vaesenc $rndkey,$inout1,$inout1
197 movbe 0x38($in0),%r13
199 vpclmulqdq \$0x01,$T2,$Ii,$T1
200 vpxor 0x70+8(%rsp),$Xi,$Xi # accumulate I[0]
201 vaesenc $rndkey,$inout2,$inout2
202 movbe 0x30($in0),%r12
203 vpclmulqdq \$0x11,$T2,$Ii,$T2
204 vaesenc $rndkey,$inout3,$inout3
205 mov %r13,0x40+8(%rsp)
206 vaesenc $rndkey,$inout4,$inout4
207 mov %r12,0x48+8(%rsp)
209 vmovdqu 0x70-0x20($Xip),$Hkey # $Hkey^6
210 vaesenc $rndkey,$inout5,$inout5
212 vmovups 0x60-0x80($key),$rndkey
214 vpclmulqdq \$0x10,$Hkey,$Xi,$Z1
215 vaesenc $rndkey,$inout0,$inout0
217 vpclmulqdq \$0x01,$Hkey,$Xi,$T1
218 vaesenc $rndkey,$inout1,$inout1
219 movbe 0x28($in0),%r13
221 vpclmulqdq \$0x00,$Hkey,$Xi,$T2
222 vaesenc $rndkey,$inout2,$inout2
223 movbe 0x20($in0),%r12
224 vpclmulqdq \$0x11,$Hkey,$Xi,$Xi
225 vaesenc $rndkey,$inout3,$inout3
226 mov %r13,0x50+8(%rsp)
227 vaesenc $rndkey,$inout4,$inout4
228 mov %r12,0x58+8(%rsp)
230 vaesenc $rndkey,$inout5,$inout5
233 vmovups 0x70-0x80($key),$rndkey
236 vmovdqu 0x10($const),$Hkey # .Lpoly
238 vaesenc $rndkey,$inout0,$inout0
240 vaesenc $rndkey,$inout1,$inout1
242 movbe 0x18($in0),%r13
243 vaesenc $rndkey,$inout2,$inout2
244 movbe 0x10($in0),%r12
245 vpalignr \$8,$Z0,$Z0,$Ii # 1st phase
246 vpclmulqdq \$0x10,$Hkey,$Z0,$Z0
247 mov %r13,0x60+8(%rsp)
248 vaesenc $rndkey,$inout3,$inout3
249 mov %r12,0x68+8(%rsp)
250 vaesenc $rndkey,$inout4,$inout4
251 vmovups 0x80-0x80($key),$T1 # borrow $T1 for $rndkey
252 vaesenc $rndkey,$inout5,$inout5
254 vaesenc $T1,$inout0,$inout0
255 vmovups 0x90-0x80($key),$rndkey
256 vaesenc $T1,$inout1,$inout1
258 vaesenc $T1,$inout2,$inout2
260 vaesenc $T1,$inout3,$inout3
262 movbe 0x08($in0),%r13
263 vaesenc $T1,$inout4,$inout4
264 movbe 0x00($in0),%r12
265 vaesenc $T1,$inout5,$inout5
266 vmovups 0xa0-0x80($key),$T1
268 jb .Lenc_tail # 128-bit key
270 vaesenc $rndkey,$inout0,$inout0
271 vaesenc $rndkey,$inout1,$inout1
272 vaesenc $rndkey,$inout2,$inout2
273 vaesenc $rndkey,$inout3,$inout3
274 vaesenc $rndkey,$inout4,$inout4
275 vaesenc $rndkey,$inout5,$inout5
277 vaesenc $T1,$inout0,$inout0
278 vaesenc $T1,$inout1,$inout1
279 vaesenc $T1,$inout2,$inout2
280 vaesenc $T1,$inout3,$inout3
281 vaesenc $T1,$inout4,$inout4
282 vmovups 0xb0-0x80($key),$rndkey
283 vaesenc $T1,$inout5,$inout5
284 vmovups 0xc0-0x80($key),$T1
285 je .Lenc_tail # 192-bit key
287 vaesenc $rndkey,$inout0,$inout0
288 vaesenc $rndkey,$inout1,$inout1
289 vaesenc $rndkey,$inout2,$inout2
290 vaesenc $rndkey,$inout3,$inout3
291 vaesenc $rndkey,$inout4,$inout4
292 vaesenc $rndkey,$inout5,$inout5
294 vaesenc $T1,$inout0,$inout0
295 vaesenc $T1,$inout1,$inout1
296 vaesenc $T1,$inout2,$inout2
297 vaesenc $T1,$inout3,$inout3
298 vaesenc $T1,$inout4,$inout4
299 vmovups 0xd0-0x80($key),$rndkey
300 vaesenc $T1,$inout5,$inout5
301 vmovups 0xe0-0x80($key),$T1
302 jmp .Lenc_tail # 256-bit key
306 vmovdqu ($const),$Ii # borrow $Ii for .Lbswap_mask
307 vpshufb $Ii,$T1,$Z2 # byte-swap counter
308 vmovdqu 0x30($const),$Z1 # borrow $Z1, .Ltwo_lsb
309 vpaddd 0x40($const),$Z2,$inout1 # .Lone_lsb
310 vpaddd $Z1,$Z2,$inout2
311 vmovdqu 0x00-0x20($Xip),$Hkey # $Hkey^1
312 vpaddd $Z1,$inout1,$inout3
313 vpshufb $Ii,$inout1,$inout1
314 vpaddd $Z1,$inout2,$inout4
315 vpshufb $Ii,$inout2,$inout2
316 vpxor $rndkey,$inout1,$inout1
317 vpaddd $Z1,$inout3,$inout5
318 vpshufb $Ii,$inout3,$inout3
319 vpxor $rndkey,$inout2,$inout2
320 vpaddd $Z1,$inout4,$T1 # byte-swapped next counter value
321 vpshufb $Ii,$inout4,$inout4
322 vpshufb $Ii,$inout5,$inout5
323 vpshufb $Ii,$T1,$T1 # next counter value
328 vaesenc $rndkey,$inout0,$inout0
329 vmovdqu $Z3,16+8(%rsp) # postpone vpxor $Z3,$Xi,$Xi
330 vpalignr \$8,$Z0,$Z0,$Xi # 2nd phase
331 vaesenc $rndkey,$inout1,$inout1
332 vpclmulqdq \$0x10,$Hkey,$Z0,$Z0
333 vpxor 0x00($inp),$T1,$T2
334 vaesenc $rndkey,$inout2,$inout2
335 vpxor 0x10($inp),$T1,$Ii
336 vaesenc $rndkey,$inout3,$inout3
337 vpxor 0x20($inp),$T1,$Z1
338 vaesenc $rndkey,$inout4,$inout4
339 vpxor 0x30($inp),$T1,$Z2
340 vaesenc $rndkey,$inout5,$inout5
341 vpxor 0x40($inp),$T1,$Z3
342 vpxor 0x50($inp),$T1,$Hkey
343 vmovdqu ($ivp),$T1 # load next counter value
345 vaesenclast $T2,$inout0,$inout0
346 vmovdqu 0x20($const),$T2 # borrow $T2, .Lone_msb
347 vaesenclast $Ii,$inout1,$inout1
349 mov %r13,0x70+8(%rsp)
351 vaesenclast $Z1,$inout2,$inout2
353 mov %r12,0x78+8(%rsp)
355 vmovdqu 0x00-0x80($key),$rndkey
356 vaesenclast $Z2,$inout3,$inout3
358 vaesenclast $Z3, $inout4,$inout4
360 vaesenclast $Hkey,$inout5,$inout5
367 vmovups $inout0,-0x60($out) # save output
368 vpxor $rndkey,$T1,$inout0
369 vmovups $inout1,-0x50($out)
370 vmovdqa $Ii,$inout1 # 0 latency
371 vmovups $inout2,-0x40($out)
372 vmovdqa $Z1,$inout2 # 0 latency
373 vmovups $inout3,-0x30($out)
374 vmovdqa $Z2,$inout3 # 0 latency
375 vmovups $inout4,-0x20($out)
376 vmovdqa $Z3,$inout4 # 0 latency
377 vmovups $inout5,-0x10($out)
378 vmovdqa $Hkey,$inout5 # 0 latency
379 vmovdqu 0x20+8(%rsp),$Z3 # I[5]
383 vpxor 16+8(%rsp),$Xi,$Xi # modulo-scheduled
384 vpxor $Z0,$Xi,$Xi # modulo-scheduled
387 .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
389 ######################################################################
391 # size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
392 # const AES_KEY *key, unsigned char iv[16],
393 # struct { u128 Xi,H,Htbl[9]; } *Xip);
395 .globl aesni_gcm_decrypt
396 .type aesni_gcm_decrypt,\@function,6
400 cmp \$0x60,$len # minimal accepted length
403 lea (%rsp),%rax # save stack pointer
411 $code.=<<___ if ($win64);
413 movaps %xmm6,-0xd8(%rax)
414 movaps %xmm7,-0xc8(%rax)
415 movaps %xmm8,-0xb8(%rax)
416 movaps %xmm9,-0xa8(%rax)
417 movaps %xmm10,-0x98(%rax)
418 movaps %xmm11,-0x88(%rax)
419 movaps %xmm12,-0x78(%rax)
420 movaps %xmm13,-0x68(%rax)
421 movaps %xmm14,-0x58(%rax)
422 movaps %xmm15,-0x48(%rax)
428 vmovdqu ($ivp),$T1 # input counter value
430 mov 12($ivp),$counter
431 lea .Lbswap_mask(%rip),$const
432 lea -0x80($key),$in0 # borrow $in0
433 mov \$0xf80,$end0 # borrow $end0
434 vmovdqu ($Xip),$Xi # load Xi
435 and \$-128,%rsp # ensure stack alignment
436 vmovdqu ($const),$Ii # borrow $Ii for .Lbswap_mask
437 lea 0x80($key),$key # size optimization
438 lea 0x20+0x20($Xip),$Xip # size optimization
439 mov 0xf0-0x80($key),$rounds
445 jc .Ldec_no_key_aliasing
447 jnc .Ldec_no_key_aliasing
448 sub $end0,%rsp # avoid aliasing with key
449 .Ldec_no_key_aliasing:
451 vmovdqu 0x50($inp),$Z3 # I[5]
453 vmovdqu 0x40($inp),$Z0
454 lea -0xc0($inp,$len),$end0
455 vmovdqu 0x30($inp),$Z1
458 vmovdqu 0x20($inp),$Z2
459 vpshufb $Ii,$Z3,$Z3 # passed to _aesni_ctr32_ghash_6x
460 vmovdqu 0x10($inp),$T2
464 vmovdqu $Z0,0x30(%rsp)
466 vmovdqu $Z1,0x40(%rsp)
468 vmovdqu $Z2,0x50(%rsp)
469 vpshufb $Ii,$Hkey,$Hkey
470 vmovdqu $T2,0x60(%rsp)
471 vmovdqu $Hkey,0x70(%rsp)
473 call _aesni_ctr32_ghash_6x
475 vmovups $inout0,-0x60($out) # save output
476 vmovups $inout1,-0x50($out)
477 vmovups $inout2,-0x40($out)
478 vmovups $inout3,-0x30($out)
479 vmovups $inout4,-0x20($out)
480 vmovups $inout5,-0x10($out)
482 vpshufb ($const),$Xi,$Xi # .Lbswap_mask
483 vmovdqu $Xi,-0x40($Xip) # output Xi
487 $code.=<<___ if ($win64);
488 movaps -0xd8(%rax),%xmm6
489 movaps -0xd8(%rax),%xmm7
490 movaps -0xb8(%rax),%xmm8
491 movaps -0xa8(%rax),%xmm9
492 movaps -0x98(%rax),%xmm10
493 movaps -0x88(%rax),%xmm11
494 movaps -0x78(%rax),%xmm12
495 movaps -0x68(%rax),%xmm13
496 movaps -0x58(%rax),%xmm14
497 movaps -0x48(%rax),%xmm15
506 lea (%rax),%rsp # restore %rsp
508 mov $ret,%rax # return value
510 .size aesni_gcm_decrypt,.-aesni_gcm_decrypt
514 .type _aesni_ctr32_6x,\@abi-omnipotent
517 vmovdqu 0x00-0x80($key),$Z0 # borrow $Z0 for $rndkey
518 vmovdqu 0x20($const),$T2 # borrow $T2, .Lone_msb
520 vmovups 0x10-0x80($key),$rndkey
521 lea 0x20-0x80($key),%r12
522 vpxor $Z0,$T1,$inout0
525 vpaddb $T2,$T1,$inout1
526 vpaddb $T2,$inout1,$inout2
527 vpxor $Z0,$inout1,$inout1
528 vpaddb $T2,$inout2,$inout3
529 vpxor $Z0,$inout2,$inout2
530 vpaddb $T2,$inout3,$inout4
531 vpxor $Z0,$inout3,$inout3
532 vpaddb $T2,$inout4,$inout5
533 vpxor $Z0,$inout4,$inout4
534 vpaddb $T2,$inout5,$T1
535 vpxor $Z0,$inout5,$inout5
540 vaesenc $rndkey,$inout0,$inout0
541 vaesenc $rndkey,$inout1,$inout1
542 vaesenc $rndkey,$inout2,$inout2
543 vaesenc $rndkey,$inout3,$inout3
544 vaesenc $rndkey,$inout4,$inout4
545 vaesenc $rndkey,$inout5,$inout5
546 vmovups (%r12),$rndkey
551 vmovdqu (%r12),$Hkey # last round key
552 vaesenc $rndkey,$inout0,$inout0
553 vpxor 0x00($inp),$Hkey,$Z0
554 vaesenc $rndkey,$inout1,$inout1
555 vpxor 0x10($inp),$Hkey,$Z1
556 vaesenc $rndkey,$inout2,$inout2
557 vpxor 0x20($inp),$Hkey,$Z2
558 vaesenc $rndkey,$inout3,$inout3
559 vpxor 0x30($inp),$Hkey,$Xi
560 vaesenc $rndkey,$inout4,$inout4
561 vpxor 0x40($inp),$Hkey,$T2
562 vaesenc $rndkey,$inout5,$inout5
563 vpxor 0x50($inp),$Hkey,$Hkey
566 vaesenclast $Z0,$inout0,$inout0
567 vaesenclast $Z1,$inout1,$inout1
568 vaesenclast $Z2,$inout2,$inout2
569 vaesenclast $Xi,$inout3,$inout3
570 vaesenclast $T2,$inout4,$inout4
571 vaesenclast $Hkey,$inout5,$inout5
572 vmovups $inout0,0x00($out)
573 vmovups $inout1,0x10($out)
574 vmovups $inout2,0x20($out)
575 vmovups $inout3,0x30($out)
576 vmovups $inout4,0x40($out)
577 vmovups $inout5,0x50($out)
583 vpshufb $Ii,$T1,$Z2 # byte-swap counter
584 vmovdqu 0x30($const),$Z1 # borrow $Z1, .Ltwo_lsb
585 vpaddd 0x40($const),$Z2,$inout1 # .Lone_lsb
586 vpaddd $Z1,$Z2,$inout2
587 vpaddd $Z1,$inout1,$inout3
588 vpshufb $Ii,$inout1,$inout1
589 vpaddd $Z1,$inout2,$inout4
590 vpshufb $Ii,$inout2,$inout2
591 vpxor $Z0,$inout1,$inout1
592 vpaddd $Z1,$inout3,$inout5
593 vpshufb $Ii,$inout3,$inout3
594 vpxor $Z0,$inout2,$inout2
595 vpaddd $Z1,$inout4,$T1 # byte-swapped next counter value
596 vpshufb $Ii,$inout4,$inout4
597 vpxor $Z0,$inout3,$inout3
598 vpshufb $Ii,$inout5,$inout5
599 vpxor $Z0,$inout4,$inout4
600 vpshufb $Ii,$T1,$T1 # next counter value
601 vpxor $Z0,$inout5,$inout5
603 .size _aesni_ctr32_6x,.-_aesni_ctr32_6x
605 .globl aesni_gcm_encrypt
606 .type aesni_gcm_encrypt,\@function,6
610 cmp \$0x60*3,$len # minimal accepted length
613 lea (%rsp),%rax # save stack pointer
621 $code.=<<___ if ($win64);
623 movaps %xmm6,-0xd8(%rax)
624 movaps %xmm7,-0xc8(%rax)
625 movaps %xmm8,-0xb8(%rax)
626 movaps %xmm9,-0xa8(%rax)
627 movaps %xmm10,-0x98(%rax)
628 movaps %xmm11,-0x88(%rax)
629 movaps %xmm12,-0x78(%rax)
630 movaps %xmm13,-0x68(%rax)
631 movaps %xmm14,-0x58(%rax)
632 movaps %xmm15,-0x48(%rax)
638 vmovdqu ($ivp),$T1 # input counter value
640 mov 12($ivp),$counter
641 lea .Lbswap_mask(%rip),$const
642 lea -0x80($key),$in0 # borrow $in0
643 mov \$0xf80,$end0 # borrow $end0
644 lea 0x80($key),$key # size optimization
645 vmovdqu ($const),$Ii # borrow $Ii for .Lbswap_mask
646 and \$-128,%rsp # ensure stack alignment
647 mov 0xf0-0x80($key),$rounds
652 jc .Lenc_no_key_aliasing
654 jnc .Lenc_no_key_aliasing
655 sub $end0,%rsp # avoid aliasing with key
656 .Lenc_no_key_aliasing:
659 lea -0xc0($out,$len),$end0
663 vpshufb $Ii,$inout0,$Xi # save bswapped output on stack
664 vpshufb $Ii,$inout1,$T2
665 vmovdqu $Xi,0x70(%rsp)
666 vpshufb $Ii,$inout2,$Z0
667 vmovdqu $T2,0x60(%rsp)
668 vpshufb $Ii,$inout3,$Z1
669 vmovdqu $Z0,0x50(%rsp)
670 vpshufb $Ii,$inout4,$Z2
671 vmovdqu $Z1,0x40(%rsp)
672 vpshufb $Ii,$inout5,$Z3 # passed to _aesni_ctr32_ghash_6x
673 vmovdqu $Z2,0x30(%rsp)
677 vmovdqu ($Xip),$Xi # load Xi
678 lea 0x20+0x20($Xip),$Xip # size optimization
683 call _aesni_ctr32_ghash_6x
684 vmovdqu 0x20(%rsp),$Z3 # I[5]
685 vmovdqu ($const),$Ii # borrow $Ii for .Lbswap_mask
686 vmovdqu 0x00-0x20($Xip),$Hkey # $Hkey^1
687 vpunpckhqdq $Z3,$Z3,$T1
688 vmovdqu 0x20-0x20($Xip),$rndkey # borrow $rndkey for $HK
689 vmovups $inout0,-0x60($out) # save output
690 vpshufb $Ii,$inout0,$inout0 # but keep bswapped copy
692 vmovups $inout1,-0x50($out)
693 vpshufb $Ii,$inout1,$inout1
694 vmovups $inout2,-0x40($out)
695 vpshufb $Ii,$inout2,$inout2
696 vmovups $inout3,-0x30($out)
697 vpshufb $Ii,$inout3,$inout3
698 vmovups $inout4,-0x20($out)
699 vpshufb $Ii,$inout4,$inout4
700 vmovups $inout5,-0x10($out)
701 vpshufb $Ii,$inout5,$inout5
702 vmovdqu $inout0,0x10(%rsp) # free $inout0
704 { my ($HK,$T3)=($rndkey,$inout0);
707 vmovdqu 0x30(%rsp),$Z2 # I[4]
708 vmovdqu 0x10-0x20($Xip),$Ii # borrow $Ii for $Hkey^2
709 vpunpckhqdq $Z2,$Z2,$T2
710 vpclmulqdq \$0x00,$Hkey,$Z3,$Z1
712 vpclmulqdq \$0x11,$Hkey,$Z3,$Z3
713 vpclmulqdq \$0x00,$HK,$T1,$T1
715 vmovdqu 0x40(%rsp),$T3 # I[3]
716 vpclmulqdq \$0x00,$Ii,$Z2,$Z0
717 vmovdqu 0x30-0x20($Xip),$Hkey # $Hkey^3
719 vpunpckhqdq $T3,$T3,$Z1
720 vpclmulqdq \$0x11,$Ii,$Z2,$Z2
723 vpclmulqdq \$0x10,$HK,$T2,$T2
724 vmovdqu 0x50-0x20($Xip),$HK
727 vmovdqu 0x50(%rsp),$T1 # I[2]
728 vpclmulqdq \$0x00,$Hkey,$T3,$Z3
729 vmovdqu 0x40-0x20($Xip),$Ii # borrow $Ii for $Hkey^4
731 vpunpckhqdq $T1,$T1,$Z0
732 vpclmulqdq \$0x11,$Hkey,$T3,$T3
735 vpclmulqdq \$0x00,$HK,$Z1,$Z1
738 vmovdqu 0x60(%rsp),$T2 # I[1]
739 vpclmulqdq \$0x00,$Ii,$T1,$Z2
740 vmovdqu 0x60-0x20($Xip),$Hkey # $Hkey^5
742 vpunpckhqdq $T2,$T2,$Z3
743 vpclmulqdq \$0x11,$Ii,$T1,$T1
746 vpclmulqdq \$0x10,$HK,$Z0,$Z0
747 vmovdqu 0x80-0x20($Xip),$HK
750 vpxor 0x70(%rsp),$Xi,$Xi # accumulate I[0]
751 vpclmulqdq \$0x00,$Hkey,$T2,$Z1
752 vmovdqu 0x70-0x20($Xip),$Ii # borrow $Ii for $Hkey^6
753 vpunpckhqdq $Xi,$Xi,$T3
755 vpclmulqdq \$0x11,$Hkey,$T2,$T2
758 vpclmulqdq \$0x00,$HK,$Z3,$Z3
761 vpclmulqdq \$0x00,$Ii,$Xi,$Z2
762 vmovdqu 0x00-0x20($Xip),$Hkey # $Hkey^1
763 vpunpckhqdq $inout5,$inout5,$T1
764 vpclmulqdq \$0x11,$Ii,$Xi,$Xi
765 vpxor $inout5,$T1,$T1
767 vpclmulqdq \$0x10,$HK,$T3,$T3
768 vmovdqu 0x20-0x20($Xip),$HK
772 vmovdqu 0x10-0x20($Xip),$Ii # borrow $Ii for $Hkey^2
773 vpxor $Z1,$Z3,$T3 # aggregated Karatsuba post-processing
774 vpclmulqdq \$0x00,$Hkey,$inout5,$Z0
776 vpunpckhqdq $inout4,$inout4,$T2
777 vpclmulqdq \$0x11,$Hkey,$inout5,$inout5
778 vpxor $inout4,$T2,$T2
780 vpclmulqdq \$0x00,$HK,$T1,$T1
785 vpclmulqdq \$0x00,$Ii,$inout4,$Z1
786 vmovdqu 0x30-0x20($Xip),$Hkey # $Hkey^3
788 vpunpckhqdq $inout3,$inout3,$T3
789 vpclmulqdq \$0x11,$Ii,$inout4,$inout4
790 vpxor $inout3,$T3,$T3
791 vpxor $inout5,$inout4,$inout4
792 vpalignr \$8,$Xi,$Xi,$inout5 # 1st phase
793 vpclmulqdq \$0x10,$HK,$T2,$T2
794 vmovdqu 0x50-0x20($Xip),$HK
797 vpclmulqdq \$0x00,$Hkey,$inout3,$Z0
798 vmovdqu 0x40-0x20($Xip),$Ii # borrow $Ii for $Hkey^4
800 vpunpckhqdq $inout2,$inout2,$T1
801 vpclmulqdq \$0x11,$Hkey,$inout3,$inout3
802 vpxor $inout2,$T1,$T1
803 vpxor $inout4,$inout3,$inout3
804 vxorps 0x10(%rsp),$Z3,$Z3 # accumulate $inout0
805 vpclmulqdq \$0x00,$HK,$T3,$T3
808 vpclmulqdq \$0x10,0x10($const),$Xi,$Xi
809 vxorps $inout5,$Xi,$Xi
811 vpclmulqdq \$0x00,$Ii,$inout2,$Z1
812 vmovdqu 0x60-0x20($Xip),$Hkey # $Hkey^5
814 vpunpckhqdq $inout1,$inout1,$T2
815 vpclmulqdq \$0x11,$Ii,$inout2,$inout2
816 vpxor $inout1,$T2,$T2
817 vpalignr \$8,$Xi,$Xi,$inout5 # 2nd phase
818 vpxor $inout3,$inout2,$inout2
819 vpclmulqdq \$0x10,$HK,$T1,$T1
820 vmovdqu 0x80-0x20($Xip),$HK
823 vxorps $Z3,$inout5,$inout5
824 vpclmulqdq \$0x10,0x10($const),$Xi,$Xi
825 vxorps $inout5,$Xi,$Xi
827 vpclmulqdq \$0x00,$Hkey,$inout1,$Z0
828 vmovdqu 0x70-0x20($Xip),$Ii # borrow $Ii for $Hkey^6
830 vpunpckhqdq $Xi,$Xi,$T3
831 vpclmulqdq \$0x11,$Hkey,$inout1,$inout1
833 vpxor $inout2,$inout1,$inout1
834 vpclmulqdq \$0x00,$HK,$T2,$T2
837 vpclmulqdq \$0x00,$Ii,$Xi,$Z1
838 vpclmulqdq \$0x11,$Ii,$Xi,$Z3
840 vpclmulqdq \$0x10,$HK,$T3,$Z2
841 vpxor $inout1,$Z3,$Z3
844 vpxor $Z1,$Z3,$Z0 # aggregated Karatsuba post-processing
847 vmovdqu 0x10($const),$Hkey # .Lpoly
852 vpalignr \$8,$Xi,$Xi,$T2 # 1st phase
853 vpclmulqdq \$0x10,$Hkey,$Xi,$Xi
856 vpalignr \$8,$Xi,$Xi,$T2 # 2nd phase
857 vpclmulqdq \$0x10,$Hkey,$Xi,$Xi
863 vpshufb ($const),$Xi,$Xi # .Lbswap_mask
864 vmovdqu $Xi,-0x40($Xip) # output Xi
868 $code.=<<___ if ($win64);
869 movaps -0xd8(%rax),%xmm6
870 movaps -0xc8(%rax),%xmm7
871 movaps -0xb8(%rax),%xmm8
872 movaps -0xa8(%rax),%xmm9
873 movaps -0x98(%rax),%xmm10
874 movaps -0x88(%rax),%xmm11
875 movaps -0x78(%rax),%xmm12
876 movaps -0x68(%rax),%xmm13
877 movaps -0x58(%rax),%xmm14
878 movaps -0x48(%rax),%xmm15
887 lea (%rax),%rsp # restore %rsp
889 mov $ret,%rax # return value
891 .size aesni_gcm_encrypt,.-aesni_gcm_encrypt
897 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
899 .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
901 .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
903 .byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
905 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
906 .asciz "AES-NI GCM module for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
916 .extern __imp_RtlVirtualUnwind
917 .type gcm_se_handler,\@abi-omnipotent
931 mov 120($context),%rax # pull context->Rax
932 mov 248($context),%rbx # pull context->Rip
934 mov 8($disp),%rsi # disp->ImageBase
935 mov 56($disp),%r11 # disp->HandlerData
937 mov 0(%r11),%r10d # HandlerData[0]
938 lea (%rsi,%r10),%r10 # prologue label
939 cmp %r10,%rbx # context->Rip<prologue label
942 mov 152($context),%rax # pull context->Rsp
944 mov 4(%r11),%r10d # HandlerData[1]
945 lea (%rsi,%r10),%r10 # epilogue label
946 cmp %r10,%rbx # context->Rip>=epilogue label
947 jae .Lcommon_seh_tail
949 mov 120($context),%rax # pull context->Rax
957 mov %r15,240($context)
958 mov %r14,232($context)
959 mov %r13,224($context)
960 mov %r12,216($context)
961 mov %rbp,160($context)
962 mov %rbx,144($context)
964 lea -0xd8(%rax),%rsi # %xmm save area
965 lea 512($context),%rdi # & context.Xmm6
966 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
967 .long 0xa548f3fc # cld; rep movsq
972 mov %rax,152($context) # restore context->Rsp
973 mov %rsi,168($context) # restore context->Rsi
974 mov %rdi,176($context) # restore context->Rdi
976 mov 40($disp),%rdi # disp->ContextRecord
977 mov $context,%rsi # context
978 mov \$154,%ecx # sizeof(CONTEXT)
979 .long 0xa548f3fc # cld; rep movsq
982 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
983 mov 8(%rsi),%rdx # arg2, disp->ImageBase
984 mov 0(%rsi),%r8 # arg3, disp->ControlPc
985 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
986 mov 40(%rsi),%r10 # disp->ContextRecord
987 lea 56(%rsi),%r11 # &disp->HandlerData
988 lea 24(%rsi),%r12 # &disp->EstablisherFrame
989 mov %r10,32(%rsp) # arg5
990 mov %r11,40(%rsp) # arg6
991 mov %r12,48(%rsp) # arg7
992 mov %rcx,56(%rsp) # arg8, (NULL)
993 call *__imp_RtlVirtualUnwind(%rip)
995 mov \$1,%eax # ExceptionContinueSearch
1007 .size gcm_se_handler,.-gcm_se_handler
1011 .rva .LSEH_begin_aesni_gcm_decrypt
1012 .rva .LSEH_end_aesni_gcm_decrypt
1013 .rva .LSEH_gcm_dec_info
1015 .rva .LSEH_begin_aesni_gcm_encrypt
1016 .rva .LSEH_end_aesni_gcm_encrypt
1017 .rva .LSEH_gcm_enc_info
1023 .rva .Lgcm_dec_body,.Lgcm_dec_abort
1027 .rva .Lgcm_enc_body,.Lgcm_enc_abort
1031 $code=<<___; # assembler is too old
1034 .globl aesni_gcm_encrypt
1035 .type aesni_gcm_encrypt,\@abi-omnipotent
1039 .size aesni_gcm_encrypt,.-aesni_gcm_encrypt
1041 .globl aesni_gcm_decrypt
1042 .type aesni_gcm_decrypt,\@abi-omnipotent
1046 .size aesni_gcm_decrypt,.-aesni_gcm_decrypt
1050 $code =~ s/\`([^\`]*)\`/eval($1)/gem;