2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
21 # Performance in cycles per byte out of large buffer.
27 # Core2 9.56/+89% 4.83
28 # Westmere 9.50/+45% 3.35
29 # Sandy Bridge 10.5/+47% 3.20
30 # Haswell 8.15/+50% 2.83
31 # Silvermont 17.4/+36% 8.35
32 # Goldmont 13.4/+40% 4.36
33 # Sledgehammer 10.2/+54%
34 # Bulldozer 13.4/+50% 4.38(*)
36 # (*) Bulldozer actually executes 4xXOP code path that delivers 3.55;
# --- Build bootstrap and ISA capability probing (sampled excerpt) ---
# NOTE(review): this excerpt elides several original lines; code below is
# left byte-identical and only annotated.
# Locate this script's directory so the shared perlasm helpers can be found.
38 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
39 push(@INC,"${dir}","${dir}../../perlasm");
# Redirect STDOUT to the requested output file; generated assembly is printed.
43 open STDOUT,">$output";
# Initialize the x86 perlasm backend; last flag selects plain-386 mode.
45 &asm_init($ARGV[0],"chacha-x86.pl",$ARGV[$#ARGV] eq "386");
# SSE2/SSSE3 paths are emitted only when the build defines OPENSSL_IA32_SSE2.
48 for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
# Probe GNU as via $CC; the "$ymm=1 if (..." opening of this condition is
# elided in this excerpt — presumably it gates AVX emission (TODO confirm).
51 `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
52 			=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
53 			$1>=2.19);	# first version supporting AVX
# Same probe for nasm on win32n targets (NASM >= 2.03 supports AVX).
55 $ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
56 		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
57 		$1>=2.03);	# first version supporting AVX
# MASM (ml) on win32 targets: version 10 is the first supporting AVX.
59 $ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
60 		`ml 2>&1` =~ /Version ([0-9]+)\./ &&
61 		$1>=10);	# first version supporting AVX
# clang masquerading as gcc: accept clang/LLVM >= 3.0.
63 $ymm=1 if ($xmm && !$ymm &&
64 		`$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/ &&
65 		$2>=3.0);	# first version supporting AVX
# Register allocation for the scalar (integer-ALU) code path: each of the
# b/c/d ChaCha state columns gets a live register plus a shadow register
# ($b_/$c_/$d_) so consecutive quarter-rounds can be software-pipelined.
68 ($b,$b_)=("ebx","ebp");
69 ($c,$c_)=("ecx","esi");
70 ($d,$d_)=("edx","edi");
# Interior of the scalar quarter-round generator — its "sub QUARTERROUND {"
# opening line is elided in this excerpt.  $ai/$bi/$ci/$di are state-word
# indices (0..15); $i is the quarter-round ordinal within a double round.
73 my ($ai,$bi,$ci,$di,$i)=@_;
# Pre-compute the word indices used by the next and previous quarter-round
# so spills/reloads of state words overlap this round's arithmetic.
74 my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
75 my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous
# Column layouts for even vs odd (diagonal) rounds; adjustment code between
# these comment lines is elided in this excerpt.
79 	# 0	 4	 8	12		< even round
83 	# 0	 5	10	15		< odd round
90 	($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
93 	($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
96 	($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
99 	($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
# The a+=b of this round is hoisted into the *previous* round's tail
# ("see elsewhere"), hence the commented-out add here.
102 	#&add	($a,$b);			# see elsewhere
# Spill the previous round's c/b words back to the stack frame...
104 	 &mov	(&DWP(4*$cp,"esp"),$c_)	if ($ai>0 && $ai<3);
106 	 &mov	(&DWP(4*$bp,"esp"),$b_)	if ($i!=0);
# ...and reload the next round's c/d/b words while this round computes.
108 	 &mov	($c_,&DWP(4*$cn,"esp"))	if ($ai>0 && $ai<3);
110 	 &mov	($d_,&DWP(4*$dn,"esp"))	if ($di!=$dn);
112 	 &mov	($b_,&DWP(4*$bn,"esp"))	if ($i<7);
# After the 8th quarter-round reload the double-round loop counter instead.
113 	 &mov	($b_,&DWP(128,"esp"))	if ($i==7);	# loop counter
# Store this round's 'a' word and fetch the next round's 'a'.
116 	&mov	(&DWP(4*$ai,"esp"),$a);
118 	&mov	($a,&DWP(4*$an,"esp"));
120 	&mov	(&DWP(4*$di,"esp"),$d)	if ($di!=$dn);
121 	 &mov	($d_,$d)		if ($di==$dn);
# Hoisted a+=b belonging to the *next* quarter-round ("elsewhere").
123 	&add	($a,$b_)		if ($i<7);	# elsewhere
# --- Generator for the scalar ChaCha20_ctr32(out,inp,len,key,counter) ---
# NOTE(review): this excerpt elides many original lines (stack setup,
# rotate instructions, several branches); code below is byte-identical,
# annotated only.  Labels shared with the SIMD paths are declared static.
131 &static_label("ssse3_shortcut");
132 &static_label("xop_shortcut");
133 &static_label("ssse3_data");
134 &static_label("pic_point");
136 &function_begin("ChaCha20_ctr32");
# Bail out immediately on len==0.
138 	&cmp	("eax",&wparam(2));		# len==0?
139 	&je	(&label("no_data"));
# PIC-safe lookup of the OPENSSL_ia32cap_P capability vector.
141 	&call	(&label("pic_point"));
142 &set_label("pic_point");
144 	&picmeup("ebp","OPENSSL_ia32cap_P","eax",&label("pic_point"));
# Dispatch to the SSSE3 path when FXSR and SSSE3 bits are both set
# (the conditional branches between these tests are elided in this excerpt).
145 	&test	(&DWP(0,"ebp"),1<<24);		# test FXSR bit
147 	&test	(&DWP(4,"ebp"),1<<9);		# test SSSE3 bit
149 	&jmp	(&label("ssse3_shortcut"));
# Scalar path: copy key and counter/nonce into the 64+ region of the frame.
152 	&mov	("esi",&wparam(3));		# key
153 	&mov	("edi",&wparam(4));		# counter and nonce
157 	&mov	("eax",&DWP(4*0,"esi"));	# copy key
158 	&mov	("ebx",&DWP(4*1,"esi"));
159 	&mov	("ecx",&DWP(4*2,"esi"));
160 	&mov	("edx",&DWP(4*3,"esi"));
161 	&mov	(&DWP(64+4*4,"esp"),"eax");
162 	&mov	(&DWP(64+4*5,"esp"),"ebx");
163 	&mov	(&DWP(64+4*6,"esp"),"ecx");
164 	&mov	(&DWP(64+4*7,"esp"),"edx");
165 	&mov	("eax",&DWP(4*4,"esi"));
166 	&mov	("ebx",&DWP(4*5,"esi"));
167 	&mov	("ecx",&DWP(4*6,"esi"));
168 	&mov	("edx",&DWP(4*7,"esi"));
169 	&mov	(&DWP(64+4*8,"esp"),"eax");
170 	&mov	(&DWP(64+4*9,"esp"),"ebx");
171 	&mov	(&DWP(64+4*10,"esp"),"ecx");
172 	&mov	(&DWP(64+4*11,"esp"),"edx");
173 	&mov	("eax",&DWP(4*0,"edi"));	# copy counter and nonce
174 	&mov	("ebx",&DWP(4*1,"edi"));
175 	&mov	("ecx",&DWP(4*2,"edi"));
176 	&mov	("edx",&DWP(4*3,"edi"));
178 	&mov	(&DWP(64+4*12,"esp"),"eax");
179 	&mov	(&DWP(64+4*13,"esp"),"ebx");
180 	&mov	(&DWP(64+4*14,"esp"),"ecx");
181 	&mov	(&DWP(64+4*15,"esp"),"edx");
182 	&jmp	(&label("entry"));
# One 64-byte block per outer iteration: refresh working state from the
# saved key material, bump the counter, then run 10 double rounds.
184 &set_label("outer_loop",16);
185 	&mov	(&wparam(1),$b);		# save input
186 	&mov	(&wparam(0),$a);		# save output
187 	&mov	(&wparam(2),$c);		# save len
# "expand 32-byte k" sigma constants.
189 	&mov	($a,0x61707865);
190 	&mov	(&DWP(4*1,"esp"),0x3320646e);
191 	&mov	(&DWP(4*2,"esp"),0x79622d32);
192 	&mov	(&DWP(4*3,"esp"),0x6b206574);
194 	&mov	($b, &DWP(64+4*5,"esp"));	# copy key material
195 	&mov	($b_,&DWP(64+4*6,"esp"));
196 	&mov	($c, &DWP(64+4*10,"esp"));
197 	&mov	($c_,&DWP(64+4*11,"esp"));
198 	&mov	($d, &DWP(64+4*13,"esp"));
199 	&mov	($d_,&DWP(64+4*14,"esp"));
200 	&mov	(&DWP(4*5,"esp"),$b);
201 	&mov	(&DWP(4*6,"esp"),$b_);
202 	&mov	(&DWP(4*10,"esp"),$c);
203 	&mov	(&DWP(4*11,"esp"),$c_);
204 	&mov	(&DWP(4*13,"esp"),$d);
205 	&mov	(&DWP(4*14,"esp"),$d_);
207 	&mov	($b, &DWP(64+4*7,"esp"));
208 	&mov	($d_,&DWP(64+4*15,"esp"));
209 	&mov	($d, &DWP(64+4*12,"esp"));
210 	&mov	($b_,&DWP(64+4*4,"esp"));
211 	&mov	($c, &DWP(64+4*8,"esp"));
212 	&mov	($c_,&DWP(64+4*9,"esp"));
# 32-bit block counter lives in word 12; incremented once per block.
213 	&add	($d,1);				# counter value
214 	&mov	(&DWP(4*7,"esp"),$b);
215 	&mov	(&DWP(4*15,"esp"),$d_);
216 	&mov	(&DWP(64+4*12,"esp"),$d);	# save counter value
# 10 iterations of the double round = 20 ChaCha rounds.
218 	&mov	($b,10);			# loop counter
219 	&jmp	(&label("loop"));
221 &set_label("loop",16);
# Hoisted a+=b of the first quarter-round (pairs with "elsewhere" in
# QUARTERROUND's tail).
222 	&add	($a,$b_);			# elsewhere
223 	&mov	(&DWP(128,"esp"),$b);		# save loop counter
# Column round followed by diagonal round.
225 	&QUARTERROUND(0, 4, 8, 12, 0);
226 	&QUARTERROUND(1, 5, 9, 13, 1);
227 	&QUARTERROUND(2, 6,10, 14, 2);
228 	&QUARTERROUND(3, 7,11, 15, 3);
229 	&QUARTERROUND(0, 5,10, 15, 4);
230 	&QUARTERROUND(1, 6,11, 12, 5);
231 	&QUARTERROUND(2, 7, 8, 13, 6);
232 	&QUARTERROUND(3, 4, 9, 14, 7);
234 	&jnz	(&label("loop"));
236 	&mov	($b,&wparam(2));		# load len
# Feed-forward: add original key material back into the permuted state.
238 	&add	($a,0x61707865);		# accumulate key material
239 	&add	($b_,&DWP(64+4*4,"esp"));
240 	&add	($c, &DWP(64+4*8,"esp"));
241 	&add	($c_,&DWP(64+4*9,"esp"));
# Partial final block (<64 bytes) takes the byte-wise tail path.
244 	&jb	(&label("tail"));
246 	&mov	($b,&wparam(1));		# load input pointer
247 	&add	($d, &DWP(64+4*12,"esp"));
248 	&add	($d_,&DWP(64+4*14,"esp"));
# XOR keystream with input and write out, a handful of words at a time,
# reloading spilled words from the frame between batches.
250 	&xor	($a, &DWP(4*0,$b));		# xor with input
251 	&xor	($b_,&DWP(4*4,$b));
252 	&mov	(&DWP(4*0,"esp"),$a);
253 	&mov	($a,&wparam(0));		# load output pointer
254 	&xor	($c, &DWP(4*8,$b));
255 	&xor	($c_,&DWP(4*9,$b));
256 	&xor	($d, &DWP(4*12,$b));
257 	&xor	($d_,&DWP(4*14,$b));
258 	&mov	(&DWP(4*4,$a),$b_);		# write output
259 	&mov	(&DWP(4*8,$a),$c);
260 	&mov	(&DWP(4*9,$a),$c_);
261 	&mov	(&DWP(4*12,$a),$d);
262 	&mov	(&DWP(4*14,$a),$d_);
264 	&mov	($b_,&DWP(4*1,"esp"));
265 	&mov	($c, &DWP(4*2,"esp"));
266 	&mov	($c_,&DWP(4*3,"esp"));
267 	&mov	($d, &DWP(4*5,"esp"));
268 	&mov	($d_,&DWP(4*6,"esp"));
269 	&add	($b_,0x3320646e);		# accumulate key material
270 	&add	($c, 0x79622d32);
271 	&add	($c_,0x6b206574);
272 	&add	($d, &DWP(64+4*5,"esp"));
273 	&add	($d_,&DWP(64+4*6,"esp"));
274 	&xor	($b_,&DWP(4*1,$b));
275 	&xor	($c, &DWP(4*2,$b));
276 	&xor	($c_,&DWP(4*3,$b));
277 	&xor	($d, &DWP(4*5,$b));
278 	&xor	($d_,&DWP(4*6,$b));
279 	&mov	(&DWP(4*1,$a),$b_);
280 	&mov	(&DWP(4*2,$a),$c);
281 	&mov	(&DWP(4*3,$a),$c_);
282 	&mov	(&DWP(4*5,$a),$d);
283 	&mov	(&DWP(4*6,$a),$d_);
285 	&mov	($b_,&DWP(4*7,"esp"));
286 	&mov	($c, &DWP(4*10,"esp"));
287 	&mov	($c_,&DWP(4*11,"esp"));
288 	&mov	($d, &DWP(4*13,"esp"));
289 	&mov	($d_,&DWP(4*15,"esp"));
290 	&add	($b_,&DWP(64+4*7,"esp"));
291 	&add	($c, &DWP(64+4*10,"esp"));
292 	&add	($c_,&DWP(64+4*11,"esp"));
293 	&add	($d, &DWP(64+4*13,"esp"));
294 	&add	($d_,&DWP(64+4*15,"esp"));
295 	&xor	($b_,&DWP(4*7,$b));
296 	&xor	($c, &DWP(4*10,$b));
297 	&xor	($c_,&DWP(4*11,$b));
298 	&xor	($d, &DWP(4*13,$b));
299 	&xor	($d_,&DWP(4*15,$b));
# Advance input pointer past the 64-byte block just consumed.
300 	&lea	($b,&DWP(4*16,$b));
301 	&mov	(&DWP(4*7,$a),$b_);
302 	&mov	($b_,&DWP(4*0,"esp"));
303 	&mov	(&DWP(4*10,$a),$c);
304 	&mov	($c,&wparam(2));		# len
305 	&mov	(&DWP(4*11,$a),$c_);
306 	&mov	(&DWP(4*13,$a),$d);
307 	&mov	(&DWP(4*15,$a),$d_);
308 	&mov	(&DWP(4*0,$a),$b_);
# Advance output pointer; loop while whole blocks remain.
309 	&lea	($a,&DWP(4*16,$a));
311 	&jnz	(&label("outer_loop"));
313 	&jmp	(&label("done"));
# Tail path: finish the feed-forward, park the whole keystream block on the
# stack, then copy/xor the remaining bytes one at a time.
316 	&add	($d, &DWP(64+4*12,"esp"));
317 	&add	($d_,&DWP(64+4*14,"esp"));
318 	&mov	(&DWP(4*0,"esp"),$a);
319 	&mov	(&DWP(4*4,"esp"),$b_);
320 	&mov	(&DWP(4*8,"esp"),$c);
321 	&mov	(&DWP(4*9,"esp"),$c_);
322 	&mov	(&DWP(4*12,"esp"),$d);
323 	&mov	(&DWP(4*14,"esp"),$d_);
325 	&mov	($b_,&DWP(4*1,"esp"));
326 	&mov	($c, &DWP(4*2,"esp"));
327 	&mov	($c_,&DWP(4*3,"esp"));
328 	&mov	($d, &DWP(4*5,"esp"));
329 	&mov	($d_,&DWP(4*6,"esp"));
330 	&add	($b_,0x3320646e);		# accumulate key material
331 	&add	($c, 0x79622d32);
332 	&add	($c_,0x6b206574);
333 	&add	($d, &DWP(64+4*5,"esp"));
334 	&add	($d_,&DWP(64+4*6,"esp"));
335 	&mov	(&DWP(4*1,"esp"),$b_);
336 	&mov	(&DWP(4*2,"esp"),$c);
337 	&mov	(&DWP(4*3,"esp"),$c_);
338 	&mov	(&DWP(4*5,"esp"),$d);
339 	&mov	(&DWP(4*6,"esp"),$d_);
341 	&mov	($b_,&DWP(4*7,"esp"));
342 	&mov	($c, &DWP(4*10,"esp"));
343 	&mov	($c_,&DWP(4*11,"esp"));
344 	&mov	($d, &DWP(4*13,"esp"));
345 	&mov	($d_,&DWP(4*15,"esp"));
346 	&add	($b_,&DWP(64+4*7,"esp"));
347 	&add	($c, &DWP(64+4*10,"esp"));
348 	&add	($c_,&DWP(64+4*11,"esp"));
349 	&add	($d, &DWP(64+4*13,"esp"));
350 	&add	($d_,&DWP(64+4*15,"esp"));
351 	&mov	(&DWP(4*7,"esp"),$b_);
352 	&mov	($b_,&wparam(1));		# load input
353 	&mov	(&DWP(4*10,"esp"),$c);
354 	&mov	($c,&wparam(0));		# load output
355 	&mov	(&DWP(4*11,"esp"),$c_);
357 	&mov	(&DWP(4*13,"esp"),$d);
358 	&mov	(&DWP(4*15,"esp"),$d_);
# Byte-at-a-time xor of input with the on-stack keystream block.
362 &set_label("tail_loop");
363 	&movb	("al",&BP(0,$c_,$b_));
364 	&movb	("dl",&BP(0,"esp",$c_));
365 	&lea	($c_,&DWP(1,$c_));
367 	&mov	(&BP(-1,$c,$c_),"al");
369 	&jnz	(&label("tail_loop"));
373 &set_label("no_data");
374 &function_end("ChaCha20_ctr32");
# SIMD register map for the 4x-parallel SSSE3 path: four live state words
# plus four shadows, cycled through xmm0..xmm7; remaining state lives in
# the -128-biased frame addressed via "ebx".
377 my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
378 my ($out,$inp,$len)=("edi","esi","ecx");
# SSSE3 quarter-round generator: same next/previous index bookkeeping and
# software pipelining as the scalar QUARTERROUND, but on 4 blocks at once.
# NOTE(review): interior lines (rotate-by-12/7 sequences, the closing
# brace) are elided in this excerpt; code kept byte-identical.
380 sub QUARTERROUND_SSSE3 {
381 my ($ai,$bi,$ci,$di,$i)=@_;
382 my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
383 my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous
387 	# 0	 4	 8	12		< even round
391 	# 0	 5	10	15		< odd round
398 	($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
401 	($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
404 	($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
407 	($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
# First a+=b / d^=a of the round are hoisted into the previous round's tail.
410 	#&paddd	($xa,$xb);			# see elsewhere
411 	#&pxor	($xd,$xa);			# see elsewhere
412 	 &movdqa(&QWP(16*$cp-128,"ebx"),$xc_)	if ($ai>0 && $ai<3);
# Byte-shuffle implements rotate-left-16 (table at [eax+0]).
413 	&pshufb	($xd,&QWP(0,"eax"));		# rot16
414 	 &movdqa(&QWP(16*$bp-128,"ebx"),$xb_)	if ($i!=0);
416 	 &movdqa($xc_,&QWP(16*$cn-128,"ebx"))	if ($ai>0 && $ai<3);
418 	 &movdqa($xb_,&QWP(16*$bn-128,"ebx"))	if ($i<7);
# No SSSE3 variable-rotate: emulate via shift/shift/or using $xa_ as scratch.
419 	&movdqa	($xa_,$xb);			# borrow as temporary
423 	 &movdqa($xa_,&QWP(16*$an-128,"ebx"));
425 	 &movdqa($xd_,&QWP(16*$dn-128,"ebx"))	if ($di!=$dn);
427 	 &movdqa	(&QWP(16*$ai-128,"ebx"),$xa);
# Byte-shuffle implements rotate-left-8 (table at [eax+16]).
428 	&pshufb	($xd,&QWP(16,"eax"));		# rot8
430 	 &movdqa	(&QWP(16*$di-128,"ebx"),$xd)	if ($di!=$dn);
431 	 &movdqa	($xd_,$xd)			if ($di==$dn);
# Hoisted a+=b / d^=a belonging to the next quarter-round ("elsewhere").
433 	 &paddd	($xa_,$xb_)			if ($i<7);	# elsewhere
434 	&movdqa	($xa,$xb);			# borrow as temporary
437 	 &pxor	($xd_,$xa_)			if ($i<7);	# elsewhere
# Rotate the live/shadow register pairs for the next invocation.
440 	($xa,$xa_)=($xa_,$xa);
441 	($xb,$xb_)=($xb_,$xb);
442 	($xc,$xc_)=($xc_,$xc);
443 	($xd,$xd_)=($xd_,$xd);
# --- Generator for ChaCha20_ssse3: 4-blocks-in-parallel SSSE3 path, with a
# 1x fallback for short inputs and a byte-wise tail.  NOTE(review): this
# excerpt elides many original lines (stack alignment, the SSSE3ROUND body,
# several loads/branches); code below is byte-identical, annotated only.
446 &function_begin("ChaCha20_ssse3");
447 &set_label("ssse3_shortcut");
# Prefer the XOP path when the CPU advertises it.
449 	&test		(&DWP(4,"ebp"),1<<11);		# test XOP bit
450 	&jnz		(&label("xop_shortcut"));
453 	&mov		($out,&wparam(0));
454 	&mov		($inp,&wparam(1));
455 	&mov		($len,&wparam(2));
456 	&mov		("edx",&wparam(3));		# key
457 	&mov		("ebx",&wparam(4));		# counter and nonce
# Original ebp saved at frame offset 512 for restoration in "done".
462 	&mov		(&DWP(512,"esp"),"ebp");
# PIC address of the ssse3_data tables (rot16/rot8 shuffles, sigma, counters).
464 	&lea		("eax",&DWP(&label("ssse3_data")."-".
465 				&label("pic_point"),"eax"));
466 	&movdqu		("xmm3",&QWP(0,"ebx"));		# counter and nonce
471 	&mov		(&DWP(512+4,"esp"),"edx");	# offload pointers
472 	&mov		(&DWP(512+8,"esp"),"ebx");
473 	&sub		($len,64*4);			# bias len
474 	&lea		("ebp",&DWP(256+128,"esp"));	# size optimization
# Broadcast each state word across an xmm register (4 lanes = 4 blocks)
# and park the 16 broadcast words in the key-material frame at ebp-128.
476 	&movdqu		("xmm7",&QWP(0,"edx"));		# key
477 	&pshufd		("xmm0","xmm3",0x00);
478 	&pshufd		("xmm1","xmm3",0x55);
479 	&pshufd		("xmm2","xmm3",0xaa);
480 	&pshufd		("xmm3","xmm3",0xff);
# Give the 4 lanes distinct counter values (+0,+1,+2,+3 via add/sub tables).
481 	&paddd		("xmm0",&QWP(16*3,"eax"));	# fix counters
482 	&pshufd		("xmm4","xmm7",0x00);
483 	&pshufd		("xmm5","xmm7",0x55);
484 	&psubd		("xmm0",&QWP(16*4,"eax"));
485 	&pshufd		("xmm6","xmm7",0xaa);
486 	&pshufd		("xmm7","xmm7",0xff);
487 	&movdqa		(&QWP(16*12-128,"ebp"),"xmm0");
488 	&movdqa		(&QWP(16*13-128,"ebp"),"xmm1");
489 	&movdqa		(&QWP(16*14-128,"ebp"),"xmm2");
490 	&movdqa		(&QWP(16*15-128,"ebp"),"xmm3");
491 	&movdqu		("xmm3",&QWP(16,"edx"));	# key
492 	&movdqa		(&QWP(16*4-128,"ebp"),"xmm4");
493 	&movdqa		(&QWP(16*5-128,"ebp"),"xmm5");
494 	&movdqa		(&QWP(16*6-128,"ebp"),"xmm6");
495 	&movdqa		(&QWP(16*7-128,"ebp"),"xmm7");
496 	&movdqa		("xmm7",&QWP(16*2,"eax"));	# sigma
497 	&lea		("ebx",&DWP(128,"esp"));	# size optimization
499 	&pshufd		("xmm0","xmm3",0x00);
500 	&pshufd		("xmm1","xmm3",0x55);
501 	&pshufd		("xmm2","xmm3",0xaa);
502 	&pshufd		("xmm3","xmm3",0xff);
503 	&pshufd		("xmm4","xmm7",0x00);
504 	&pshufd		("xmm5","xmm7",0x55);
505 	&pshufd		("xmm6","xmm7",0xaa);
506 	&pshufd		("xmm7","xmm7",0xff);
507 	&movdqa		(&QWP(16*8-128,"ebp"),"xmm0");
508 	&movdqa		(&QWP(16*9-128,"ebp"),"xmm1");
509 	&movdqa		(&QWP(16*10-128,"ebp"),"xmm2");
510 	&movdqa		(&QWP(16*11-128,"ebp"),"xmm3");
511 	&movdqa		(&QWP(16*0-128,"ebp"),"xmm4");
512 	&movdqa		(&QWP(16*1-128,"ebp"),"xmm5");
513 	&movdqa		(&QWP(16*2-128,"ebp"),"xmm6");
514 	&movdqa		(&QWP(16*3-128,"ebp"),"xmm7");
516 	&lea		($inp,&DWP(128,$inp));		# size optimization
517 	&lea		($out,&DWP(128,$out));		# size optimization
518 	&jmp		(&label("outer_loop"));
# Outer loop: refresh the 16 working vectors at ebx-128 from the key
# material at ebp-128 (commented-out moves are handled implicitly by the
# round pipeline), bump the 4 per-lane counters, run 10 double rounds.
520 &set_label("outer_loop",16);
521 	#&movdqa	("xmm0",&QWP(16*0-128,"ebp"));	# copy key material
522 	&movdqa		("xmm1",&QWP(16*1-128,"ebp"));
523 	&movdqa		("xmm2",&QWP(16*2-128,"ebp"));
524 	&movdqa		("xmm3",&QWP(16*3-128,"ebp"));
525 	#&movdqa	("xmm4",&QWP(16*4-128,"ebp"));
526 	&movdqa		("xmm5",&QWP(16*5-128,"ebp"));
527 	&movdqa		("xmm6",&QWP(16*6-128,"ebp"));
528 	&movdqa		("xmm7",&QWP(16*7-128,"ebp"));
529 	#&movdqa	(&QWP(16*0-128,"ebx"),"xmm0");
530 	&movdqa		(&QWP(16*1-128,"ebx"),"xmm1");
531 	&movdqa		(&QWP(16*2-128,"ebx"),"xmm2");
532 	&movdqa		(&QWP(16*3-128,"ebx"),"xmm3");
533 	#&movdqa	(&QWP(16*4-128,"ebx"),"xmm4");
534 	&movdqa		(&QWP(16*5-128,"ebx"),"xmm5");
535 	&movdqa		(&QWP(16*6-128,"ebx"),"xmm6");
536 	&movdqa		(&QWP(16*7-128,"ebx"),"xmm7");
537 	#&movdqa	("xmm0",&QWP(16*8-128,"ebp"));
538 	#&movdqa	("xmm1",&QWP(16*9-128,"ebp"));
539 	&movdqa		("xmm2",&QWP(16*10-128,"ebp"));
540 	&movdqa		("xmm3",&QWP(16*11-128,"ebp"));
541 	&movdqa		("xmm4",&QWP(16*12-128,"ebp"));
542 	&movdqa		("xmm5",&QWP(16*13-128,"ebp"));
543 	&movdqa		("xmm6",&QWP(16*14-128,"ebp"));
544 	&movdqa		("xmm7",&QWP(16*15-128,"ebp"));
545 	&paddd		("xmm4",&QWP(16*4,"eax"));	# counter value
546 	#&movdqa	(&QWP(16*8-128,"ebx"),"xmm0");
547 	#&movdqa	(&QWP(16*9-128,"ebx"),"xmm1");
548 	&movdqa		(&QWP(16*10-128,"ebx"),"xmm2");
549 	&movdqa		(&QWP(16*11-128,"ebx"),"xmm3");
550 	&movdqa		(&QWP(16*12-128,"ebx"),"xmm4");
551 	&movdqa		(&QWP(16*13-128,"ebx"),"xmm5");
552 	&movdqa		(&QWP(16*14-128,"ebx"),"xmm6");
553 	&movdqa		(&QWP(16*15-128,"ebx"),"xmm7");
554 	&movdqa		(&QWP(16*12-128,"ebp"),"xmm4");	# save counter value
556 	&movdqa		($xa, &QWP(16*0-128,"ebp"));
557 	&movdqa		($xd, "xmm4");
558 	&movdqa		($xb_,&QWP(16*4-128,"ebp"));
559 	&movdqa		($xc, &QWP(16*8-128,"ebp"));
560 	&movdqa		($xc_,&QWP(16*9-128,"ebp"));
562 	&mov		("edx",10);			# loop counter
# Round loop: the first a+=b / d^=a are performed here ("elsewhere") to
# prime QUARTERROUND_SSSE3's software pipeline.
565 &set_label("loop",16);
566 		&paddd	($xa,$xb_);		# elsewhere
568 		&pxor	($xd,$xa);		# elsewhere
569 	&QUARTERROUND_SSSE3(0, 4, 8, 12, 0);
570 	&QUARTERROUND_SSSE3(1, 5, 9, 13, 1);
571 	&QUARTERROUND_SSSE3(2, 6,10, 14, 2);
572 	&QUARTERROUND_SSSE3(3, 7,11, 15, 3);
573 	&QUARTERROUND_SSSE3(0, 5,10, 15, 4);
574 	&QUARTERROUND_SSSE3(1, 6,11, 12, 5);
575 	&QUARTERROUND_SSSE3(2, 7, 8, 13, 6);
576 	&QUARTERROUND_SSSE3(3, 4, 9, 14, 7);
578 	&jnz		(&label("loop"));
# Flush the still-live vectors back to the working frame before the
# transpose/store pass below.
580 	&movdqa		(&QWP(16*4-128,"ebx"),$xb_);
581 	&movdqa		(&QWP(16*8-128,"ebx"),$xc);
582 	&movdqa		(&QWP(16*9-128,"ebx"),$xc_);
583 	&movdqa		(&QWP(16*12-128,"ebx"),$xd);
584 	&movdqa		(&QWP(16*14-128,"ebx"),$xd_);
586 my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
588 	#&movdqa	($xa0,&QWP(16*0-128,"ebx"));	# it's there
589 	&movdqa		($xa1,&QWP(16*1-128,"ebx"));
590 	&movdqa		($xa2,&QWP(16*2-128,"ebx"));
591 	&movdqa		($xa3,&QWP(16*3-128,"ebx"));
# Four passes (one per 4-word group): feed-forward, transpose lanes back to
# per-block order, xor with input, store 64 bytes per pass.
593 for($i=0;$i<256;$i+=64) {
594 	&paddd		($xa0,&QWP($i+16*0-128,"ebp"));	# accumulate key material
595 	&paddd		($xa1,&QWP($i+16*1-128,"ebp"));
596 	&paddd		($xa2,&QWP($i+16*2-128,"ebp"));
597 	&paddd		($xa3,&QWP($i+16*3-128,"ebp"));
599 	&movdqa		($xt2,$xa0);		# "de-interlace" data
600 	&punpckldq	($xa0,$xa1);
602 	&punpckldq	($xa2,$xa3);
603 	&punpckhdq	($xt2,$xa1);
604 	&punpckhdq	($xt3,$xa3);
606 	&punpcklqdq	($xa0,$xa2);		# "a0"
608 	&punpcklqdq	($xt2,$xt3);		# "a2"
609 	&punpckhqdq	($xa1,$xa2);		# "a1"
610 	&punpckhqdq	($xa3,$xt3);		# "a3"
612 	#($xa2,$xt2)=($xt2,$xa2);
614 	&movdqu		($xt0,&QWP(64*0-128,$inp));	# load input
615 	&movdqu		($xt1,&QWP(64*1-128,$inp));
616 	&movdqu		($xa2,&QWP(64*2-128,$inp));
617 	&movdqu		($xt3,&QWP(64*3-128,$inp));
618 	&lea		($inp,&QWP($i<192?16:(64*4-16*3),$inp));
620 	&movdqa		($xa0,&QWP($i+16*4-128,"ebx"))	if ($i<192);
622 	&movdqa		($xa1,&QWP($i+16*5-128,"ebx"))	if ($i<192);
624 	&movdqa		($xa2,&QWP($i+16*6-128,"ebx"))	if ($i<192);
626 	&movdqa		($xa3,&QWP($i+16*7-128,"ebx"))	if ($i<192);
627 	&movdqu		(&QWP(64*0-128,$out),$xt0);	# store output
628 	&movdqu		(&QWP(64*1-128,$out),$xt1);
629 	&movdqu		(&QWP(64*2-128,$out),$xt2);
630 	&movdqu		(&QWP(64*3-128,$out),$xt3);
631 	&lea		($out,&QWP($i<192?16:(64*4-16*3),$out));
634 	&jnc		(&label("outer_loop"));
637 	&jz		(&label("done"));
# Fewer than 256 bytes remain: fall back to the 1x (single-block) path.
639 	&mov		("ebx",&DWP(512+8,"esp"));	# restore pointers
640 	&lea		($inp,&DWP(-128,$inp));
641 	&mov		("edx",&DWP(512+4,"esp"));
642 	&lea		($out,&DWP(-128,$out));
# Rebuild counter||nonce: lane-0 counter from the frame plus four, merged
# with the nonce words masked in from memory.
644 	&movd		("xmm2",&DWP(16*12-128,"ebp"));	# counter value
645 	&movdqu		("xmm3",&QWP(0,"ebx"));
646 	&paddd		("xmm2",&QWP(16*6,"eax"));	# +four
647 	&pand		("xmm3",&QWP(16*7,"eax"));
648 	&por		("xmm3","xmm2");		# counter value
650 my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));
# 1x round helper; its body (and most surrounding lines) are elided in this
# excerpt.
652 sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
# 1x path setup: sigma, key halves, rotation shuffle tables, and the state
# copy parked at esp+0..48 for the feed-forward.
677 	&movdqa		($a,&QWP(16*2,"eax"));		# sigma
678 	&movdqu		($b,&QWP(0,"edx"));
679 	&movdqu		($c,&QWP(16,"edx"));
680 	#&movdqu	($d,&QWP(0,"ebx"));		# already loaded
681 	&movdqa		($rot16,&QWP(0,"eax"));
682 	&movdqa		($rot24,&QWP(16,"eax"));
683 	&mov		(&DWP(16*3,"esp"),"ebp");
685 	&movdqa		(&QWP(16*0,"esp"),$a);
686 	&movdqa		(&QWP(16*1,"esp"),$b);
687 	&movdqa		(&QWP(16*2,"esp"),$c);
688 	&movdqa		(&QWP(16*3,"esp"),$d);
690 	&jmp		(&label("loop1x"));
# Subsequent single blocks: reload saved state and bump the counter by one.
692 &set_label("outer1x",16);
693 	&movdqa		($d,&QWP(16*5,"eax"));		# one
694 	&movdqa		($a,&QWP(16*0,"esp"));
695 	&movdqa		($b,&QWP(16*1,"esp"));
696 	&movdqa		($c,&QWP(16*2,"esp"));
697 	&paddd		($d,&QWP(16*3,"esp"));
699 	&movdqa		(&QWP(16*3,"esp"),$d);
700 	&jmp		(&label("loop1x"));
# Double round on one block: shuffle lanes into diagonal form, round,
# shuffle back (SSSE3ROUND invocations between shuffles are elided here).
702 &set_label("loop1x",16);
704 	&pshufd	($c,$c,0b01001110);
705 	&pshufd	($b,$b,0b00111001);
706 	&pshufd	($d,$d,0b10010011);
710 	&pshufd	($c,$c,0b01001110);
711 	&pshufd	($b,$b,0b10010011);
712 	&pshufd	($d,$d,0b00111001);
715 	&jnz		(&label("loop1x"));
# Feed-forward with the saved state.
717 	&paddd		($a,&QWP(16*0,"esp"));
718 	&paddd		($b,&QWP(16*1,"esp"));
719 	&paddd		($c,&QWP(16*2,"esp"));
720 	&paddd		($d,&QWP(16*3,"esp"));
723 	&jb		(&label("tail"));
# Whole 64-byte block: xor with input and store.
725 	&movdqu		($t,&QWP(16*0,$inp));
726 	&movdqu		($t1,&QWP(16*1,$inp));
727 	&pxor		($a,$t);		# xor with input
728 	&movdqu		($t,&QWP(16*2,$inp));
730 	&movdqu		($t1,&QWP(16*3,$inp));
733 	&lea		($inp,&DWP(16*4,$inp));	# inp+=64
735 	&movdqu		(&QWP(16*0,$out),$a);	# write output
736 	&movdqu		(&QWP(16*1,$out),$b);
737 	&movdqu		(&QWP(16*2,$out),$c);
738 	&movdqu		(&QWP(16*3,$out),$d);
739 	&lea		($out,&DWP(16*4,$out));	# inp+=64
742 	&jnz		(&label("outer1x"));
744 	&jmp		(&label("done"));
# Tail: park keystream on the stack, then byte-wise xor loop.
747 	&movdqa		(&QWP(16*0,"esp"),$a);
748 	&movdqa		(&QWP(16*1,"esp"),$b);
749 	&movdqa		(&QWP(16*2,"esp"),$c);
750 	&movdqa		(&QWP(16*3,"esp"),$d);
756 &set_label("tail_loop");
757 	&movb		("al",&BP(0,"esp","ebp"));
758 	&movb		("dl",&BP(0,$inp,"ebp"));
759 	&lea		("ebp",&DWP(1,"ebp"));
761 	&movb		(&BP(-1,$out,"ebp"),"al");
763 	&jnz		(&label("tail_loop"));
# Restore the caller's ebp saved at frame offset 512 and return.
766 	&mov		("esp",&DWP(512,"esp"));
767 &function_end("ChaCha20_ssse3");
# Constant tables: rot16 shuffle, rot8 shuffle, sigma, counter adjust
# vectors, and a nonce mask (several data lines elided in this excerpt).
770 &set_label("ssse3_data");
771 &data_byte(0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd);
772 &data_byte(0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe);
773 &data_word(0x61707865,0x3320646e,0x79622d32,0x6b206574);
778 &data_word(0,-1,-1,-1);
781 &asciz	("ChaCha20 for x86, CRYPTOGAMS by <appro\@openssl.org>");
# SIMD register map for the 4x-parallel XOP path (mirrors the SSSE3 path).
784 my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
785 my ($out,$inp,$len)=("edi","esi","ecx");
# XOP quarter-round generator: same pipelining as QUARTERROUND_SSSE3 but
# rotations use the single-instruction vprotd, and the three-operand AVX
# encoding lets first-use operands come straight from their source register
# (see the $i!=0 / $di==$dn operand selections below).
# NOTE(review): interior lines (including the rotate-by-7 tail and closing
# brace) are elided in this excerpt; code kept byte-identical.
787 sub QUARTERROUND_XOP {
788 my ($ai,$bi,$ci,$di,$i)=@_;
789 my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
790 my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous
794 	# 0	 4	 8	12		< even round
798 	# 0	 5	10	15		< odd round
805 	($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
808 	($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
811 	($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
814 	($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
# First a+=b / d^=a hoisted into the previous round's tail ("elsewhere").
817 	#&vpaddd	($xa,$xa,$xb);		# see elsewhere
818 	#&vpxor		($xd,$xd,$xa);		# see elsewhere
819 	 &vmovdqa	(&QWP(16*$cp-128,"ebx"),$xc_)	if ($ai>0 && $ai<3);
820 	&vprotd		($xd,$xd,16);
821 	 &vmovdqa	(&QWP(16*$bp-128,"ebx"),$xb_)	if ($i!=0);
822 	&vpaddd		($xc,$xc,$xd);
823 	 &vmovdqa	($xc_,&QWP(16*$cn-128,"ebx"))	if ($ai>0 && $ai<3);
# On the first quarter-round $xb is still in its shadow register.
824 	&vpxor		($xb,$i!=0?$xb:$xb_,$xc);
825 	 &vmovdqa	($xa_,&QWP(16*$an-128,"ebx"));
826 	&vprotd		($xb,$xb,12);
827 	 &vmovdqa	($xb_,&QWP(16*$bn-128,"ebx"))	if ($i<7);
828 	&vpaddd		($xa,$xa,$xb);
829 	 &vmovdqa	($xd_,&QWP(16*$dn-128,"ebx"))	if ($di!=$dn);
830 	&vpxor		($xd,$xd,$xa);
# Hoisted a+=b / d^=a belonging to the next quarter-round ("elsewhere").
831 	 &vpaddd	($xa_,$xa_,$xb_)	if ($i<7);	# elsewhere
833 	 &vmovdqa	(&QWP(16*$ai-128,"ebx"),$xa);
834 	&vpaddd		($xc,$xc,$xd);
835 	 &vmovdqa	(&QWP(16*$di-128,"ebx"),$xd)	if ($di!=$dn);
836 	&vpxor		($xb,$xb,$xc);
837 	 &vpxor		($xd_,$di==$dn?$xd:$xd_,$xa_)	if ($i<7);	# elsewhere
# Rotate live/shadow register pairs for the next invocation.
840 	($xa,$xa_)=($xa_,$xa);
841 	($xb,$xb_)=($xb_,$xb);
842 	($xc,$xc_)=($xc_,$xc);
843 	($xd,$xd_)=($xd_,$xd);
# --- Generator for ChaCha20_xop: AMD XOP variant of the 4x path; structure
# parallels ChaCha20_ssse3 and it reuses the same ssse3_data tables.
# NOTE(review): this excerpt elides many original lines; code below is
# byte-identical, annotated only.
846 &function_begin("ChaCha20_xop");
847 &set_label("xop_shortcut");
848 	&mov		($out,&wparam(0));
849 	&mov		($inp,&wparam(1));
850 	&mov		($len,&wparam(2));
851 	&mov		("edx",&wparam(3));		# key
852 	&mov		("ebx",&wparam(4));		# counter and nonce
# Caller's ebp saved at frame offset 512.
858 	&mov		(&DWP(512,"esp"),"ebp");
860 	&lea		("eax",&DWP(&label("ssse3_data")."-".
861 				&label("pic_point"),"eax"));
862 	&vmovdqu	("xmm3",&QWP(0,"ebx"));		# counter and nonce
867 	&mov		(&DWP(512+4,"esp"),"edx");	# offload pointers
868 	&mov		(&DWP(512+8,"esp"),"ebx");
869 	&sub		($len,64*4);			# bias len
870 	&lea		("ebp",&DWP(256+128,"esp"));	# size optimization
# Broadcast state words across lanes and park key material at ebp-128,
# giving the 4 lanes distinct counter values (+0..+3).
872 	&vmovdqu	("xmm7",&QWP(0,"edx"));		# key
873 	&vpshufd	("xmm0","xmm3",0x00);
874 	&vpshufd	("xmm1","xmm3",0x55);
875 	&vpshufd	("xmm2","xmm3",0xaa);
876 	&vpshufd	("xmm3","xmm3",0xff);
877 	&vpaddd		("xmm0","xmm0",&QWP(16*3,"eax"));	# fix counters
878 	&vpshufd	("xmm4","xmm7",0x00);
879 	&vpshufd	("xmm5","xmm7",0x55);
880 	&vpsubd		("xmm0","xmm0",&QWP(16*4,"eax"));
881 	&vpshufd	("xmm6","xmm7",0xaa);
882 	&vpshufd	("xmm7","xmm7",0xff);
883 	&vmovdqa	(&QWP(16*12-128,"ebp"),"xmm0");
884 	&vmovdqa	(&QWP(16*13-128,"ebp"),"xmm1");
885 	&vmovdqa	(&QWP(16*14-128,"ebp"),"xmm2");
886 	&vmovdqa	(&QWP(16*15-128,"ebp"),"xmm3");
887 	&vmovdqu	("xmm3",&QWP(16,"edx"));	# key
888 	&vmovdqa	(&QWP(16*4-128,"ebp"),"xmm4");
889 	&vmovdqa	(&QWP(16*5-128,"ebp"),"xmm5");
890 	&vmovdqa	(&QWP(16*6-128,"ebp"),"xmm6");
891 	&vmovdqa	(&QWP(16*7-128,"ebp"),"xmm7");
892 	&vmovdqa	("xmm7",&QWP(16*2,"eax"));	# sigma
893 	&lea		("ebx",&DWP(128,"esp"));	# size optimization
895 	&vpshufd	("xmm0","xmm3",0x00);
896 	&vpshufd	("xmm1","xmm3",0x55);
897 	&vpshufd	("xmm2","xmm3",0xaa);
898 	&vpshufd	("xmm3","xmm3",0xff);
899 	&vpshufd	("xmm4","xmm7",0x00);
900 	&vpshufd	("xmm5","xmm7",0x55);
901 	&vpshufd	("xmm6","xmm7",0xaa);
902 	&vpshufd	("xmm7","xmm7",0xff);
903 	&vmovdqa	(&QWP(16*8-128,"ebp"),"xmm0");
904 	&vmovdqa	(&QWP(16*9-128,"ebp"),"xmm1");
905 	&vmovdqa	(&QWP(16*10-128,"ebp"),"xmm2");
906 	&vmovdqa	(&QWP(16*11-128,"ebp"),"xmm3");
907 	&vmovdqa	(&QWP(16*0-128,"ebp"),"xmm4");
908 	&vmovdqa	(&QWP(16*1-128,"ebp"),"xmm5");
909 	&vmovdqa	(&QWP(16*2-128,"ebp"),"xmm6");
910 	&vmovdqa	(&QWP(16*3-128,"ebp"),"xmm7");
912 	&lea		($inp,&DWP(128,$inp));		# size optimization
913 	&lea		($out,&DWP(128,$out));		# size optimization
914 	&jmp		(&label("outer_loop"));
# Outer loop: refresh working vectors, bump 4 per-lane counters, then run
# 10 double rounds (commented-out moves are covered by the round pipeline).
916 &set_label("outer_loop",32);
917 	#&vmovdqa	("xmm0",&QWP(16*0-128,"ebp"));	# copy key material
918 	&vmovdqa	("xmm1",&QWP(16*1-128,"ebp"));
919 	&vmovdqa	("xmm2",&QWP(16*2-128,"ebp"));
920 	&vmovdqa	("xmm3",&QWP(16*3-128,"ebp"));
921 	#&vmovdqa	("xmm4",&QWP(16*4-128,"ebp"));
922 	&vmovdqa	("xmm5",&QWP(16*5-128,"ebp"));
923 	&vmovdqa	("xmm6",&QWP(16*6-128,"ebp"));
924 	&vmovdqa	("xmm7",&QWP(16*7-128,"ebp"));
925 	#&vmovdqa	(&QWP(16*0-128,"ebx"),"xmm0");
926 	&vmovdqa	(&QWP(16*1-128,"ebx"),"xmm1");
927 	&vmovdqa	(&QWP(16*2-128,"ebx"),"xmm2");
928 	&vmovdqa	(&QWP(16*3-128,"ebx"),"xmm3");
929 	#&vmovdqa	(&QWP(16*4-128,"ebx"),"xmm4");
930 	&vmovdqa	(&QWP(16*5-128,"ebx"),"xmm5");
931 	&vmovdqa	(&QWP(16*6-128,"ebx"),"xmm6");
932 	&vmovdqa	(&QWP(16*7-128,"ebx"),"xmm7");
933 	#&vmovdqa	("xmm0",&QWP(16*8-128,"ebp"));
934 	#&vmovdqa	("xmm1",&QWP(16*9-128,"ebp"));
935 	&vmovdqa	("xmm2",&QWP(16*10-128,"ebp"));
936 	&vmovdqa	("xmm3",&QWP(16*11-128,"ebp"));
937 	&vmovdqa	("xmm4",&QWP(16*12-128,"ebp"));
938 	&vmovdqa	("xmm5",&QWP(16*13-128,"ebp"));
939 	&vmovdqa	("xmm6",&QWP(16*14-128,"ebp"));
940 	&vmovdqa	("xmm7",&QWP(16*15-128,"ebp"));
941 	&vpaddd		("xmm4","xmm4",&QWP(16*4,"eax"));	# counter value
942 	#&vmovdqa	(&QWP(16*8-128,"ebx"),"xmm0");
943 	#&vmovdqa	(&QWP(16*9-128,"ebx"),"xmm1");
944 	&vmovdqa	(&QWP(16*10-128,"ebx"),"xmm2");
945 	&vmovdqa	(&QWP(16*11-128,"ebx"),"xmm3");
946 	&vmovdqa	(&QWP(16*12-128,"ebx"),"xmm4");
947 	&vmovdqa	(&QWP(16*13-128,"ebx"),"xmm5");
948 	&vmovdqa	(&QWP(16*14-128,"ebx"),"xmm6");
949 	&vmovdqa	(&QWP(16*15-128,"ebx"),"xmm7");
950 	&vmovdqa	(&QWP(16*12-128,"ebp"),"xmm4");	# save counter value
952 	&vmovdqa	($xa, &QWP(16*0-128,"ebp"));
953 	&vmovdqa	($xd, "xmm4");
954 	&vmovdqa	($xb_,&QWP(16*4-128,"ebp"));
955 	&vmovdqa	($xc, &QWP(16*8-128,"ebp"));
956 	&vmovdqa	($xc_,&QWP(16*9-128,"ebp"));
958 	&mov		("edx",10);			# loop counter
# Prime QUARTERROUND_XOP's pipeline with the first a+=b / d^=a.
961 &set_label("loop",32);
962 		&vpaddd	($xa,$xa,$xb_);		# elsewhere
963 		&vpxor	($xd,$xd,$xa);		# elsewhere
964 	&QUARTERROUND_XOP(0, 4, 8, 12, 0);
965 	&QUARTERROUND_XOP(1, 5, 9, 13, 1);
966 	&QUARTERROUND_XOP(2, 6,10, 14, 2);
967 	&QUARTERROUND_XOP(3, 7,11, 15, 3);
968 	&QUARTERROUND_XOP(0, 5,10, 15, 4);
969 	&QUARTERROUND_XOP(1, 6,11, 12, 5);
970 	&QUARTERROUND_XOP(2, 7, 8, 13, 6);
971 	&QUARTERROUND_XOP(3, 4, 9, 14, 7);
973 	&jnz		(&label("loop"));
# Flush still-live vectors before the transpose/store pass.
975 	&vmovdqa	(&QWP(16*4-128,"ebx"),$xb_);
976 	&vmovdqa	(&QWP(16*8-128,"ebx"),$xc);
977 	&vmovdqa	(&QWP(16*9-128,"ebx"),$xc_);
978 	&vmovdqa	(&QWP(16*12-128,"ebx"),$xd);
979 	&vmovdqa	(&QWP(16*14-128,"ebx"),$xd_);
981 my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
983 	#&vmovdqa	($xa0,&QWP(16*0-128,"ebx"));	# it's there
984 	&vmovdqa	($xa1,&QWP(16*1-128,"ebx"));
985 	&vmovdqa	($xa2,&QWP(16*2-128,"ebx"));
986 	&vmovdqa	($xa3,&QWP(16*3-128,"ebx"));
# Four passes: feed-forward, transpose lanes back to per-block order
# (three-operand form needs no scratch copies), xor with input, store.
988 for($i=0;$i<256;$i+=64) {
989 	&vpaddd		($xa0,$xa0,&QWP($i+16*0-128,"ebp"));	# accumulate key material
990 	&vpaddd		($xa1,$xa1,&QWP($i+16*1-128,"ebp"));
991 	&vpaddd		($xa2,$xa2,&QWP($i+16*2-128,"ebp"));
992 	&vpaddd		($xa3,$xa3,&QWP($i+16*3-128,"ebp"));
994 	&vpunpckldq	($xt2,$xa0,$xa1);	# "de-interlace" data
995 	&vpunpckldq	($xt3,$xa2,$xa3);
996 	&vpunpckhdq	($xa0,$xa0,$xa1);
997 	&vpunpckhdq	($xa2,$xa2,$xa3);
998 	&vpunpcklqdq	($xa1,$xt2,$xt3);	# "a0"
999 	&vpunpckhqdq	($xt2,$xt2,$xt3);	# "a1"
1000 	&vpunpcklqdq	($xt3,$xa0,$xa2);	# "a2"
1001 	&vpunpckhqdq	($xa3,$xa0,$xa2);	# "a3"
1003 	&vpxor		($xt0,$xa1,&QWP(64*0-128,$inp));
1004 	&vpxor		($xt1,$xt2,&QWP(64*1-128,$inp));
1005 	&vpxor		($xt2,$xt3,&QWP(64*2-128,$inp));
1006 	&vpxor		($xt3,$xa3,&QWP(64*3-128,$inp));
1007 	&lea		($inp,&QWP($i<192?16:(64*4-16*3),$inp));
1008 	&vmovdqa	($xa0,&QWP($i+16*4-128,"ebx"))	if ($i<192);
1009 	&vmovdqa	($xa1,&QWP($i+16*5-128,"ebx"))	if ($i<192);
1010 	&vmovdqa	($xa2,&QWP($i+16*6-128,"ebx"))	if ($i<192);
1011 	&vmovdqa	($xa3,&QWP($i+16*7-128,"ebx"))	if ($i<192);
1012 	&vmovdqu	(&QWP(64*0-128,$out),$xt0);	# store output
1013 	&vmovdqu	(&QWP(64*1-128,$out),$xt1);
1014 	&vmovdqu	(&QWP(64*2-128,$out),$xt2);
1015 	&vmovdqu	(&QWP(64*3-128,$out),$xt3);
1016 	&lea		($out,&QWP($i<192?16:(64*4-16*3),$out));
1019 	&jnc		(&label("outer_loop"));
1022 	&jz		(&label("done"));
# Fewer than 256 bytes remain: fall back to the 1x single-block path.
1024 	&mov		("ebx",&DWP(512+8,"esp"));	# restore pointers
1025 	&lea		($inp,&DWP(-128,$inp));
1026 	&mov		("edx",&DWP(512+4,"esp"));
1027 	&lea		($out,&DWP(-128,$out));
# Rebuild counter||nonce: saved counter +4, nonce masked in from memory.
1029 	&vmovd		("xmm2",&DWP(16*12-128,"ebp"));	# counter value
1030 	&vmovdqu	("xmm3",&QWP(0,"ebx"));
1031 	&vpaddd		("xmm2","xmm2",&QWP(16*6,"eax"));# +four
1032 	&vpand		("xmm3","xmm3",&QWP(16*7,"eax"));
1033 	&vpor		("xmm3","xmm3","xmm2");		# counter value
1035 my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));
# 1x path setup (the XOPROUND helper between these lines is elided).
1056 	&vmovdqa	($a,&QWP(16*2,"eax"));		# sigma
1057 	&vmovdqu	($b,&QWP(0,"edx"));
1058 	&vmovdqu	($c,&QWP(16,"edx"));
1059 	#&vmovdqu	($d,&QWP(0,"ebx"));		# already loaded
1060 	&vmovdqa	($rot16,&QWP(0,"eax"));
1061 	&vmovdqa	($rot24,&QWP(16,"eax"));
1062 	&mov		(&DWP(16*3,"esp"),"ebp");
1064 	&vmovdqa	(&QWP(16*0,"esp"),$a);
1065 	&vmovdqa	(&QWP(16*1,"esp"),$b);
1066 	&vmovdqa	(&QWP(16*2,"esp"),$c);
1067 	&vmovdqa	(&QWP(16*3,"esp"),$d);
1069 	&jmp		(&label("loop1x"));
# Subsequent single blocks: reload saved state, counter += one.
1071 &set_label("outer1x",16);
1072 	&vmovdqa	($d,&QWP(16*5,"eax"));		# one
1073 	&vmovdqa	($a,&QWP(16*0,"esp"));
1074 	&vmovdqa	($b,&QWP(16*1,"esp"));
1075 	&vmovdqa	($c,&QWP(16*2,"esp"));
1076 	&vpaddd		($d,$d,&QWP(16*3,"esp"));
1078 	&vmovdqa	(&QWP(16*3,"esp"),$d);
1079 	&jmp		(&label("loop1x"));
# Double round on one block: diagonalize, round, un-diagonalize (round
# helper invocations between the shuffles are elided in this excerpt).
1081 &set_label("loop1x",16);
1083 	&vpshufd ($c,$c,0b01001110);
1084 	&vpshufd ($b,$b,0b00111001);
1085 	&vpshufd ($d,$d,0b10010011);
1088 	&vpshufd ($c,$c,0b01001110);
1089 	&vpshufd ($b,$b,0b10010011);
1090 	&vpshufd ($d,$d,0b00111001);
1093 	&jnz		(&label("loop1x"));
# Feed-forward with the saved state.
1095 	&vpaddd		($a,$a,&QWP(16*0,"esp"));
1096 	&vpaddd		($b,$b,&QWP(16*1,"esp"));
1097 	&vpaddd		($c,$c,&QWP(16*2,"esp"));
1098 	&vpaddd		($d,$d,&QWP(16*3,"esp"));
1101 	&jb		(&label("tail"));
# Whole 64-byte block: xor with input directly from memory and store.
1103 	&vpxor		($a,$a,&QWP(16*0,$inp));	# xor with input
1104 	&vpxor		($b,$b,&QWP(16*1,$inp));
1105 	&vpxor		($c,$c,&QWP(16*2,$inp));
1106 	&vpxor		($d,$d,&QWP(16*3,$inp));
1107 	&lea		($inp,&DWP(16*4,$inp));		# inp+=64
1109 	&vmovdqu	(&QWP(16*0,$out),$a);		# write output
1110 	&vmovdqu	(&QWP(16*1,$out),$b);
1111 	&vmovdqu	(&QWP(16*2,$out),$c);
1112 	&vmovdqu	(&QWP(16*3,$out),$d);
1113 	&lea		($out,&DWP(16*4,$out));		# inp+=64
1116 	&jnz		(&label("outer1x"));
1118 	&jmp		(&label("done"));
# Tail: park keystream on the stack, then byte-wise xor loop.
1121 	&vmovdqa	(&QWP(16*0,"esp"),$a);
1122 	&vmovdqa	(&QWP(16*1,"esp"),$b);
1123 	&vmovdqa	(&QWP(16*2,"esp"),$c);
1124 	&vmovdqa	(&QWP(16*3,"esp"),$d);
1130 &set_label("tail_loop");
1131 	&movb		("al",&BP(0,"esp","ebp"));
1132 	&movb		("dl",&BP(0,$inp,"ebp"));
1133 	&lea		("ebp",&DWP(1,"ebp"));
1135 	&movb		(&BP(-1,$out,"ebp"),"al");
1137 	&jnz		(&label("tail_loop"));
# Restore caller's ebp saved at frame offset 512 and return.
1141 	&mov		("esp",&DWP(512,"esp"));
1142 &function_end("ChaCha20_xop");