3 ######################################################################
4 ## Constant-time SSSE3 AES core implementation.
7 ## By Mike Hamburg (Stanford University), 2009
10 ## For details see http://shiftleft.org/papers/vector_aes/ and
11 ## http://crypto.stanford.edu/vpaes/.
13 ######################################################################
16 # Port vpaes-x86_64.pl as 32-bit "almost" drop-in replacement for
17 # aes-586.pl. "Almost" refers to the fact that AES_cbc_encrypt
18 # doesn't handle partial vectors (doesn't have to if called from
19 # EVP only). "Drop-in" implies that this module doesn't share key
20 # schedule structure with the original nor does it make assumption
21 # about its alignment...
23 # Performance summary. aes-586.pl column lists large-block CBC
24 # encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
25 # byte processed with 128-bit key, and vpaes-x86.pl column - [also
26 # large-block CBC] encrypt/decrypt.
28 # aes-586.pl vpaes-x86.pl
30 # Core 2(**) 28.1/41.4/18.3 21.9/25.2(***)
31 # Nehalem 27.9/40.4/18.1 10.2/11.9
32 # Atom 70.7/92.1/60.1 61.1/81.0(***)
34 # (*) "Hyper-threading" in the context refers rather to cache shared
35 # among multiple cores, than to specifically Intel HTT. As vast
36 # majority of contemporary cores share cache, slower code path
37 # is common place. In other words "with-hyper-threading-off"
38 # results are presented mostly for reference purposes.
40 # (**) "Core 2" refers to initial 65nm design, a.k.a. Conroe.
42 # (***) Less impressive improvement on Core 2 and Atom is due to slow
43 # pshufb, yet it's respectable +28%/64% improvement on Core 2
# and +15% on Atom (as implied, over "hyper-threading-safe"
# code path).
# Locate the directory holding this script and the shared perlasm
# x86 assembler-generation framework, then pull the framework in.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"vpaes-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");

$PREFIX="vpaes";			# prefix for the public entry points

# Register allocation used throughout the module.
my ($round, $base, $magic, $key, $const, $inp, $out)=
("eax", "ebx", "ecx", "edx","ebp", "esi","edi");
&static_label("_vpaes_consts");
&static_label("_vpaes_schedule_low_round");

## Constant tables. At run time $const is made to point 0x30 bytes
## past .L_vpaes_consts, so the $k_* offsets below are relative to
## that biased base (hence the negative offsets for the first rows).
&set_label("_vpaes_consts",64);
$k_inv=-0x30;		# inv, inva
&data_word(0x0D080180,0x0E05060F,0x0A0B0C02,0x04070309);
&data_word(0x0F0B0780,0x01040A06,0x02050809,0x030D0E0C);

$k_s0F=-0x10;		# s0F: low-nibble mask
&data_word(0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F);

$k_ipt=0x00;		# input transform (lo, hi)
&data_word(0x5A2A7000,0xC2B2E898,0x52227808,0xCABAE090);
&data_word(0x317C4D00,0x4C01307D,0xB0FDCC81,0xCD80B1FC);

$k_sb1=0x20;		# sb1u, sb1t
&data_word(0xCB503E00,0xB19BE18F,0x142AF544,0xA5DF7A6E);
&data_word(0xFAE22300,0x3618D415,0x0D2ED9EF,0x3BF7CCC1);
$k_sb2=0x40;		# sb2u, sb2t
&data_word(0x0B712400,0xE27A93C6,0xBC982FCD,0x5EB7E955);
&data_word(0x0AE12900,0x69EB8840,0xAB82234A,0xC2A163C8);
$k_sbo=0x60;		# sbou, sbot
&data_word(0x6FBDC700,0xD0D26D17,0xC502A878,0x15AABF7A);
&data_word(0x5FBB6A00,0xCFE474A5,0x412B35FA,0x8E1E90D1);

$k_mc_forward=0x80;	# mc_forward
&data_word(0x00030201,0x04070605,0x080B0A09,0x0C0F0E0D);
&data_word(0x04070605,0x080B0A09,0x0C0F0E0D,0x00030201);
&data_word(0x080B0A09,0x0C0F0E0D,0x00030201,0x04070605);
&data_word(0x0C0F0E0D,0x00030201,0x04070605,0x080B0A09);

$k_mc_backward=0xc0;	# mc_backward
&data_word(0x02010003,0x06050407,0x0A09080B,0x0E0D0C0F);
&data_word(0x0E0D0C0F,0x02010003,0x06050407,0x0A09080B);
&data_word(0x0A09080B,0x0E0D0C0F,0x02010003,0x06050407);
&data_word(0x06050407,0x0A09080B,0x0E0D0C0F,0x02010003);

$k_sr=0x100;		# sr: shiftrows permutations, one row per round mod 4
&data_word(0x03020100,0x07060504,0x0B0A0908,0x0F0E0D0C);
&data_word(0x0F0A0500,0x030E0904,0x07020D08,0x0B06010C);
&data_word(0x0B020900,0x0F060D04,0x030A0108,0x070E050C);
&data_word(0x070A0D00,0x0B0E0104,0x0F020508,0x0306090C);

$k_rcon=0x140;		# rcon
&data_word(0xAF9DEEB6,0x1F8391B9,0x4D7C7D81,0x702A9808);

$k_s63=0x150;		# s63: all equal to 0x63 transformed
&data_word(0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B);

$k_opt=0x160;		# output transform
&data_word(0xD6B66000,0xFF9F4929,0xDEBE6808,0xF7974121);
&data_word(0x50BCEC00,0x01EDBD51,0xB05C0CE0,0xE10D5DB1);

$k_deskew=0x180;	# deskew tables: inverts the sbox's "skew"
&data_word(0x47A4E300,0x07E4A340,0x5DBEF91A,0x1DFEB95A);
&data_word(0x83EA6900,0x5F36B5DC,0xF49D1E77,0x2841C2AB);

##
## Key schedule constants
##
$k_dksd=0x1a0;		# decryption key schedule: invskew x*D
&data_word(0xA3E44700,0xFEB91A5D,0x5A1DBEF9,0x0740E3A4);
&data_word(0xB5368300,0x41C277F4,0xAB289D1E,0x5FDC69EA);
$k_dksb=0x1c0;		# decryption key schedule: invskew x*B
&data_word(0x8550D500,0x9A4FCA1F,0x1CC94C99,0x03D65386);
&data_word(0xB6FC4A00,0x115BEDA7,0x7E3482C8,0xD993256F);
$k_dkse=0x1e0;		# decryption key schedule: invskew x*E + 0x63
&data_word(0x1FC9D600,0xD5031CCA,0x994F5086,0x53859A4C);
&data_word(0x4FDC7BE8,0xA2319605,0x20B31487,0xCD5EF96A);
$k_dks9=0x200;		# decryption key schedule: invskew x*9
&data_word(0x7ED9A700,0xB6116FC8,0x82255BFC,0x4AED9334);
&data_word(0x27143300,0x45765162,0xE9DAFDCE,0x8BB89FAC);

##
## Round function constants
##
$k_dipt=0x220;		# decryption input transform
&data_word(0x0B545F00,0x0F505B04,0x114E451A,0x154A411E);
&data_word(0x60056500,0x86E383E6,0xF491F194,0x12771772);

$k_dsb9=0x240;		# decryption sbox output *9*u, *9*t
&data_word(0x9A86D600,0x851C0353,0x4F994CC9,0xCAD51F50);
&data_word(0xECD74900,0xC03B1789,0xB2FBA565,0x725E2C9E);
$k_dsbd=0x260;		# decryption sbox output *D*u, *D*t
&data_word(0xE6B1A200,0x7D57CCDF,0x882A4439,0xF56E9B13);
&data_word(0x24C6CB00,0x3CE2FAF7,0x15DEEFD3,0x2931180D);
$k_dsbb=0x280;		# decryption sbox output *B*u, *B*t
&data_word(0x96B44200,0xD0226492,0xB0F2D404,0x602646F6);
&data_word(0xCD596700,0xC19498A6,0x3255AA6B,0xF3FF0C3E);
$k_dsbe=0x2a0;		# decryption sbox output *E*u, *E*t
&data_word(0x26D4D000,0x46F29296,0x64B4F6B0,0x22426004);
&data_word(0xFFAAC100,0x0C55A6CD,0x98593E32,0x9467F36B);
$k_dsbo=0x2c0;		# decryption sbox final output
&data_word(0x7EF94000,0x1387EA53,0xD4943E2D,0xC7AA6DB9);
&data_word(0x93441D00,0x12D7560F,0xD8C58E9C,0xCA4B8159);
&asciz ("Vector Permutation AES for x86/SSSE3, Mike Hamburg (Stanford University)");
&align (64);
##
## _vpaes_preheat
##
## Caller loads $const with the displacement _vpaes_consts+0x30-pic_point
## and calls us; [esp] then holds the address of pic_point, so adding it
## turns $const into the absolute (biased) table base. Preloads the
## nibble mask into %xmm6 and the "inv" table into %xmm7 for the cores.
##
&function_begin_B("_vpaes_preheat");
&add ($const,&DWP(0,"esp")); # PIC: displacement + return address = table base
&movdqa ("xmm7",&QWP($k_inv,$const));
&movdqa ("xmm6",&QWP($k_s0F,$const));
&ret ();
&function_end_B("_vpaes_preheat");
169 ## AES-encrypt %xmm0.
173 ## %xmm6-%xmm7 as in _vpaes_preheat
174 ## (%edx) = scheduled keys
177 ## Clobbers %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
&function_begin_B("_vpaes_encrypt_core");
&mov ($magic,16); # mc_forward/mc_backward row selector (16*round mod 4)
&mov ($round,&DWP(240,$key)); # number of rounds
&movdqa ("xmm1","xmm6"); # was missing its semicolon; parsed as bitwise-& by accident
&movdqa ("xmm2",&QWP($k_ipt,$const));
&pandn ("xmm1","xmm0"); # 1 = high nibbles (still shifted)
&pand ("xmm0","xmm6"); # 0 = low nibbles
&movdqu ("xmm5",&QWP(0,$key)); # round 0 key
&pshufb ("xmm2","xmm0");
&movdqa ("xmm0",&QWP($k_ipt+16,$const));
&pxor ("xmm2","xmm5");
&psrld ("xmm1",4); # move high nibbles into low positions
&add ($key,16); # next round key
&pshufb ("xmm0","xmm1");
&lea ($base,&DWP($k_mc_backward,$const));
&pxor ("xmm0","xmm2");
&jmp (&label("enc_entry"));

&set_label("enc_loop",16);
# middle of middle round
&movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sb1u
&movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t
&pshufb ("xmm4","xmm2"); # 4 = sb1u
&pshufb ("xmm0","xmm3"); # 0 = sb1t
&pxor ("xmm4","xmm5"); # 4 = sb1u + k
&movdqa ("xmm5",&QWP($k_sb2,$const)); # 4 : sb2u
&pxor ("xmm0","xmm4"); # 0 = A
&movdqa ("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
&pshufb ("xmm5","xmm2"); # 4 = sb2u
&movdqa ("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2t
&movdqa ("xmm4",&QWP(0,$base,$magic)); # .Lk_mc_backward[]
&pshufb ("xmm2","xmm3"); # 2 = sb2t
&movdqa ("xmm3","xmm0"); # 3 = A
&pxor ("xmm2","xmm5"); # 2 = 2A
&pshufb ("xmm0","xmm1"); # 0 = B
&add ($key,16); # next key
&pxor ("xmm0","xmm2"); # 0 = 2A+B
&pshufb ("xmm3","xmm4"); # 3 = D
&add ($magic,16); # next mc
&pxor ("xmm3","xmm0"); # 3 = 2A+B+D
&pshufb ("xmm0","xmm1"); # 0 = 2B+C
&and ($magic,0x30); # ... mod 4
&sub ($round,1); # nr--
&pxor ("xmm0","xmm3"); # 0 = 2A+3B+C+D

&set_label("enc_entry");
# top of round
&movdqa ("xmm1","xmm6"); # 1 : i
&movdqa ("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
&pandn ("xmm1","xmm0"); # 1 = i<<4
&psrld ("xmm1",4); # 1 = i
&pand ("xmm0","xmm6"); # 0 = k
&pshufb ("xmm5","xmm0"); # 2 = a/k
&movdqa ("xmm3","xmm7"); # 3 : 1/i
&pxor ("xmm0","xmm1"); # 0 = j
&pshufb ("xmm3","xmm1"); # 3 = 1/i
&movdqa ("xmm4","xmm7"); # 4 : 1/j
&pxor ("xmm3","xmm5"); # 3 = iak = 1/i + a/k
&pshufb ("xmm4","xmm0"); # 4 = 1/j
&movdqa ("xmm2","xmm7"); # 2 : 1/iak
&pxor ("xmm4","xmm5"); # 4 = jak = 1/j + a/k
&pshufb ("xmm2","xmm3"); # 2 = 1/iak
&movdqa ("xmm3","xmm7"); # 3 : 1/jak
&pxor ("xmm2","xmm0"); # 2 = io
&pshufb ("xmm3","xmm4"); # 3 = 1/jak
&movdqu ("xmm5",&QWP(0,$key));
&pxor ("xmm3","xmm1"); # 3 = jo
&jnz (&label("enc_loop")); # flags from "sub $round,1" above

# middle of last round
&movdqa ("xmm4",&QWP($k_sbo,$const)); # 3 : sbou .Lk_sbo
&movdqa ("xmm0",&QWP($k_sbo+16,$const));# 3 : sbot .Lk_sbo+16
&pshufb ("xmm4","xmm2"); # 4 = sbou
&pxor ("xmm4","xmm5"); # 4 = sb1u + k
&pshufb ("xmm0","xmm3"); # 0 = sb1t
&movdqa ("xmm1",&QWP(0x40,$base,$magic));# .Lk_sr[]
&pxor ("xmm0","xmm4"); # 0 = A
&pshufb ("xmm0","xmm1"); # final shiftrows permute
&ret ();
&function_end_B("_vpaes_encrypt_core");
##
## Decryption core
##
## Same API as encryption core.
##
&function_begin_B("_vpaes_decrypt_core");
&lea ($base,&DWP($k_dsbd,$const));
&mov ($round,&DWP(240,$key)); # number of rounds
&movdqa ("xmm1","xmm6");
&movdqa ("xmm2",&QWP($k_dipt-$k_dsbd,$base));
&pandn ("xmm1","xmm0");
&mov ($magic,$round);
&psrld ("xmm1",4); # 1 = high nibbles
&movdqu ("xmm5",&QWP(0,$key)); # round 0 key
&shl ($magic,4);
&pand ("xmm0","xmm6"); # 0 = low nibbles
&pshufb ("xmm2","xmm0");
&movdqa ("xmm0",&QWP($k_dipt-$k_dsbd+16,$base));
&xor ($magic,0x30);
&pshufb ("xmm0","xmm1");
&and ($magic,0x30); # ((nr<<4)^0x30)&0x30 = starting .Lk_sr row
&pxor ("xmm2","xmm5");
&movdqa ("xmm5",&QWP($k_mc_forward+48,$const));
&pxor ("xmm0","xmm2");
&add ($key,16); # next round key
&lea ($magic,&DWP($k_sr-$k_dsbd,$base,$magic));
&jmp (&label("dec_entry"));

&set_label("dec_loop",16);
##
## Inverse mix columns
##
&movdqa ("xmm4",&QWP(-0x20,$base)); # 4 : sb9u
&movdqa ("xmm1",&QWP(-0x10,$base)); # 0 : sb9t
&pshufb ("xmm4","xmm2"); # 4 = sb9u
&pshufb ("xmm1","xmm3"); # 0 = sb9t
&pxor ("xmm4","xmm0"); # xor in round key (loaded at dec_entry)
&add ($key,16); # next round key
&pxor ("xmm1","xmm4"); # 0 = ch

&movdqa ("xmm4",&QWP(0,$base)); # 4 : sbdu
&pshufb ("xmm1","xmm5"); # MC ch
&pshufb ("xmm4","xmm2"); # 4 = sbdu
&movdqa ("xmm0",&QWP(0x10,$base)); # 0 : sbdt
&pxor ("xmm4","xmm1"); # 4 = ch
&pshufb ("xmm0","xmm3"); # 0 = sbdt
&sub ($round,1); # nr--
&pxor ("xmm0","xmm4"); # 0 = ch

&movdqa ("xmm4",&QWP(0x20,$base)); # 4 : sbbu
&pshufb ("xmm0","xmm5"); # MC ch
&movdqa ("xmm1",&QWP(0x30,$base)); # 0 : sbbt
&pshufb ("xmm4","xmm2"); # 4 = sbbu
&pshufb ("xmm1","xmm3"); # 0 = sbbt
&pxor ("xmm4","xmm0"); # 4 = ch
&pxor ("xmm1","xmm4"); # 0 = ch

&movdqa ("xmm4",&QWP(0x40,$base)); # 4 : sbeu
&pshufb ("xmm1","xmm5"); # MC ch
&movdqa ("xmm0",&QWP(0x50,$base)); # 0 : sbet
&pshufb ("xmm4","xmm2"); # 4 = sbeu
&pshufb ("xmm0","xmm3"); # 0 = sbet
&palignr("xmm5","xmm5",12); # rotate MC permutation for next round
&pxor ("xmm4","xmm1"); # 4 = ch
&pxor ("xmm0","xmm4"); # 0 = ch

&set_label("dec_entry");
# top of round
&movdqa ("xmm1","xmm6"); # 1 : i
&pandn ("xmm1","xmm0"); # 1 = i<<4
&movdqa ("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
&psrld ("xmm1",4); # 1 = i
&pand ("xmm0","xmm6"); # 0 = k
&pshufb ("xmm2","xmm0"); # 2 = a/k
&movdqa ("xmm3","xmm7"); # 3 : 1/i
&pxor ("xmm0","xmm1"); # 0 = j
&pshufb ("xmm3","xmm1"); # 3 = 1/i
&movdqa ("xmm4","xmm7"); # 4 : 1/j
&pxor ("xmm3","xmm2"); # 3 = iak = 1/i + a/k
&pshufb ("xmm4","xmm0"); # 4 = 1/j
&pxor ("xmm4","xmm2"); # 4 = jak = 1/j + a/k
&movdqa ("xmm2","xmm7"); # 2 : 1/iak
&pshufb ("xmm2","xmm3"); # 2 = 1/iak
&movdqa ("xmm3","xmm7"); # 3 : 1/jak
&pxor ("xmm2","xmm0"); # 2 = io
&pshufb ("xmm3","xmm4"); # 3 = 1/jak
&movdqu ("xmm0",&QWP(0,$key)); # next round key
&pxor ("xmm3","xmm1"); # 3 = jo
&jnz (&label("dec_loop")); # flags from "sub $round,1" above

# middle of last round
&movdqa ("xmm4",&QWP(0x60,$base)); # 3 : sbou
&pshufb ("xmm4","xmm2"); # 4 = sbou
&pxor ("xmm4","xmm0"); # 4 = sb1u + k
&movdqa ("xmm0",&QWP(0x70,$base)); # 0 : sbot
&movdqa ("xmm2",&QWP(0,$magic)); # .Lk_sr row for final permute
&pshufb ("xmm0","xmm3"); # 0 = sb1t
&pxor ("xmm0","xmm4"); # 0 = A
&pshufb ("xmm0","xmm2");
&ret ();
&function_end_B("_vpaes_decrypt_core");
364 ########################################################
366 ## AES key schedule ##
368 ########################################################
369 &function_begin_B("_vpaes_schedule_core");
370 &add ($const,&DWP(0,"esp"));
371 &movdqu ("xmm0",&QWP(0,$inp)); # load key (unaligned)
372 &movdqa ("xmm2",&QWP($k_rcon,$const)); # load rcon
375 &movdqa ("xmm3","xmm0");
376 &lea ($base,&DWP($k_ipt,$const));
377 &movdqa (&QWP(4,"esp"),"xmm2"); # xmm8
378 &call ("_vpaes_schedule_transform");
379 &movdqa ("xmm7","xmm0");
382 &jnz (&label("schedule_am_decrypting"));
384 # encrypting, output zeroth round key after transform
385 &movdqu (&QWP(0,$key),"xmm0");
386 &jmp (&label("schedule_go"));
388 &set_label("schedule_am_decrypting");
389 # decrypting, output zeroth round key after shiftrows
390 &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
391 &pshufb ("xmm3","xmm1");
392 &movdqu (&QWP(0,$key),"xmm3");
395 &set_label("schedule_go");
397 &ja (&label("schedule_256"));
398 &je (&label("schedule_192"));
404 ## 128-bit specific part of key schedule.
406 ## This schedule is really simple, because all its parts
407 ## are accomplished by the subroutines.
409 &set_label("schedule_128");
412 &set_label("loop_schedule_128");
413 &call ("_vpaes_schedule_round");
415 &jz (&label("schedule_mangle_last"));
416 &call ("_vpaes_schedule_mangle"); # write output
417 &jmp (&label("loop_schedule_128"));
422 ## 192-bit specific part of key schedule.
424 ## The main body of this schedule is the same as the 128-bit
425 ## schedule, but with more smearing. The long, high side is
426 ## stored in %xmm7 as before, and the short, low side is in
427 ## the high bits of %xmm6.
429 ## This schedule is somewhat nastier, however, because each
430 ## round produces 192 bits of key material, or 1.5 round keys.
431 ## Therefore, on each cycle we do 2 rounds and produce 3 round
434 &set_label("schedule_192",16);
435 &movdqu ("xmm0",&QWP(8,$inp)); # load key part 2 (very unaligned)
436 &call ("_vpaes_schedule_transform"); # input transform
437 &movdqa ("xmm6","xmm0"); # save short part
438 &pxor ("xmm4","xmm4"); # clear 4
439 &movhlps("xmm6","xmm4"); # clobber low side with zeros
442 &set_label("loop_schedule_192");
443 &call ("_vpaes_schedule_round");
444 &palignr("xmm0","xmm6",8);
445 &call ("_vpaes_schedule_mangle"); # save key n
446 &call ("_vpaes_schedule_192_smear");
447 &call ("_vpaes_schedule_mangle"); # save key n+1
448 &call ("_vpaes_schedule_round");
450 &jz (&label("schedule_mangle_last"));
451 &call ("_vpaes_schedule_mangle"); # save key n+2
452 &call ("_vpaes_schedule_192_smear");
453 &jmp (&label("loop_schedule_192"));
458 ## 256-bit specific part of key schedule.
460 ## The structure here is very similar to the 128-bit
461 ## schedule, but with an additional "low side" in
462 ## %xmm6. The low side's rounds are the same as the
463 ## high side's, except no rcon and no rotation.
465 &set_label("schedule_256",16);
466 &movdqu ("xmm0",&QWP(16,$inp)); # load key part 2 (unaligned)
467 &call ("_vpaes_schedule_transform"); # input transform
470 &set_label("loop_schedule_256");
471 &call ("_vpaes_schedule_mangle"); # output low result
472 &movdqa ("xmm6","xmm0"); # save cur_lo in xmm6
475 &call ("_vpaes_schedule_round");
477 &jz (&label("schedule_mangle_last"));
478 &call ("_vpaes_schedule_mangle");
480 # low round. swap xmm7 and xmm6
481 &pshufd ("xmm0","xmm0",0xFF);
482 &movdqa (&QWP(20,"esp"),"xmm7");
483 &movdqa ("xmm7","xmm6");
484 &call ("_vpaes_schedule_low_round");
485 &movdqa ("xmm7",&QWP(20,"esp"));
487 &jmp (&label("loop_schedule_256"));
490 ## .aes_schedule_mangle_last
492 ## Mangler for last round of key schedule
494 ## when encrypting, outputs out(%xmm0) ^ 63
495 ## when decrypting, outputs unskew(%xmm0)
497 ## Always called right before return... jumps to cleanup and exits
499 &set_label("schedule_mangle_last",16);
500 # schedule last round key from xmm0
501 &lea ($base,&DWP($k_deskew,$const));
503 &jnz (&label("schedule_mangle_last_dec"));
506 &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
507 &pshufb ("xmm0","xmm1"); # output permute
508 &lea ($base,&DWP($k_opt,$const)); # prepare to output transform
511 &set_label("schedule_mangle_last_dec");
513 &pxor ("xmm0",&QWP($k_s63,$const));
514 &call ("_vpaes_schedule_transform"); # output transform
515 &movdqu (&QWP(0,$key),"xmm0"); # save last key
518 &pxor ("xmm0","xmm0");
519 &pxor ("xmm1","xmm1");
520 &pxor ("xmm2","xmm2");
521 &pxor ("xmm3","xmm3");
522 &pxor ("xmm4","xmm4");
523 &pxor ("xmm5","xmm5");
524 &pxor ("xmm6","xmm6");
525 &pxor ("xmm7","xmm7");
527 &function_end_B("_vpaes_schedule_core");
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
&function_begin_B("_vpaes_schedule_192_smear");
&pshufd ("xmm1","xmm6",0x80); # d c 0 0 -> c 0 0 0
&pshufd ("xmm0","xmm7",0xFE); # b a _ _ -> b b b a
&pxor ("xmm6","xmm1"); # -> c+d c 0 0
&pxor ("xmm1","xmm1");
&pxor ("xmm6","xmm0"); # -> b+c+d b+c b a
&movdqa ("xmm0","xmm6");
&movhlps("xmm6","xmm1"); # clobber low side with zeros
&ret ();
&function_end_B("_vpaes_schedule_192_smear");
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8 (kept in a stack slot on x86),
## then rotates %xmm8 for next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm5.
##
&function_begin_B("_vpaes_schedule_round");
# extract rcon from xmm8 slot (at 8(%esp) here: our call pushed 4 bytes)
&movdqa ("xmm2",&QWP(8,"esp")); # xmm8
&pxor ("xmm1","xmm1");
&palignr("xmm1","xmm2",15);
&palignr("xmm2","xmm2",15);
&pxor ("xmm7","xmm1");

# rotate
&pshufd ("xmm0","xmm0",0xFF);
&palignr("xmm0","xmm0",1);

# fall through...
&movdqa (&QWP(8,"esp"),"xmm2"); # xmm8

# low round: same as high round, but no rotation and no rcon.
&set_label("_vpaes_schedule_low_round");
# smear xmm7
&movdqa ("xmm1","xmm7");
&pslldq ("xmm7",4);
&pxor ("xmm7","xmm1");
&movdqa ("xmm1","xmm7");
&pslldq ("xmm7",8);
&pxor ("xmm7","xmm1");
&pxor ("xmm7",&QWP($k_s63,$const));

# subbytes
&movdqa ("xmm4",&QWP($k_s0F,$const));
&movdqa ("xmm5",&QWP($k_inv,$const)); # 4 : 1/j
&movdqa ("xmm1","xmm4");
&pandn ("xmm1","xmm0");
&psrld ("xmm1",4); # 1 = i
&pand ("xmm0","xmm4"); # 0 = k
&movdqa ("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
&pshufb ("xmm2","xmm0"); # 2 = a/k
&pxor ("xmm0","xmm1"); # 0 = j
&movdqa ("xmm3","xmm5"); # 3 : 1/i
&pshufb ("xmm3","xmm1"); # 3 = 1/i
&pxor ("xmm3","xmm2"); # 3 = iak = 1/i + a/k
&movdqa ("xmm4","xmm5"); # 4 : 1/j
&pshufb ("xmm4","xmm0"); # 4 = 1/j
&pxor ("xmm4","xmm2"); # 4 = jak = 1/j + a/k
&movdqa ("xmm2","xmm5"); # 2 : 1/iak
&pshufb ("xmm2","xmm3"); # 2 = 1/iak
&pxor ("xmm2","xmm0"); # 2 = io
&movdqa ("xmm3","xmm5"); # 3 : 1/jak
&pshufb ("xmm3","xmm4"); # 3 = 1/jak
&pxor ("xmm3","xmm1"); # 3 = jo
&movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sbou
&pshufb ("xmm4","xmm2"); # 4 = sbou
&movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sbot
&pshufb ("xmm0","xmm3"); # 0 = sb1t
&pxor ("xmm0","xmm4"); # 0 = sbox output

# add in smeared stuff
&pxor ("xmm0","xmm7");
&movdqa ("xmm7","xmm0");
&ret ();
&function_end_B("_vpaes_schedule_round");
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%ebx)
##
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
&function_begin_B("_vpaes_schedule_transform");
&movdqa ("xmm2",&QWP($k_s0F,$const));
&movdqa ("xmm1","xmm2");
&pandn ("xmm1","xmm0");
&psrld ("xmm1",4); # 1 = high nibbles
&pand ("xmm0","xmm2"); # 0 = low nibbles
&movdqa ("xmm2",&QWP(0,$base)); # lo table
&pshufb ("xmm2","xmm0");
&movdqa ("xmm0",&QWP(16,$base)); # hi table
&pshufb ("xmm0","xmm1");
&pxor ("xmm0","xmm2");
&ret ();
&function_end_B("_vpaes_schedule_transform");
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## multiply by "inverse mixcolumns" circulant E,B,D,9
## apply shiftrows transform
##
## Writes out to (%edx), and increments or decrements it
## Keeps track of round number mod 4 in %ecx
## Clobbers xmm1-xmm5
##
&function_begin_B("_vpaes_schedule_mangle");
&movdqa ("xmm4","xmm0"); # save xmm0 for later
&movdqa ("xmm5",&QWP($k_mc_forward,$const));
&test ($out,$out); # $out!=0 means decrypt schedule
&jnz (&label("schedule_mangle_dec"));

# encrypting
&add ($key,16);
&pxor ("xmm4",&QWP($k_s63,$const));
&pshufb ("xmm4","xmm5");
&movdqa ("xmm3","xmm4");
&pshufb ("xmm4","xmm5");
&pxor ("xmm3","xmm4");
&pshufb ("xmm4","xmm5");
&pxor ("xmm3","xmm4");

&jmp (&label("schedule_mangle_both"));

&set_label("schedule_mangle_dec",16);
# inverse mix columns
&movdqa ("xmm2",&QWP($k_s0F,$const));
&lea ($inp,&DWP($k_dksd,$const));
&movdqa ("xmm1","xmm2");
&pandn ("xmm1","xmm4");
&psrld ("xmm1",4); # 1 = hi
&pand ("xmm4","xmm2"); # 4 = lo

&movdqa ("xmm2",&QWP(0,$inp));
&pshufb ("xmm2","xmm4");
&movdqa ("xmm3",&QWP(0x10,$inp));
&pshufb ("xmm3","xmm1");
&pxor ("xmm3","xmm2");
&pshufb ("xmm3","xmm5");

&movdqa ("xmm2",&QWP(0x20,$inp));
&pshufb ("xmm2","xmm4");
&pxor ("xmm2","xmm3");
&movdqa ("xmm3",&QWP(0x30,$inp));
&pshufb ("xmm3","xmm1");
&pxor ("xmm3","xmm2");
&pshufb ("xmm3","xmm5");

&movdqa ("xmm2",&QWP(0x40,$inp));
&pshufb ("xmm2","xmm4");
&pxor ("xmm2","xmm3");
&movdqa ("xmm3",&QWP(0x50,$inp));
&pshufb ("xmm3","xmm1");
&pxor ("xmm3","xmm2");
&pshufb ("xmm3","xmm5");

&movdqa ("xmm2",&QWP(0x60,$inp));
&pshufb ("xmm2","xmm4");
&pxor ("xmm2","xmm3");
&movdqa ("xmm3",&QWP(0x70,$inp));
&pshufb ("xmm3","xmm1");
&pxor ("xmm3","xmm2");

&add ($key,-16); # decrypt schedule is written backwards

&set_label("schedule_mangle_both");
&movdqa ("xmm1",&QWP($k_sr,$const,$magic));
&pshufb ("xmm3","xmm1");
&add ($magic,-16); # track round number mod 4 (scaled by 16)
&and ($magic,0x30);
&movdqu (&QWP(0,$key),"xmm3");
&ret ();
&function_end_B("_vpaes_schedule_mangle");
#
# Interface to OpenSSL
#
&function_begin("${PREFIX}_set_encrypt_key");
&mov ($inp,&wparam(0)); # inp
&lea ($base,&DWP(-56,"esp"));
&mov ($round,&wparam(1)); # bits
&and ($base,-16); # 16-byte align the scratch area for movdqa
&mov ($key,&wparam(2)); # key
&xchg ($base,"esp"); # alloca
&mov (&DWP(48,"esp"),$base); # save caller's esp

&mov ($base,$round);
&shr ($base,5);
&add ($base,5);
&mov (&DWP(240,$key),$base); # AES_KEY->rounds = nbits/32+5;
&mov ($magic,0x30);
&mov ($out,0); # 0 = scheduling for encryption

&lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
&call ("_vpaes_schedule_core");
&set_label("pic_point");

&mov ("esp",&DWP(48,"esp")); # restore caller's esp
&xor ("eax","eax"); # return 0 (success)
&function_end("${PREFIX}_set_encrypt_key");
&function_begin("${PREFIX}_set_decrypt_key");
&mov ($inp,&wparam(0)); # inp
&lea ($base,&DWP(-56,"esp"));
&mov ($round,&wparam(1)); # bits
&and ($base,-16); # 16-byte align the scratch area for movdqa
&mov ($key,&wparam(2)); # key
&xchg ($base,"esp"); # alloca
&mov (&DWP(48,"esp"),$base); # save caller's esp

&mov ($base,$round);
&shr ($base,5);
&add ($base,5);
&mov (&DWP(240,$key),$base); # AES_KEY->rounds = nbits/32+5;
&shl ($base,4);
&lea ($key,&DWP(16,$key,$base)); # schedule is written backwards from the end

&mov ($out,1); # 1 = scheduling for decryption
&mov ($magic,$round);
&shr ($magic,1);
&and ($magic,32);
&xor ($magic,32); # nbits==192?0:32;

&lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
&call ("_vpaes_schedule_core");
&set_label("pic_point");

&mov ("esp",&DWP(48,"esp")); # restore caller's esp
&xor ("eax","eax"); # return 0 (success)
&function_end("${PREFIX}_set_decrypt_key");
&function_begin("${PREFIX}_encrypt");
&lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
&call ("_vpaes_preheat");
&set_label("pic_point");
&mov ($inp,&wparam(0)); # inp
&lea ($base,&DWP(-56,"esp"));
&mov ($out,&wparam(1)); # out
&and ($base,-16); # 16-byte align the scratch area
&mov ($key,&wparam(2)); # key
&xchg ($base,"esp"); # alloca
&mov (&DWP(48,"esp"),$base); # save caller's esp

&movdqu ("xmm0",&QWP(0,$inp));
&call ("_vpaes_encrypt_core");
&movdqu (&QWP(0,$out),"xmm0");

&mov ("esp",&DWP(48,"esp")); # restore caller's esp
&function_end("${PREFIX}_encrypt");
&function_begin("${PREFIX}_decrypt");
&lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
&call ("_vpaes_preheat");
&set_label("pic_point");
&mov ($inp,&wparam(0)); # inp
&lea ($base,&DWP(-56,"esp"));
&mov ($out,&wparam(1)); # out
&and ($base,-16); # 16-byte align the scratch area
&mov ($key,&wparam(2)); # key
&xchg ($base,"esp"); # alloca
&mov (&DWP(48,"esp"),$base); # save caller's esp

&movdqu ("xmm0",&QWP(0,$inp));
&call ("_vpaes_decrypt_core");
&movdqu (&QWP(0,$out),"xmm0");

&mov ("esp",&DWP(48,"esp")); # restore caller's esp
&function_end("${PREFIX}_decrypt");
&function_begin("${PREFIX}_cbc_encrypt");
&mov ($inp,&wparam(0)); # inp
&mov ($out,&wparam(1)); # out
&mov ($round,&wparam(2)); # len
&mov ($key,&wparam(3)); # key
&sub ($round,16); # less than one full block?
&jc (&label("cbc_abort"));
&lea ($base,&DWP(-56,"esp"));
&mov ($const,&wparam(4)); # ivp
&and ($base,-16); # 16-byte align the scratch area
&mov ($magic,&wparam(5)); # enc
&xchg ($base,"esp"); # alloca
&movdqu ("xmm1",&QWP(0,$const)); # load IV
&sub ($out,$inp); # output addressed as (out-inp)+inp below
&mov (&DWP(48,"esp"),$base); # save caller's esp

&mov (&DWP(0,"esp"),$out); # save out
&mov (&DWP(4,"esp"),$key); # save key
&mov (&DWP(8,"esp"),$const); # save ivp
&mov ($out,$round); # $out works as $len

&lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
&call ("_vpaes_preheat");
&set_label("pic_point");
&cmp ($magic,0); # enc==0 means decrypt
&je (&label("cbc_dec_loop"));
&jmp (&label("cbc_enc_loop"));

&set_label("cbc_enc_loop",16);
&movdqu ("xmm0",&QWP(0,$inp)); # load input
&pxor ("xmm0","xmm1"); # inp^=iv
&call ("_vpaes_encrypt_core");
&mov ($base,&DWP(0,"esp")); # restore out
&mov ($key,&DWP(4,"esp")); # restore key
&movdqa ("xmm1","xmm0"); # ciphertext becomes next IV
&movdqu (&QWP(0,$base,$inp),"xmm0"); # write output
&lea ($inp,&DWP(16,$inp));
&sub ($out,16); # len -= 16
&jnc (&label("cbc_enc_loop"));
&jmp (&label("cbc_done"));

&set_label("cbc_dec_loop",16);
&movdqu ("xmm0",&QWP(0,$inp)); # load input
&movdqa (&QWP(16,"esp"),"xmm1"); # save IV
&movdqa (&QWP(32,"esp"),"xmm0"); # save future IV
&call ("_vpaes_decrypt_core");
&mov ($base,&DWP(0,"esp")); # restore out
&mov ($key,&DWP(4,"esp")); # restore key
&pxor ("xmm0",&QWP(16,"esp")); # out^=iv
&movdqa ("xmm1",&QWP(32,"esp")); # load next IV
&movdqu (&QWP(0,$base,$inp),"xmm0"); # write output
&lea ($inp,&DWP(16,$inp));
&sub ($out,16); # len -= 16
&jnc (&label("cbc_dec_loop"));

&set_label("cbc_done");
&mov ($base,&DWP(8,"esp")); # restore ivp
&mov ("esp",&DWP(48,"esp")); # restore caller's esp
&movdqu (&QWP(0,$base),"xmm1"); # write IV
&set_label("cbc_abort");
&function_end("${PREFIX}_cbc_encrypt");