# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't account for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from the config
# line results in ~40% improvement (yes, even for the C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* a config
# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
# I simply use 'inc %r8b'. Even though the optimization manual
# discourages operating on partial registers, it turned out to be the
# best bet. At least for AMD... How IA32E would perform remains to be
# seen...
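#
# To illustrate (a sketch of the idea only, not generated code): the
# stream index has to wrap modulo 256, which the straightforward form
# spells out explicitly,
#
#	inc	%r8
#	and	$255,%r8
#
# whereas incrementing only the byte-wide view of the register wraps
# at 256 for free:
#
#	inc	%r8b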
# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# The latter means that if you want to *estimate* what to expect from
# *your* Opteron, multiply 54 by 3.3 and by the clock frequency in GHz.
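#
# For instance, a hypothetical 2.2GHz Opteron would be expected to
# deliver roughly 54*3.3*2.2 ~= 392MBps; a back-of-the-envelope
# estimate, not a measurement.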
# The Intel P4 EM64T core was found to run the AMD64 code really
# slowly... The only way to achieve comparable performance on P4 was
# to keep RC4_CHAR. Kind of ironic, huh? As it's apparently impossible
# to compose blended code that would perform within even a 30% margin
# on both AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details...
# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec.
# Replacing those with add/sub results in a 50% performance
# improvement of the folded loop.
# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
# performance by >30% [unlike the P4 32-bit case, that is]. But this
# is provided that loads are reordered even more aggressively! Both
# code paths, AMD64 and EM64T, reorder loads in essentially the same
# manner as my IA-64 implementation. On Opteron this resulted in a
# modest 5% improvement [I had to test it], while final Intel P4
# performance now achieves a respectable 432MBps on a 2.8GHz
# processor, for reference. If executed on Xeon, the current RC4_CHAR
# code-path is 2.7x faster than the RC4_INT code-path, while on
# Opteron it's only 25% slower than the RC4_INT one [meaning that if
# CPU micro-arch detection is not implemented, then this final
# RC4_CHAR code-path should be preferred, as it provides better
# *all-round* performance].
# Intel Core2 was observed to perform poorly on both code paths:-( It
# apparently suffers from some kind of partial register stall, which
# occurs in 64-bit mode only [as a virtually identical 32-bit loop was
# observed to outperform the 64-bit one by almost 50%]. Adding two
# movzb instructions to cloop1 boosts its performance by 80%
# [presumably because movzb writes the full register and thereby
# breaks the partial-register dependency]! This loop appears to be an
# optimal fit for Core2, and the code was therefore modified to skip
# cloop8 on this CPU.
open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";	# pipe generated code through the perlasm translator
.type	RC4,\@function,4
movl	-8($dat),$XX[0]#d	# load x from the key structure
movl	($dat,$XX[0],4),$TX[0]#d	# tx=S[x]
for ($i=0;$i<8;$i++) {
movl	($dat,$YY,4),$TY#d	# ty=S[y]
ror	\$8,%rax		# ror is redundant when $i=0
movl	($dat,$XX[1],4),$TX[1]#d	# prefetch next tx=S[x+1]
movl	$TX[0]#d,($dat,$YY,4)	# S[y]=tx
movl	$TY#d,($dat,$XX[0],4)	# S[x]=ty
movb	($dat,$TY,4),%al	# keystream byte S[(tx+ty)&255]
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers so the next round uses the alternate set
movl	$XX[0]#d,-8($dat)	# save x back into the key structure
movl	($dat,$YY,4),$TY#d	# ty=S[y]
movl	$TX[0]#d,($dat,$YY,4)	# S[y]=tx
movl	$TY#d,($dat,$XX[0],4)	# S[x]=ty
movl	($dat,$TX[0],4),$TY#d	# keystream byte S[(tx+ty)&255]
movl	($dat,$XX[0],4),$TX[0]#d	# next tx=S[x]
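#
# For orientation, the loop bodies above implement the textbook RC4
# PRGA step; a rough reference sketch in C-like pseudocode (not
# generated code), with S denoting the 256-entry state array:
#
#	x = (x+1) & 255;
#	y = (y+S[x]) & 255;
#	swap(S[x], S[y]);
#	out = S[(S[x]+S[y]) & 255];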
movzb	($dat,$XX[0]),$TX[0]#d	# RC4_CHAR path: tx=S[x]
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
for ($i=0;$i<4;$i++) {
movzb	($dat,$YY),$TY#d	# ty=S[y]
movzb	$XX[1]#b,$XX[1]#d	# zero-extend next x
movzb	($dat,$XX[1]),$TX[1]#d	# prefetch next tx
movb	$TX[0]#b,($dat,$YY)	# S[y]=tx
movb	$TY#b,($dat,$XX[0])	# S[x]=ty
jne	.Lcmov$i		# Intel cmov is sloooow...
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
for ($i=4;$i<8;$i++) {
movzb	($dat,$YY),$TY#d
movzb	$XX[1]#b,$XX[1]#d
movzb	($dat,$XX[1]),$TX[1]#d
movb	$TX[0]#b,($dat,$YY)
movb	$TY#b,($dat,$XX[0])
jne	.Lcmov$i		# Intel cmov is sloooow...
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
movzb	($dat,$YY),$TY#d	# ty=S[y]
movb	$TX[0]#b,($dat,$YY)	# S[y]=tx
movb	$TY#b,($dat,$XX[0])	# S[x]=ty
movzb	$XX[0]#b,$XX[0]#d	# zero-extend x (cf. the Core2 note above)
movzb	($dat,$TY),$TY#d	# keystream byte S[(tx+ty)&255]
movzb	($dat,$XX[0]),$TX[0]#d	# next tx=S[x]
.extern	OPENSSL_ia32cap_P
.type	RC4_set_key,\@function,3
mov	OPENSSL_ia32cap_P(%rip),$idx#d	# CPU capability bits
mov	%eax,($dat,%rax,4)	# S[i]=i
mov	($dat,$ido,4),%r10d	# S[i]
add	($inp,$len,1),$idx#b	# add key byte to j
mov	($dat,$idx,4),%r11d	# S[j]
mov	%r10d,($dat,$idx,4)	# S[j]=S[i]
mov	%r11d,($dat,$ido,4)	# S[i]=S[j]
mov	($dat,$ido),%r10b	# byte-array (RC4_CHAR) variant of the loop above
add	($inp,$len),$idx#b
mov	($dat,$idx),%r11b
mov	%r10b,($dat,$idx)
mov	%r11b,($dat,$ido)
.size	RC4_set_key,.-RC4_set_key
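#
# For orientation, RC4_set_key above performs the textbook RC4 key
# schedule; a rough reference sketch in C-like pseudocode (not
# generated code), with S the state array and key/keylen the caller's
# arguments:
#
#	for (i=0; i<256; i++) S[i] = i;
#	for (i=j=0; i<256; i++) {
#		j = (j + S[i] + key[i % keylen]) & 255;
#		swap(S[i], S[j]);
#	}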
.type	RC4_options,\@function,0
lea	.Lopts-.(%rax),%rax
mov	OPENSSL_ia32cap_P(%rip),%edx	# capability bits select which option string is returned
.asciz	"rc4(8x,char)"
.asciz	"rc4(1x,char)"
.asciz	"RC4 for x86_64, OpenSSL project"
.size	RC4_options,.-RC4_options
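#
# For reference, a minimal C caller (illustrative only, not part of
# this file) would simply print the selected option string:
#
#	#include <stdio.h>
#	#include <openssl/rc4.h>
#	int main(void) { puts(RC4_options()); return 0; }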
$code =~ s/#([bwd])/$1/gm;	# e.g. turn "%r8#d" into "%r8d": the #b/#w/#d suffixes pick byte/word/dword register views