2 # Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
9 # Implemented as a Perl wrapper as we want to support several different
10 # architectures with a single file. We pick up the target based on the
11 # file name we are asked to generate.
13 # It should be noted though that this perl code is nothing like
14 # <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
15 # as a pre-processor to cover for platform differences in name decoration,
16 # linker tables, 32-/64-bit instruction sets...
18 # As you might know, there are several PowerPC ABIs in use. Most notably,
19 # Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
20 # are similar enough to implement leaf(!) functions, which can be made ABI
21 # neutral. And that's what you find here: ABI-neutral leaf functions.
22 # In case you wonder what that is...
26 # MEASUREMENTS WITH cc ON A 200 MHz PowerPC 604e.
28 # The following is the performance of the 32-bit compiler
31 # OpenSSL 0.9.6c 21 dec 2001
32 # built on: Tue Jun 11 11:06:51 EDT 2002
33 # options:bn(64,32) ...
34 #compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
35 # sign verify sign/s verify/s
36 #rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
37 #rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
38 #rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
39 #rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
40 #dsa 512 bits 0.0087s 0.0106s 114.3 94.5
41 #dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
43 # Same benchmark with this assembler code:
45 #rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
46 #rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
47 #rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
48 #rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
49 #dsa 512 bits 0.0052s 0.0062s 191.6 162.0
50 #dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
52 # Number of operations increases by almost 75%
54 # Here are performance numbers for the 64-bit compiler
57 # OpenSSL 0.9.6g [engine] 9 Aug 2002
58 # built on: Fri Apr 18 16:59:20 EDT 2003
59 # options:bn(64,64) ...
60 # compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
61 # sign verify sign/s verify/s
62 #rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
63 #rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
64 #rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
65 #rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
66 #dsa 512 bits 0.0026s 0.0032s 382.5 313.7
67 #dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
69 # Same benchmark with this assembler code:
71 #rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
72 #rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
73 #rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
74 #rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
75 #dsa 512 bits 0.0016s 0.0020s 610.7 507.1
76 #dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
78 # Again, performance increases by about 75%
80 # Mac OS X, Apple G5 1.8GHz (Note this is 32-bit code)
81 # OpenSSL 0.9.7c 30 Sep 2003
85 #rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
86 #rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
87 #rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
88 #rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
89 #dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
90 #dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
91 #dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
93 # Same benchmark with this assembler code:
95 #rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
96 #rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
97 #rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
98 #rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
99 #dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
100 #dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
101 #dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
103 # Performance increase of ~60%
105 # If you have comments or suggestions to improve the code, send
106 # me a note at schari@us.ibm.com
111 if ($flavour =~ /32/) {
117 $LDU= "lwzu"; # load and update
119 $STU= "stwu"; # store and update
120 $UMULL= "mullw"; # unsigned multiply low
121 $UMULH= "mulhwu"; # unsigned multiply high
122 $UDIV= "divwu"; # unsigned divide
123 $UCMPI= "cmplwi"; # unsigned compare with immediate
124 $UCMP= "cmplw"; # unsigned compare
125 $CNTLZ= "cntlzw"; # count leading zeros
126 $SHL= "slw"; # shift left
127 $SHR= "srw"; # unsigned shift right
128 $SHRI= "srwi"; # unsigned shift right by immediate
129 $SHLI= "slwi"; # shift left by immediate
130 $CLRU= "clrlwi"; # clear upper bits
131 $INSR= "insrwi"; # insert right
132 $ROTL= "rotlwi"; # rotate left by immediate
133 $TR= "tw"; # conditional trap
134 } elsif ($flavour =~ /64/) {
139 # same as above, but 64-bit mnemonics...
141 $LDU= "ldu"; # load and update
143 $STU= "stdu"; # store and update
144 $UMULL= "mulld"; # unsigned multiply low
145 $UMULH= "mulhdu"; # unsigned multiply high
146 $UDIV= "divdu"; # unsigned divide
147 $UCMPI= "cmpldi"; # unsigned compare with immediate
148 $UCMP= "cmpld"; # unsigned compare
149 $CNTLZ= "cntlzd"; # count leading zeros
150 $SHL= "sld"; # shift left
151 $SHR= "srd"; # unsigned shift right
152 $SHRI= "srdi"; # unsigned shift right by immediate
153 $SHLI= "sldi"; # shift left by immediate
154 $CLRU= "clrldi"; # clear upper bits
155 $INSR= "insrdi"; # insert right
156 $ROTL= "rotldi"; # rotate left by immediate
157 $TR= "td"; # conditional trap
158 } else { die "nonsense $flavour"; }
160 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
161 ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
162 ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
163 die "can't locate ppc-xlate.pl";
165 open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
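#
# A minimal usage sketch (the exact flavour strings and output name come from
# the OpenSSL build system; "linux32"/"linux64" here are only illustrative):
#
#	perl ppc.pl linux32 bn-ppc.s	# emits code using the 32-bit mnemonics above
#	perl ppc.pl linux64 bn-ppc.s	# emits code using the 64-bit mnemonics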
168 #--------------------------------------------------------------------
175 # Created by: Suresh Chari
176 # IBM Thomas J. Watson Research Library
180 # Description: Optimized assembly routines for OpenSSL crypto
181 # Description: Optimized assembly routines for OpenSSL crypto
186 # 2. Fixed bn_add, bn_sub and bn_div_words, added comments,
187 # cleaned up code. Also made a single version which can
188 # be used for both the AIX and Linux compilers. See NOTE
190 # 12/05/03 Suresh Chari
191 # (with lots of help from) Andy Polyakov
193 # 1. Initial version 10/20/02 Suresh Chari
196 # The following file works for the xlc, cc
199 # NOTE: To get the file to link correctly with the gcc compiler
200 # you have to change the names of the routines and remove
201 # the first .(dot) character. This should automatically
202 # be done in the build process.
204 # Hand-optimized assembly code for the following routines:
217 # NOTE: It is possible to optimize this code more for
218 # specific PowerPC or Power architectures. On the Northstar
219 # architecture the optimizations in this file do
220 # NOT provide much improvement.
222 # If you have comments or suggestions to improve the code, send
223 # me a note at schari\@us.ibm.com
225 #--------------------------------------------------------------------------
227 # Defines to be used in the assembly code.
229 #.set r0,0 # we use it as storage for value of 0
230 #.set SP,1 # preserved
231 #.set RTOC,2 # preserved
232 #.set r3,3 # 1st argument/return value
233 #.set r4,4 # 2nd argument/volatile register
234 #.set r5,5 # 3rd argument/volatile register
242 #.set r13,13 # not used, nor any other "below" it...
244 # Declare function names to be global
245 # NOTE: For gcc these names MUST be changed to remove
246 # the first . i.e. for example change ".bn_sqr_comba4"
247 # to "bn_sqr_comba4". This should be automatically done
250 .globl .bn_sqr_comba4
251 .globl .bn_sqr_comba8
252 .globl .bn_mul_comba4
253 .globl .bn_mul_comba8
259 .globl .bn_mul_add_words
266 # NOTE: The following label name should be changed to
267 # "bn_sqr_comba4" i.e. remove the first dot
268 # for the gcc compiler. This should be automatically
275 # Optimized version of bn_sqr_comba4.
277 # void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
281 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
283 # r5,r6 are the two BN_ULONGs being multiplied.
284 # r7,r8 hold the result of the 32x32-bit multiply giving a 64-bit product.
285 # r9, r10, r11 are the equivalents of c1, c2, c3.
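#
# For orientation, the sqr_add_c/sqr_add_c2 steps named in the comments below
# accumulate a product into the running three-word total (c1,c2,c3). A rough
# C sketch for the 32-bit case (an illustration only, not the exact reference
# macros):
#
#     /* add a[i]*a[i] into the carry chain (c1,c2,c3), low word first */
#     unsigned long long t = (unsigned long long)a[i] * a[i];
#     unsigned long long s = (unsigned long long)c1 + (unsigned int)t;
#     c1 = (unsigned int)s;
#     s  = (unsigned long long)c2 + (unsigned int)(t >> 32) + (s >> 32);
#     c2 = (unsigned int)s;
#     c3 += (unsigned int)(s >> 32);
#
# sqr_add_c2(a,i,j,...) does the same with 2*a[i]*a[j]; the assembly doubles
# the 64-bit product and catches the extra carry before accumulating.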
286 # Here's the assembly
289 xor r0,r0,r0 # set r0 = 0. Used in the addze
292 #sqr_add_c(a,0,c1,c2,c3)
295 $UMULH r10,r5,r5 #in first iteration. No need
296 #to add since c1=c2=c3=0.
297 # Note c3(r11) is NOT set to 0
300 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
301 # sqr_add_c2(a,1,0,c2,c3,c1);
306 addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
308 addze r9,r0 # catch carry if any.
309 # r9= r0(=0) and carry
311 addc r10,r7,r10 # now add to temp result.
312 addze r11,r8 # r8 added to r11 which is 0
315 $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
316 #sqr_add_c(a,1,c3,c1,c2)
322 #sqr_add_c2(a,2,0,c3,c1,c2)
334 $ST r11,`2*$BNSZ`(r3) #r[2]=c3
335 #sqr_add_c2(a,3,0,c1,c2,c3);
346 #sqr_add_c2(a,2,1,c1,c2,c3);
358 $ST r9,`3*$BNSZ`(r3) #r[3]=c1
359 #sqr_add_c(a,2,c2,c3,c1);
365 #sqr_add_c2(a,3,1,c2,c3,c1);
376 $ST r10,`4*$BNSZ`(r3) #r[4]=c2
377 #sqr_add_c2(a,3,2,c3,c1,c2);
388 $ST r11,`5*$BNSZ`(r3) #r[5] = c3
389 #sqr_add_c(a,3,c1,c2,c3);
395 $ST r9,`6*$BNSZ`(r3) #r[6]=c1
396 $ST r10,`7*$BNSZ`(r3) #r[7]=c2
399 .byte 0,12,0x14,0,0,0,2,0
401 .size .bn_sqr_comba4,.-.bn_sqr_comba4
404 # NOTE: The following label name should be changed to
405 # "bn_sqr_comba8" i.e. remove the first dot
406 # for the gcc compiler. This should be automatically
413 # This is an optimized version of the bn_sqr_comba8 routine.
414 # It relies heavily on the adde instruction.
417 # void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
421 # Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
423 # r5,r6 are the two BN_ULONGs being multiplied.
424 # r7,r8 hold the result of the 32x32-bit multiply giving a 64-bit product.
425 # r9, r10, r11 are the equivalents of c1, c2, c3.
427 # A possible optimization of loading all 8 longs of a into registers
428 # doesn't provide any speedup.
431 xor r0,r0,r0 #set r0 = 0. Used in addze
434 #sqr_add_c(a,0,c1,c2,c3);
436 $UMULL r9,r5,r5 #1st iteration: no carries.
438 $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
439 #sqr_add_c2(a,1,0,c2,c3,c1);
444 addc r10,r7,r10 #add the two-register number
445 adde r11,r8,r0 # (r8,r7) to the three-register
446 addze r9,r0 # number (r9,r11,r10). NOTE: r0=0
448 addc r10,r7,r10 #add the two-register number
449 adde r11,r8,r11 # (r8,r7) to the three-register
450 addze r9,r9 # number (r9,r11,r10).
452 $ST r10,`1*$BNSZ`(r3) # r[1]=c2
454 #sqr_add_c(a,1,c3,c1,c2);
460 #sqr_add_c2(a,2,0,c3,c1,c2);
473 $ST r11,`2*$BNSZ`(r3) #r[2]=c3
474 #sqr_add_c2(a,3,0,c1,c2,c3);
475 $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
486 #sqr_add_c2(a,2,1,c1,c2,c3);
500 $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
501 #sqr_add_c(a,2,c2,c3,c1);
508 #sqr_add_c2(a,3,1,c2,c3,c1);
520 #sqr_add_c2(a,4,0,c2,c3,c1);
533 $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
534 #sqr_add_c2(a,5,0,c3,c1,c2);
546 #sqr_add_c2(a,4,1,c3,c1,c2);
559 #sqr_add_c2(a,3,2,c3,c1,c2);
572 $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
573 #sqr_add_c(a,3,c1,c2,c3);
579 #sqr_add_c2(a,4,2,c1,c2,c3);
591 #sqr_add_c2(a,5,1,c1,c2,c3);
604 #sqr_add_c2(a,6,0,c1,c2,c3);
615 $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
616 #sqr_add_c2(a,7,0,c2,c3,c1);
627 #sqr_add_c2(a,6,1,c2,c3,c1);
639 #sqr_add_c2(a,5,2,c2,c3,c1);
650 #sqr_add_c2(a,4,3,c2,c3,c1);
662 $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
663 #sqr_add_c(a,4,c3,c1,c2);
669 #sqr_add_c2(a,5,3,c3,c1,c2);
679 #sqr_add_c2(a,6,2,c3,c1,c2);
691 #sqr_add_c2(a,7,1,c3,c1,c2);
702 $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
703 #sqr_add_c2(a,7,2,c1,c2,c3);
714 #sqr_add_c2(a,6,3,c1,c2,c3);
725 #sqr_add_c2(a,5,4,c1,c2,c3);
736 $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
737 #sqr_add_c(a,5,c2,c3,c1);
743 #sqr_add_c2(a,6,4,c2,c3,c1);
753 #sqr_add_c2(a,7,3,c2,c3,c1);
764 $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
765 #sqr_add_c2(a,7,4,c3,c1,c2);
775 #sqr_add_c2(a,6,5,c3,c1,c2);
786 $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
787 #sqr_add_c(a,6,c1,c2,c3);
793 #sqr_add_c2(a,7,5,c1,c2,c3)
803 $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
805 #sqr_add_c2(a,7,6,c2,c3,c1)
815 $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
816 #sqr_add_c(a,7,c3,c1,c2);
821 $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
822 $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
827 .byte 0,12,0x14,0,0,0,2,0
829 .size .bn_sqr_comba8,.-.bn_sqr_comba8
832 # NOTE: The following label name should be changed to
833 # "bn_mul_comba4" i.e. remove the first dot
834 # for the gcc compiler. This should be automatically
841 # This is an optimized version of the bn_mul_comba4 routine.
843 # void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
847 # r6, r7 are the 2 BN_ULONGs being multiplied.
848 # r8, r9 hold the result of the 32x32-bit multiply giving a 64-bit product.
849 # r10, r11, r12 are the equivalents of c1, c2, and c3.
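#
# mul_add_c(a[i],b[j],c1,c2,c3) in the comments below accumulates the 64-bit
# product a[i]*b[j] into the three-word total (c1,c2,c3), exactly like the
# sqr_add_c sketch shown before bn_sqr_comba4, only with two distinct operands.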
851 xor r0,r0,r0 #r0=0. Used in addze below.
852 #mul_add_c(a[0],b[0],c1,c2,c3);
857 $ST r10,`0*$BNSZ`(r3) #r[0]=c1
858 #mul_add_c(a[0],b[1],c2,c3,c1);
865 #mul_add_c(a[1],b[0],c2,c3,c1);
866 $LD r6, `1*$BNSZ`(r4)
867 $LD r7, `0*$BNSZ`(r5)
873 $ST r11,`1*$BNSZ`(r3) #r[1]=c2
874 #mul_add_c(a[2],b[0],c3,c1,c2);
881 #mul_add_c(a[1],b[1],c3,c1,c2);
889 #mul_add_c(a[0],b[2],c3,c1,c2);
897 $ST r12,`2*$BNSZ`(r3) #r[2]=c3
898 #mul_add_c(a[0],b[3],c1,c2,c3);
905 #mul_add_c(a[1],b[2],c1,c2,c3);
913 #mul_add_c(a[2],b[1],c1,c2,c3);
921 #mul_add_c(a[3],b[0],c1,c2,c3);
929 $ST r10,`3*$BNSZ`(r3) #r[3]=c1
930 #mul_add_c(a[3],b[1],c2,c3,c1);
937 #mul_add_c(a[2],b[2],c2,c3,c1);
945 #mul_add_c(a[1],b[3],c2,c3,c1);
953 $ST r11,`4*$BNSZ`(r3) #r[4]=c2
954 #mul_add_c(a[2],b[3],c3,c1,c2);
961 #mul_add_c(a[3],b[2],c3,c1,c2);
969 $ST r12,`5*$BNSZ`(r3) #r[5]=c3
970 #mul_add_c(a[3],b[3],c1,c2,c3);
977 $ST r10,`6*$BNSZ`(r3) #r[6]=c1
978 $ST r11,`7*$BNSZ`(r3) #r[7]=c2
981 .byte 0,12,0x14,0,0,0,3,0
983 .size .bn_mul_comba4,.-.bn_mul_comba4
986 # NOTE: The following label name should be changed to
987 # "bn_mul_comba8" i.e. remove the first dot
988 # for the gcc compiler. This should be automatically
995 # Optimized version of the bn_mul_comba8 routine.
997 # void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
1001 # r6, r7 are the 2 BN_ULONGs being multiplied.
1002 # r8, r9 hold the result of the 32x32-bit multiply giving a 64-bit product.
1003 # r10, r11, r12 are the equivalents of c1, c2, and c3.
1005 xor r0,r0,r0 #r0=0. Used in addze below.
1007 #mul_add_c(a[0],b[0],c1,c2,c3);
1008 $LD r6,`0*$BNSZ`(r4) #a[0]
1009 $LD r7,`0*$BNSZ`(r5) #b[0]
1012 $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
1013 #mul_add_c(a[0],b[1],c2,c3,c1);
1014 $LD r7,`1*$BNSZ`(r5)
1018 addze r12,r9 # since we didn't set r12 to zero before.
1020 #mul_add_c(a[1],b[0],c2,c3,c1);
1021 $LD r6,`1*$BNSZ`(r4)
1022 $LD r7,`0*$BNSZ`(r5)
1028 $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
1029 #mul_add_c(a[2],b[0],c3,c1,c2);
1030 $LD r6,`2*$BNSZ`(r4)
1036 #mul_add_c(a[1],b[1],c3,c1,c2);
1037 $LD r6,`1*$BNSZ`(r4)
1038 $LD r7,`1*$BNSZ`(r5)
1044 #mul_add_c(a[0],b[2],c3,c1,c2);
1045 $LD r6,`0*$BNSZ`(r4)
1046 $LD r7,`2*$BNSZ`(r5)
1052 $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
1053 #mul_add_c(a[0],b[3],c1,c2,c3);
1054 $LD r7,`3*$BNSZ`(r5)
1060 #mul_add_c(a[1],b[2],c1,c2,c3);
1061 $LD r6,`1*$BNSZ`(r4)
1062 $LD r7,`2*$BNSZ`(r5)
1069 #mul_add_c(a[2],b[1],c1,c2,c3);
1070 $LD r6,`2*$BNSZ`(r4)
1071 $LD r7,`1*$BNSZ`(r5)
1077 #mul_add_c(a[3],b[0],c1,c2,c3);
1078 $LD r6,`3*$BNSZ`(r4)
1079 $LD r7,`0*$BNSZ`(r5)
1085 $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
1086 #mul_add_c(a[4],b[0],c2,c3,c1);
1087 $LD r6,`4*$BNSZ`(r4)
1093 #mul_add_c(a[3],b[1],c2,c3,c1);
1094 $LD r6,`3*$BNSZ`(r4)
1095 $LD r7,`1*$BNSZ`(r5)
1101 #mul_add_c(a[2],b[2],c2,c3,c1);
1102 $LD r6,`2*$BNSZ`(r4)
1103 $LD r7,`2*$BNSZ`(r5)
1109 #mul_add_c(a[1],b[3],c2,c3,c1);
1110 $LD r6,`1*$BNSZ`(r4)
1111 $LD r7,`3*$BNSZ`(r5)
1117 #mul_add_c(a[0],b[4],c2,c3,c1);
1118 $LD r6,`0*$BNSZ`(r4)
1119 $LD r7,`4*$BNSZ`(r5)
1125 $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
1126 #mul_add_c(a[0],b[5],c3,c1,c2);
1127 $LD r7,`5*$BNSZ`(r5)
1133 #mul_add_c(a[1],b[4],c3,c1,c2);
1134 $LD r6,`1*$BNSZ`(r4)
1135 $LD r7,`4*$BNSZ`(r5)
1141 #mul_add_c(a[2],b[3],c3,c1,c2);
1142 $LD r6,`2*$BNSZ`(r4)
1143 $LD r7,`3*$BNSZ`(r5)
1149 #mul_add_c(a[3],b[2],c3,c1,c2);
1150 $LD r6,`3*$BNSZ`(r4)
1151 $LD r7,`2*$BNSZ`(r5)
1157 #mul_add_c(a[4],b[1],c3,c1,c2);
1158 $LD r6,`4*$BNSZ`(r4)
1159 $LD r7,`1*$BNSZ`(r5)
1165 #mul_add_c(a[5],b[0],c3,c1,c2);
1166 $LD r6,`5*$BNSZ`(r4)
1167 $LD r7,`0*$BNSZ`(r5)
1173 $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
1174 #mul_add_c(a[6],b[0],c1,c2,c3);
1175 $LD r6,`6*$BNSZ`(r4)
1181 #mul_add_c(a[5],b[1],c1,c2,c3);
1182 $LD r6,`5*$BNSZ`(r4)
1183 $LD r7,`1*$BNSZ`(r5)
1189 #mul_add_c(a[4],b[2],c1,c2,c3);
1190 $LD r6,`4*$BNSZ`(r4)
1191 $LD r7,`2*$BNSZ`(r5)
1197 #mul_add_c(a[3],b[3],c1,c2,c3);
1198 $LD r6,`3*$BNSZ`(r4)
1199 $LD r7,`3*$BNSZ`(r5)
1205 #mul_add_c(a[2],b[4],c1,c2,c3);
1206 $LD r6,`2*$BNSZ`(r4)
1207 $LD r7,`4*$BNSZ`(r5)
1213 #mul_add_c(a[1],b[5],c1,c2,c3);
1214 $LD r6,`1*$BNSZ`(r4)
1215 $LD r7,`5*$BNSZ`(r5)
1221 #mul_add_c(a[0],b[6],c1,c2,c3);
1222 $LD r6,`0*$BNSZ`(r4)
1223 $LD r7,`6*$BNSZ`(r5)
1229 $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
1230 #mul_add_c(a[0],b[7],c2,c3,c1);
1231 $LD r7,`7*$BNSZ`(r5)
1237 #mul_add_c(a[1],b[6],c2,c3,c1);
1238 $LD r6,`1*$BNSZ`(r4)
1239 $LD r7,`6*$BNSZ`(r5)
1245 #mul_add_c(a[2],b[5],c2,c3,c1);
1246 $LD r6,`2*$BNSZ`(r4)
1247 $LD r7,`5*$BNSZ`(r5)
1253 #mul_add_c(a[3],b[4],c2,c3,c1);
1254 $LD r6,`3*$BNSZ`(r4)
1255 $LD r7,`4*$BNSZ`(r5)
1261 #mul_add_c(a[4],b[3],c2,c3,c1);
1262 $LD r6,`4*$BNSZ`(r4)
1263 $LD r7,`3*$BNSZ`(r5)
1269 #mul_add_c(a[5],b[2],c2,c3,c1);
1270 $LD r6,`5*$BNSZ`(r4)
1271 $LD r7,`2*$BNSZ`(r5)
1277 #mul_add_c(a[6],b[1],c2,c3,c1);
1278 $LD r6,`6*$BNSZ`(r4)
1279 $LD r7,`1*$BNSZ`(r5)
1285 #mul_add_c(a[7],b[0],c2,c3,c1);
1286 $LD r6,`7*$BNSZ`(r4)
1287 $LD r7,`0*$BNSZ`(r5)
1293 $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
1294 #mul_add_c(a[7],b[1],c3,c1,c2);
1295 $LD r7,`1*$BNSZ`(r5)
1301 #mul_add_c(a[6],b[2],c3,c1,c2);
1302 $LD r6,`6*$BNSZ`(r4)
1303 $LD r7,`2*$BNSZ`(r5)
1309 #mul_add_c(a[5],b[3],c3,c1,c2);
1310 $LD r6,`5*$BNSZ`(r4)
1311 $LD r7,`3*$BNSZ`(r5)
1317 #mul_add_c(a[4],b[4],c3,c1,c2);
1318 $LD r6,`4*$BNSZ`(r4)
1319 $LD r7,`4*$BNSZ`(r5)
1325 #mul_add_c(a[3],b[5],c3,c1,c2);
1326 $LD r6,`3*$BNSZ`(r4)
1327 $LD r7,`5*$BNSZ`(r5)
1333 #mul_add_c(a[2],b[6],c3,c1,c2);
1334 $LD r6,`2*$BNSZ`(r4)
1335 $LD r7,`6*$BNSZ`(r5)
1341 #mul_add_c(a[1],b[7],c3,c1,c2);
1342 $LD r6,`1*$BNSZ`(r4)
1343 $LD r7,`7*$BNSZ`(r5)
1349 $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
1350 #mul_add_c(a[2],b[7],c1,c2,c3);
1351 $LD r6,`2*$BNSZ`(r4)
1357 #mul_add_c(a[3],b[6],c1,c2,c3);
1358 $LD r6,`3*$BNSZ`(r4)
1359 $LD r7,`6*$BNSZ`(r5)
1365 #mul_add_c(a[4],b[5],c1,c2,c3);
1366 $LD r6,`4*$BNSZ`(r4)
1367 $LD r7,`5*$BNSZ`(r5)
1373 #mul_add_c(a[5],b[4],c1,c2,c3);
1374 $LD r6,`5*$BNSZ`(r4)
1375 $LD r7,`4*$BNSZ`(r5)
1381 #mul_add_c(a[6],b[3],c1,c2,c3);
1382 $LD r6,`6*$BNSZ`(r4)
1383 $LD r7,`3*$BNSZ`(r5)
1389 #mul_add_c(a[7],b[2],c1,c2,c3);
1390 $LD r6,`7*$BNSZ`(r4)
1391 $LD r7,`2*$BNSZ`(r5)
1397 $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
1398 #mul_add_c(a[7],b[3],c2,c3,c1);
1399 $LD r7,`3*$BNSZ`(r5)
1405 #mul_add_c(a[6],b[4],c2,c3,c1);
1406 $LD r6,`6*$BNSZ`(r4)
1407 $LD r7,`4*$BNSZ`(r5)
1413 #mul_add_c(a[5],b[5],c2,c3,c1);
1414 $LD r6,`5*$BNSZ`(r4)
1415 $LD r7,`5*$BNSZ`(r5)
1421 #mul_add_c(a[4],b[6],c2,c3,c1);
1422 $LD r6,`4*$BNSZ`(r4)
1423 $LD r7,`6*$BNSZ`(r5)
1429 #mul_add_c(a[3],b[7],c2,c3,c1);
1430 $LD r6,`3*$BNSZ`(r4)
1431 $LD r7,`7*$BNSZ`(r5)
1437 $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
1438 #mul_add_c(a[4],b[7],c3,c1,c2);
1439 $LD r6,`4*$BNSZ`(r4)
1445 #mul_add_c(a[5],b[6],c3,c1,c2);
1446 $LD r6,`5*$BNSZ`(r4)
1447 $LD r7,`6*$BNSZ`(r5)
1453 #mul_add_c(a[6],b[5],c3,c1,c2);
1454 $LD r6,`6*$BNSZ`(r4)
1455 $LD r7,`5*$BNSZ`(r5)
1461 #mul_add_c(a[7],b[4],c3,c1,c2);
1462 $LD r6,`7*$BNSZ`(r4)
1463 $LD r7,`4*$BNSZ`(r5)
1469 $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
1470 #mul_add_c(a[7],b[5],c1,c2,c3);
1471 $LD r7,`5*$BNSZ`(r5)
1477 #mul_add_c(a[6],b[6],c1,c2,c3);
1478 $LD r6,`6*$BNSZ`(r4)
1479 $LD r7,`6*$BNSZ`(r5)
1485 #mul_add_c(a[5],b[7],c1,c2,c3);
1486 $LD r6,`5*$BNSZ`(r4)
1487 $LD r7,`7*$BNSZ`(r5)
1493 $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
1494 #mul_add_c(a[6],b[7],c2,c3,c1);
1495 $LD r6,`6*$BNSZ`(r4)
1501 #mul_add_c(a[7],b[6],c2,c3,c1);
1502 $LD r6,`7*$BNSZ`(r4)
1503 $LD r7,`6*$BNSZ`(r5)
1509 $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
1510 #mul_add_c(a[7],b[7],c3,c1,c2);
1511 $LD r7,`7*$BNSZ`(r5)
1516 $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
1517 $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
1520 .byte 0,12,0x14,0,0,0,3,0
1522 .size .bn_mul_comba8,.-.bn_mul_comba8
1525 # NOTE: The following label name should be changed to
1526 # "bn_sub_words" i.e. remove the first dot
1527 # for the gcc compiler. This should be automatically
1534 # Handcoded version of bn_sub_words
1536 #BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
1543 # Note: No loop unrolling done since this is not a performance-critical loop.
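#
# Semantically (a C sketch for illustration only, 32-bit words assumed):
#
#     unsigned int borrow = 0;
#     for (int i = 0; i < n; i++) {
#         unsigned long long t = (unsigned long long)a[i] - b[i] - borrow;
#         r[i]   = (unsigned int)t;
#         borrow = (unsigned int)((t >> 32) & 1);
#     }
#     return borrow;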
1546 xor r0,r0,r0 #set r0 = 0
1548 # check for r6 = 0 AND set carry bit.
1550 subfc. r7,r0,r6 # If r6 is 0 then result is 0.
1551 # if r6 > 0 then result !=0
1552 # In either case carry bit is set.
1553 beq Lppcasm_sub_adios
1558 Lppcasm_sub_mainloop:
1561 subfe r6,r8,r7 # r6 = r7 + carry bit + ones' complement(r8)
1562 # if carry = 1 this is r7-r8. Else it
1563 # is r7-r8-1, as we need.
1565 bdnz Lppcasm_sub_mainloop
1567 subfze r3,r0 # if carry bit is set then r3 = 0 else -1
1568 andi. r3,r3,1 # keep only last bit.
1571 .byte 0,12,0x14,0,0,0,4,0
1573 .size .bn_sub_words,.-.bn_sub_words
1576 # NOTE: The following label name should be changed to
1577 # "bn_add_words" i.e. remove the first dot
1578 # for the gcc compiler. This should be automatically
1585 # Handcoded version of bn_add_words
1587 #BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
1594 # Note: No loop unrolling done since this is not a performance-critical loop.
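#
# Semantics (sketch): like bn_sub_words above, but r[i] = a[i] + b[i] + carry,
# returning the final carry instead of a borrow.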
1599 # check for r6 = 0. Is this needed?
1601 addic. r6,r6,0 #test r6 and clear carry bit.
1602 beq Lppcasm_add_adios
1607 Lppcasm_add_mainloop:
1612 bdnz Lppcasm_add_mainloop
1614 addze r3,r0 #return carry bit.
1617 .byte 0,12,0x14,0,0,0,4,0
1619 .size .bn_add_words,.-.bn_add_words
1622 # NOTE: The following label name should be changed to
1623 # "bn_div_words" i.e. remove the first dot
1624 # for the gcc compiler. This should be automatically
1631 # This is a cleaned-up version of code generated by
1632 # the AIX compiler. The only optimization is to use
1633 # the PPC instruction to count leading zeros instead
1634 # of a call to num_bits_word. Since this was compiled
1635 # only at level -O2, it could probably be squeezed further.
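#
# In outline (a sketch of the algorithm, not a literal transcription of the
# code below): bn_div_words computes (h:l)/d one half-word quotient digit at
# a time, with an estimate-and-correct scheme:
#
#     normalize: i = clz(d); d <<= i; h = (h<<i)|(l>>(BITS-i)); l <<= i
#     dh = d >> BITS/2
#     repeat twice (one pass per half-word digit of the quotient):
#         q = (h>>BITS/2 == dh) ? all-ones half-word : h/dh    # digit estimate
#         while the estimate is too large: q--, adjusting q*dh and q*dl
#         subtract q*d from (h:l), shift the next half-word of l into h
#         accumulate q into the return value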
1641 $UCMPI 0,r5,0 # compare r5 and 0
1642 bne Lppcasm_div1 # proceed if d!=0
1643 li r3,-1 # d=0 return -1
1648 $CNTLZ. r7,r5 #r7 = num leading 0s in d.
1649 beq Lppcasm_div2 #proceed if no leading zeros
1650 subf r8,r7,r8 #r8 = BN_num_bits_word(d)
1651 $SHR. r9,r3,r8 #are there any bits above r8'th?
1652 $TR 16,r9,r0 #if there are, signal to dump core...
1654 $UCMP 0,r3,r5 #h>=d?
1655 blt Lppcasm_div3 #goto Lppcasm_div3 if not
1656 subf r3,r5,r3 #h-=d ;
1657 Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
1658 cmpi 0,0,r7,0 # is (i == 0)?
1660 $SHL r3,r3,r7 # h = (h<< i)
1661 $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
1662 $SHL r5,r5,r7 # d<<=i
1663 or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
1664 $SHL r4,r4,r7 # l <<=i
1666 $SHRI r9,r5,`$BITS/2` # r9 = dh
1667 # dl will be computed when needed
1668 # as it saves registers.
1670 mtctr r6 #loop counter kept in the count register (CTR).
1671 Lppcasm_divouterloop:
1672 $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
1673 $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
1674 # compute here for innerloop.
1675 $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
1676 bne Lppcasm_div5 # goto Lppcasm_div5 if not
1679 $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
1682 $UDIV r8,r3,r9 #q = h/dh
1684 $UMULL r12,r9,r8 #th = q*dh
1685 $CLRU r10,r5,`$BITS/2` #r10=dl
1686 $UMULL r6,r8,r10 #tl = q*dl
1688 Lppcasm_divinnerloop:
1689 subf r10,r12,r3 #t = h -th
1690 $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
1691 addic. r7,r7,0 #test if r7 == 0. used below.
1692 # now want to compute
1693 # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
1694 # the following 2 instructions do that
1695 $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
1696 or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
1697 $UCMP cr1,r6,r7 # compare (tl <= r7)
1698 bne Lppcasm_divinnerexit
1699 ble cr1,Lppcasm_divinnerexit
1701 subf r12,r9,r12 #th -=dh
1702 $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
1703 subf r6,r10,r6 #tl -=dl
1704 b Lppcasm_divinnerloop
1705 Lppcasm_divinnerexit:
1706 $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
1707 $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
1708 $UCMP cr1,r4,r11 # compare l and tl
1709 add r12,r12,r10 # th+=t
1710 bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
1711 addi r12,r12,1 # th++
1713 subf r11,r11,r4 #r11=l-tl
1714 $UCMP cr1,r3,r12 #compare h and th
1715 bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
1719 subf r12,r12,r3 #r12 = h-th
1720 $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
1722 # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
1723 # the following 2 instructions will do this.
1724 $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
1725 $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
1726 bdz Lppcasm_div9 #if (count==0) break ;
1727 $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
1728 b Lppcasm_divouterloop
1733 .byte 0,12,0x14,0,0,0,3,0
1735 .size .bn_div_words,.-.bn_div_words
1738 # NOTE: The following label name should be changed to
1739 # "bn_sqr_words" i.e. remove the first dot
1740 # for the gcc compiler. This should be automatically
1746 # Optimized version of bn_sqr_words
1748 # void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
1757 # No unrolling done here. Not performance critical.
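#
# Semantics (sketch): each input word yields two output words,
#     r[2*i]   = low  half of a[i]*a[i]
#     r[2*i+1] = high half of a[i]*a[i]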
1759 addic. r5,r5,0 #test r5.
1760 beq Lppcasm_sqr_adios
1764 Lppcasm_sqr_mainloop:
1765 #sqr(r[0],r[1],a[0]);
1771 bdnz Lppcasm_sqr_mainloop
1775 .byte 0,12,0x14,0,0,0,3,0
1777 .size .bn_sqr_words,.-.bn_sqr_words
1780 # NOTE: The following label name should be changed to
1781 # "bn_mul_words" i.e. remove the first dot
1782 # for the gcc compiler. This should be automatically
1789 # BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
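#
# Semantically (a C sketch for illustration only, 32-bit words assumed):
#
#     unsigned int c = 0;
#     for (int i = 0; i < num; i++) {
#         unsigned long long t = (unsigned long long)ap[i] * w + c;
#         rp[i] = (unsigned int)t;
#         c     = (unsigned int)(t >> 32);
#     }
#     return c;
#
# The loop below processes four words per iteration and handles any
# leftover words (num % 4) separately.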
1796 xor r12,r12,r12 # used for carry
1797 rlwinm. r7,r5,30,2,31 # num >> 2
1801 #mul(rp[0],ap[0],w,c1);
1802 $LD r8,`0*$BNSZ`(r4)
1806 #addze r10,r10 #carry is NOT ignored.
1807 #will be taken care of
1808 #in second spin below
1810 $ST r9,`0*$BNSZ`(r3)
1811 #mul(rp[1],ap[1],w,c1);
1812 $LD r8,`1*$BNSZ`(r4)
1817 $ST r11,`1*$BNSZ`(r3)
1818 #mul(rp[2],ap[2],w,c1);
1819 $LD r8,`2*$BNSZ`(r4)
1824 $ST r9,`2*$BNSZ`(r3)
1825 #mul_add(rp[3],ap[3],w,c1);
1826 $LD r8,`3*$BNSZ`(r4)
1830 addze r12,r12 #this spin we collect carry into
1832 $ST r11,`3*$BNSZ`(r3)
1834 addi r3,r3,`4*$BNSZ`
1835 addi r4,r4,`4*$BNSZ`
1836 bdnz Lppcasm_mw_LOOP
1841 #mul(rp[0],ap[0],w,c1);
1842 $LD r8,`0*$BNSZ`(r4)
1847 $ST r9,`0*$BNSZ`(r3)
1855 #mul(rp[1],ap[1],w,c1);
1856 $LD r8,`1*$BNSZ`(r4)
1861 $ST r9,`1*$BNSZ`(r3)
1868 #mul_add(rp[2],ap[2],w,c1);
1869 $LD r8,`2*$BNSZ`(r4)
1874 $ST r9,`2*$BNSZ`(r3)
1881 .byte 0,12,0x14,0,0,0,4,0
1883 .size .bn_mul_words,.-.bn_mul_words
1886 # NOTE: The following label name should be changed to
1887 # "bn_mul_add_words" i.e. remove the first dot
1888 # for the gcc compiler. This should be automatically
1895 # BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
1902 # empirical evidence suggests that the unrolled version performs best!!
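#
# Same as bn_mul_words above, except the product is added into the existing
# contents of rp[] (a C sketch for illustration only, 32-bit words assumed):
#
#     unsigned int c = 0;
#     for (int i = 0; i < num; i++) {
#         unsigned long long t = (unsigned long long)ap[i] * w + rp[i] + c;
#         rp[i] = (unsigned int)t;
#         c     = (unsigned int)(t >> 32);
#     }
#     return c;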
1904 xor r0,r0,r0 #r0 = 0
1905 xor r12,r12,r12 #r12 = 0 . used for carry
1906 rlwinm. r7,r5,30,2,31 # num >> 2
1907 beq Lppcasm_maw_leftover # if (num < 4) goto Lppcasm_maw_leftover
1909 Lppcasm_maw_mainloop:
1910 #mul_add(rp[0],ap[0],w,c1);
1911 $LD r8,`0*$BNSZ`(r4)
1912 $LD r11,`0*$BNSZ`(r3)
1915 addc r9,r9,r12 #r12 is carry.
1919 #the above instruction addze
1920 #is NOT needed. Carry will NOT
1921 #be ignored. It's not affected
1922 #by multiply and will be collected
1924 $ST r9,`0*$BNSZ`(r3)
1926 #mul_add(rp[1],ap[1],w,c1);
1927 $LD r8,`1*$BNSZ`(r4)
1928 $LD r9,`1*$BNSZ`(r3)
1931 adde r11,r11,r10 #r10 is carry.
1935 $ST r11,`1*$BNSZ`(r3)
1937 #mul_add(rp[2],ap[2],w,c1);
1938 $LD r8,`2*$BNSZ`(r4)
1940 $LD r11,`2*$BNSZ`(r3)
1946 $ST r9,`2*$BNSZ`(r3)
1948 #mul_add(rp[3],ap[3],w,c1);
1949 $LD r8,`3*$BNSZ`(r4)
1951 $LD r9,`3*$BNSZ`(r3)
1957 $ST r11,`3*$BNSZ`(r3)
1958 addi r3,r3,`4*$BNSZ`
1959 addi r4,r4,`4*$BNSZ`
1960 bdnz Lppcasm_maw_mainloop
1962 Lppcasm_maw_leftover:
1964 beq Lppcasm_maw_adios
1967 #mul_add(rp[0],ap[0],w,c1);
1979 bdz Lppcasm_maw_adios
1980 #mul_add(rp[1],ap[1],w,c1);
1991 bdz Lppcasm_maw_adios
1992 #mul_add(rp[2],ap[2],w,c1);
2007 .byte 0,12,0x14,0,0,0,4,0
2009 .size .bn_mul_add_words,.-.bn_mul_add_words
2012 $data =~ s/\`([^\`]*)\`/eval $1/gem;