ppccap.c: engage new multiplication and squaring subroutines.
[oweals/openssl.git] / crypto / bn / asm / ppc-mont.pl
1 #! /usr/bin/env perl
2 # Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9
10 # ====================================================================
11 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16
17 # April 2006
18
19 # "Teaser" Montgomery multiplication module for PowerPC. It's possible
20 # to gain a bit more by modulo-scheduling outer loop, then dedicated
21 # squaring procedure should give further 20% and code can be adapted
22 # for 32-bit application running on 64-bit CPU. As for the latter.
23 # It won't be able to achieve "native" 64-bit performance, because in
24 # 32-bit application context every addc instruction will have to be
25 # expanded as addc, twice right shift by 32 and finally adde, etc.
26 # So far RSA *sign* performance improvement over pre-bn_mul_mont asm
27 # for 64-bit application running on PPC970/G5 is:
28 #
29 # 512-bit       +65%
30 # 1024-bit      +35%
31 # 2048-bit      +18%
32 # 4096-bit      +4%
33
34 # September 2016
35 #
36 # Add multiplication procedure operating on lengths divisible by 4
37 # and squaring procedure operating on lengths divisible by 8. Length
38 # is expressed in number of limbs. RSA private key operations are
39 # ~35-50% faster (more for longer keys) on contemporary high-end POWER
40 # processors in 64-bit builds, [mysteriously enough] more in 32-bit
41 # builds. On low-end 32-bit processors the performance improvement
42 # turned out to be marginal...
43
# Select word size, ABI constants and load/store/multiply mnemonics from
# the build "flavour" (e.g. linux32, linux64, aix64) given on the command
# line, so the rest of the module can emit flavour-neutral code.
$flavour = shift;

if ($flavour =~ /32/) {
	$BITS=	32;
	$BNSZ=	$BITS/8;	# bytes per bignum limb
	$SIZE_T=4;		# sizeof(size_t)
	$RZONE=	224;		# reserved stack area below sp

	$LD=	"lwz";		# load
	$LDU=	"lwzu";		# load and update
	$LDX=	"lwzx";		# load indexed
	$ST=	"stw";		# store
	$STU=	"stwu";		# store and update
	$STX=	"stwx";		# store indexed
	$STUX=	"stwux";	# store indexed and update
	$UMULL=	"mullw";	# unsigned multiply low
	$UMULH=	"mulhwu";	# unsigned multiply high
	$UCMP=	"cmplw";	# unsigned compare
	$SHRI=	"srwi";		# unsigned shift right by immediate
	$SHLI=	"slwi";		# unsigned shift left by immediate
	$PUSH=	$ST;
	$POP=	$LD;
} elsif ($flavour =~ /64/) {
	$BITS=	64;
	$BNSZ=	$BITS/8;
	$SIZE_T=8;
	$RZONE=	288;

	# same as above, but 64-bit mnemonics...
	$LD=	"ld";		# load
	$LDU=	"ldu";		# load and update
	$LDX=	"ldx";		# load indexed
	$ST=	"std";		# store
	$STU=	"stdu";		# store and update
	$STX=	"stdx";		# store indexed
	$STUX=	"stdux";	# store indexed and update
	$UMULL=	"mulld";	# unsigned multiply low
	$UMULH=	"mulhdu";	# unsigned multiply high
	$UCMP=	"cmpld";	# unsigned compare
	$SHRI=	"srdi";		# unsigned shift right by immediate
	$SHLI=	"sldi";		# unsigned shift left by immediate
	$PUSH=	$ST;
	$POP=	$LD;
} else { die "nonsense $flavour"; }

# Stack layout constants: linkage area plus reserved zone, and the offset
# (from sp) at which the temporary vector tp[] lives.
$FRAME=8*$SIZE_T+$RZONE;
$LOCALS=8*$SIZE_T;

# Locate the ppc-xlate.pl translator either next to this script or in the
# shared perlasm directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

# Pipe all generated code through the translator; the next command-line
# argument (if any) names the output file.  Low-precedence "or" is
# required here: with "||" the check binds to the concatenated argument
# string (always true) rather than to open()'s return value, so a failed
# open would go undetected.
open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
98
# Argument registers on entry to bn_mul_mont_int: r3..r8 carry
# (rp, ap, bp, np, n0, num) respectively; r1 is the stack pointer.
99 $sp="r1";
100 $toc="r2";
101 $rp="r3";
102 $ap="r4";
103 $bp="r5";
104 $np="r6";
105 $n0="r7";
106 $num="r8";
107
# Generator for bn_mul_mont_int, the word-by-word Montgomery
# multiplication routine.  r3 doubles as the topmost-overflow flag $ovf
# (and, at the end, the return value), which is why the result pointer
# $rp is immediately copied into r9 below ("mr $rp,r3").
108 {
109 my $ovf=$rp;
110 my $rp="r9";    # $rp is reassigned
111 my $aj="r10";
112 my $nj="r11";
113 my $tj="r12";
114 # non-volatile registers
115 my $i="r20";
116 my $j="r21";
117 my $tp="r22";
118 my $m0="r23";
119 my $m1="r24";
120 my $lo0="r25";
121 my $hi0="r26";
122 my $lo1="r27";
123 my $hi1="r28";
124 my $alo="r29";
125 my $ahi="r30";
126 my $nlo="r31";
127 #
128 my $nhi="r0";
129
# Start emitting assembly; the flavour-neutral mnemonics ($LD, $UMULL,
# etc.) are expanded by ppc-xlate.pl when this script's output is piped
# through it.
130 $code=<<___;
131 .machine "any"
132 .text
133
134 .globl  .bn_mul_mont_int
135 .align  5
136 .bn_mul_mont_int:
137         mr      $rp,r3          ; $rp is reassigned
___
# 32-bit builds only: give up on keys of 32 limbs or more (cmpwi/bgelr),
# where, per the comment below, this routine is no faster than the
# generic path.
139 $code.=<<___ if ($BNSZ==4);
140         cmpwi   $num,32         ; longer key performance is not better
141         bgelr
___
# Main body: stack-frame allocation and save of non-volatile r20-r31,
# first multiplication pass (L1st), outer loop over b[i] with interleaved
# reduction (Louter/Linner), then conditional subtract (Lsub) and final
# copy/zap (Lcopy).  Note: the interleaved carry chains
# (addc/adde/addze and the addic trick at "move upmost overflow to
# XER[CA]") depend on exact instruction order.
143 $code.=<<___;
144         slwi    $num,$num,`log($BNSZ)/log(2)`
145         li      $tj,-4096
146         addi    $ovf,$num,$FRAME
147         subf    $ovf,$ovf,$sp   ; $sp-$ovf
148         and     $ovf,$ovf,$tj   ; minimize TLB usage
149         subf    $ovf,$sp,$ovf   ; $ovf-$sp
150         mr      $tj,$sp
151         srwi    $num,$num,`log($BNSZ)/log(2)`
152         $STUX   $sp,$sp,$ovf
153
154         $PUSH   r20,`-12*$SIZE_T`($tj)
155         $PUSH   r21,`-11*$SIZE_T`($tj)
156         $PUSH   r22,`-10*$SIZE_T`($tj)
157         $PUSH   r23,`-9*$SIZE_T`($tj)
158         $PUSH   r24,`-8*$SIZE_T`($tj)
159         $PUSH   r25,`-7*$SIZE_T`($tj)
160         $PUSH   r26,`-6*$SIZE_T`($tj)
161         $PUSH   r27,`-5*$SIZE_T`($tj)
162         $PUSH   r28,`-4*$SIZE_T`($tj)
163         $PUSH   r29,`-3*$SIZE_T`($tj)
164         $PUSH   r30,`-2*$SIZE_T`($tj)
165         $PUSH   r31,`-1*$SIZE_T`($tj)
166
167         $LD     $n0,0($n0)      ; pull n0[0] value
168         addi    $num,$num,-2    ; adjust $num for counter register
169 \f
170         $LD     $m0,0($bp)      ; m0=bp[0]
171         $LD     $aj,0($ap)      ; ap[0]
172         addi    $tp,$sp,$LOCALS
173         $UMULL  $lo0,$aj,$m0    ; ap[0]*bp[0]
174         $UMULH  $hi0,$aj,$m0
175
176         $LD     $aj,$BNSZ($ap)  ; ap[1]
177         $LD     $nj,0($np)      ; np[0]
178
179         $UMULL  $m1,$lo0,$n0    ; "tp[0]"*n0
180
181         $UMULL  $alo,$aj,$m0    ; ap[1]*bp[0]
182         $UMULH  $ahi,$aj,$m0
183
184         $UMULL  $lo1,$nj,$m1    ; np[0]*m1
185         $UMULH  $hi1,$nj,$m1
186         $LD     $nj,$BNSZ($np)  ; np[1]
187         addc    $lo1,$lo1,$lo0
188         addze   $hi1,$hi1
189
190         $UMULL  $nlo,$nj,$m1    ; np[1]*m1
191         $UMULH  $nhi,$nj,$m1
192
193         mtctr   $num
194         li      $j,`2*$BNSZ`
195 .align  4
196 L1st:
197         $LDX    $aj,$ap,$j      ; ap[j]
198         addc    $lo0,$alo,$hi0
199         $LDX    $nj,$np,$j      ; np[j]
200         addze   $hi0,$ahi
201         $UMULL  $alo,$aj,$m0    ; ap[j]*bp[0]
202         addc    $lo1,$nlo,$hi1
203         $UMULH  $ahi,$aj,$m0
204         addze   $hi1,$nhi
205         $UMULL  $nlo,$nj,$m1    ; np[j]*m1
206         addc    $lo1,$lo1,$lo0  ; np[j]*m1+ap[j]*bp[0]
207         $UMULH  $nhi,$nj,$m1
208         addze   $hi1,$hi1
209         $ST     $lo1,0($tp)     ; tp[j-1]
210
211         addi    $j,$j,$BNSZ     ; j++
212         addi    $tp,$tp,$BNSZ   ; tp++
213         bdnz    L1st
214 ;L1st
215         addc    $lo0,$alo,$hi0
216         addze   $hi0,$ahi
217
218         addc    $lo1,$nlo,$hi1
219         addze   $hi1,$nhi
220         addc    $lo1,$lo1,$lo0  ; np[j]*m1+ap[j]*bp[0]
221         addze   $hi1,$hi1
222         $ST     $lo1,0($tp)     ; tp[j-1]
223
224         li      $ovf,0
225         addc    $hi1,$hi1,$hi0
226         addze   $ovf,$ovf       ; upmost overflow bit
227         $ST     $hi1,$BNSZ($tp)
228 \f
229         li      $i,$BNSZ
230 .align  4
231 Louter:
232         $LDX    $m0,$bp,$i      ; m0=bp[i]
233         $LD     $aj,0($ap)      ; ap[0]
234         addi    $tp,$sp,$LOCALS
235         $LD     $tj,$LOCALS($sp); tp[0]
236         $UMULL  $lo0,$aj,$m0    ; ap[0]*bp[i]
237         $UMULH  $hi0,$aj,$m0
238         $LD     $aj,$BNSZ($ap)  ; ap[1]
239         $LD     $nj,0($np)      ; np[0]
240         addc    $lo0,$lo0,$tj   ; ap[0]*bp[i]+tp[0]
241         $UMULL  $alo,$aj,$m0    ; ap[j]*bp[i]
242         addze   $hi0,$hi0
243         $UMULL  $m1,$lo0,$n0    ; tp[0]*n0
244         $UMULH  $ahi,$aj,$m0
245         $UMULL  $lo1,$nj,$m1    ; np[0]*m1
246         $UMULH  $hi1,$nj,$m1
247         $LD     $nj,$BNSZ($np)  ; np[1]
248         addc    $lo1,$lo1,$lo0
249         $UMULL  $nlo,$nj,$m1    ; np[1]*m1
250         addze   $hi1,$hi1
251         $UMULH  $nhi,$nj,$m1
252 \f
253         mtctr   $num
254         li      $j,`2*$BNSZ`
255 .align  4
256 Linner:
257         $LDX    $aj,$ap,$j      ; ap[j]
258         addc    $lo0,$alo,$hi0
259         $LD     $tj,$BNSZ($tp)  ; tp[j]
260         addze   $hi0,$ahi
261         $LDX    $nj,$np,$j      ; np[j]
262         addc    $lo1,$nlo,$hi1
263         $UMULL  $alo,$aj,$m0    ; ap[j]*bp[i]
264         addze   $hi1,$nhi
265         $UMULH  $ahi,$aj,$m0
266         addc    $lo0,$lo0,$tj   ; ap[j]*bp[i]+tp[j]
267         $UMULL  $nlo,$nj,$m1    ; np[j]*m1
268         addze   $hi0,$hi0
269         $UMULH  $nhi,$nj,$m1
270         addc    $lo1,$lo1,$lo0  ; np[j]*m1+ap[j]*bp[i]+tp[j]
271         addi    $j,$j,$BNSZ     ; j++
272         addze   $hi1,$hi1
273         $ST     $lo1,0($tp)     ; tp[j-1]
274         addi    $tp,$tp,$BNSZ   ; tp++
275         bdnz    Linner
276 ;Linner
277         $LD     $tj,$BNSZ($tp)  ; tp[j]
278         addc    $lo0,$alo,$hi0
279         addze   $hi0,$ahi
280         addc    $lo0,$lo0,$tj   ; ap[j]*bp[i]+tp[j]
281         addze   $hi0,$hi0
282
283         addc    $lo1,$nlo,$hi1
284         addze   $hi1,$nhi
285         addc    $lo1,$lo1,$lo0  ; np[j]*m1+ap[j]*bp[i]+tp[j]
286         addze   $hi1,$hi1
287         $ST     $lo1,0($tp)     ; tp[j-1]
288
289         addic   $ovf,$ovf,-1    ; move upmost overflow to XER[CA]
290         li      $ovf,0
291         adde    $hi1,$hi1,$hi0
292         addze   $ovf,$ovf
293         $ST     $hi1,$BNSZ($tp)
294 ;
295         slwi    $tj,$num,`log($BNSZ)/log(2)`
296         $UCMP   $i,$tj
297         addi    $i,$i,$BNSZ
298         ble     Louter
299 \f
300         addi    $num,$num,2     ; restore $num
301         subfc   $j,$j,$j        ; j=0 and "clear" XER[CA]
302         addi    $tp,$sp,$LOCALS
303         mtctr   $num
304
305 .align  4
306 Lsub:   $LDX    $tj,$tp,$j
307         $LDX    $nj,$np,$j
308         subfe   $aj,$nj,$tj     ; tp[j]-np[j]
309         $STX    $aj,$rp,$j
310         addi    $j,$j,$BNSZ
311         bdnz    Lsub
312
313         li      $j,0
314         mtctr   $num
315         subfe   $ovf,$j,$ovf    ; handle upmost overflow bit
316         and     $ap,$tp,$ovf
317         andc    $np,$rp,$ovf
318         or      $ap,$ap,$np     ; ap=borrow?tp:rp
319
320 .align  4
321 Lcopy:                          ; copy or in-place refresh
322         $LDX    $tj,$ap,$j
323         $STX    $tj,$rp,$j
324         $STX    $j,$tp,$j       ; zap at once
325         addi    $j,$j,$BNSZ
326         bdnz    Lcopy
327
328         $POP    $tj,0($sp)
329         li      r3,1
330         $POP    r20,`-12*$SIZE_T`($tj)
331         $POP    r21,`-11*$SIZE_T`($tj)
332         $POP    r22,`-10*$SIZE_T`($tj)
333         $POP    r23,`-9*$SIZE_T`($tj)
334         $POP    r24,`-8*$SIZE_T`($tj)
335         $POP    r25,`-7*$SIZE_T`($tj)
336         $POP    r26,`-6*$SIZE_T`($tj)
337         $POP    r27,`-5*$SIZE_T`($tj)
338         $POP    r28,`-4*$SIZE_T`($tj)
339         $POP    r29,`-3*$SIZE_T`($tj)
340         $POP    r30,`-2*$SIZE_T`($tj)
341         $POP    r31,`-1*$SIZE_T`($tj)
342         mr      $sp,$tj
343         blr
344         .long   0
345         .byte   0,12,4,0,0x80,12,6,0
346         .long   0
347 .size   .bn_mul_mont_int,.-.bn_mul_mont_int
___
349 }
350 if (1) {
351 my ($a0,$a1,$a2,$a3,
352     $t0,$t1,$t2,$t3,
353     $m0,$m1,$m2,$m3,
354     $acc0,$acc1,$acc2,$acc3,$acc4,
355     $bi,$mi,$tp,$ap_end,$cnt) = map("r$_",(9..12,14..31));
356 my  ($carry,$zero) = ($rp,"r0");
357
358 # sp----------->+-------------------------------+
359 #               | saved sp                      |
360 #               +-------------------------------+
361 #               .                               .
362 # +8*size_t     +-------------------------------+
363 #               | 4 "n0*t0"                     |
364 #               .                               .
365 #               .                               .
366 # +12*size_t    +-------------------------------+
367 #               | size_t tmp[num]               |
368 #               .                               .
369 #               .                               .
370 #               .                               .
371 #               +-------------------------------+
372 #               | topmost carry                 |
373 #               .                               .
374 # -18*size_t    +-------------------------------+
375 #               | 18 saved gpr, r14-r31         |
376 #               .                               .
377 #               .                               .
378 #               +-------------------------------+
379 $code.=<<___;
380 .globl  .bn_mul4x_mont_int
381 .align  5
382 .bn_mul4x_mont_int:
383         andi.   r0,$num,7
384         bne     .Lmul4x_do
385         $UCMP   $ap,$bp
386         bne     .Lmul4x_do
387         b       .Lsqr8x_do
388 .Lmul4x_do:
389         slwi    $num,$num,`log($SIZE_T)/log(2)`
390         mr      $a0,$sp
391         li      $a1,-32*$SIZE_T
392         sub     $a1,$a1,$num
393         $STUX   $sp,$sp,$a1             # alloca
394
395         $PUSH   r14,-$SIZE_T*18($a0)
396         $PUSH   r15,-$SIZE_T*17($a0)
397         $PUSH   r16,-$SIZE_T*16($a0)
398         $PUSH   r17,-$SIZE_T*15($a0)
399         $PUSH   r18,-$SIZE_T*14($a0)
400         $PUSH   r19,-$SIZE_T*13($a0)
401         $PUSH   r20,-$SIZE_T*12($a0)
402         $PUSH   r21,-$SIZE_T*11($a0)
403         $PUSH   r22,-$SIZE_T*10($a0)
404         $PUSH   r23,-$SIZE_T*9($a0)
405         $PUSH   r24,-$SIZE_T*8($a0)
406         $PUSH   r25,-$SIZE_T*7($a0)
407         $PUSH   r26,-$SIZE_T*6($a0)
408         $PUSH   r27,-$SIZE_T*5($a0)
409         $PUSH   r28,-$SIZE_T*4($a0)
410         $PUSH   r29,-$SIZE_T*3($a0)
411         $PUSH   r30,-$SIZE_T*2($a0)
412         $PUSH   r31,-$SIZE_T*1($a0)
413
414         subi    $ap,$ap,$SIZE_T         # bias by -1
415         subi    $np,$np,$SIZE_T         # bias by -1
416         subi    $rp,$rp,$SIZE_T         # bias by -1
417         $LD     $n0,0($n0)              # *n0
418
419         add     $t0,$bp,$num
420         add     $ap_end,$ap,$num
421         subi    $t0,$t0,$SIZE_T*4       # &b[num-4]
422
423         $LD     $bi,$SIZE_T*0($bp)      # b[0]
424         li      $acc0,0
425         $LD     $a0,$SIZE_T*1($ap)      # a[0..3]
426         li      $acc1,0
427         $LD     $a1,$SIZE_T*2($ap)
428         li      $acc2,0
429         $LD     $a2,$SIZE_T*3($ap)
430         li      $acc3,0
431         $LDU    $a3,$SIZE_T*4($ap)
432         $LD     $m0,$SIZE_T*1($np)      # n[0..3]
433         $LD     $m1,$SIZE_T*2($np)
434         $LD     $m2,$SIZE_T*3($np)
435         $LDU    $m3,$SIZE_T*4($np)
436
437         $PUSH   $rp,$SIZE_T*6($sp)      # offload rp and &b[num-4]
438         $PUSH   $t0,$SIZE_T*7($sp)
439         li      $carry,0
440         addic   $tp,$sp,$SIZE_T*7       # &t[-1], clear carry bit
441         li      $cnt,0
442         li      $zero,0
443         b       .Loop_mul4x_1st_reduction
444
445 .align  5
446 .Loop_mul4x_1st_reduction:
447         $UMULL  $t0,$a0,$bi             # lo(a[0..3]*b[0])
448         addze   $carry,$carry           # modulo-scheduled
449         $UMULL  $t1,$a1,$bi
450         addi    $cnt,$cnt,$SIZE_T
451         $UMULL  $t2,$a2,$bi
452         andi.   $cnt,$cnt,$SIZE_T*4-1
453         $UMULL  $t3,$a3,$bi
454         addc    $acc0,$acc0,$t0
455         $UMULH  $t0,$a0,$bi             # hi(a[0..3]*b[0])
456         adde    $acc1,$acc1,$t1
457         $UMULH  $t1,$a1,$bi
458         adde    $acc2,$acc2,$t2
459         $UMULL  $mi,$acc0,$n0           # t[0]*n0
460         adde    $acc3,$acc3,$t3
461         $UMULH  $t2,$a2,$bi
462         addze   $acc4,$zero
463         $UMULH  $t3,$a3,$bi
464         $LDX    $bi,$bp,$cnt            # next b[i] (or b[0])
465         addc    $acc1,$acc1,$t0
466         # (*)   mul     $t0,$m0,$mi     # lo(n[0..3]*t[0]*n0)
467         $STU    $mi,$SIZE_T($tp)        # put aside t[0]*n0 for tail processing
468         adde    $acc2,$acc2,$t1
469         $UMULL  $t1,$m1,$mi
470         adde    $acc3,$acc3,$t2
471         $UMULL  $t2,$m2,$mi
472         adde    $acc4,$acc4,$t3         # can't overflow
473         $UMULL  $t3,$m3,$mi
474         # (*)   addc    $acc0,$acc0,$t0
475         # (*)   As for removal of first multiplication and addition
476         #       instructions. The outcome of first addition is
477         #       guaranteed to be zero, which leaves two computationally
478         #       significant outcomes: it either carries or not. Then
479         #       question is when does it carry? Is there alternative
480         #       way to deduce it? If you follow operations, you can
481         #       observe that condition for carry is quite simple:
482         #       $acc0 being non-zero. So that carry can be calculated
483         #       by adding -1 to $acc0. That's what next instruction does.
484         addic   $acc0,$acc0,-1          # (*), discarded
485         $UMULH  $t0,$m0,$mi             # hi(n[0..3]*t[0]*n0)
486         adde    $acc0,$acc1,$t1
487         $UMULH  $t1,$m1,$mi
488         adde    $acc1,$acc2,$t2
489         $UMULH  $t2,$m2,$mi
490         adde    $acc2,$acc3,$t3
491         $UMULH  $t3,$m3,$mi
492         adde    $acc3,$acc4,$carry
493         addze   $carry,$zero
494         addc    $acc0,$acc0,$t0
495         adde    $acc1,$acc1,$t1
496         adde    $acc2,$acc2,$t2
497         adde    $acc3,$acc3,$t3
498         #addze  $carry,$carry
499         bne     .Loop_mul4x_1st_reduction
500
501         $UCMP   $ap_end,$ap
502         beq     .Lmul4x4_post_condition
503
504         $LD     $a0,$SIZE_T*1($ap)      # a[4..7]
505         $LD     $a1,$SIZE_T*2($ap)
506         $LD     $a2,$SIZE_T*3($ap)
507         $LDU    $a3,$SIZE_T*4($ap)
508         $LD     $mi,$SIZE_T*8($sp)      # a[0]*n0
509         $LD     $m0,$SIZE_T*1($np)      # n[4..7]
510         $LD     $m1,$SIZE_T*2($np)
511         $LD     $m2,$SIZE_T*3($np)
512         $LDU    $m3,$SIZE_T*4($np)
513         b       .Loop_mul4x_1st_tail
514
515 .align  5
516 .Loop_mul4x_1st_tail:
517         $UMULL  $t0,$a0,$bi             # lo(a[4..7]*b[i])
518         addze   $carry,$carry           # modulo-scheduled
519         $UMULL  $t1,$a1,$bi
520         addi    $cnt,$cnt,$SIZE_T
521         $UMULL  $t2,$a2,$bi
522         andi.   $cnt,$cnt,$SIZE_T*4-1
523         $UMULL  $t3,$a3,$bi
524         addc    $acc0,$acc0,$t0
525         $UMULH  $t0,$a0,$bi             # hi(a[4..7]*b[i])
526         adde    $acc1,$acc1,$t1
527         $UMULH  $t1,$a1,$bi
528         adde    $acc2,$acc2,$t2
529         $UMULH  $t2,$a2,$bi
530         adde    $acc3,$acc3,$t3
531         $UMULH  $t3,$a3,$bi
532         addze   $acc4,$zero
533         $LDX    $bi,$bp,$cnt            # next b[i] (or b[0])
534         addc    $acc1,$acc1,$t0
535         $UMULL  $t0,$m0,$mi             # lo(n[4..7]*a[0]*n0)
536         adde    $acc2,$acc2,$t1
537         $UMULL  $t1,$m1,$mi
538         adde    $acc3,$acc3,$t2
539         $UMULL  $t2,$m2,$mi
540         adde    $acc4,$acc4,$t3         # can't overflow
541         $UMULL  $t3,$m3,$mi
542         addc    $acc0,$acc0,$t0
543         $UMULH  $t0,$m0,$mi             # hi(n[4..7]*a[0]*n0)
544         adde    $acc1,$acc1,$t1
545         $UMULH  $t1,$m1,$mi
546         adde    $acc2,$acc2,$t2
547         $UMULH  $t2,$m2,$mi
548         adde    $acc3,$acc3,$t3
549         adde    $acc4,$acc4,$carry
550         $UMULH  $t3,$m3,$mi
551         addze   $carry,$zero
552         addi    $mi,$sp,$SIZE_T*8
553         $LDX    $mi,$mi,$cnt            # next t[0]*n0
554         $STU    $acc0,$SIZE_T($tp)      # word of result
555         addc    $acc0,$acc1,$t0
556         adde    $acc1,$acc2,$t1
557         adde    $acc2,$acc3,$t2
558         adde    $acc3,$acc4,$t3
559         #addze  $carry,$carry
560         bne     .Loop_mul4x_1st_tail
561
562         sub     $t1,$ap_end,$num        # rewinded $ap
563         $UCMP   $ap_end,$ap             # done yet?
564         beq     .Lmul4x_proceed
565
566         $LD     $a0,$SIZE_T*1($ap)
567         $LD     $a1,$SIZE_T*2($ap)
568         $LD     $a2,$SIZE_T*3($ap)
569         $LDU    $a3,$SIZE_T*4($ap)
570         $LD     $m0,$SIZE_T*1($np)
571         $LD     $m1,$SIZE_T*2($np)
572         $LD     $m2,$SIZE_T*3($np)
573         $LDU    $m3,$SIZE_T*4($np)
574         b       .Loop_mul4x_1st_tail
575
576 .align  5
577 .Lmul4x_proceed:
578         $LDU    $bi,$SIZE_T*4($bp)      # *++b
579         addze   $carry,$carry           # topmost carry
580         $LD     $a0,$SIZE_T*1($t1)
581         $LD     $a1,$SIZE_T*2($t1)
582         $LD     $a2,$SIZE_T*3($t1)
583         $LD     $a3,$SIZE_T*4($t1)
584         addi    $ap,$t1,$SIZE_T*4
585         sub     $np,$np,$num            # rewind np
586
587         $ST     $acc0,$SIZE_T*1($tp)    # result
588         $ST     $acc1,$SIZE_T*2($tp)
589         $ST     $acc2,$SIZE_T*3($tp)
590         $ST     $acc3,$SIZE_T*4($tp)
591         $ST     $carry,$SIZE_T*5($tp)   # save topmost carry
592         $LD     $acc0,$SIZE_T*12($sp)   # t[0..3]
593         $LD     $acc1,$SIZE_T*13($sp)
594         $LD     $acc2,$SIZE_T*14($sp)
595         $LD     $acc3,$SIZE_T*15($sp)
596
597         $LD     $m0,$SIZE_T*1($np)      # n[0..3]
598         $LD     $m1,$SIZE_T*2($np)
599         $LD     $m2,$SIZE_T*3($np)
600         $LDU    $m3,$SIZE_T*4($np)
601         addic   $tp,$sp,$SIZE_T*7       # &t[-1], clear carry bit
602         li      $carry,0
603         b       .Loop_mul4x_reduction
604
605 .align  5
606 .Loop_mul4x_reduction:
607         $UMULL  $t0,$a0,$bi             # lo(a[0..3]*b[4])
608         addze   $carry,$carry           # modulo-scheduled
609         $UMULL  $t1,$a1,$bi
610         addi    $cnt,$cnt,$SIZE_T
611         $UMULL  $t2,$a2,$bi
612         andi.   $cnt,$cnt,$SIZE_T*4-1
613         $UMULL  $t3,$a3,$bi
614         addc    $acc0,$acc0,$t0
615         $UMULH  $t0,$a0,$bi             # hi(a[0..3]*b[4])
616         adde    $acc1,$acc1,$t1
617         $UMULH  $t1,$a1,$bi
618         adde    $acc2,$acc2,$t2
619         $UMULL  $mi,$acc0,$n0           # t[0]*n0
620         adde    $acc3,$acc3,$t3
621         $UMULH  $t2,$a2,$bi
622         addze   $acc4,$zero
623         $UMULH  $t3,$a3,$bi
624         $LDX    $bi,$bp,$cnt            # next b[i]
625         addc    $acc1,$acc1,$t0
626         # (*)   mul     $t0,$m0,$mi
627         $STU    $mi,$SIZE_T($tp)        # put aside t[0]*n0 for tail processing
628         adde    $acc2,$acc2,$t1
629         $UMULL  $t1,$m1,$mi             # lo(n[0..3]*t[0]*n0
630         adde    $acc3,$acc3,$t2
631         $UMULL  $t2,$m2,$mi
632         adde    $acc4,$acc4,$t3         # can't overflow
633         $UMULL  $t3,$m3,$mi
634         # (*)   addc    $acc0,$acc0,$t0
635         addic   $acc0,$acc0,-1          # (*), discarded
636         $UMULH  $t0,$m0,$mi             # hi(n[0..3]*t[0]*n0
637         adde    $acc0,$acc1,$t1
638         $UMULH  $t1,$m1,$mi
639         adde    $acc1,$acc2,$t2
640         $UMULH  $t2,$m2,$mi
641         adde    $acc2,$acc3,$t3
642         $UMULH  $t3,$m3,$mi
643         adde    $acc3,$acc4,$carry
644         addze   $carry,$zero
645         addc    $acc0,$acc0,$t0
646         adde    $acc1,$acc1,$t1
647         adde    $acc2,$acc2,$t2
648         adde    $acc3,$acc3,$t3
649         #addze  $carry,$carry
650         bne     .Loop_mul4x_reduction
651
652         $LD     $t0,$SIZE_T*5($tp)      # t[4..7]
653         addze   $carry,$carry
654         $LD     $t1,$SIZE_T*6($tp)
655         $LD     $t2,$SIZE_T*7($tp)
656         $LD     $t3,$SIZE_T*8($tp)
657         $LD     $a0,$SIZE_T*1($ap)      # a[4..7]
658         $LD     $a1,$SIZE_T*2($ap)
659         $LD     $a2,$SIZE_T*3($ap)
660         $LDU    $a3,$SIZE_T*4($ap)
661         addc    $acc0,$acc0,$t0
662         adde    $acc1,$acc1,$t1
663         adde    $acc2,$acc2,$t2
664         adde    $acc3,$acc3,$t3
665         #addze  $carry,$carry
666
667         $LD     $mi,$SIZE_T*8($sp)      # t[0]*n0
668         $LD     $m0,$SIZE_T*1($np)      # n[4..7]
669         $LD     $m1,$SIZE_T*2($np)
670         $LD     $m2,$SIZE_T*3($np)
671         $LDU    $m3,$SIZE_T*4($np)
672         b       .Loop_mul4x_tail
673
674 .align  5
675 .Loop_mul4x_tail:
676         $UMULL  $t0,$a0,$bi             # lo(a[4..7]*b[4])
677         addze   $carry,$carry           # modulo-scheduled
678         $UMULL  $t1,$a1,$bi
679         addi    $cnt,$cnt,$SIZE_T
680         $UMULL  $t2,$a2,$bi
681         andi.   $cnt,$cnt,$SIZE_T*4-1
682         $UMULL  $t3,$a3,$bi
683         addc    $acc0,$acc0,$t0
684         $UMULH  $t0,$a0,$bi             # hi(a[4..7]*b[4])
685         adde    $acc1,$acc1,$t1
686         $UMULH  $t1,$a1,$bi
687         adde    $acc2,$acc2,$t2
688         $UMULH  $t2,$a2,$bi
689         adde    $acc3,$acc3,$t3
690         $UMULH  $t3,$a3,$bi
691         addze   $acc4,$zero
692         $LDX    $bi,$bp,$cnt            # next b[i]
693         addc    $acc1,$acc1,$t0
694         $UMULL  $t0,$m0,$mi             # lo(n[4..7]*t[0]*n0)
695         adde    $acc2,$acc2,$t1
696         $UMULL  $t1,$m1,$mi
697         adde    $acc3,$acc3,$t2
698         $UMULL  $t2,$m2,$mi
699         adde    $acc4,$acc4,$t3         # can't overflow
700         $UMULL  $t3,$m3,$mi
701         addc    $acc0,$acc0,$t0
702         $UMULH  $t0,$m0,$mi             # hi(n[4..7]*t[0]*n0)
703         adde    $acc1,$acc1,$t1
704         $UMULH  $t1,$m1,$mi
705         adde    $acc2,$acc2,$t2
706         $UMULH  $t2,$m2,$mi
707         adde    $acc3,$acc3,$t3
708         $UMULH  $t3,$m3,$mi
709         adde    $acc4,$acc4,$carry
710         addi    $mi,$sp,$SIZE_T*8
711         $LDX    $mi,$mi,$cnt            # next a[0]*n0
712         addze   $carry,$zero
713         $STU    $acc0,$SIZE_T($tp)      # word of result
714         addc    $acc0,$acc1,$t0
715         adde    $acc1,$acc2,$t1
716         adde    $acc2,$acc3,$t2
717         adde    $acc3,$acc4,$t3
718         #addze  $carry,$carry
719         bne     .Loop_mul4x_tail
720
721         $LD     $t0,$SIZE_T*5($tp)      # next t[i] or topmost carry
722         sub     $t1,$np,$num            # rewinded np?
723         addze   $carry,$carry
724         $UCMP   $ap_end,$ap             # done yet?
725         beq     .Loop_mul4x_break
726
727         $LD     $t1,$SIZE_T*6($tp)
728         $LD     $t2,$SIZE_T*7($tp)
729         $LD     $t3,$SIZE_T*8($tp)
730         $LD     $a0,$SIZE_T*1($ap)
731         $LD     $a1,$SIZE_T*2($ap)
732         $LD     $a2,$SIZE_T*3($ap)
733         $LDU    $a3,$SIZE_T*4($ap)
734         addc    $acc0,$acc0,$t0
735         adde    $acc1,$acc1,$t1
736         adde    $acc2,$acc2,$t2
737         adde    $acc3,$acc3,$t3
738         #addze  $carry,$carry
739
740         $LD     $m0,$SIZE_T*1($np)      # n[4..7]
741         $LD     $m1,$SIZE_T*2($np)
742         $LD     $m2,$SIZE_T*3($np)
743         $LDU    $m3,$SIZE_T*4($np)
744         b       .Loop_mul4x_tail
745
746 .align  5
747 .Loop_mul4x_break:
748         $POP    $t2,$SIZE_T*6($sp)      # pull rp and &b[num-4]
749         $POP    $t3,$SIZE_T*7($sp)
750         addc    $a0,$acc0,$t0           # accumulate topmost carry
751         $LD     $acc0,$SIZE_T*12($sp)   # t[0..3]
752         addze   $a1,$acc1
753         $LD     $acc1,$SIZE_T*13($sp)
754         addze   $a2,$acc2
755         $LD     $acc2,$SIZE_T*14($sp)
756         addze   $a3,$acc3
757         $LD     $acc3,$SIZE_T*15($sp)
758         addze   $carry,$carry           # topmost carry
759         $ST     $a0,$SIZE_T*1($tp)      # result
760         sub     $ap,$ap_end,$num        # rewind ap
761         $ST     $a1,$SIZE_T*2($tp)
762         $ST     $a2,$SIZE_T*3($tp)
763         $ST     $a3,$SIZE_T*4($tp)
764         $ST     $carry,$SIZE_T*5($tp)   # store topmost carry
765
766         $LD     $m0,$SIZE_T*1($t1)      # n[0..3]
767         $LD     $m1,$SIZE_T*2($t1)
768         $LD     $m2,$SIZE_T*3($t1)
769         $LD     $m3,$SIZE_T*4($t1)
770         addi    $np,$t1,$SIZE_T*4
771         $UCMP   $bp,$t3                 # done yet?
772         beq     .Lmul4x_post
773
774         $LDU    $bi,$SIZE_T*4($bp)
775         $LD     $a0,$SIZE_T*1($ap)      # a[0..3]
776         $LD     $a1,$SIZE_T*2($ap)
777         $LD     $a2,$SIZE_T*3($ap)
778         $LDU    $a3,$SIZE_T*4($ap)
779         li      $carry,0
780         addic   $tp,$sp,$SIZE_T*7       # &t[-1], clear carry bit
781         b       .Loop_mul4x_reduction
782
783 .align  5
784 .Lmul4x_post:
785         # Final step. We see if result is larger than modulus, and
786         # if it is, subtract the modulus. But comparison implies
787         # subtraction. So we subtract modulus, see if it borrowed,
788         # and conditionally copy original value.
789         srwi    $cnt,$num,`log($SIZE_T)/log(2)+2`
790         mr      $bp,$t2                 # &rp[-1]
791         subi    $cnt,$cnt,1
792         mr      $ap_end,$t2             # &rp[-1] copy
793         subfc   $t0,$m0,$acc0
794         addi    $tp,$sp,$SIZE_T*15
795         subfe   $t1,$m1,$acc1
796
797         mtctr   $cnt
798 .Lmul4x_sub:
799         $LD     $m0,$SIZE_T*1($np)
800         $LD     $acc0,$SIZE_T*1($tp)
801         subfe   $t2,$m2,$acc2
802         $LD     $m1,$SIZE_T*2($np)
803         $LD     $acc1,$SIZE_T*2($tp)
804         subfe   $t3,$m3,$acc3
805         $LD     $m2,$SIZE_T*3($np)
806         $LD     $acc2,$SIZE_T*3($tp)
807         $LDU    $m3,$SIZE_T*4($np)
808         $LDU    $acc3,$SIZE_T*4($tp)
809         $ST     $t0,$SIZE_T*1($bp)
810         $ST     $t1,$SIZE_T*2($bp)
811         subfe   $t0,$m0,$acc0
812         $ST     $t2,$SIZE_T*3($bp)
813         $STU    $t3,$SIZE_T*4($bp)
814         subfe   $t1,$m1,$acc1
815         bdnz    .Lmul4x_sub
816
817          $LD    $a0,$SIZE_T*1($ap_end)
818         $ST     $t0,$SIZE_T*1($bp)
819          $LD    $t0,$SIZE_T*12($sp)
820         subfe   $t2,$m2,$acc2
821          $LD    $a1,$SIZE_T*2($ap_end)
822         $ST     $t1,$SIZE_T*2($bp)
823          $LD    $t1,$SIZE_T*13($sp)
824         subfe   $t3,$m3,$acc3
825         subfe   $carry,$zero,$carry     # did it borrow?
826          addi   $tp,$sp,$SIZE_T*12
827          $LD    $a2,$SIZE_T*3($ap_end)
828         $ST     $t2,$SIZE_T*3($bp)
829          $LD    $t2,$SIZE_T*14($sp)
830          $LD    $a3,$SIZE_T*4($ap_end)
831         $ST     $t3,$SIZE_T*4($bp)
832          $LD    $t3,$SIZE_T*15($sp)
833
        mtctr   $cnt
.Lmul4x_cond_copy:
        # Constant-time select between the two candidate results using the
        # borrow-derived mask in carry (all-ones or zero after the final
        # subfe above): one candidate masked with and, the other with andc,
        # then merged with or.  The consumed tp slots are wiped with zeros
        # as we go, so no intermediate data is left on the stack.
        and     $t0,$t0,$carry
        andc    $a0,$a0,$carry
        $ST     $zero,$SIZE_T*0($tp)    # wipe stack clean
        and     $t1,$t1,$carry
        andc    $a1,$a1,$carry
        $ST     $zero,$SIZE_T*1($tp)
        and     $t2,$t2,$carry
        andc    $a2,$a2,$carry
        $ST     $zero,$SIZE_T*2($tp)
        and     $t3,$t3,$carry
        andc    $a3,$a3,$carry
        $ST     $zero,$SIZE_T*3($tp)
        or      $acc0,$t0,$a0
        $LD     $a0,$SIZE_T*5($ap_end)
        $LD     $t0,$SIZE_T*4($tp)
        or      $acc1,$t1,$a1
        $LD     $a1,$SIZE_T*6($ap_end)
        $LD     $t1,$SIZE_T*5($tp)
        or      $acc2,$t2,$a2
        $LD     $a2,$SIZE_T*7($ap_end)
        $LD     $t2,$SIZE_T*6($tp)
        or      $acc3,$t3,$a3
        $LD     $a3,$SIZE_T*8($ap_end)
        $LD     $t3,$SIZE_T*7($tp)
        addi    $tp,$tp,$SIZE_T*4
        $ST     $acc0,$SIZE_T*1($ap_end)
        $ST     $acc1,$SIZE_T*2($ap_end)
        $ST     $acc2,$SIZE_T*3($ap_end)
        $STU    $acc3,$SIZE_T*4($ap_end)
        bdnz    .Lmul4x_cond_copy

        # Final unrolled iteration of the conditional copy, with the saved
        # stack pointer recovered up front for the epilogue.
        $POP    $bp,0($sp)              # pull saved sp
        and     $t0,$t0,$carry
        andc    $a0,$a0,$carry
        $ST     $zero,$SIZE_T*0($tp)
        and     $t1,$t1,$carry
        andc    $a1,$a1,$carry
        $ST     $zero,$SIZE_T*1($tp)
        and     $t2,$t2,$carry
        andc    $a2,$a2,$carry
        $ST     $zero,$SIZE_T*2($tp)
        and     $t3,$t3,$carry
        andc    $a3,$a3,$carry
        $ST     $zero,$SIZE_T*3($tp)
        or      $acc0,$t0,$a0
        or      $acc1,$t1,$a1
        $ST     $zero,$SIZE_T*4($tp)
        or      $acc2,$t2,$a2
        or      $acc3,$t3,$a3
        $ST     $acc0,$SIZE_T*1($ap_end)
        $ST     $acc1,$SIZE_T*2($ap_end)
        $ST     $acc2,$SIZE_T*3($ap_end)
        $ST     $acc3,$SIZE_T*4($ap_end)

        b       .Lmul4x_done
891
.align  4
.Lmul4x4_post_condition:
        # Post-condition for the minimal num==4 case: the whole result fits
        # in registers, so the conditional subtraction is done in-line.
        $POP    $ap,$SIZE_T*6($sp)      # pull &rp[-1]
        $POP    $bp,0($sp)              # pull saved sp
        addze   $carry,$carry           # modulo-scheduled
        # $acc0-3,$carry hold result, $m0-3 hold modulus
        subfc   $a0,$m0,$acc0
        subfe   $a1,$m1,$acc1
        subfe   $a2,$m2,$acc2
        subfe   $a3,$m3,$acc3
        subfe   $carry,$zero,$carry     # did it borrow?

        # carry is now an all-ones mask when the subtraction underflowed;
        # add the masked modulus back to undo it in constant time.
        and     $m0,$m0,$carry
        and     $m1,$m1,$carry
        addc    $a0,$a0,$m0
        and     $m2,$m2,$carry
        adde    $a1,$a1,$m1
        and     $m3,$m3,$carry
        adde    $a2,$a2,$m2
        adde    $a3,$a3,$m3

        $ST     $a0,$SIZE_T*1($ap)      # write result
        $ST     $a1,$SIZE_T*2($ap)
        $ST     $a2,$SIZE_T*3($ap)
        $ST     $a3,$SIZE_T*4($ap)

.Lmul4x_done:
        # Common exit: wipe the remaining scratch slots, signal success in
        # r3, restore the non-volatile registers relative to the caller's
        # frame (bp holds the saved stack pointer) and return.
        $ST     $zero,$SIZE_T*8($sp)    # wipe stack clean
        $ST     $zero,$SIZE_T*9($sp)
        $ST     $zero,$SIZE_T*10($sp)
        $ST     $zero,$SIZE_T*11($sp)
        li      r3,1                    # signal "done"
        $POP    r14,-$SIZE_T*18($bp)
        $POP    r15,-$SIZE_T*17($bp)
        $POP    r16,-$SIZE_T*16($bp)
        $POP    r17,-$SIZE_T*15($bp)
        $POP    r18,-$SIZE_T*14($bp)
        $POP    r19,-$SIZE_T*13($bp)
        $POP    r20,-$SIZE_T*12($bp)
        $POP    r21,-$SIZE_T*11($bp)
        $POP    r22,-$SIZE_T*10($bp)
        $POP    r23,-$SIZE_T*9($bp)
        $POP    r24,-$SIZE_T*8($bp)
        $POP    r25,-$SIZE_T*7($bp)
        $POP    r26,-$SIZE_T*6($bp)
        $POP    r27,-$SIZE_T*5($bp)
        $POP    r28,-$SIZE_T*4($bp)
        $POP    r29,-$SIZE_T*3($bp)
        $POP    r30,-$SIZE_T*2($bp)
        $POP    r31,-$SIZE_T*1($bp)
        mr      $sp,$bp
        blr
        .long   0
        .byte   0,12,4,0x20,0x80,18,6,0 # traceback tag (perlasm convention)
        .long   0
.size   .bn_mul4x_mont_int,.-.bn_mul4x_mont_int
948 ___
949 }
950
951 if (1) {
952 ########################################################################
953 # Following is PPC adaptation of sqrx8x_mont from x86_64-mont5 module.
954
# Register map for the sqr8x code path (uses r0 and r9-r31):
my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("r$_",(9..12,14..17));         # input limbs
my ($t0,$t1,$t2,$t3)=map("r$_",(18..21));                               # scratch
my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("r$_",(22..29)); # accumulator
my ($cnt,$carry,$zero)=("r30","r31","r0");                              # loop count, carry, constant 0
my ($tp,$ap_end,$na0)=($bp,$np,$carry);  # phase-dependent aliases of existing registers
960
961 # sp----------->+-------------------------------+
962 #               | saved sp                      |
963 #               +-------------------------------+
964 #               .                               .
965 # +12*size_t    +-------------------------------+
966 #               | size_t tmp[2*num]             |
967 #               .                               .
968 #               .                               .
969 #               .                               .
970 #               +-------------------------------+
971 #               .                               .
972 # -18*size_t    +-------------------------------+
973 #               | 18 saved gpr, r14-r31         |
974 #               .                               .
975 #               .                               .
976 #               +-------------------------------+
977 $code.=<<___;
.align  5
__bn_sqr8x_mont:
.Lsqr8x_do:
        # Prologue: grow the stack by 2*num limbs of temporary storage plus
        # fixed overhead (32 size_t slots covering the saved-GPR area and
        # bookkeeping), then save the non-volatile registers r14-r31.
        mr      $a0,$sp
        slwi    $a1,$num,`log($SIZE_T)/log(2)+1`
        li      $a2,-32*$SIZE_T
        sub     $a1,$a2,$a1
        slwi    $num,$num,`log($SIZE_T)/log(2)`
        $STUX   $sp,$sp,$a1             # alloca

        $PUSH   r14,-$SIZE_T*18($a0)
        $PUSH   r15,-$SIZE_T*17($a0)
        $PUSH   r16,-$SIZE_T*16($a0)
        $PUSH   r17,-$SIZE_T*15($a0)
        $PUSH   r18,-$SIZE_T*14($a0)
        $PUSH   r19,-$SIZE_T*13($a0)
        $PUSH   r20,-$SIZE_T*12($a0)
        $PUSH   r21,-$SIZE_T*11($a0)
        $PUSH   r22,-$SIZE_T*10($a0)
        $PUSH   r23,-$SIZE_T*9($a0)
        $PUSH   r24,-$SIZE_T*8($a0)
        $PUSH   r25,-$SIZE_T*7($a0)
        $PUSH   r26,-$SIZE_T*6($a0)
        $PUSH   r27,-$SIZE_T*5($a0)
        $PUSH   r28,-$SIZE_T*4($a0)
        $PUSH   r29,-$SIZE_T*3($a0)
        $PUSH   r30,-$SIZE_T*2($a0)
        $PUSH   r31,-$SIZE_T*1($a0)

        # Bias the array pointers by -1 limb so that 1-based displacements
        # can be used below, and load the first eight input limbs.
        subi    $ap,$ap,$SIZE_T         # bias by -1
        subi    $t0,$np,$SIZE_T         # bias by -1
        subi    $rp,$rp,$SIZE_T         # bias by -1
        $LD     $n0,0($n0)              # *n0
        li      $zero,0

        add     $ap_end,$ap,$num
        $LD     $a0,$SIZE_T*1($ap)
        #li     $acc0,0
        $LD     $a1,$SIZE_T*2($ap)
        li      $acc1,0
        $LD     $a2,$SIZE_T*3($ap)
        li      $acc2,0
        $LD     $a3,$SIZE_T*4($ap)
        li      $acc3,0
        $LD     $a4,$SIZE_T*5($ap)
        li      $acc4,0
        $LD     $a5,$SIZE_T*6($ap)
        li      $acc5,0
        $LD     $a6,$SIZE_T*7($ap)
        li      $acc6,0
        $LDU    $a7,$SIZE_T*8($ap)
        li      $acc7,0

        addi    $tp,$sp,$SIZE_T*11      # &tp[-1]
        subic.  $cnt,$num,$SIZE_T*8
        b       .Lsqr8x_zero_start

        # Wipe the 2*num-limb temporary area; entry at _start handles the
        # leading partial group, then each loop pass clears 16 slots.
.align  5
.Lsqr8x_zero:
        subic.  $cnt,$cnt,$SIZE_T*8
        $ST     $zero,$SIZE_T*1($tp)
        $ST     $zero,$SIZE_T*2($tp)
        $ST     $zero,$SIZE_T*3($tp)
        $ST     $zero,$SIZE_T*4($tp)
        $ST     $zero,$SIZE_T*5($tp)
        $ST     $zero,$SIZE_T*6($tp)
        $ST     $zero,$SIZE_T*7($tp)
        $ST     $zero,$SIZE_T*8($tp)
.Lsqr8x_zero_start:
        $ST     $zero,$SIZE_T*9($tp)
        $ST     $zero,$SIZE_T*10($tp)
        $ST     $zero,$SIZE_T*11($tp)
        $ST     $zero,$SIZE_T*12($tp)
        $ST     $zero,$SIZE_T*13($tp)
        $ST     $zero,$SIZE_T*14($tp)
        $ST     $zero,$SIZE_T*15($tp)
        $STU    $zero,$SIZE_T*16($tp)
        bne     .Lsqr8x_zero

        # Stash values needed much later in the fixed bookkeeping slots.
        $PUSH   $rp,$SIZE_T*6($sp)      # offload &rp[-1]
        $PUSH   $t0,$SIZE_T*7($sp)      # offload &np[-1]
        $PUSH   $n0,$SIZE_T*8($sp)      # offload n0
        $PUSH   $tp,$SIZE_T*9($sp)      # &tp[2*num-1]
        $PUSH   $zero,$SIZE_T*10($sp)   # initial top-most carry
        addi    $tp,$sp,$SIZE_T*11      # &tp[-1]
1063
1064         # Multiply everything but a[i]*a[i]
1065 .align  5
1066 .Lsqr8x_outer_loop:
1067         #                                                 a[1]a[0]     (i)
1068         #                                             a[2]a[0]
1069         #                                         a[3]a[0]
1070         #                                     a[4]a[0]
1071         #                                 a[5]a[0]
1072         #                             a[6]a[0]
1073         #                         a[7]a[0]
1074         #                                         a[2]a[1]             (ii)
1075         #                                     a[3]a[1]
1076         #                                 a[4]a[1]
1077         #                             a[5]a[1]
1078         #                         a[6]a[1]
1079         #                     a[7]a[1]
1080         #                                 a[3]a[2]                     (iii)
1081         #                             a[4]a[2]
1082         #                         a[5]a[2]
1083         #                     a[6]a[2]
1084         #                 a[7]a[2]
1085         #                         a[4]a[3]                             (iv)
1086         #                     a[5]a[3]
1087         #                 a[6]a[3]
1088         #             a[7]a[3]
1089         #                 a[5]a[4]                                     (v)
1090         #             a[6]a[4]
1091         #         a[7]a[4]
1092         #         a[6]a[5]                                             (vi)
1093         #     a[7]a[5]
1094         # a[7]a[6]                                                     (vii)
1095
        # Accumulate the cross products a[i]*a[j], i>j, for the current
        # eight cached limbs; the column groups are marked (i)..(vii) in
        # the diagram above.  Low halves are added first, then the high
        # halves one column later, with the next group's multiplies
        # interleaved (indented) into the carry chain.
        $UMULL  $t0,$a1,$a0             # lo(a[1..7]*a[0])              (i)
        $UMULL  $t1,$a2,$a0
        $UMULL  $t2,$a3,$a0
        $UMULL  $t3,$a4,$a0
        addc    $acc1,$acc1,$t0         # t[1]+lo(a[1]*a[0])
        $UMULL  $t0,$a5,$a0
        adde    $acc2,$acc2,$t1
        $UMULL  $t1,$a6,$a0
        adde    $acc3,$acc3,$t2
        $UMULL  $t2,$a7,$a0
        adde    $acc4,$acc4,$t3
        $UMULH  $t3,$a1,$a0             # hi(a[1..7]*a[0])
        adde    $acc5,$acc5,$t0
        $UMULH  $t0,$a2,$a0
        adde    $acc6,$acc6,$t1
        $UMULH  $t1,$a3,$a0
        adde    $acc7,$acc7,$t2
        $UMULH  $t2,$a4,$a0
        $ST     $acc0,$SIZE_T*1($tp)    # t[0]
        addze   $acc0,$zero             # t[8]
        $ST     $acc1,$SIZE_T*2($tp)    # t[1]
        addc    $acc2,$acc2,$t3         # t[2]+lo(a[1]*a[0])
        $UMULH  $t3,$a5,$a0
        adde    $acc3,$acc3,$t0
        $UMULH  $t0,$a6,$a0
        adde    $acc4,$acc4,$t1
        $UMULH  $t1,$a7,$a0
        adde    $acc5,$acc5,$t2
         $UMULL $t2,$a2,$a1             # lo(a[2..7]*a[1])              (ii)
        adde    $acc6,$acc6,$t3
         $UMULL $t3,$a3,$a1
        adde    $acc7,$acc7,$t0
         $UMULL $t0,$a4,$a1
        adde    $acc0,$acc0,$t1

        $UMULL  $t1,$a5,$a1
        addc    $acc3,$acc3,$t2
        $UMULL  $t2,$a6,$a1
        adde    $acc4,$acc4,$t3
        $UMULL  $t3,$a7,$a1
        adde    $acc5,$acc5,$t0
        $UMULH  $t0,$a2,$a1             # hi(a[2..7]*a[1])
        adde    $acc6,$acc6,$t1
        $UMULH  $t1,$a3,$a1
        adde    $acc7,$acc7,$t2
        $UMULH  $t2,$a4,$a1
        adde    $acc0,$acc0,$t3
        $UMULH  $t3,$a5,$a1
        $ST     $acc2,$SIZE_T*3($tp)    # t[2]
        addze   $acc1,$zero             # t[9]
        $ST     $acc3,$SIZE_T*4($tp)    # t[3]
        addc    $acc4,$acc4,$t0
        $UMULH  $t0,$a6,$a1
        adde    $acc5,$acc5,$t1
        $UMULH  $t1,$a7,$a1
        adde    $acc6,$acc6,$t2
         $UMULL $t2,$a3,$a2             # lo(a[3..7]*a[2])              (iii)
        adde    $acc7,$acc7,$t3
         $UMULL $t3,$a4,$a2
        adde    $acc0,$acc0,$t0
         $UMULL $t0,$a5,$a2
        adde    $acc1,$acc1,$t1

        $UMULL  $t1,$a6,$a2
        addc    $acc5,$acc5,$t2
        $UMULL  $t2,$a7,$a2
        adde    $acc6,$acc6,$t3
        $UMULH  $t3,$a3,$a2             # hi(a[3..7]*a[2])
        adde    $acc7,$acc7,$t0
        $UMULH  $t0,$a4,$a2
        adde    $acc0,$acc0,$t1
        $UMULH  $t1,$a5,$a2
        adde    $acc1,$acc1,$t2
        $UMULH  $t2,$a6,$a2
        $ST     $acc4,$SIZE_T*5($tp)    # t[4]
        addze   $acc2,$zero             # t[10]
        $ST     $acc5,$SIZE_T*6($tp)    # t[5]
        addc    $acc6,$acc6,$t3
        $UMULH  $t3,$a7,$a2
        adde    $acc7,$acc7,$t0
         $UMULL $t0,$a4,$a3             # lo(a[4..7]*a[3])              (iv)
        adde    $acc0,$acc0,$t1
         $UMULL $t1,$a5,$a3
        adde    $acc1,$acc1,$t2
         $UMULL $t2,$a6,$a3
        adde    $acc2,$acc2,$t3

        $UMULL  $t3,$a7,$a3
        addc    $acc7,$acc7,$t0
        $UMULH  $t0,$a4,$a3             # hi(a[4..7]*a[3])
        adde    $acc0,$acc0,$t1
        $UMULH  $t1,$a5,$a3
        adde    $acc1,$acc1,$t2
        $UMULH  $t2,$a6,$a3
        adde    $acc2,$acc2,$t3
        $UMULH  $t3,$a7,$a3
        $ST     $acc6,$SIZE_T*7($tp)    # t[6]
        addze   $acc3,$zero             # t[11]
        $STU    $acc7,$SIZE_T*8($tp)    # t[7]
        addc    $acc0,$acc0,$t0
         $UMULL $t0,$a5,$a4             # lo(a[5..7]*a[4])              (v)
        adde    $acc1,$acc1,$t1
         $UMULL $t1,$a6,$a4
        adde    $acc2,$acc2,$t2
         $UMULL $t2,$a7,$a4
        adde    $acc3,$acc3,$t3

        $UMULH  $t3,$a5,$a4             # hi(a[5..7]*a[4])
        addc    $acc1,$acc1,$t0
        $UMULH  $t0,$a6,$a4
        adde    $acc2,$acc2,$t1
        $UMULH  $t1,$a7,$a4
        adde    $acc3,$acc3,$t2
         $UMULL $t2,$a6,$a5             # lo(a[6..7]*a[5])              (vi)
        addze   $acc4,$zero             # t[12]
        addc    $acc2,$acc2,$t3
         $UMULL $t3,$a7,$a5
        adde    $acc3,$acc3,$t0
         $UMULH $t0,$a6,$a5             # hi(a[6..7]*a[5])
        adde    $acc4,$acc4,$t1

        $UMULH  $t1,$a7,$a5
        addc    $acc3,$acc3,$t2
         $UMULL $t2,$a7,$a6             # lo(a[7]*a[6])                 (vii)
        adde    $acc4,$acc4,$t3
         $UMULH $t3,$a7,$a6             # hi(a[7]*a[6])
        addze   $acc5,$zero             # t[13]
        addc    $acc4,$acc4,$t0
        $UCMP   $ap_end,$ap             # done yet?
        adde    $acc5,$acc5,$t1

        addc    $acc5,$acc5,$t2
        sub     $t0,$ap_end,$num        # rewound ap
        addze   $acc6,$zero             # t[14]
        add     $acc6,$acc6,$t3

        beq     .Lsqr8x_outer_break

        # Not the last group: fold the previously stored tp limbs into the
        # accumulators and load the next eight input limbs.
        mr      $n0,$a0
        $LD     $a0,$SIZE_T*1($tp)
        $LD     $a1,$SIZE_T*2($tp)
        $LD     $a2,$SIZE_T*3($tp)
        $LD     $a3,$SIZE_T*4($tp)
        $LD     $a4,$SIZE_T*5($tp)
        $LD     $a5,$SIZE_T*6($tp)
        $LD     $a6,$SIZE_T*7($tp)
        $LD     $a7,$SIZE_T*8($tp)
        addc    $acc0,$acc0,$a0
        $LD     $a0,$SIZE_T*1($ap)
        adde    $acc1,$acc1,$a1
        $LD     $a1,$SIZE_T*2($ap)
        adde    $acc2,$acc2,$a2
        $LD     $a2,$SIZE_T*3($ap)
        adde    $acc3,$acc3,$a3
        $LD     $a3,$SIZE_T*4($ap)
        adde    $acc4,$acc4,$a4
        $LD     $a4,$SIZE_T*5($ap)
        adde    $acc5,$acc5,$a5
        $LD     $a5,$SIZE_T*6($ap)
        adde    $acc6,$acc6,$a6
        $LD     $a6,$SIZE_T*7($ap)
        subi    $rp,$ap,$SIZE_T*7
        addze   $acc7,$a7
        $LDU    $a7,$SIZE_T*8($ap)
        #addze  $carry,$zero            # moved below
        li      $cnt,0
        b       .Lsqr8x_mul
1263
1264         #                                                          a[8]a[0]
1265         #                                                      a[9]a[0]
1266         #                                                  a[a]a[0]
1267         #                                              a[b]a[0]
1268         #                                          a[c]a[0]
1269         #                                      a[d]a[0]
1270         #                                  a[e]a[0]
1271         #                              a[f]a[0]
1272         #                                                      a[8]a[1]
1273         #                          a[f]a[1]........................
1274         #                                                  a[8]a[2]
1275         #                      a[f]a[2]........................
1276         #                                              a[8]a[3]
1277         #                  a[f]a[3]........................
1278         #                                          a[8]a[4]
1279         #              a[f]a[4]........................
1280         #                                      a[8]a[5]
1281         #          a[f]a[5]........................
1282         #                                  a[8]a[6]
1283         #      a[f]a[6]........................
1284         #                              a[8]a[7]
1285         #  a[f]a[7]........................
.align  5
.Lsqr8x_mul:
        # Multiply the eight cached limbs a0-a7 by the current multiplier
        # in n0, accumulating into acc0-7.  cnt cycles through 0..8*SIZE_T-1
        # so the LDX below fetches the next multiplier each iteration; the
        # top carry is modulo-scheduled across loop iterations.
        $UMULL  $t0,$a0,$n0
        addze   $carry,$zero            # carry bit, modulo-scheduled
        $UMULL  $t1,$a1,$n0
        addi    $cnt,$cnt,$SIZE_T
        $UMULL  $t2,$a2,$n0
        andi.   $cnt,$cnt,$SIZE_T*8-1
        $UMULL  $t3,$a3,$n0
        addc    $acc0,$acc0,$t0
        $UMULL  $t0,$a4,$n0
        adde    $acc1,$acc1,$t1
        $UMULL  $t1,$a5,$n0
        adde    $acc2,$acc2,$t2
        $UMULL  $t2,$a6,$n0
        adde    $acc3,$acc3,$t3
        $UMULL  $t3,$a7,$n0
        adde    $acc4,$acc4,$t0
        $UMULH  $t0,$a0,$n0
        adde    $acc5,$acc5,$t1
        $UMULH  $t1,$a1,$n0
        adde    $acc6,$acc6,$t2
        $UMULH  $t2,$a2,$n0
        adde    $acc7,$acc7,$t3
        $UMULH  $t3,$a3,$n0
        addze   $carry,$carry
        $STU    $acc0,$SIZE_T($tp)
        addc    $acc0,$acc1,$t0
        $UMULH  $t0,$a4,$n0
        adde    $acc1,$acc2,$t1
        $UMULH  $t1,$a5,$n0
        adde    $acc2,$acc3,$t2
        $UMULH  $t2,$a6,$n0
        adde    $acc3,$acc4,$t3
        $UMULH  $t3,$a7,$n0
        $LDX    $n0,$rp,$cnt
        adde    $acc4,$acc5,$t0
        adde    $acc5,$acc6,$t1
        adde    $acc6,$acc7,$t2
        adde    $acc7,$carry,$t3
        #addze  $carry,$zero            # moved above
        bne     .Lsqr8x_mul
                                        # note that carry flag is guaranteed
                                        # to be zero at this point
        $UCMP   $ap,$ap_end             # done yet?
        beq     .Lsqr8x_break

        # More input limbs remain: fold stored tp limbs back into the
        # accumulators and advance ap to the next eight multipliers.
        $LD     $a0,$SIZE_T*1($tp)
        $LD     $a1,$SIZE_T*2($tp)
        $LD     $a2,$SIZE_T*3($tp)
        $LD     $a3,$SIZE_T*4($tp)
        $LD     $a4,$SIZE_T*5($tp)
        $LD     $a5,$SIZE_T*6($tp)
        $LD     $a6,$SIZE_T*7($tp)
        $LD     $a7,$SIZE_T*8($tp)
        addc    $acc0,$acc0,$a0
        $LD     $a0,$SIZE_T*1($ap)
        adde    $acc1,$acc1,$a1
        $LD     $a1,$SIZE_T*2($ap)
        adde    $acc2,$acc2,$a2
        $LD     $a2,$SIZE_T*3($ap)
        adde    $acc3,$acc3,$a3
        $LD     $a3,$SIZE_T*4($ap)
        adde    $acc4,$acc4,$a4
        $LD     $a4,$SIZE_T*5($ap)
        adde    $acc5,$acc5,$a5
        $LD     $a5,$SIZE_T*6($ap)
        adde    $acc6,$acc6,$a6
        $LD     $a6,$SIZE_T*7($ap)
        adde    $acc7,$acc7,$a7
        $LDU    $a7,$SIZE_T*8($ap)
        #addze  $carry,$zero            # moved above
        b       .Lsqr8x_mul
1359
.align  5
.Lsqr8x_break:
        # Inner multiply finished for this outer iteration: reload the next
        # eight multiplier limbs and decide whether another outer pass is
        # needed (t0 == 0 means this was the last one).
        $LD     $a0,$SIZE_T*8($rp)
        addi    $ap,$rp,$SIZE_T*15
        $LD     $a1,$SIZE_T*9($rp)
        sub.    $t0,$ap_end,$ap         # is it last iteration?
        $LD     $a2,$SIZE_T*10($rp)
        sub     $t1,$tp,$t0
        $LD     $a3,$SIZE_T*11($rp)
        $LD     $a4,$SIZE_T*12($rp)
        $LD     $a5,$SIZE_T*13($rp)
        $LD     $a6,$SIZE_T*14($rp)
        $LD     $a7,$SIZE_T*15($rp)
        beq     .Lsqr8x_outer_loop

        # Otherwise spill the accumulators to tp and reload the ones for
        # the rewound position before re-entering the outer loop.
        $ST     $acc0,$SIZE_T*1($tp)
        $LD     $acc0,$SIZE_T*1($t1)
        $ST     $acc1,$SIZE_T*2($tp)
        $LD     $acc1,$SIZE_T*2($t1)
        $ST     $acc2,$SIZE_T*3($tp)
        $LD     $acc2,$SIZE_T*3($t1)
        $ST     $acc3,$SIZE_T*4($tp)
        $LD     $acc3,$SIZE_T*4($t1)
        $ST     $acc4,$SIZE_T*5($tp)
        $LD     $acc4,$SIZE_T*5($t1)
        $ST     $acc5,$SIZE_T*6($tp)
        $LD     $acc5,$SIZE_T*6($t1)
        $ST     $acc6,$SIZE_T*7($tp)
        $LD     $acc6,$SIZE_T*7($t1)
        $ST     $acc7,$SIZE_T*8($tp)
        $LD     $acc7,$SIZE_T*8($t1)
        mr      $tp,$t1
        b       .Lsqr8x_outer_loop
1393
.align  5
.Lsqr8x_outer_break:
        ####################################################################
        # Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
        $LD     $a1,$SIZE_T*1($t0)      # recall that $t0 is &a[-1]
        $LD     $a3,$SIZE_T*2($t0)
        $LD     $a5,$SIZE_T*3($t0)
        $LD     $a7,$SIZE_T*4($t0)
        addi    $ap,$t0,$SIZE_T*4
                                        # "tp[x]" comments are for num==8 case
        $LD     $t1,$SIZE_T*13($sp)     # =tp[1], t[0] is not interesting
        $LD     $t2,$SIZE_T*14($sp)
        $LD     $t3,$SIZE_T*15($sp)
        $LD     $t0,$SIZE_T*16($sp)

        $ST     $acc0,$SIZE_T*1($tp)    # tp[8]=
        srwi    $cnt,$num,`log($SIZE_T)/log(2)+2`
        $ST     $acc1,$SIZE_T*2($tp)
        subi    $cnt,$cnt,1
        $ST     $acc2,$SIZE_T*3($tp)
        $ST     $acc3,$SIZE_T*4($tp)
        $ST     $acc4,$SIZE_T*5($tp)
        $ST     $acc5,$SIZE_T*6($tp)
        $ST     $acc6,$SIZE_T*7($tp)
        #$ST    $acc7,$SIZE_T*8($tp)    # tp[15] is not interesting
        # Each tp limb is doubled by add-to-itself, with the bit shifted
        # out recovered via a right shift by BITS-1 and OR-ed into the next
        # limb; the diagonal squares are folded in through the carry chain.
        addi    $tp,$sp,$SIZE_T*11      # &tp[-1]
        $UMULL  $acc0,$a1,$a1
        $UMULH  $a1,$a1,$a1
        add     $acc1,$t1,$t1           # <<1
        $SHRI   $t1,$t1,$BITS-1
        $UMULL  $a2,$a3,$a3
        $UMULH  $a3,$a3,$a3
        addc    $acc1,$acc1,$a1
        add     $acc2,$t2,$t2
        $SHRI   $t2,$t2,$BITS-1
        add     $acc3,$t3,$t3
        $SHRI   $t3,$t3,$BITS-1
        or      $acc2,$acc2,$t1

        mtctr   $cnt
.Lsqr4x_shift_n_add:
        # Steady state: four limbs per pass, software-pipelined so that
        # squares, doublings and stores from adjacent passes overlap.
        $UMULL  $a4,$a5,$a5
        $UMULH  $a5,$a5,$a5
        $LD     $t1,$SIZE_T*6($tp)      # =tp[5]
        $LD     $a1,$SIZE_T*1($ap)
        adde    $acc2,$acc2,$a2
        add     $acc4,$t0,$t0
        $SHRI   $t0,$t0,$BITS-1
        or      $acc3,$acc3,$t2
        $LD     $t2,$SIZE_T*7($tp)      # =tp[6]
        adde    $acc3,$acc3,$a3
        $LD     $a3,$SIZE_T*2($ap)
        add     $acc5,$t1,$t1
        $SHRI   $t1,$t1,$BITS-1
        or      $acc4,$acc4,$t3
        $LD     $t3,$SIZE_T*8($tp)      # =tp[7]
        $UMULL  $a6,$a7,$a7
        $UMULH  $a7,$a7,$a7
        adde    $acc4,$acc4,$a4
        add     $acc6,$t2,$t2
        $SHRI   $t2,$t2,$BITS-1
        or      $acc5,$acc5,$t0
        $LD     $t0,$SIZE_T*9($tp)      # =tp[8]
        adde    $acc5,$acc5,$a5
        $LD     $a5,$SIZE_T*3($ap)
        add     $acc7,$t3,$t3
        $SHRI   $t3,$t3,$BITS-1
        or      $acc6,$acc6,$t1
        $LD     $t1,$SIZE_T*10($tp)     # =tp[9]
        $UMULL  $a0,$a1,$a1
        $UMULH  $a1,$a1,$a1
        adde    $acc6,$acc6,$a6
        $ST     $acc0,$SIZE_T*1($tp)    # tp[0]=
        add     $acc0,$t0,$t0
        $SHRI   $t0,$t0,$BITS-1
        or      $acc7,$acc7,$t2
        $LD     $t2,$SIZE_T*11($tp)     # =tp[10]
        adde    $acc7,$acc7,$a7
        $LDU    $a7,$SIZE_T*4($ap)
        $ST     $acc1,$SIZE_T*2($tp)    # tp[1]=
        add     $acc1,$t1,$t1
        $SHRI   $t1,$t1,$BITS-1
        or      $acc0,$acc0,$t3
        $LD     $t3,$SIZE_T*12($tp)     # =tp[11]
        $UMULL  $a2,$a3,$a3
        $UMULH  $a3,$a3,$a3
        adde    $acc0,$acc0,$a0
        $ST     $acc2,$SIZE_T*3($tp)    # tp[2]=
        add     $acc2,$t2,$t2
        $SHRI   $t2,$t2,$BITS-1
        or      $acc1,$acc1,$t0
        $LD     $t0,$SIZE_T*13($tp)     # =tp[12]
        adde    $acc1,$acc1,$a1
        $ST     $acc3,$SIZE_T*4($tp)    # tp[3]=
        $ST     $acc4,$SIZE_T*5($tp)    # tp[4]=
        $ST     $acc5,$SIZE_T*6($tp)    # tp[5]=
        $ST     $acc6,$SIZE_T*7($tp)    # tp[6]=
        $STU    $acc7,$SIZE_T*8($tp)    # tp[7]=
        add     $acc3,$t3,$t3
        $SHRI   $t3,$t3,$BITS-1
        or      $acc2,$acc2,$t1
        bdnz    .Lsqr4x_shift_n_add
1496 ___
my ($np,$np_end)=($ap,$ap_end);         # reduction phase reuses the ap registers for np pointers
1498 $code.=<<___;
        # Drain the shift-and-add pipeline while switching to the reduction
        # phase: recover the np pointer and n0 saved in the prologue, load
        # the first eight modulus limbs, and seed acc0-7 with tp[0..7].
         $POP   $np,$SIZE_T*7($sp)      # pull &np[-1] and n0
         $POP   $n0,$SIZE_T*8($sp)

        $UMULL  $a4,$a5,$a5
        $UMULH  $a5,$a5,$a5
        $ST     $acc0,$SIZE_T*1($tp)    # tp[8]=
         $LD    $acc0,$SIZE_T*12($sp)   # =tp[0]
        $LD     $t1,$SIZE_T*6($tp)      # =tp[13]
        adde    $acc2,$acc2,$a2
        add     $acc4,$t0,$t0
        $SHRI   $t0,$t0,$BITS-1
        or      $acc3,$acc3,$t2
        $LD     $t2,$SIZE_T*7($tp)      # =tp[14]
        adde    $acc3,$acc3,$a3
        add     $acc5,$t1,$t1
        $SHRI   $t1,$t1,$BITS-1
        or      $acc4,$acc4,$t3
        $UMULL  $a6,$a7,$a7
        $UMULH  $a7,$a7,$a7
        adde    $acc4,$acc4,$a4
        add     $acc6,$t2,$t2
        $SHRI   $t2,$t2,$BITS-1
        or      $acc5,$acc5,$t0
        $ST     $acc1,$SIZE_T*2($tp)    # tp[9]=
         $LD    $acc1,$SIZE_T*13($sp)   # =tp[1]
        adde    $acc5,$acc5,$a5
        or      $acc6,$acc6,$t1
         $LD    $a0,$SIZE_T*1($np)
         $LD    $a1,$SIZE_T*2($np)
        adde    $acc6,$acc6,$a6
         $LD    $a2,$SIZE_T*3($np)
         $LD    $a3,$SIZE_T*4($np)
        adde    $acc7,$a7,$t2
         $LD    $a4,$SIZE_T*5($np)
         $LD    $a5,$SIZE_T*6($np)

        ################################################################
        # Reduce by 8 limbs per iteration
        $UMULL  $na0,$n0,$acc0          # t[0]*n0
        li      $cnt,8
        $LD     $a6,$SIZE_T*7($np)
        add     $np_end,$np,$num
        $LDU    $a7,$SIZE_T*8($np)
        $ST     $acc2,$SIZE_T*3($tp)    # tp[10]=
        $LD     $acc2,$SIZE_T*14($sp)
        $ST     $acc3,$SIZE_T*4($tp)    # tp[11]=
        $LD     $acc3,$SIZE_T*15($sp)
        $ST     $acc4,$SIZE_T*5($tp)    # tp[12]=
        $LD     $acc4,$SIZE_T*16($sp)
        $ST     $acc5,$SIZE_T*6($tp)    # tp[13]=
        $LD     $acc5,$SIZE_T*17($sp)
        $ST     $acc6,$SIZE_T*7($tp)    # tp[14]=
        $LD     $acc6,$SIZE_T*18($sp)
        $ST     $acc7,$SIZE_T*8($tp)    # tp[15]=
        $LD     $acc7,$SIZE_T*19($sp)
        addi    $tp,$sp,$SIZE_T*11      # &tp[-1]
        mtctr   $cnt
        b       .Lsqr8x_reduction
1557
.align  5
.Lsqr8x_reduction:
        # Montgomery reduction, one limb retired per iteration.  The lines
        # marked (*) note an elided multiply: by construction of na0 the
        # low product lo(n[0]*na0) exactly cancels acc0, so the addic with
        # -1 reproduces the carry that the full addition would have set
        # (CA=1 unless acc0 was zero) — see the x86_64-mont5 original this
        # is adapted from.
        # (*)   $UMULL  $t0,$a0,$na0    # lo(n[0-7])*lo(t[0]*n0)
        $UMULL  $t1,$a1,$na0
        $UMULL  $t2,$a2,$na0
        $STU    $na0,$SIZE_T($tp)       # put aside t[0]*n0 for tail processing
        $UMULL  $t3,$a3,$na0
        # (*)   addc    $acc0,$acc0,$t0
        addic   $acc0,$acc0,-1          # (*)
        $UMULL  $t0,$a4,$na0
        adde    $acc0,$acc1,$t1
        $UMULL  $t1,$a5,$na0
        adde    $acc1,$acc2,$t2
        $UMULL  $t2,$a6,$na0
        adde    $acc2,$acc3,$t3
        $UMULL  $t3,$a7,$na0
        adde    $acc3,$acc4,$t0
        $UMULH  $t0,$a0,$na0            # hi(n[0-7])*lo(t[0]*n0)
        adde    $acc4,$acc5,$t1
        $UMULH  $t1,$a1,$na0
        adde    $acc5,$acc6,$t2
        $UMULH  $t2,$a2,$na0
        adde    $acc6,$acc7,$t3
        $UMULH  $t3,$a3,$na0
        addze   $acc7,$zero
        addc    $acc0,$acc0,$t0
        $UMULH  $t0,$a4,$na0
        adde    $acc1,$acc1,$t1
        $UMULH  $t1,$a5,$na0
        adde    $acc2,$acc2,$t2
        $UMULH  $t2,$a6,$na0
        adde    $acc3,$acc3,$t3
        $UMULH  $t3,$a7,$na0
        $UMULL  $na0,$n0,$acc0          # next t[0]*n0
        adde    $acc4,$acc4,$t0
        adde    $acc5,$acc5,$t1
        adde    $acc6,$acc6,$t2
        adde    $acc7,$acc7,$t3
        bdnz    .Lsqr8x_reduction
1597
        # Fold the next eight stored tp limbs into the accumulators; if the
        # whole modulus has been consumed, branch to the num==8 tail.
        $LD     $t0,$SIZE_T*1($tp)
        $LD     $t1,$SIZE_T*2($tp)
        $LD     $t2,$SIZE_T*3($tp)
        $LD     $t3,$SIZE_T*4($tp)
        subi    $rp,$tp,$SIZE_T*7
        $UCMP   $np_end,$np             # done yet?
        addc    $acc0,$acc0,$t0
        $LD     $t0,$SIZE_T*5($tp)
        adde    $acc1,$acc1,$t1
        $LD     $t1,$SIZE_T*6($tp)
        adde    $acc2,$acc2,$t2
        $LD     $t2,$SIZE_T*7($tp)
        adde    $acc3,$acc3,$t3
        $LD     $t3,$SIZE_T*8($tp)
        adde    $acc4,$acc4,$t0
        adde    $acc5,$acc5,$t1
        adde    $acc6,$acc6,$t2
        adde    $acc7,$acc7,$t3
        #addze  $carry,$zero            # moved below
        beq     .Lsqr8x8_post_condition

        # More modulus limbs remain: load the next eight and restart the
        # tail multiply with the first saved t[0]*n0 value.
        $LD     $n0,$SIZE_T*0($rp)
        $LD     $a0,$SIZE_T*1($np)
        $LD     $a1,$SIZE_T*2($np)
        $LD     $a2,$SIZE_T*3($np)
        $LD     $a3,$SIZE_T*4($np)
        $LD     $a4,$SIZE_T*5($np)
        $LD     $a5,$SIZE_T*6($np)
        $LD     $a6,$SIZE_T*7($np)
        $LDU    $a7,$SIZE_T*8($np)
        li      $cnt,0
1629
.align  5
.Lsqr8x_tail:
        # Tail of the reduction: same software pipeline as .Lsqr8x_mul, but
        # multiplying modulus limbs a0-a7 by the saved t[0]*n0 values that
        # are fetched via LDX as cnt cycles through 0..8*SIZE_T-1.
        $UMULL  $t0,$a0,$n0
        addze   $carry,$zero            # carry bit, modulo-scheduled
        $UMULL  $t1,$a1,$n0
        addi    $cnt,$cnt,$SIZE_T
        $UMULL  $t2,$a2,$n0
        andi.   $cnt,$cnt,$SIZE_T*8-1
        $UMULL  $t3,$a3,$n0
        addc    $acc0,$acc0,$t0
        $UMULL  $t0,$a4,$n0
        adde    $acc1,$acc1,$t1
        $UMULL  $t1,$a5,$n0
        adde    $acc2,$acc2,$t2
        $UMULL  $t2,$a6,$n0
        adde    $acc3,$acc3,$t3
        $UMULL  $t3,$a7,$n0
        adde    $acc4,$acc4,$t0
        $UMULH  $t0,$a0,$n0
        adde    $acc5,$acc5,$t1
        $UMULH  $t1,$a1,$n0
        adde    $acc6,$acc6,$t2
        $UMULH  $t2,$a2,$n0
        adde    $acc7,$acc7,$t3
        $UMULH  $t3,$a3,$n0
        addze   $carry,$carry
        $STU    $acc0,$SIZE_T($tp)
        addc    $acc0,$acc1,$t0
        $UMULH  $t0,$a4,$n0
        adde    $acc1,$acc2,$t1
        $UMULH  $t1,$a5,$n0
        adde    $acc2,$acc3,$t2
        $UMULH  $t2,$a6,$n0
        adde    $acc3,$acc4,$t3
        $UMULH  $t3,$a7,$n0
        $LDX    $n0,$rp,$cnt
        adde    $acc4,$acc5,$t0
        adde    $acc5,$acc6,$t1
        adde    $acc6,$acc7,$t2
        adde    $acc7,$carry,$t3
        #addze  $carry,$zero            # moved above
        bne     .Lsqr8x_tail
1672                                         # note that carry flag is guaranteed
1673                                         # to be zero at this point
1674         $LD     $a0,$SIZE_T*1($tp)
1675         $POP    $carry,$SIZE_T*10($sp)  # pull top-most carry in case we break
1676         $UCMP   $np_end,$np             # done yet?
1677         $LD     $a1,$SIZE_T*2($tp)
        sub     $t2,$np_end,$num        # rewound np
1679         $LD     $a2,$SIZE_T*3($tp)
1680         $LD     $a3,$SIZE_T*4($tp)
1681         $LD     $a4,$SIZE_T*5($tp)
1682         $LD     $a5,$SIZE_T*6($tp)
1683         $LD     $a6,$SIZE_T*7($tp)
1684         $LD     $a7,$SIZE_T*8($tp)
1685         beq     .Lsqr8x_tail_break
1686
1687         addc    $acc0,$acc0,$a0
1688         $LD     $a0,$SIZE_T*1($np)
1689         adde    $acc1,$acc1,$a1
1690         $LD     $a1,$SIZE_T*2($np)
1691         adde    $acc2,$acc2,$a2
1692         $LD     $a2,$SIZE_T*3($np)
1693         adde    $acc3,$acc3,$a3
1694         $LD     $a3,$SIZE_T*4($np)
1695         adde    $acc4,$acc4,$a4
1696         $LD     $a4,$SIZE_T*5($np)
1697         adde    $acc5,$acc5,$a5
1698         $LD     $a5,$SIZE_T*6($np)
1699         adde    $acc6,$acc6,$a6
1700         $LD     $a6,$SIZE_T*7($np)
1701         adde    $acc7,$acc7,$a7
1702         $LDU    $a7,$SIZE_T*8($np)
1703         #addze  $carry,$zero            # moved above
1704         b       .Lsqr8x_tail
1705
1706 .align  5
1707 .Lsqr8x_tail_break:
1708         $POP    $n0,$SIZE_T*8($sp)      # pull n0
1709         $POP    $t3,$SIZE_T*9($sp)      # &tp[2*num-1]
1710         addi    $cnt,$tp,$SIZE_T*8      # end of current t[num] window
1711
1712         addic   $carry,$carry,-1        # "move" top-most carry to carry bit
1713         adde    $t0,$acc0,$a0
1714         $LD     $acc0,$SIZE_T*8($rp)
1715         $LD     $a0,$SIZE_T*1($t2)      # recall that $t2 is &n[-1]
1716         adde    $t1,$acc1,$a1
1717         $LD     $acc1,$SIZE_T*9($rp)
1718         $LD     $a1,$SIZE_T*2($t2)
1719         adde    $acc2,$acc2,$a2
1720         $LD     $a2,$SIZE_T*3($t2)
1721         adde    $acc3,$acc3,$a3
1722         $LD     $a3,$SIZE_T*4($t2)
1723         adde    $acc4,$acc4,$a4
1724         $LD     $a4,$SIZE_T*5($t2)
1725         adde    $acc5,$acc5,$a5
1726         $LD     $a5,$SIZE_T*6($t2)
1727         adde    $acc6,$acc6,$a6
1728         $LD     $a6,$SIZE_T*7($t2)
1729         adde    $acc7,$acc7,$a7
1730         $LD     $a7,$SIZE_T*8($t2)
1731         addi    $np,$t2,$SIZE_T*8
1732         addze   $t2,$zero               # top-most carry
1733         $UMULL  $na0,$n0,$acc0
1734         $ST     $t0,$SIZE_T*1($tp)
1735         $UCMP   $cnt,$t3                # did we hit the bottom?
1736         $ST     $t1,$SIZE_T*2($tp)
1737         li      $cnt,8
1738         $ST     $acc2,$SIZE_T*3($tp)
1739         $LD     $acc2,$SIZE_T*10($rp)
1740         $ST     $acc3,$SIZE_T*4($tp)
1741         $LD     $acc3,$SIZE_T*11($rp)
1742         $ST     $acc4,$SIZE_T*5($tp)
1743         $LD     $acc4,$SIZE_T*12($rp)
1744         $ST     $acc5,$SIZE_T*6($tp)
1745         $LD     $acc5,$SIZE_T*13($rp)
1746         $ST     $acc6,$SIZE_T*7($tp)
1747         $LD     $acc6,$SIZE_T*14($rp)
1748         $ST     $acc7,$SIZE_T*8($tp)
1749         $LD     $acc7,$SIZE_T*15($rp)
1750         $PUSH   $t2,$SIZE_T*10($sp)     # off-load top-most carry
1751         addi    $tp,$rp,$SIZE_T*7       # slide the window
1752         mtctr   $cnt
1753         bne     .Lsqr8x_reduction
1754
1755         ################################################################
1756         # Final step. We see if result is larger than modulus, and
1757         # if it is, subtract the modulus. But comparison implies
1758         # subtraction. So we subtract modulus, see if it borrowed,
1759         # and conditionally copy original value.
1760         $POP    $rp,$SIZE_T*6($sp)      # pull &rp[-1]
1761         srwi    $cnt,$num,`log($SIZE_T)/log(2)+3`
1762         mr      $n0,$tp                 # put tp aside
1763         addi    $tp,$tp,$SIZE_T*8
1764         subi    $cnt,$cnt,1
1765         subfc   $t0,$a0,$acc0
1766         subfe   $t1,$a1,$acc1
1767         mr      $carry,$t2
1768         mr      $ap_end,$rp             # $rp copy
1769
1770         mtctr   $cnt
1771         b       .Lsqr8x_sub
1772
1773 .align  5
1774 .Lsqr8x_sub:
1775         $LD     $a0,$SIZE_T*1($np)
1776         $LD     $acc0,$SIZE_T*1($tp)
1777         $LD     $a1,$SIZE_T*2($np)
1778         $LD     $acc1,$SIZE_T*2($tp)
1779         subfe   $t2,$a2,$acc2
1780         $LD     $a2,$SIZE_T*3($np)
1781         $LD     $acc2,$SIZE_T*3($tp)
1782         subfe   $t3,$a3,$acc3
1783         $LD     $a3,$SIZE_T*4($np)
1784         $LD     $acc3,$SIZE_T*4($tp)
1785         $ST     $t0,$SIZE_T*1($rp)
1786         subfe   $t0,$a4,$acc4
1787         $LD     $a4,$SIZE_T*5($np)
1788         $LD     $acc4,$SIZE_T*5($tp)
1789         $ST     $t1,$SIZE_T*2($rp)
1790         subfe   $t1,$a5,$acc5
1791         $LD     $a5,$SIZE_T*6($np)
1792         $LD     $acc5,$SIZE_T*6($tp)
1793         $ST     $t2,$SIZE_T*3($rp)
1794         subfe   $t2,$a6,$acc6
1795         $LD     $a6,$SIZE_T*7($np)
1796         $LD     $acc6,$SIZE_T*7($tp)
1797         $ST     $t3,$SIZE_T*4($rp)
1798         subfe   $t3,$a7,$acc7
1799         $LDU    $a7,$SIZE_T*8($np)
1800         $LDU    $acc7,$SIZE_T*8($tp)
1801         $ST     $t0,$SIZE_T*5($rp)
1802         subfe   $t0,$a0,$acc0
1803         $ST     $t1,$SIZE_T*6($rp)
1804         subfe   $t1,$a1,$acc1
1805         $ST     $t2,$SIZE_T*7($rp)
1806         $STU    $t3,$SIZE_T*8($rp)
1807         bdnz    .Lsqr8x_sub
1808
1809         srwi    $cnt,$num,`log($SIZE_T)/log(2)+2`
1810          $LD    $a0,$SIZE_T*1($ap_end)  # original $rp
1811          $LD    $acc0,$SIZE_T*1($n0)    # original $tp
1812         subi    $cnt,$cnt,1
1813          $LD    $a1,$SIZE_T*2($ap_end)
1814          $LD    $acc1,$SIZE_T*2($n0)
1815         subfe   $t2,$a2,$acc2
1816          $LD    $a2,$SIZE_T*3($ap_end)
1817          $LD    $acc2,$SIZE_T*3($n0)
1818         subfe   $t3,$a3,$acc3
1819          $LD    $a3,$SIZE_T*4($ap_end)
1820          $LDU   $acc3,$SIZE_T*4($n0)
1821         $ST     $t0,$SIZE_T*1($rp)
1822         subfe   $t0,$a4,$acc4
1823         $ST     $t1,$SIZE_T*2($rp)
1824         subfe   $t1,$a5,$acc5
1825         $ST     $t2,$SIZE_T*3($rp)
1826         subfe   $t2,$a6,$acc6
1827         $ST     $t3,$SIZE_T*4($rp)
1828         subfe   $t3,$a7,$acc7
1829         $ST     $t0,$SIZE_T*5($rp)
1830         subfe   $carry,$zero,$carry     # did it borrow?
1831         $ST     $t1,$SIZE_T*6($rp)
1832         $ST     $t2,$SIZE_T*7($rp)
1833         $ST     $t3,$SIZE_T*8($rp)
1834
1835         addi    $tp,$sp,$SIZE_T*11
1836         mtctr   $cnt
1837
1838 .Lsqr4x_cond_copy:
1839         andc    $a0,$a0,$carry
1840          $ST    $zero,-$SIZE_T*3($n0)   # wipe stack clean
1841         and     $acc0,$acc0,$carry
1842          $ST    $zero,-$SIZE_T*2($n0)
1843         andc    $a1,$a1,$carry
1844          $ST    $zero,-$SIZE_T*1($n0)
1845         and     $acc1,$acc1,$carry
1846          $ST    $zero,-$SIZE_T*0($n0)
1847         andc    $a2,$a2,$carry
1848          $ST    $zero,$SIZE_T*1($tp)
1849         and     $acc2,$acc2,$carry
1850          $ST    $zero,$SIZE_T*2($tp)
1851         andc    $a3,$a3,$carry
1852          $ST    $zero,$SIZE_T*3($tp)
1853         and     $acc3,$acc3,$carry
1854          $STU   $zero,$SIZE_T*4($tp)
1855         or      $t0,$a0,$acc0
1856         $LD     $a0,$SIZE_T*5($ap_end)
1857         $LD     $acc0,$SIZE_T*1($n0)
1858         or      $t1,$a1,$acc1
1859         $LD     $a1,$SIZE_T*6($ap_end)
1860         $LD     $acc1,$SIZE_T*2($n0)
1861         or      $t2,$a2,$acc2
1862         $LD     $a2,$SIZE_T*7($ap_end)
1863         $LD     $acc2,$SIZE_T*3($n0)
1864         or      $t3,$a3,$acc3
1865         $LD     $a3,$SIZE_T*8($ap_end)
1866         $LDU    $acc3,$SIZE_T*4($n0)
1867         $ST     $t0,$SIZE_T*1($ap_end)
1868         $ST     $t1,$SIZE_T*2($ap_end)
1869         $ST     $t2,$SIZE_T*3($ap_end)
1870         $STU    $t3,$SIZE_T*4($ap_end)
1871         bdnz    .Lsqr4x_cond_copy
1872
1873         $POP    $ap,0($sp)              # pull saved sp
1874         andc    $a0,$a0,$carry
1875         and     $acc0,$acc0,$carry
1876         andc    $a1,$a1,$carry
1877         and     $acc1,$acc1,$carry
1878         andc    $a2,$a2,$carry
1879         and     $acc2,$acc2,$carry
1880         andc    $a3,$a3,$carry
1881         and     $acc3,$acc3,$carry
1882         or      $t0,$a0,$acc0
1883         or      $t1,$a1,$acc1
1884         or      $t2,$a2,$acc2
1885         or      $t3,$a3,$acc3
1886         $ST     $t0,$SIZE_T*1($ap_end)
1887         $ST     $t1,$SIZE_T*2($ap_end)
1888         $ST     $t2,$SIZE_T*3($ap_end)
1889         $ST     $t3,$SIZE_T*4($ap_end)
1890
1891         b       .Lsqr8x_done
1892
1893 .align  5
1894 .Lsqr8x8_post_condition:
1895         $POP    $rp,$SIZE_T*6($sp)      # pull rp
1896         $POP    $ap,0($sp)              # pull saved sp
1897         addze   $carry,$zero
1898
1899         # $acc0-7,$carry hold result, $a0-7 hold modulus
1900         subfc   $acc0,$a0,$acc0
1901         subfe   $acc1,$a1,$acc1
1902          $ST    $zero,$SIZE_T*12($sp)   # wipe stack clean
1903          $ST    $zero,$SIZE_T*13($sp)
1904         subfe   $acc2,$a2,$acc2
1905          $ST    $zero,$SIZE_T*14($sp)
1906          $ST    $zero,$SIZE_T*15($sp)
1907         subfe   $acc3,$a3,$acc3
1908          $ST    $zero,$SIZE_T*16($sp)
1909          $ST    $zero,$SIZE_T*17($sp)
1910         subfe   $acc4,$a4,$acc4
1911          $ST    $zero,$SIZE_T*18($sp)
1912          $ST    $zero,$SIZE_T*19($sp)
1913         subfe   $acc5,$a5,$acc5
1914          $ST    $zero,$SIZE_T*20($sp)
1915          $ST    $zero,$SIZE_T*21($sp)
1916         subfe   $acc6,$a6,$acc6
1917          $ST    $zero,$SIZE_T*22($sp)
1918          $ST    $zero,$SIZE_T*23($sp)
1919         subfe   $acc7,$a7,$acc7
1920          $ST    $zero,$SIZE_T*24($sp)
1921          $ST    $zero,$SIZE_T*25($sp)
1922         subfe   $carry,$zero,$carry     # did it borrow?
1923          $ST    $zero,$SIZE_T*26($sp)
1924          $ST    $zero,$SIZE_T*27($sp)
1925
1926         and     $a0,$a0,$carry
1927         and     $a1,$a1,$carry
1928         addc    $acc0,$acc0,$a0         # add modulus back if borrowed
1929         and     $a2,$a2,$carry
1930         adde    $acc1,$acc1,$a1
1931         and     $a3,$a3,$carry
1932         adde    $acc2,$acc2,$a2
1933         and     $a4,$a4,$carry
1934         adde    $acc3,$acc3,$a3
1935         and     $a5,$a5,$carry
1936         adde    $acc4,$acc4,$a4
1937         and     $a6,$a6,$carry
1938         adde    $acc5,$acc5,$a5
1939         and     $a7,$a7,$carry
1940         adde    $acc6,$acc6,$a6
1941         adde    $acc7,$acc7,$a7
1942         $ST     $acc0,$SIZE_T*1($rp)
1943         $ST     $acc1,$SIZE_T*2($rp)
1944         $ST     $acc2,$SIZE_T*3($rp)
1945         $ST     $acc3,$SIZE_T*4($rp)
1946         $ST     $acc4,$SIZE_T*5($rp)
1947         $ST     $acc5,$SIZE_T*6($rp)
1948         $ST     $acc6,$SIZE_T*7($rp)
1949         $ST     $acc7,$SIZE_T*8($rp)
1950
1951 .Lsqr8x_done:
1952         $PUSH   $zero,$SIZE_T*8($sp)
1953         $PUSH   $zero,$SIZE_T*10($sp)
1954
1955         $POP    r14,-$SIZE_T*18($ap)
1956         li      r3,1                    # signal "done"
1957         $POP    r15,-$SIZE_T*17($ap)
1958         $POP    r16,-$SIZE_T*16($ap)
1959         $POP    r17,-$SIZE_T*15($ap)
1960         $POP    r18,-$SIZE_T*14($ap)
1961         $POP    r19,-$SIZE_T*13($ap)
1962         $POP    r20,-$SIZE_T*12($ap)
1963         $POP    r21,-$SIZE_T*11($ap)
1964         $POP    r22,-$SIZE_T*10($ap)
1965         $POP    r23,-$SIZE_T*9($ap)
1966         $POP    r24,-$SIZE_T*8($ap)
1967         $POP    r25,-$SIZE_T*7($ap)
1968         $POP    r26,-$SIZE_T*6($ap)
1969         $POP    r27,-$SIZE_T*5($ap)
1970         $POP    r28,-$SIZE_T*4($ap)
1971         $POP    r29,-$SIZE_T*3($ap)
1972         $POP    r30,-$SIZE_T*2($ap)
1973         $POP    r31,-$SIZE_T*1($ap)
1974         mr      $sp,$ap
1975         blr
1976         .long   0
1977         .byte   0,12,4,0x20,0x80,18,6,0
1978         .long   0
1979 .size   __bn_sqr8x_mont,.-__bn_sqr8x_mont
1980 ___
1981 }
# Append the CRYPTOGAMS signature string to the generated assembly.
# Note: "\@" keeps the heredoc from interpolating @openssl as an array.
$code.=<<___;
.asciz  "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
___

# Expand every `...` construct in the accumulated assembly by eval'ing
# its contents (used above for compile-time arithmetic such as the
# `log($SIZE_T)/log(2)+3` shift amounts), then emit the result.
$code =~ s/\`([^\`]*)\`/eval $1/gem;
print $code;
# STDOUT is typically redirected to the output .s file; a failed close
# means buffered assembly was lost (e.g. disk full), so it must be fatal.
close STDOUT or die "error closing STDOUT: $!";