1 ; Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
3 ; Licensed under the OpenSSL license (the "License"). You may not use
4 ; this file except in compliance with the License. You can obtain a copy
5 ; in the file LICENSE in the source distribution or at
6 ; https://www.openssl.org/source/license.html
9 ; PA-RISC 64-bit implementation of bn_asm code
11 ; This code is approximately 2x faster than the C version
14 ; See http://devresource.hp.com/ for more details on the PA-RISC
15 ; architecture. Also see the book "PA-RISC 2.0 Architecture"
16 ; by Gerry Kane for information on the instruction set architecture.
18 ; Code written by Chris Ruemmler (with some help from the HP C
21 ; The code compiles with HP's assembler
26 .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
29 ; Global Register definitions used for the routines.
31 ; Some information about HP's runtime architecture for 64-bits.
33 ; "Caller save" means the calling function must save the register
34 ; if it wants the register to be preserved.
35 ; "Callee save" means if a function uses the register, it must save
36 ; the value before using it.
38 ; For the floating point registers
40 ; "caller save" registers: fr4-fr11, fr22-fr31
41 ; "callee save" registers: fr12-fr21
42 ; "special" registers: fr0-fr3 (status and exception registers)
44 ; For the integer registers
46 ; "caller save" registers: r1,r19-r26
47 ; "callee save" registers: r3-r18
48 ; return register : r2 (rp)
49 ; return values : r28 (ret0,ret1)
50 ; Stack pointer : r30 (sp)
51 ; global data pointer : r27 (dp)
52 ; argument pointer : r29 (ap)
53 ; millicode return ptr : r31 (also a caller save register)
57 ; Arguments to the routines
68 ; Globals used in some routines
; Globals shared by the routines below (both live only within a routine body).
71 top_overflow .reg %r29 ; holds the constant 1 << 32, used to propagate 32-bit carries
72 high_mask .reg %r22 ; value 0xffffffff80000000L
75 ;------------------------------------------------------------------------------
79 ;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
80 ; int num, BN_ULONG w)
;
; Computes r[i] += a[i]*w for num 64-bit words and returns the final carry
; word in %ret0. Each 64x64 product is assembled from four 32x32 XMPYU
; multiplies in the FP unit, with partial products staged through the stack.
87 ; Local register definitions
125 .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
131 STD %r3,0(%sp) ; save r3
132 STD %r4,8(%sp) ; save r4
133 NOP ; Needed to make the loop 16-byte aligned
134 NOP ; Needed to make the loop 16-byte aligned
136 STD %r5,16(%sp) ; save r5
137 STD %r6,24(%sp) ; save r6
138 STD %r7,32(%sp) ; save r7
139 STD %r8,40(%sp) ; save r8
141 STD %r9,48(%sp) ; save r9
142 COPY %r0,%ret0 ; return 0 by default
143 DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
144 STD w,56(%sp) ; store w on stack
; NOTE: the LDO below sits in the CMPIB delay slot and executes either way,
; so the exit path uses negative offsets relative to the bumped %sp.
146 CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
147 LDO 128(%sp),%sp ; bump stack
150 ; The loop is unrolled twice, so if there is only 1 number
151 ; then go straight to the cleanup code.
153 CMPIB,= 1,num,bn_mul_add_words_single_top
154 FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
157 ; This loop is unrolled 2 times (64-byte aligned as well)
159 ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
160 ; two 32-bit multiplies can be issued per cycle.
162 bn_mul_add_words_unroll2
164 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
165 FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
166 LDD 0(r_ptr),rp_val ; rp[0]
167 LDD 8(r_ptr),rp_val_1 ; rp[1]
169 XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
170 XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
171 FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
172 FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
174 XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
175 XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
176 FSTD fm,-8(%sp) ; -8(sp) = m[0]
177 FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
179 XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
180 XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
181 FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
182 FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
184 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
185 XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp_1 = lt*fw_l
186 FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
187 FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
189 LDD -8(%sp),m_0 ; m[0]
190 LDD -40(%sp),m_1 ; m[1]
191 LDD -16(%sp),m1_0 ; m1[0]
192 LDD -48(%sp),m1_1 ; m1[1]
194 LDD -24(%sp),ht_0 ; ht[0]
195 LDD -56(%sp),ht_1 ; ht[1]
196 ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
197 ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
; CMPCLR nullifies the following ADD unless the middle-product sum wrapped.
201 CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
202 ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
204 CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
205 ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
206 EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
207 DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
209 EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
210 DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
211 ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
212 ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
214 ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
215 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
216 ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
217 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
219 ADD %ret0,lt_0,lt_0 ; lt[0] = lt[0] + c;
220 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
221 ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
222 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
224 LDO -2(num),num ; num = num - 2;
225 ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
226 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
227 STD lt_0,0(r_ptr) ; rp[0] = lt[0]
229 ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
230 ADD,DC ht_1,%r0,%ret0 ; ht[1]++; carry out for next iteration
231 LDO 16(a_ptr),a_ptr ; a_ptr += 2
233 STD lt_1,8(r_ptr) ; rp[1] = lt[1]
234 CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
235 LDO 16(r_ptr),r_ptr ; r_ptr += 2
237 CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
240 ; Top of loop aligned on 64-byte boundary
242 bn_mul_add_words_single_top
243 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
244 LDD 0(r_ptr),rp_val ; rp[0]
245 LDO 8(a_ptr),a_ptr ; a_ptr++
246 XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
247 FSTD fm1,-16(%sp) ; -16(sp) = m1
248 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
249 FSTD fm,-8(%sp) ; -8(sp) = m
250 XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
251 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
252 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
253 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
256 LDD -16(%sp),m1_0 ; m1 = temp1
257 ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
261 CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
262 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
264 EXTRD,U tmp_0,31,32,m_0 ; m>>32
265 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
267 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
268 ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
269 ADD,DC ht_0,%r0,ht_0 ; ht++
270 ADD %ret0,tmp_0,lt_0 ; lt = lt + c;
271 ADD,DC ht_0,%r0,ht_0 ; ht++
272 ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
273 ADD,DC ht_0,%r0,%ret0 ; ht++; final carry returned in %ret0
274 STD lt_0,0(r_ptr) ; rp[0] = lt
276 bn_mul_add_words_exit
278 LDD -80(%sp),%r9 ; restore r9
279 LDD -88(%sp),%r8 ; restore r8
280 LDD -96(%sp),%r7 ; restore r7
281 LDD -104(%sp),%r6 ; restore r6
282 LDD -112(%sp),%r5 ; restore r5
283 LDD -120(%sp),%r4 ; restore r4
285 LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame
286 .PROCEND ;in=23,24,25,26,29;out=28;
288 ;----------------------------------------------------------------------------
290 ;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
;
; Computes rp[i] = ap[i]*w for num 64-bit words and returns the final carry
; word in %ret0. Same multiply strategy as bn_mul_add_words, but rp[] is
; overwritten rather than accumulated into.
301 .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
304 STD %r3,0(%sp) ; save r3
305 STD %r4,8(%sp) ; save r4
306 STD %r5,16(%sp) ; save r5
307 STD %r6,24(%sp) ; save r6
309 STD %r7,32(%sp) ; save r7
310 COPY %r0,%ret0 ; return 0 by default
311 DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
312 STD w,56(%sp) ; w on stack
314 CMPIB,>= 0,num,bn_mul_words_exit ; if (num <= 0) then exit
315 LDO 128(%sp),%sp ; bump stack (in delay slot; executes either way)
318 ; See if only 1 word to do, thus just do cleanup
320 CMPIB,= 1,num,bn_mul_words_single_top
321 FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
324 ; This loop is unrolled 2 times (64-byte aligned as well)
326 ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
327 ; two 32-bit multiplies can be issued per cycle.
331 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
332 FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
333 XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
334 XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
336 FSTD fm1,-16(%sp) ; -16(sp) = m1
337 FSTD fm1_1,-48(%sp) ; -48(sp) = m1
338 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
339 XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
341 FSTD fm,-8(%sp) ; -8(sp) = m
342 FSTD fm_1,-40(%sp) ; -40(sp) = m
343 XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
344 XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
346 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
347 FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
348 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
349 XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
351 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
352 FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
361 ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
362 ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
; Nullification pattern: the ADD only runs when the middle-product sum wrapped.
366 CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
367 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
368 CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
369 ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
371 EXTRD,U tmp_0,31,32,m_0 ; m>>32
372 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
373 EXTRD,U tmp_1,31,32,m_1 ; m>>32
374 DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
376 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
377 ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
378 ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
379 ADD,DC ht_0,%r0,ht_0 ; ht++
381 ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
382 ADD,DC ht_1,%r0,ht_1 ; ht++
383 ADD %ret0,lt_0,lt_0 ; lt = lt + c (ret0);
384 ADD,DC ht_0,%r0,ht_0 ; ht++
386 ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
387 ADD,DC ht_1,%r0,ht_1 ; ht++
388 STD lt_0,0(r_ptr) ; rp[0] = lt
389 STD lt_1,8(r_ptr) ; rp[1] = lt
391 COPY ht_1,%ret0 ; carry = ht
392 LDO -2(num),num ; num = num - 2;
393 LDO 16(a_ptr),a_ptr ; ap += 2
394 CMPIB,<= 2,num,bn_mul_words_unroll2
395 LDO 16(r_ptr),r_ptr ; rp++
397 CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
400 ; Top of loop aligned on 64-byte boundary
402 bn_mul_words_single_top
403 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
405 XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
406 FSTD fm1,-16(%sp) ; -16(sp) = m1
407 XMPYU flt_0,fw_h,fm ; m = lt*fw_h
408 FSTD fm,-8(%sp) ; -8(sp) = m
409 XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
410 FSTD ht_temp,-24(%sp) ; -24(sp) = ht
411 XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
412 FSTD lt_temp,-32(%sp) ; -32(sp) = lt
416 ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
420 CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
421 ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
423 EXTRD,U tmp_0,31,32,m_0 ; m>>32
424 DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
426 ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
427 ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
428 ADD,DC ht_0,%r0,ht_0 ; ht++
430 ADD %ret0,lt_0,lt_0 ; lt = lt + c;
431 ADD,DC ht_0,%r0,ht_0 ; ht++
433 COPY ht_0,%ret0 ; copy carry
434 STD lt_0,0(r_ptr) ; rp[0] = lt
438 LDD -96(%sp),%r7 ; restore r7
439 LDD -104(%sp),%r6 ; restore r6
440 LDD -112(%sp),%r5 ; restore r5
441 LDD -120(%sp),%r4 ; restore r4
443 LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame
444 .PROCEND ;in=23,24,25,26,29;out=28;
446 ;----------------------------------------------------------------------------
448 ;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
;
; Computes the double-width squares rp[2i] (low), rp[2i+1] (high) = ap[i]^2
; for num input words. The cross term 2*ht*lt is formed from one XMPYU by
; shifting (m << 33) and masking, instead of a second multiply.
457 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
458 .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
462 STD %r3,0(%sp) ; save r3
463 STD %r4,8(%sp) ; save r4
465 STD %r5,16(%sp) ; save r5
467 CMPIB,>= 0,num,bn_sqr_words_exit ; if (num <= 0) then exit
468 LDO 128(%sp),%sp ; bump stack (in delay slot; executes either way)
471 ; If only 1, then go straight to cleanup
473 CMPIB,= 1,num,bn_sqr_words_single_top
474 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
477 ; This loop is unrolled 2 times (64-byte aligned as well)
481 FLDD 0(a_ptr),t_float_0 ; a[0]
482 FLDD 8(a_ptr),t_float_1 ; a[1]
483 XMPYU fht_0,flt_0,fm ; m[0] = high(a[0]) * low(a[0])
484 XMPYU fht_1,flt_1,fm_1 ; m[1] = high(a[1]) * low(a[1])
486 FSTD fm,-24(%sp) ; store m[0]
487 FSTD fm_1,-56(%sp) ; store m[1]
488 XMPYU flt_0,flt_0,lt_temp ; lt[0]
489 XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
491 FSTD lt_temp,-16(%sp) ; store lt[0]
492 FSTD lt_temp_1,-48(%sp) ; store lt[1]
493 XMPYU fht_0,fht_0,ht_temp ; ht[0]
494 XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
496 FSTD ht_temp,-8(%sp) ; store ht[0]
497 FSTD ht_temp_1,-40(%sp) ; store ht[1]
501 AND m_0,high_mask,tmp_0 ; m[0] & Mask
502 AND m_1,high_mask,tmp_1 ; m[1] & Mask
503 DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
504 DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
508 EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
509 EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
513 ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
514 ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
516 ADD lt_0,m_0,lt_0 ; lt = lt+m
517 ADD,DC ht_0,%r0,ht_0 ; ht[0]++
518 STD lt_0,0(r_ptr) ; rp[0] = lt[0]
519 STD ht_0,8(r_ptr) ; rp[1] = ht[0]
521 ADD lt_1,m_1,lt_1 ; lt = lt+m
522 ADD,DC ht_1,%r0,ht_1 ; ht[1]++
523 STD lt_1,16(r_ptr) ; rp[2] = lt[1]
524 STD ht_1,24(r_ptr) ; rp[3] = ht[1]
526 LDO -2(num),num ; num = num - 2;
527 LDO 16(a_ptr),a_ptr ; ap += 2
528 CMPIB,<= 2,num,bn_sqr_words_unroll2
529 LDO 32(r_ptr),r_ptr ; rp += 4
531 CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
534 ; Top of loop aligned on 64-byte boundary
536 bn_sqr_words_single_top
537 FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
539 XMPYU fht_0,flt_0,fm ; m
540 FSTD fm,-24(%sp) ; store m
542 XMPYU flt_0,flt_0,lt_temp ; lt
543 FSTD lt_temp,-16(%sp) ; store lt
545 XMPYU fht_0,fht_0,ht_temp ; ht
546 FSTD ht_temp,-8(%sp) ; store ht
548 LDD -24(%sp),m_0 ; load m
549 AND m_0,high_mask,tmp_0 ; m & Mask
550 DEPD,Z m_0,30,31,m_0 ; m << 32+1
551 LDD -16(%sp),lt_0 ; lt
553 LDD -8(%sp),ht_0 ; ht
554 EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
555 ADD m_0,lt_0,lt_0 ; lt = lt+m
556 ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
557 ADD,DC ht_0,%r0,ht_0 ; ht++
559 STD lt_0,0(r_ptr) ; rp[0] = lt
560 STD ht_0,8(r_ptr) ; rp[1] = ht
564 LDD -112(%sp),%r5 ; restore r5
565 LDD -120(%sp),%r4 ; restore r4
568 .PROCEND ;in=23,24,25,26,29;out=28;
571 ;----------------------------------------------------------------------------
573 ;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; r[i] = a[i] + b[i] with carry propagation across all n words;
; returns the final carry (0 or 1) in %ret0.
588 .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
591 CMPIB,>= 0,n,bn_add_words_exit ; if (n <= 0) then exit
592 COPY %r0,%ret0 ; return 0 by default (in delay slot)
595 ; If 2 or more numbers do the loop
597 CMPIB,= 1,n,bn_add_words_single_top
601 ; This loop is unrolled 2 times (64-byte aligned as well)
; Carry chain: add in the previous carry first, latch the carry it produces,
; then add the b word and accumulate any second carry.
606 ADD t,%ret0,t ; t = t+c;
607 ADD,DC %r0,%r0,%ret0 ; set c to carry
608 ADD t,b,l ; l = t + b[0]
609 ADD,DC %ret0,%r0,%ret0 ; c+= carry
614 ADD t,%ret0,t ; t = t+c;
615 ADD,DC %r0,%r0,%ret0 ; set c to carry
616 ADD t,b,l ; l = t + b[0]
617 ADD,DC %ret0,%r0,%ret0 ; c+= carry
624 CMPIB,<= 2,n,bn_add_words_unroll2
627 CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
629 bn_add_words_single_top
633 ADD t,%ret0,t ; t = t+c;
634 ADD,DC %r0,%r0,%ret0 ; set c to carry (could use CMPCLR??)
635 ADD t,b,l ; l = t + b[0]
636 ADD,DC %ret0,%r0,%ret0 ; c+= carry
643 .PROCEND ;in=23,24,25,26,29;out=28;
645 ;----------------------------------------------------------------------------
647 ;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; r[i] = a[i] - b[i] with borrow propagation across all n words;
; returns the final borrow (0 or 1) in %ret0.
663 .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
667 CMPIB,>= 0,n,bn_sub_words_exit ; if (n <= 0) then exit
668 COPY %r0,%ret0 ; return 0 by default (in delay slot)
671 ; If 2 or more numbers do the loop
673 CMPIB,= 1,n,bn_sub_words_single_top
677 ; This loop is unrolled 2 times (64-byte aligned as well)
; The CMPCLR after each subtract computes the next borrow without a branch.
682 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
683 SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3 - c;
685 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
690 STD sub_tmp1,0(r_ptr)
694 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
695 SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3 - c;
696 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
701 STD sub_tmp1,8(r_ptr)
707 CMPIB,<= 2,n,bn_sub_words_unroll2
710 CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
712 bn_sub_words_single_top
715 SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
716 SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3 - c;
717 CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
723 STD sub_tmp1,0(r_ptr)
729 .PROCEND ;in=23,24,25,26,29;out=28;
731 ;------------------------------------------------------------------------------
733 ; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
;
; Divides the 128-bit value (h:l) by d and returns the 64-bit quotient.
; Returns -1 when d == 0; jumps to bn_div_err_case (fprintf + abort) when the
; quotient would overflow 64 bits (h >= d after normalization check).
739 ; This is mainly just modified assembly from the compiler, thus the
740 ; lack of variable names.
742 ;------------------------------------------------------------------------------
745 .callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
746 .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
747 .IMPORT BN_num_bits_word,CODE,NO_RELOCATION
749 .IMPORT fprintf,CODE,NO_RELOCATION
750 .IMPORT abort,CODE,NO_RELOCATION
751 .IMPORT $$div2U,MILLICODE
763 STD %r27,-288(%r30) ; save gp
765 COPY %r24,%r3 ; save d
766 COPY %r26,%r4 ; save h (high 64-bits)
767 LDO -1(%r0),%ret0 ; return -1 by default
769 CMPB,*= %r0,%arg2,$D3 ; if (d == 0)
770 COPY %r25,%r5 ; save l (low 64-bits)
772 LDO -48(%r30),%r29 ; create ap
773 .CALL ;in=26,29;out=28;
774 B,L BN_num_bits_word,%r2 ; i = BN_num_bits_word(d)
776 LDD -288(%r30),%r27 ; restore gp
779 CMPB,= %r21,%ret0,$00000012 ;if (i == 64) (forward)
782 DEPDI,Z -1,%sar,1,%r29 ; %r29 = 1 << i
783 CMPB,*<<,N %r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward)
786 SUBI 64,%r24,%r31 ; i = 64 - i;
787 CMPCLR,*<< %r4,%r3,%r0 ; if (h >= d)
788 SUB %r4,%r3,%r4 ; h -= d
789 CMPB,= %r31,%r0,$0000001A ; if (i)
790 COPY %r0,%r10 ; ret = 0
; Normalize: shift d, and the (h:l) pair, left by i so d's top bit is set.
791 MTSARCM %r31 ; i to shift
792 DEPD,Z %r3,%sar,64,%r3 ; d <<= i;
793 SUBI 64,%r31,%r19 ; 64 - i; redundant
794 MTSAR %r19 ; (64 -i) to shift
795 SHRPD %r4,%r5,%sar,%r4 ; l>> (64-i)
796 MTSARCM %r31 ; i to shift
797 DEPD,Z %r5,%sar,64,%r5 ; l <<= i;
800 DEPDI,Z -1,31,32,%r19 ; %r19 = mask 0xffffffff00000000L
801 EXTRD,U %r3,31,32,%r6 ; dh = high 32 bits of d
802 EXTRD,U %r3,63,32,%r8 ; dl = low 32 bits of d
804 STD %r3,-280(%r30) ; "d" to stack
807 DEPDI,Z -1,63,32,%r29 ;
808 EXTRD,U %r4,31,32,%r31 ; h >> 32
809 CMPB,*=,N %r31,%r6,$D2 ; if ((h>>32) != dh)(forward) div
811 EXTRD,U %r4,31,32,%r25
813 .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
815 EXTRD,U %r6,31,32,%r23
; Check the trial quotient q: multiply q*d with four 32x32 FP multiplies.
818 STD %r29,-272(%r30) ; q
819 AND %r5,%r19,%r24 ; t & 0xffffffff00000000;
820 EXTRD,U %r24,31,32,%r24 ; t >> 32
821 FLDD -272(%r30),%fr7 ; q
822 FLDD -280(%r30),%fr8 ; d
823 XMPYU %fr8L,%fr7L,%fr10
824 FSTD %fr10,-256(%r30)
825 XMPYU %fr8L,%fr7R,%fr22
826 FSTD %fr22,-264(%r30)
827 XMPYU %fr8R,%fr7L,%fr11
828 XMPYU %fr8R,%fr7R,%fr23
829 FSTD %fr11,-232(%r30)
830 FSTD %fr23,-240(%r30)
832 DEPD,Z %r28,31,32,%r2
836 DEPD,Z %r22,31,32,%r22
838 B $00000024 ; enter loop
847 CMPB,*<>,N %r0,%r26,$00000046 ; (forward)
848 DEPD,Z %r25,31,32,%r20
850 CMPB,*<<,N %r21,%r23,$0000002A ;(backward)
852 ;-------------Break path---------------------
855 DEPD,Z %r23,31,32,%r25 ;tl
856 EXTRD,U %r23,31,32,%r26 ;t
857 AND %r25,%r19,%r24 ;tl = (tl<<32)&0xffffffff00000000L
858 ADD,L %r31,%r26,%r31 ;th += t;
859 CMPCLR,*>>= %r5,%r24,%r0 ;if (l<tl)
860 LDO 1(%r31),%r31 ; th++;
861 CMPB,*<<=,N %r31,%r4,$00000036 ;if (n < th) (forward)
862 LDO -1(%r29),%r29 ;q--;
863 ADD,L %r4,%r3,%r4 ;h += d;
865 ADDIB,=,N -1,%r9,$D1 ;if (--count == 0) break (forward)
866 SUB %r5,%r24,%r28 ; l -= tl;
867 SUB %r4,%r31,%r24 ; h -= th;
868 SHRPD %r24,%r28,32,%r4 ; h = ((h<<32)|(l>>32));
869 DEPD,Z %r29,31,32,%r10 ; ret = q<<32
871 DEPD,Z %r28,31,32,%r5 ; l = l << 32
874 OR %r10,%r29,%r28 ; ret |= q
887 LDD,MB -352(%r30),%r3 ; restore r3 and pop the frame
; Error path: build the arguments for fprintf(stderr-like __iob, C$4, ...)
; and fall through to abort ("Division would overflow").
891 ADDIL L'bn_div_words-bn_div_err_case,%r6,%r1
892 LDO R'bn_div_words-bn_div_err_case(%r1),%r6
893 ADDIL LT'__iob,%r27,%r1
894 LDD RT'__iob(%r1),%r26
895 ADDIL L'C$4-bn_div_words,%r6,%r1
896 LDO R'C$4-bn_div_words(%r1),%r25
898 .CALL ;in=24,25,26,29;out=28;
908 .PROCEND ;in=24,25,26,29;out=28;
910 ;----------------------------------------------------------------------------
912 ; Registers to hold 64-bit values to manipulate. The "L" part
913 ; of the register corresponds to the upper 32-bits, while the "R"
914 ; part corresponds to the lower 32-bits
916 ; Note, that when using b6 and b7, the code must save these before
917 ; using them because they are callee save registers
920 ; Floating point registers to use to save values that
921 ; are manipulated. These don't collide with ftemp1-6 and
922 ; are all caller save registers
965 ; Temporary floating point variables, these are all caller save
974 ; The B set of registers when used.
; Scratch registers shared by the SQR_ADD_C / SQR_ADD_C2 / MUL_ADD_C macros.
1001 c1 .reg %r21 ; only reg
1002 temp1 .reg %r20 ; only reg
1003 temp2 .reg %r19 ; only reg
1004 temp3 .reg %r31 ; only reg
; SQR_ADD_C: (C1,C2,C3) += a*a, where A0L/A0R are the high/low 32-bit halves
; of a held in FP registers and C1..C3 form a three-word carry accumulator.
; The cross term 2*ht*lt comes from one XMPYU shifted left by 33 plus the
; masked-out high bits, saving a multiply.
1014 SQR_ADD_C .macro A0L,A0R,C1,C2,C3
1015 XMPYU A0L,A0R,ftemp1 ; m
1016 FSTD ftemp1,-24(%sp) ; store m
1018 XMPYU A0R,A0R,ftemp2 ; lt
1019 FSTD ftemp2,-16(%sp) ; store lt
1021 XMPYU A0L,A0L,ftemp3 ; ht
1022 FSTD ftemp3,-8(%sp) ; store ht
1024 LDD -24(%sp),m ; load m
1025 AND m,high_mask,temp2 ; m & Mask
1026 DEPD,Z m,30,31,temp3 ; m << 32+1
1027 LDD -16(%sp),lt ; lt
1030 EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
1031 ADD temp3,lt,lt ; lt = lt+m
1032 ADD,L ht,temp1,ht ; ht += temp1
1033 ADD,DC ht,%r0,ht ; ht++
1035 ADD C1,lt,C1 ; c1=c1+lt
1036 ADD,DC ht,%r0,ht ; ht++
1038 ADD C2,ht,C2 ; c2=c2+ht
1039 ADD,DC C3,%r0,C3 ; c3++
; SQR_ADD_C2: (C1,C2,C3) += 2*a*b — the off-diagonal comba term, counted
; twice by explicitly doubling both halves of the product (ht and lt) and
; folding each doubling carry into the accumulator.
1042 SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
1043 XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
1044 FSTD ftemp1,-16(%sp) ;
1045 XMPYU A0R,A1L,ftemp2 ; m = bh*lt
1046 FSTD ftemp2,-8(%sp) ;
1047 XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
1048 FSTD ftemp3,-32(%sp)
1049 XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
1050 FSTD ftemp4,-24(%sp) ;
1052 LDD -8(%sp),m ; r21 = m
1053 LDD -16(%sp),m1 ; r19 = m1
1056 DEPD,Z m,31,32,temp3 ; (m+m1<<32)
1057 LDD -24(%sp),ht ; r24 = ht
1059 CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
1060 ADD,L ht,high_one,ht ; ht+=high_one
1062 EXTRD,U m,31,32,temp1 ; m >> 32
1063 LDD -32(%sp),lt ; lt
1064 ADD,L ht,temp1,ht ; ht+= m>>32
1065 ADD lt,temp3,lt ; lt = lt+m1
1066 ADD,DC ht,%r0,ht ; ht++
1068 ADD ht,ht,ht ; ht=ht+ht;
1069 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1071 ADD lt,lt,lt ; lt=lt+lt;
1072 ADD,DC ht,%r0,ht ; add in carry (ht++)
1074 ADD C1,lt,C1 ; c1=c1+lt
1075 ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
1076 LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
1078 ADD C2,ht,C2 ; c2 = c2 + ht
1079 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1083 ;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
;
; Computes the full 16-word square r = a^2 of an 8-word input using the
; comba (column-by-column) method: each output word gathers every partial
; product of that weight via SQR_ADD_C / SQR_ADD_C2 into the rotating
; accumulator (c1,c2,c3).
1090 .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1091 .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1095 STD %r3,0(%sp) ; save r3
1096 STD %r4,8(%sp) ; save r4
1097 STD %r5,16(%sp) ; save r5
1098 STD %r6,24(%sp) ; save r6
1107 LDO 128(%sp),%sp ; bump stack
1108 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
1109 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1112 ; Load up all of the values we are going to use
1123 SQR_ADD_C a0L,a0R,c1,c2,c3
1124 STD c1,0(r_ptr) ; r[0] = c1;
1127 SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
1128 STD c2,8(r_ptr) ; r[1] = c2;
1131 SQR_ADD_C a1L,a1R,c3,c1,c2
1132 SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
1133 STD c3,16(r_ptr) ; r[2] = c3;
1136 SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
1137 SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
1138 STD c1,24(r_ptr) ; r[3] = c1;
1141 SQR_ADD_C a2L,a2R,c2,c3,c1
1142 SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
1143 SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
1144 STD c2,32(r_ptr) ; r[4] = c2;
1147 SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
1148 SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
1149 SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
1150 STD c3,40(r_ptr) ; r[5] = c3;
1153 SQR_ADD_C a3L,a3R,c1,c2,c3
1154 SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
1155 SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
1156 SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
1157 STD c1,48(r_ptr) ; r[6] = c1;
1160 SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
1161 SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
1162 SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
1163 SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
1164 STD c2,56(r_ptr) ; r[7] = c2;
1167 SQR_ADD_C a4L,a4R,c3,c1,c2
1168 SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
1169 SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
1170 SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
1171 STD c3,64(r_ptr) ; r[8] = c3;
1174 SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
1175 SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
1176 SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
1177 STD c1,72(r_ptr) ; r[9] = c1;
1180 SQR_ADD_C a5L,a5R,c2,c3,c1
1181 SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
1182 SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
1183 STD c2,80(r_ptr) ; r[10] = c2;
1186 SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
1187 SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
1188 STD c3,88(r_ptr) ; r[11] = c3;
1191 SQR_ADD_C a6L,a6R,c1,c2,c3
1192 SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
1193 STD c1,96(r_ptr) ; r[12] = c1;
1196 SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
1197 STD c2,104(r_ptr) ; r[13] = c2;
1200 SQR_ADD_C a7L,a7R,c3,c1,c2
1201 STD c3, 112(r_ptr) ; r[14] = c3
1202 STD c1, 120(r_ptr) ; r[15] = c1
1205 LDD -104(%sp),%r6 ; restore r6
1206 LDD -112(%sp),%r5 ; restore r5
1207 LDD -120(%sp),%r4 ; restore r4
1209 LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame
1213 ;-----------------------------------------------------------------------------
1215 ;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
;
; Computes the full 8-word square r = a^2 of a 4-word input using the comba
; method, same structure as bn_sqr_comba8 above but fixed at 4 input words.
1222 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1223 .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1226 STD %r3,0(%sp) ; save r3
1227 STD %r4,8(%sp) ; save r4
1228 STD %r5,16(%sp) ; save r5
1229 STD %r6,24(%sp) ; save r6
1238 LDO 128(%sp),%sp ; bump stack
1239 DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
1240 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1243 ; Load up all of the values we are going to use
1254 SQR_ADD_C a0L,a0R,c1,c2,c3
1256 STD c1,0(r_ptr) ; r[0] = c1;
1259 SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
1261 STD c2,8(r_ptr) ; r[1] = c2;
1264 SQR_ADD_C a1L,a1R,c3,c1,c2
1265 SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
1267 STD c3,16(r_ptr) ; r[2] = c3;
1270 SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
1271 SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
1273 STD c1,24(r_ptr) ; r[3] = c1;
1276 SQR_ADD_C a2L,a2R,c2,c3,c1
1277 SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
1279 STD c2,32(r_ptr) ; r[4] = c2;
1282 SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
1283 STD c3,40(r_ptr) ; r[5] = c3;
1286 SQR_ADD_C a3L,a3R,c1,c2,c3
1287 STD c1,48(r_ptr) ; r[6] = c1;
1288 STD c2,56(r_ptr) ; r[7] = c2;
1291 LDD -104(%sp),%r6 ; restore r6
1292 LDD -112(%sp),%r5 ; restore r5
1293 LDD -120(%sp),%r4 ; restore r4
1295 LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame
1300 ;---------------------------------------------------------------------------
; MUL_ADD_C: (C1,C2,C3) += a*b, where A0L/A0R and B0L/B0R are the high/low
; 32-bit halves of a and b in FP registers; four XMPYU products are combined
; with explicit carry handling into the three-word accumulator.
1302 MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
1303 XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
1304 FSTD ftemp1,-16(%sp) ;
1305 XMPYU A0R,B0L,ftemp2 ; m = bh*lt
1306 FSTD ftemp2,-8(%sp) ;
1307 XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
1308 FSTD ftemp3,-32(%sp)
1309 XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
1310 FSTD ftemp4,-24(%sp) ;
1312 LDD -8(%sp),m ; r21 = m
1313 LDD -16(%sp),m1 ; r19 = m1
1316 DEPD,Z m,31,32,temp3 ; (m+m1<<32)
1317 LDD -24(%sp),ht ; r24 = ht
1319 CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
1320 ADD,L ht,high_one,ht ; ht+=high_one
1322 EXTRD,U m,31,32,temp1 ; m >> 32
1323 LDD -32(%sp),lt ; lt
1324 ADD,L ht,temp1,ht ; ht+= m>>32
1325 ADD lt,temp3,lt ; lt = lt+m1
1326 ADD,DC ht,%r0,ht ; ht++
1328 ADD C1,lt,C1 ; c1=c1+lt
1329 ADD,DC ht,%r0,ht ; ht++ (add in carry)
1331 ADD C2,ht,C2 ; c2 = c2 + ht
1332 ADD,DC C3,%r0,C3 ; add in carry (c3++)
1337 ;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
;
; Computes the full 16-word product r = a*b of two 8-word inputs using the
; comba method: each output word gathers all a[i]*b[j] with i+j equal to
; that column index via MUL_ADD_C into the rotating accumulator (c1,c2,c3).
1345 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1346 .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1350 STD %r3,0(%sp) ; save r3
1351 STD %r4,8(%sp) ; save r4
1352 STD %r5,16(%sp) ; save r5
1353 STD %r6,24(%sp) ; save r6
1354 FSTD %fr12,32(%sp) ; save fr12 (callee-save FP reg)
1355 FSTD %fr13,40(%sp) ; save fr13 (callee-save FP reg)
1364 LDO 128(%sp),%sp ; bump stack
1365 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1368 ; Load up all of the values we are going to use
1388 MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
1392 MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
1393 MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
1397 MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
1398 MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
1399 MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
1403 MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
1404 MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
1405 MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
1406 MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
1410 MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
1411 MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
1412 MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
1413 MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
1414 MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
1418 MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
1419 MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
1420 MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
1421 MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
1422 MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
1423 MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
1427 MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
1428 MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
1429 MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
1430 MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
1431 MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
1432 MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
1433 MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
1437 MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
1438 MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
1439 MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
1440 MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
1441 MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
1442 MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
1443 MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
1444 MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
1448 MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
1449 MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
1450 MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
1451 MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
1452 MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
1453 MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
1454 MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
1458 MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
1459 MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
1460 MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
1461 MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
1462 MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
1463 MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
1467 MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
1468 MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
1469 MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
1470 MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
1471 MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
1475 MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
1476 MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
1477 MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
1478 MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
1482 MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
1483 MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
1484 MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
1488 MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
1489 MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
1493 MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
1500 LDD -104(%sp),%r6 ; restore r6
1501 LDD -112(%sp),%r5 ; restore r5
1502 LDD -120(%sp),%r4 ; restore r4
1504 LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame
1508 ;-----------------------------------------------------------------------------
1510 ;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
;
; Computes the full 8-word product r = a*b of two 4-word inputs using the
; comba method, same structure as bn_mul_comba8 above but fixed at 4 words.
1518 .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
1519 .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
1523 STD %r3,0(%sp) ; save r3
1524 STD %r4,8(%sp) ; save r4
1525 STD %r5,16(%sp) ; save r5
1526 STD %r6,24(%sp) ; save r6
1527 FSTD %fr12,32(%sp) ; save fr12 (callee-save FP reg)
1528 FSTD %fr13,40(%sp) ; save fr13 (callee-save FP reg)
1537 LDO 128(%sp),%sp ; bump stack
1538 DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
1541 ; Load up all of the values we are going to use
1553 MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
1557 MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
1558 MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
1562 MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
1563 MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
1564 MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
1568 MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
1569 MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
1570 MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
1571 MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
1575 MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
1576 MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
1577 MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
1581 MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
1582 MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
1586 MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
1593 LDD -104(%sp),%r6 ; restore r6
1594 LDD -112(%sp),%r5 ; restore r5
1595 LDD -120(%sp),%r4 ; restore r4
1597 LDD,MB -128(%sp),%r3 ; restore r3 and pop the frame
; Data space and literals.
1604 .SPACE $PRIVATE$,SORT=16
1605 .IMPORT $global$,DATA
1608 .SUBSPA $LIT$,ACCESS=0x2c
; Format string used by the bn_div_words overflow error path (label C$4).
1611 .STRINGZ "Division would overflow (%d)\n"