From ef428d568137917d4d1a3bffa9818088efe8f3ef Mon Sep 17 00:00:00 2001
From: Andy Polyakov
Date: Mon, 18 Jul 2005 09:54:14 +0000
Subject: [PATCH] Fix unwind directives in IA-64 assembler modules. This helps
 symbolic debugging and doesn't affect functionality.

Submitted by: David Mosberger
Obtained from: http://www.hpl.hp.com/research/linux/crypto/
---
 crypto/aes/asm/aes-ia64.S     |  6 ++----
 crypto/bn/asm/ia64.S          | 25 ++++++++-----------------
 crypto/rc4/asm/rc4-ia64.S     |  5 ++---
 crypto/sha/asm/sha1-ia64.pl   |  8 ++------
 crypto/sha/asm/sha512-ia64.pl |  5 ++---
 5 files changed, 16 insertions(+), 33 deletions(-)

diff --git a/crypto/aes/asm/aes-ia64.S b/crypto/aes/asm/aes-ia64.S
index 542cf335e9..3377838000 100644
--- a/crypto/aes/asm/aes-ia64.S
+++ b/crypto/aes/asm/aes-ia64.S
@@ -179,11 +179,10 @@ _ia64_AES_encrypt:
 .skip	16
 AES_encrypt:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
-	.save	ar.lc,r3
 { .mmi;	alloc	r2=ar.pfs,3,0,12,0
 	addl	out8=@ltoff(AES_Te#),gp
+	.save	ar.lc,r3
 	mov	r3=ar.lc	}
 { .mmi;	and	out0=3,in0
 	ADDP	in0=0,in0
@@ -472,11 +471,10 @@ _ia64_AES_decrypt:
 .skip	16
 AES_decrypt:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
-	.save	ar.lc,r3
 { .mmi;	alloc	r2=ar.pfs,3,0,12,0
 	addl	out8=@ltoff(AES_Td#),gp
+	.save	ar.lc,r3
 	mov	r3=ar.lc	}
 { .mmi;	and	out0=3,in0
 	ADDP	in0=0,in0
diff --git a/crypto/bn/asm/ia64.S b/crypto/bn/asm/ia64.S
index 7b82b820e6..0d9c0d3e98 100644
--- a/crypto/bn/asm/ia64.S
+++ b/crypto/bn/asm/ia64.S
@@ -171,15 +171,14 @@
 .skip	32	// makes the loop body aligned at 64-byte boundary
 bn_add_words:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
 { .mii;	alloc	r2=ar.pfs,4,12,0,16
 	cmp4.le	p6,p0=r35,r0	};;
 { .mfb;	mov	r8=r0			// return value
 (p6)	br.ret.spnt.many	b0	};;
 
-	.save	ar.lc,r3
 { .mib;	sub	r10=r35,r0,1
+	.save	ar.lc,r3
 	mov	r3=ar.lc
 	brp.loop.imp	.L_bn_add_words_ctop,.L_bn_add_words_cend-16
 					}
@@ -224,15 +223,14 @@ bn_add_words:
 .skip	32	// makes the loop body aligned at 64-byte boundary
 bn_sub_words:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
 { .mii;	alloc	r2=ar.pfs,4,12,0,16
 	cmp4.le	p6,p0=r35,r0	};;
 { .mfb;	mov	r8=r0			// return value
 (p6)	br.ret.spnt.many	b0	};;
 
-	.save	ar.lc,r3
 { .mib;	sub	r10=r35,r0,1
+	.save	ar.lc,r3
 	mov	r3=ar.lc
 	brp.loop.imp	.L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
 					}
@@ -283,7 +281,6 @@ bn_sub_words:
 .skip	32	// makes the loop body aligned at 64-byte boundary
 bn_mul_words:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
 #ifdef XMA_TEMPTATION
 { .mfi;	alloc	r2=ar.pfs,4,0,0,0	};;
@@ -294,8 +291,8 @@ bn_mul_words:
 	cmp4.le	p6,p0=r34,r0
 (p6)	br.ret.spnt.many	b0	};;
 
-	.save	ar.lc,r3
 { .mii;	sub	r10=r34,r0,1
+	.save	ar.lc,r3
 	mov	r3=ar.lc
 	mov	r9=pr		};;
 
@@ -397,12 +394,10 @@ bn_mul_words:
 .skip	48	// makes the loop body aligned at 64-byte boundary
 bn_mul_add_words:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
-	.save	ar.lc,r3
-	.save	pr,r9
 { .mmi;	alloc	r2=ar.pfs,4,4,0,8
 	cmp4.le	p6,p0=r34,r0
+	.save	ar.lc,r3
 	mov	r3=ar.lc	};;
 { .mib;	mov	r8=r0	// return value
 	sub	r10=r34,r0,1
@@ -410,6 +405,7 @@ bn_mul_add_words:
 
 	.body
 { .mib;	setf.sig	f8=r35	// w
+	.save	pr,r9
 	mov	r9=pr
 	brp.loop.imp	.L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
 					}
@@ -466,7 +462,6 @@ bn_mul_add_words:
 .skip	32	// makes the loop body aligned at 64-byte boundary
 bn_sqr_words:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
 { .mii;	alloc	r2=ar.pfs,3,0,0,0
 	sxt4	r34=r34	};;
@@ -476,9 +471,10 @@ bn_sqr_words:
 	nop.f	0x0
 (p6)	br.ret.spnt.many	b0	};;
 
-	.save	ar.lc,r3
 { .mii;	sub	r10=r34,r0,1
+	.save	ar.lc,r3
 	mov	r3=ar.lc
+	.save	pr,r9
 	mov	r9=pr		};;
 
 	.body
@@ -545,7 +541,6 @@ bn_sqr_words:
 .align	64
 bn_sqr_comba8:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
 #if defined(_HPUX_SOURCE) && !defined(_LP64)
 { .mii;	alloc	r2=ar.pfs,2,1,0,0
@@ -617,7 +612,6 @@ bn_sqr_comba8:
 .align	64
 bn_mul_comba8:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
 #if defined(_HPUX_SOURCE) && !defined(_LP64)
 { .mii;	alloc	r2=ar.pfs,3,0,0,0
@@ -1175,7 +1169,6 @@ bn_mul_comba8:
 .align	64
 bn_sqr_comba4:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
 #if defined(_HPUX_SOURCE) && !defined(_LP64)
 { .mii;	alloc	r2=ar.pfs,2,1,0,0
@@ -1208,7 +1201,6 @@ bn_sqr_comba4:
 .align	64
 bn_mul_comba4:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
 #if defined(_HPUX_SOURCE) && !defined(_LP64)
 { .mii;	alloc	r2=ar.pfs,3,0,0,0
@@ -1411,10 +1403,9 @@ equ=p24
 .align	64
 bn_div_words:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
-	.save	b0,r3
 { .mii;	alloc	r2=ar.pfs,3,5,0,8
+	.save	b0,r3
 	mov	r3=b0
 	mov	r10=pr		};;
 { .mmb;	cmp.eq	p6,p0=r34,r0
diff --git a/crypto/rc4/asm/rc4-ia64.S b/crypto/rc4/asm/rc4-ia64.S
index a322d0c718..8210c47d04 100644
--- a/crypto/rc4/asm/rc4-ia64.S
+++ b/crypto/rc4/asm/rc4-ia64.S
@@ -75,14 +75,13 @@ yy=r31;
 
 .skip	16
 RC4:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
-	.save	ar.lc,r3
-	.save	pr,prsave
 { .mii;	alloc	r2=ar.pfs,4,12,0,16
+	.save	pr,prsave
 	mov	prsave=pr
 	ADDP	key=0,in0	};;
 { .mib;	cmp.eq	p6,p0=0,in1	// len==0?
+	.save	ar.lc,r3
 	mov	r3=ar.lc
 (p6)	br.ret.spnt.many	b0	};;	// emergency exit
diff --git a/crypto/sha/asm/sha1-ia64.pl b/crypto/sha/asm/sha1-ia64.pl
index cb9dfad124..9478f5dd5d 100644
--- a/crypto/sha/asm/sha1-ia64.pl
+++ b/crypto/sha/asm/sha1-ia64.pl
@@ -251,11 +251,9 @@ inp=r33;	// in1
 .align	32
 sha1_block_asm_host_order:
 	.prologue
-	.fframe	0
-	.save	ar.pfs,r0
-	.save	ar.lc,r3
 { .mmi;	alloc	tmp1=ar.pfs,3,15,0,0
 	$ADDP	tmp0=4,ctx
+	.save	ar.lc,r3
 	mov	r3=ar.lc	}
 { .mmi;	$ADDP	ctx=0,ctx
 	$ADDP	inp=0,inp
@@ -406,11 +404,9 @@ $code.=<<___ if ($big_endian);
 ___
 $code.=<<___;
 	.prologue
-	.fframe	0
-	.save	ar.pfs,r0
-	.save	ar.lc,r3
 { .mmi;	alloc	tmp1=ar.pfs,3,15,0,0
 	$ADDP	tmp0=4,ctx
+	.save	ar.lc,r3
 	mov	r3=ar.lc	}
 { .mmi;	$ADDP	ctx=0,ctx
 	$ADDP	inp=0,inp
diff --git a/crypto/sha/asm/sha512-ia64.pl b/crypto/sha/asm/sha512-ia64.pl
index 0aea02399a..9de9174992 100755
--- a/crypto/sha/asm/sha512-ia64.pl
+++ b/crypto/sha/asm/sha512-ia64.pl
@@ -128,15 +128,14 @@ sgm0=r50;	sgm1=r51;	// small constants
 
 .align	32
 $func:
 	.prologue
-	.fframe	0
 	.save	ar.pfs,r2
-	.save	ar.lc,r3
-	.save	pr,prsave
 { .mmi;	alloc	r2=ar.pfs,3,17,0,16
 	$ADDP	ctx=0,r32		// 1st arg
+	.save	ar.lc,r3
 	mov	r3=ar.lc	}
 { .mmi;	$ADDP	input=0,r33		// 2nd arg
 	addl	Ktbl=\@ltoff($TABLE#),gp
+	.save	pr,prsave
 	mov	prsave=pr	};;
 	.body
-- 
2.25.1
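
The convention the patch enforces throughout is that an IA-64 `.save reg,loc`
unwind directive must sit immediately before the instruction that actually
copies the preserved resource into its save location (even when that
instruction is buried inside an instruction bundle), and that `.fframe` is
only emitted when the routine really allocates a memory stack frame. A
minimal sketch of that pattern follows; the routine name `example_fn`, the
register choices, and the frame sizes are hypothetical and not taken from
the commit above.

	// Minimal illustrative sketch: unwind directives placed next to
	// the instructions they describe, and no ".fframe 0" because this
	// hypothetical leaf routine uses no memory stack frame.
	.text
	.global	example_fn#
	.proc	example_fn#
example_fn:
	.prologue
	.save	ar.pfs,r2		// annotates the alloc that follows
{ .mmi;	alloc	r2=ar.pfs,2,2,0,0	// ar.pfs is saved in r2 here
	mov	r9=r32			// unrelated setup may share the bundle
	.save	ar.lc,r3		// placed right before the saving insn
	mov	r3=ar.lc	};;	// ar.lc is saved in r3 here
	.body
	// ... computation clobbering ar.lc would go here ...
{ .mii;	nop.m	0
	mov	ar.lc=r3		// restore ar.lc
	mov	ar.pfs=r2	};;	// restore ar.pfs before returning
{ .mib;	nop.m	0
	nop.i	0
	br.ret.sptk.many	b0	};;
	.endp	example_fn#

Keeping each `.save` adjacent to its saving instruction is what lets the
unwinder (and therefore a symbolic debugger) record the correct save
location for every instruction address, which is the point of the fix.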