From 9676402c3a6657781a65836c716066d3d39ee54f Mon Sep 17 00:00:00 2001
From: Andy Polyakov <appro@openssl.org>
Date: Thu, 31 Mar 2016 18:47:17 +0200
Subject: [PATCH] PPC assembly pack: remove branch hints.

As it turns out, branch hints grew out of a misconception. In addition,
their interpretation by the GNU assembler is affected by assembler flags
and can end up with the opposite meaning on different processors. As we
stand to lose quite a lot to misinterpretations, especially on newer
processors, we just omit them altogether.

Reviewed-by: Tim Hudson <tjh@openssl.org>
(cherry picked from commit 20b88bb160865b0c2998056fe841b1cbacd6d4c7)
---
 crypto/aes/asm/aes-ppc.pl    |  4 ++--
 crypto/bn/asm/ppc-mont.pl    | 10 +++++-----
 crypto/bn/asm/ppc.pl         | 10 +++++-----
 crypto/bn/asm/ppc64-mont.pl  | 12 ++++++------
 crypto/sha/asm/sha1-ppc.pl   |  6 +++---
 crypto/sha/asm/sha512-ppc.pl |  8 ++++----
 6 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/crypto/aes/asm/aes-ppc.pl b/crypto/aes/asm/aes-ppc.pl
index 7a99fc3d04..5b83016efa 100644
--- a/crypto/aes/asm/aes-ppc.pl
+++ b/crypto/aes/asm/aes-ppc.pl
@@ -590,7 +590,7 @@ Lenc_loop:
 	xor	$s2,$t2,$acc14
 	xor	$s3,$t3,$acc15
 	addi	$key,$key,16
-	bdnz-	Lenc_loop
+	bdnz	Lenc_loop
 
 	addi	$Tbl2,$Tbl0,2048
 	nop
@@ -1068,7 +1068,7 @@ Ldec_loop:
 	xor	$s2,$t2,$acc14
 	xor	$s3,$t3,$acc15
 	addi	$key,$key,16
-	bdnz-	Ldec_loop
+	bdnz	Ldec_loop
 
 	addi	$Tbl2,$Tbl0,2048
 	nop
diff --git a/crypto/bn/asm/ppc-mont.pl b/crypto/bn/asm/ppc-mont.pl
index da69c6aaaf..6930a3aceb 100644
--- a/crypto/bn/asm/ppc-mont.pl
+++ b/crypto/bn/asm/ppc-mont.pl
@@ -191,7 +191,7 @@ L1st:
 
 	addi	$j,$j,$BNSZ	; j++
 	addi	$tp,$tp,$BNSZ	; tp++
-	bdnz-	L1st
+	bdnz	L1st
 ;L1st
 	addc	$lo0,$alo,$hi0
 	addze	$hi0,$ahi
@@ -253,7 +253,7 @@ Linner:
 	addze	$hi1,$hi1
 	$ST	$lo1,0($tp)	; tp[j-1]
 	addi	$tp,$tp,$BNSZ	; tp++
-	bdnz-	Linner
+	bdnz	Linner
 ;Linner
 	$LD	$tj,$BNSZ($tp)	; tp[j]
 	addc	$lo0,$alo,$hi0
@@ -276,7 +276,7 @@ Linner:
 	slwi	$tj,$num,`log($BNSZ)/log(2)`
 	$UCMP	$i,$tj
 	addi	$i,$i,$BNSZ
-	ble-	Louter
+	ble	Louter
 
 	addi	$num,$num,2	; restore $num
 	subfc	$j,$j,$j	; j=0 and "clear" XER[CA]
@@ -289,7 +289,7 @@ Lsub:	$LDX	$tj,$tp,$j
 	subfe	$aj,$nj,$tj	; tp[j]-np[j]
 	$STX	$aj,$rp,$j
 	addi	$j,$j,$BNSZ
-	bdnz-	Lsub
+	bdnz	Lsub
 
 	li	$j,0
 	mtctr	$num
@@ -304,7 +304,7 @@ Lcopy:				; copy or in-place refresh
 	$STX	$tj,$rp,$j
 	$STX	$j,$tp,$j	; zap at once
 	addi	$j,$j,$BNSZ
-	bdnz-	Lcopy
+	bdnz	Lcopy
 
 	$POP	$tj,0($sp)
 	li	r3,1
diff --git a/crypto/bn/asm/ppc.pl b/crypto/bn/asm/ppc.pl
index 04df1fe5cc..446d8ba949 100644
--- a/crypto/bn/asm/ppc.pl
+++ b/crypto/bn/asm/ppc.pl
@@ -1556,7 +1556,7 @@ Lppcasm_sub_mainloop:
 				# if carry = 1 this is r7-r8. Else it
 				# is r7-r8 -1 as we need.
 	$STU	r6,$BNSZ(r3)
-	bdnz-	Lppcasm_sub_mainloop
+	bdnz	Lppcasm_sub_mainloop
 Lppcasm_sub_adios:
 	subfze	r3,r0		# if carry bit is set then r3 = 0 else -1
 	andi.	r3,r3,1		# keep only last bit.
@@ -1603,7 +1603,7 @@ Lppcasm_add_mainloop:
 	$LDU	r8,$BNSZ(r5)
 	adde	r8,r7,r8
 	$STU	r8,$BNSZ(r3)
-	bdnz-	Lppcasm_add_mainloop
+	bdnz	Lppcasm_add_mainloop
 Lppcasm_add_adios:
 	addze	r3,r0			#return carry bit.
 	blr
@@ -1762,7 +1762,7 @@ Lppcasm_sqr_mainloop:
 	$UMULH	r8,r6,r6
 	$STU	r7,$BNSZ(r3)
 	$STU	r8,$BNSZ(r3)
-	bdnz-	Lppcasm_sqr_mainloop
+	bdnz	Lppcasm_sqr_mainloop
 Lppcasm_sqr_adios:
 	blr
 	.long	0
@@ -1827,7 +1827,7 @@ Lppcasm_mw_LOOP:
 
 	addi	r3,r3,`4*$BNSZ`
 	addi	r4,r4,`4*$BNSZ`
-	bdnz-	Lppcasm_mw_LOOP
+	bdnz	Lppcasm_mw_LOOP
 
 Lppcasm_mw_REM:
 	andi.	r5,r5,0x3
@@ -1951,7 +1951,7 @@ Lppcasm_maw_mainloop:
 	$ST	r11,`3*$BNSZ`(r3)
 	addi	r3,r3,`4*$BNSZ`
 	addi	r4,r4,`4*$BNSZ`
-	bdnz-	Lppcasm_maw_mainloop
+	bdnz	Lppcasm_maw_mainloop
 
 Lppcasm_maw_leftover:
 	andi.	r5,r5,0x3
diff --git a/crypto/bn/asm/ppc64-mont.pl b/crypto/bn/asm/ppc64-mont.pl
index 9e3c12d788..595fc6d31f 100644
--- a/crypto/bn/asm/ppc64-mont.pl
+++ b/crypto/bn/asm/ppc64-mont.pl
@@ -734,7 +734,7 @@ $code.=<<___;
 ___
 }
 $code.=<<___;
-	bdnz-	L1st
+	bdnz	L1st
 
 	fctid	$dota,$dota
 	fctid	$dotb,$dotb
@@ -1280,7 +1280,7 @@ $code.=<<___;
 ___
 }
 $code.=<<___;
-	bdnz-	Linner
+	bdnz	Linner
 
 	fctid	$dota,$dota
 	fctid	$dotb,$dotb
@@ -1490,7 +1490,7 @@ Lsub:	ldx	$t0,$tp,$i
 	stdx	$t0,$rp,$i
 	stdx	$t2,$t6,$i
 	addi	$i,$i,16
-	bdnz-	Lsub
+	bdnz	Lsub
 
 	li	$i,0
 	subfe	$ovf,$i,$ovf	; handle upmost overflow bit
@@ -1517,7 +1517,7 @@ Lcopy:				; copy or in-place refresh
 	stdx	$i,$tp,$i	; zap tp at once
 	stdx	$i,$t4,$i
 	addi	$i,$i,16
-	bdnz-	Lcopy
+	bdnz	Lcopy
 ___
 $code.=<<___ if ($SIZE_T==4);
 	subf	$np,$num,$np	; rewind np
@@ -1550,7 +1550,7 @@ Lsub:	lwz	$t0,12($tp)	; load tp[j..j+3] in 64-bit word order
 	stw	$t5,8($rp)
 	stw	$t6,12($rp)
 	stwu	$t7,16($rp)
-	bdnz-	Lsub
+	bdnz	Lsub
 
 	li	$i,0
 	subfe	$ovf,$i,$ovf	; handle upmost overflow bit
@@ -1582,7 +1582,7 @@ Lcopy:				; copy or in-place refresh
 	stwu	$t3,16($rp)
 	std	$i,8($tp)	; zap tp at once
 	stdu	$i,16($tp)
-	bdnz-	Lcopy
+	bdnz	Lcopy
 ___
 
 $code.=<<___;
diff --git a/crypto/sha/asm/sha1-ppc.pl b/crypto/sha/asm/sha1-ppc.pl
index df5989610c..ab655021cc 100755
--- a/crypto/sha/asm/sha1-ppc.pl
+++ b/crypto/sha/asm/sha1-ppc.pl
@@ -227,7 +227,7 @@ Lunaligned:
 	srwi.	$t1,$t1,6	; t1/=64
 	beq	Lcross_page
 	$UCMP	$num,$t1
-	ble-	Laligned	; didn't cross the page boundary
+	ble	Laligned	; didn't cross the page boundary
 	mtctr	$t1
 	subfc	$num,$t1,$num
 	bl	Lsha1_block_private
@@ -255,7 +255,7 @@ Lmemcpy:
 	bl	Lsha1_block_private
 	$POP	$inp,`$FRAME-$SIZE_T*18`($sp)
 	addic.	$num,$num,-1
-	bne-	Lunaligned
+	bne	Lunaligned
 
 Ldone:
 	$POP	r0,`$FRAME+$LRSAVE`($sp)
@@ -329,7 +329,7 @@ $code.=<<___;
 	stw	r20,16($ctx)
 	mr	$E,r20
 	addi	$inp,$inp,`16*4`
-	bdnz-	Lsha1_block_private
+	bdnz	Lsha1_block_private
 	blr
 	.long	0
 	.byte	0,12,0x14,0,0,0,0,0
diff --git a/crypto/sha/asm/sha512-ppc.pl b/crypto/sha/asm/sha512-ppc.pl
index 734f3c1ca0..17fdc6e8e5 100755
--- a/crypto/sha/asm/sha512-ppc.pl
+++ b/crypto/sha/asm/sha512-ppc.pl
@@ -259,7 +259,7 @@ Lunaligned:
 	andi.	$t1,$t1,`4096-16*$SZ`	; distance to closest page boundary
 	beq	Lcross_page
 	$UCMP	$num,$t1
-	ble-	Laligned		; didn't cross the page boundary
+	ble	Laligned		; didn't cross the page boundary
 	subfc	$num,$t1,$num
 	add	$t1,$inp,$t1
 	$PUSH	$num,`$FRAME-$SIZE_T*25`($sp)	; save real remaining num
@@ -317,7 +317,7 @@ $code.=<<___;
 	$POP	$inp,`$FRAME-$SIZE_T*26`($sp)	; restore real inp
 	$POP	$num,`$FRAME-$SIZE_T*25`($sp)	; restore real num
 	addic.	$num,$num,`-16*$SZ`		; num--
-	bne-	Lunaligned
+	bne	Lunaligned
 
 Ldone:
 	$POP	r0,`$FRAME+$LRSAVE`($sp)
@@ -396,7 +396,7 @@ for(;$i<32;$i++) {
 	unshift(@V,pop(@V));
 }
 $code.=<<___;
-	bdnz-	Lrounds
+	bdnz	Lrounds
 
 	$POP	$ctx,`$FRAME-$SIZE_T*22`($sp)
 	$POP	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
@@ -644,7 +644,7 @@ for(;$i<32;$i++) {
 	($a0,$a1,$a2,$a3) = ($a2,$a3,$a0,$a1);
 }
 $code.=<<___;
-	bdnz-	Lrounds
+	bdnz	Lrounds
 
 	$POP	$ctx,`$FRAME-$SIZE_T*22`($sp)
 	$POP	$inp,`$FRAME-$SIZE_T*23`($sp)	; inp pointer
-- 
2.25.1
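For readers who have not seen the suffix syntax the commit message refers to, here is a minimal sketch of a counted loop with and without a branch hint. It is illustrative only and not part of the patch; the label, registers and iteration count are made up:

	li	r6,16			# example iteration count (hypothetical)
	mtctr	r6			# CTR register drives the counted loop
Lhyp_loop:
	lwzu	r5,4(r4)		# some loop body
	bdnz-	Lhyp_loop		# hinted form: "-" statically predicts the
					# branch as not taken; how that hint is
					# encoded depends on assembler flags

After this patch the same loop would close with a plain "bdnz Lhyp_loop", leaving branch prediction to the hardware.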