diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl
index 54a9fe5c27..1495917d26 100644
--- a/crypto/aes/asm/aes-s390x.pl
+++ b/crypto/aes/asm/aes-s390x.pl
@@ -1,7 +1,14 @@
-#!/usr/bin/env perl
+#! /usr/bin/env perl
+# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
 # ====================================================================
-# Written by Andy Polyakov for the OpenSSL
+# Written by Andy Polyakov for the OpenSSL
 # project. The module is, however, dual licensed under OpenSSL and
 # CRYPTOGAMS licenses depending on where you obtain it. For further
 # details see http://www.openssl.org/~appro/cryptogams/.
@@ -31,10 +38,76 @@
 # Implement AES_set_[en|de]crypt_key. Key schedule setup is avoided
 # for 128-bit keys, if hardware support is detected.
 
-$t1="%r0";
-$t2="%r1";
-$t3="%r2";	$inp="%r2";
-$out="%r3";	$mask="%r3";	$bits="%r3";
+# January 2009.
+#
+# Add support for hardware AES192/256 and reschedule instructions to
+# minimize/avoid Address Generation Interlock hazard and to favour
+# dual-issue z10 pipeline. This gave ~25% improvement on z10 and
+# almost 50% on z9. The gain is smaller on z10, because being dual-
+# issue z10 makes it impossible to eliminate the interlock condition:
+# the critical path is not long enough. Yet it spends ~24 cycles per byte
+# processed with 128-bit key.
+#
+# Unlike previous version hardware support detection takes place only
+# at the moment of key schedule setup, which is denoted in key->rounds.
+# This is done because deferred key setup can't be made MT-safe, not
+# for keys longer than 128 bits.
+#
+# Add AES_cbc_encrypt, which gives an incredible performance improvement;
+# it was measured to be ~6.6x. It's less than the previously mentioned 8x,
+# because the software implementation was optimized.
+
+# May 2010.
+#
+# Add AES_ctr32_encrypt. If hardware-assisted, it provides up to 4.3x
+# performance improvement over "generic" counter mode routine relying
+# on single-block, also hardware-assisted, AES_encrypt. "Up to" refers
+# to the fact that exact throughput value depends on current stack
+# frame alignment within 4KB page. In worst case you get ~75% of the
+# maximum, but *on average* it would be as much as ~98%. Meaning that
+# the worst case is unlikely; it's like hitting a ravine on a plateau.
+
+# November 2010.
+#
+# Adapt for -m31 build. If kernel supports what's called "highgprs"
+# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit
+# instructions and achieve "64-bit" performance even in 31-bit legacy
+# application context. The feature is not specific to any particular
+# processor, as long as it's "z-CPU". The latter implies that the code
+# remains z/Architecture specific. On z990 it was measured to perform
+# 2x better than code generated by gcc 4.3.
+
+# December 2010.
+#
+# Add support for z196 "cipher message with counter" instruction.
+# Note however that it's disengaged, because it was measured to
+# perform ~12% worse than vanilla km-based code...
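The deferred-detection scheme described above is invisible at the API level: a caller performs key schedule setup once and then encrypts, and the km capability probe happens inside the setup call, with the result recorded in key->rounds. A minimal C sketch of that contract against the public AES_set_encrypt_key/AES_encrypt interface this module implements (the key and plaintext values here are arbitrary placeholders, not anything from the patch):

#include <stdio.h>
#include <openssl/aes.h>

int main(void)
{
    /* arbitrary demo key; hardware capability is probed once here,
     * during key schedule setup, and recorded in key->rounds */
    static const unsigned char key_bytes[16] = "0123456789abcdef";
    unsigned char in[16] = "single block...";
    unsigned char out[16];
    AES_KEY key;

    if (AES_set_encrypt_key(key_bytes, 128, &key) != 0)
        return 1;

    /* takes the km-AES path if the CPU supports it,
     * the table-driven software path otherwise */
    AES_encrypt(in, out, &key);

    for (int i = 0; i < 16; i++)
        printf("%02x", out[i]);
    printf("\n");
    return 0;
}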
+
+# February 2011.
+#
+# Add AES_xts_[en|de]crypt. This includes support for z196 km-xts-aes
+# instructions, which deliver ~70% improvement at 8KB block size over
+# vanilla km-based code, and ~37% at 512-byte block size.
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+	$SIZE_T=4;
+	$g="";
+} else {
+	$SIZE_T=8;
+	$g="g";
+}
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$softonly=0;	# allow hardware support
+
+$t0="%r0";	$mask="%r0";
+$t1="%r1";
+$t2="%r2";	$inp="%r2";
+$t3="%r3";	$out="%r3";	$bits="%r3";
 $key="%r4";
 $i1="%r5";
 $i2="%r6";
@@ -48,16 +121,20 @@ $rounds="%r13";
 $ra="%r14";
 $sp="%r15";
 
+$stdframe=16*$SIZE_T+4*8;
+
 sub _data_word()
 { my $i;
     while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
 }
 
 $code=<<___;
+#include "s390x_arch.h"
+
 .text
 
 .type	AES_Te,\@object
-.align	64
+.align	256
 AES_Te:
 ___
 &_data_word(
@@ -163,6 +240,7 @@ $code.=<<___;
 .long	0x01000000, 0x02000000, 0x04000000, 0x08000000
 .long	0x10000000, 0x20000000, 0x40000000, 0x80000000
 .long	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.align	256
 .size	AES_Te,.-AES_Te
 
 # void AES_encrypt(const unsigned char *inp, unsigned char *out,
@@ -170,81 +248,66 @@ $code.=<<___;
 .globl	AES_encrypt
 .type	AES_encrypt,\@function
 AES_encrypt:
-	stg	$ra,112($sp)
-	lghi	%r0,10
-	c	%r0,240($key)
-	jne	.Lesoft
-	lghi	%r0,0		# query capability vector
-	la	%r1,16($sp)
-	.long	0xb92e0042	# km %r4,%r2
-	lg	%r0,16($sp)
-	tmhl	%r0,`0x8000>>2`
-	jz	.Lesoft128
-	lghi	%r0,`0x00|0x12`	# encrypt AES-128
+___
+$code.=<<___ if (!$softonly);
+	l	%r0,240($key)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lesoft
+
+	la	%r1,0($key)
 	#la	%r2,0($inp)
 	la	%r4,0($out)
 	lghi	%r3,16		# single block length
 	.long	0xb92e0042	# km %r4,%r2
-	bcr	8,%r14		# return if done
-	la	$out,0(%r4)	# restore arguments
-	la	$key,0(%r1)
-.Lesoft128:
-	lghi	%r0,0
-	c	%r0,236($key)
-	je	.Lesoft
-	stmg	$inp,$key,16($sp)
-	la	$inp,0($key)
-	lghi	$bits,128
-	bras	$ra,.Lekey_internal	# postponed key schedule setup
-	lmg	$inp,$key,16($sp)
+	brc	1,.-4		# can this happen?
+	br	%r14
+.align	64
 .Lesoft:
-	stmg	%r3,%r13,24($sp)
-
-	bras	$tbl,1f
-1:	aghi	$tbl,AES_Te-.
+___ +$code.=<<___; + stm${g} %r3,$ra,3*$SIZE_T($sp) llgf $s0,0($inp) llgf $s1,4($inp) llgf $s2,8($inp) llgf $s3,12($inp) - llill $mask,`0xff<<3` + larl $tbl,AES_Te bras $ra,_s390x_AES_encrypt - lg $out,24($sp) + l${g} $out,3*$SIZE_T($sp) st $s0,0($out) st $s1,4($out) st $s2,8($out) st $s3,12($out) - lmg %r6,$ra,48($sp) + lm${g} %r6,$ra,6*$SIZE_T($sp) br $ra .size AES_encrypt,.-AES_encrypt .type _s390x_AES_encrypt,\@function .align 16 _s390x_AES_encrypt: + st${g} $ra,15*$SIZE_T($sp) x $s0,0($key) x $s1,4($key) x $s2,8($key) x $s3,12($key) l $rounds,240($key) + llill $mask,`0xff<<3` aghi $rounds,-1 - + j .Lenc_loop +.align 16 .Lenc_loop: - sllg $i1,$s0,`0+3` - srlg $i2,$s0,`8-3` - srlg $i3,$s0,`16-3` + sllg $t1,$s0,`0+3` + srlg $t2,$s0,`8-3` + srlg $t3,$s0,`16-3` srl $s0,`24-3` nr $s0,$mask - ngr $i1,$mask - nr $i2,$mask - nr $i3,$mask - l $s0,0($s0,$tbl) # Te0[s0>>24] - l $t1,1($i1,$tbl) # Te3[s0>>0] - l $t2,2($i2,$tbl) # Te2[s0>>8] - l $t3,3($i3,$tbl) # Te1[s0>>16] + ngr $t1,$mask + nr $t2,$mask + nr $t3,$mask srlg $i1,$s1,`16-3` # i0 sllg $i2,$s1,`0+3` @@ -254,72 +317,84 @@ _s390x_AES_encrypt: nr $s1,$mask ngr $i2,$mask nr $i3,$mask + + l $s0,0($s0,$tbl) # Te0[s0>>24] + l $t1,1($t1,$tbl) # Te3[s0>>0] + l $t2,2($t2,$tbl) # Te2[s0>>8] + l $t3,3($t3,$tbl) # Te1[s0>>16] + x $s0,3($i1,$tbl) # Te1[s1>>16] l $s1,0($s1,$tbl) # Te0[s1>>24] x $t2,1($i2,$tbl) # Te3[s1>>0] x $t3,2($i3,$tbl) # Te2[s1>>8] - xr $s1,$t1 srlg $i1,$s2,`8-3` # i0 srlg $i2,$s2,`16-3` # i1 - sllg $i3,$s2,`0+3` - srl $s2,`24-3` nr $i1,$mask nr $i2,$mask + sllg $i3,$s2,`0+3` + srl $s2,`24-3` nr $s2,$mask ngr $i3,$mask + + xr $s1,$t1 + srlg $ra,$s3,`8-3` # i1 + sllg $t1,$s3,`0+3` # i0 + nr $ra,$mask + la $key,16($key) + ngr $t1,$mask + x $s0,2($i1,$tbl) # Te2[s2>>8] x $s1,3($i2,$tbl) # Te1[s2>>16] l $s2,0($s2,$tbl) # Te0[s2>>24] x $t3,1($i3,$tbl) # Te3[s2>>0] - xr $s2,$t2 - sllg $i1,$s3,`0+3` # i0 - srlg $i2,$s3,`8-3` # i1 srlg $i3,$s3,`16-3` # i2 + xr $s2,$t2 srl $s3,`24-3` - ngr $i1,$mask - nr $i2,$mask nr $i3,$mask nr $s3,$mask - x $s0,1($i1,$tbl) # Te3[s3>>0] - x $s1,2($i2,$tbl) # Te2[s3>>8] - x $s2,3($i3,$tbl) # Te1[s3>>16] - l $s3,0($s3,$tbl) # Te0[s3>>24] - xr $s3,$t3 - la $key,16($key) x $s0,0($key) x $s1,4($key) x $s2,8($key) - x $s3,12($key) + x $t3,12($key) + + x $s0,1($t1,$tbl) # Te3[s3>>0] + x $s1,2($ra,$tbl) # Te2[s3>>8] + x $s2,3($i3,$tbl) # Te1[s3>>16] + l $s3,0($s3,$tbl) # Te0[s3>>24] + xr $s3,$t3 brct $rounds,.Lenc_loop + .align 16 - sllg $i1,$s0,`0+3` - srlg $i2,$s0,`8-3` - srlg $i3,$s0,`16-3` + sllg $t1,$s0,`0+3` + srlg $t2,$s0,`8-3` + ngr $t1,$mask + srlg $t3,$s0,`16-3` srl $s0,`24-3` nr $s0,$mask - ngr $i1,$mask - nr $i2,$mask - nr $i3,$mask - llgc $s0,2($s0,$tbl) # Te4[s0>>24] - llgc $t1,2($i1,$tbl) # Te4[s0>>0] - llgc $t2,2($i2,$tbl) # Te4[s0>>8] - llgc $t3,2($i3,$tbl) # Te4[s0>>16] - sll $s0,24 - sll $t2,8 - sll $t3,16 + nr $t2,$mask + nr $t3,$mask srlg $i1,$s1,`16-3` # i0 sllg $i2,$s1,`0+3` + ngr $i2,$mask srlg $i3,$s1,`8-3` srl $s1,`24-3` nr $i1,$mask nr $s1,$mask - ngr $i2,$mask nr $i3,$mask + + llgc $s0,2($s0,$tbl) # Te4[s0>>24] + llgc $t1,2($t1,$tbl) # Te4[s0>>0] + sll $s0,24 + llgc $t2,2($t2,$tbl) # Te4[s0>>8] + llgc $t3,2($t3,$tbl) # Te4[s0>>16] + sll $t2,8 + sll $t3,16 + llgc $i1,2($i1,$tbl) # Te4[s1>>16] llgc $s1,2($s1,$tbl) # Te4[s1>>24] llgc $i2,2($i2,$tbl) # Te4[s1>>0] @@ -331,37 +406,43 @@ _s390x_AES_encrypt: or $s1,$t1 or $t2,$i2 or $t3,$i3 - + srlg $i1,$s2,`8-3` # i0 srlg $i2,$s2,`16-3` # i1 - sllg $i3,$s2,`0+3` - srl $s2,`24-3` nr $i1,$mask nr $i2,$mask - nr $s2,$mask + sllg 
$i3,$s2,`0+3` + srl $s2,`24-3` ngr $i3,$mask + nr $s2,$mask + + sllg $t1,$s3,`0+3` # i0 + srlg $ra,$s3,`8-3` # i1 + ngr $t1,$mask + llgc $i1,2($i1,$tbl) # Te4[s2>>8] llgc $i2,2($i2,$tbl) # Te4[s2>>16] + sll $i1,8 llgc $s2,2($s2,$tbl) # Te4[s2>>24] llgc $i3,2($i3,$tbl) # Te4[s2>>0] - sll $i1,8 sll $i2,16 + nr $ra,$mask sll $s2,24 or $s0,$i1 or $s1,$i2 or $s2,$t2 or $t3,$i3 - sllg $i1,$s3,`0+3` # i0 - srlg $i2,$s3,`8-3` # i1 srlg $i3,$s3,`16-3` # i2 srl $s3,`24-3` - ngr $i1,$mask - nr $i2,$mask nr $i3,$mask nr $s3,$mask - llgc $i1,2($i1,$tbl) # Te4[s3>>0] - llgc $i2,2($i2,$tbl) # Te4[s3>>8] + + l $t0,16($key) + l $t2,20($key) + + llgc $i1,2($t1,$tbl) # Te4[s3>>0] + llgc $i2,2($ra,$tbl) # Te4[s3>>8] llgc $i3,2($i3,$tbl) # Te4[s3>>16] llgc $s3,2($s3,$tbl) # Te4[s3>>24] sll $i2,8 @@ -372,18 +453,19 @@ _s390x_AES_encrypt: or $s2,$i3 or $s3,$t3 - x $s0,16($key) - x $s1,20($key) + l${g} $ra,15*$SIZE_T($sp) + xr $s0,$t0 + xr $s1,$t2 x $s2,24($key) x $s3,28($key) - br $ra + br $ra .size _s390x_AES_encrypt,.-_s390x_AES_encrypt ___ $code.=<<___; .type AES_Td,\@object -.align 64 +.align 256 AES_Td: ___ &_data_word( @@ -492,82 +574,66 @@ $code.=<<___; .globl AES_decrypt .type AES_decrypt,\@function AES_decrypt: - stg $ra,112($sp) - lghi %r0,10 - c %r0,240($key) - jne .Ldsoft - lghi %r0,0 # query capability vector - la %r1,16($sp) - .long 0xb92e0042 # km %r4,%r2 - lg %r0,16($sp) - tmhl %r0,`0x8000>>2` - jz .Ldsoft128 - lghi %r0,`0x80|0x12` # decrypt AES-128 - la %r1,160($key) +___ +$code.=<<___ if (!$softonly); + l %r0,240($key) + lhi %r1,16 + clr %r0,%r1 + jl .Ldsoft + + la %r1,0($key) #la %r2,0($inp) la %r4,0($out) lghi %r3,16 # single block length .long 0xb92e0042 # km %r4,%r2 - bcr 8,%r14 # return if done - la $out,0(%r4) # restore arguments - lghi $key,-160 - la $key,0($key,%r1) -.Ldsoft128: - lghi %r0,0 - c %r0,236($key) - je .Ldsoft - stmg $inp,$key,16($sp) - la $inp,160($key) - lghi $bits,128 - bras $ra,.Ldkey_internal # postponed key schedule setup - lmg $inp,$key,16($sp) + brc 1,.-4 # can this happen? + br %r14 +.align 64 .Ldsoft: - stmg %r3,%r13,24($sp) - - bras $tbl,1f -1: aghi $tbl,AES_Td-. 
+___ +$code.=<<___; + stm${g} %r3,$ra,3*$SIZE_T($sp) llgf $s0,0($inp) llgf $s1,4($inp) llgf $s2,8($inp) llgf $s3,12($inp) - llill $mask,`0xff<<3` + larl $tbl,AES_Td bras $ra,_s390x_AES_decrypt - lg $out,24($sp) + l${g} $out,3*$SIZE_T($sp) st $s0,0($out) st $s1,4($out) st $s2,8($out) st $s3,12($out) - lmg %r6,$ra,48($sp) + lm${g} %r6,$ra,6*$SIZE_T($sp) br $ra .size AES_decrypt,.-AES_decrypt .type _s390x_AES_decrypt,\@function .align 16 _s390x_AES_decrypt: + st${g} $ra,15*$SIZE_T($sp) x $s0,0($key) x $s1,4($key) x $s2,8($key) x $s3,12($key) l $rounds,240($key) + llill $mask,`0xff<<3` aghi $rounds,-1 - + j .Ldec_loop +.align 16 .Ldec_loop: - srlg $i1,$s0,`16-3` - srlg $i2,$s0,`8-3` - sllg $i3,$s0,`0+3` + srlg $t1,$s0,`16-3` + srlg $t2,$s0,`8-3` + sllg $t3,$s0,`0+3` srl $s0,`24-3` nr $s0,$mask - nr $i1,$mask - nr $i2,$mask - ngr $i3,$mask - l $s0,0($s0,$tbl) # Td0[s0>>24] - l $t1,3($i1,$tbl) # Td1[s0>>16] - l $t2,2($i2,$tbl) # Td2[s0>>8] - l $t3,1($i3,$tbl) # Td3[s0>>0] + nr $t1,$mask + nr $t2,$mask + ngr $t3,$mask sllg $i1,$s1,`0+3` # i0 srlg $i2,$s1,`16-3` @@ -577,11 +643,16 @@ _s390x_AES_decrypt: nr $s1,$mask nr $i2,$mask nr $i3,$mask + + l $s0,0($s0,$tbl) # Td0[s0>>24] + l $t1,3($t1,$tbl) # Td1[s0>>16] + l $t2,2($t2,$tbl) # Td2[s0>>8] + l $t3,1($t3,$tbl) # Td3[s0>>0] + x $s0,1($i1,$tbl) # Td3[s1>>0] l $s1,0($s1,$tbl) # Td0[s1>>24] x $t2,3($i2,$tbl) # Td1[s1>>16] x $t3,2($i3,$tbl) # Td2[s1>>8] - xr $s1,$t1 srlg $i1,$s2,`8-3` # i0 sllg $i2,$s2,`0+3` # i1 @@ -591,69 +662,72 @@ _s390x_AES_decrypt: ngr $i2,$mask nr $s2,$mask nr $i3,$mask + + xr $s1,$t1 + srlg $ra,$s3,`8-3` # i1 + srlg $t1,$s3,`16-3` # i0 + nr $ra,$mask + la $key,16($key) + nr $t1,$mask + x $s0,2($i1,$tbl) # Td2[s2>>8] x $s1,1($i2,$tbl) # Td3[s2>>0] l $s2,0($s2,$tbl) # Td0[s2>>24] x $t3,3($i3,$tbl) # Td1[s2>>16] - xr $s2,$t2 - srlg $i1,$s3,`16-3` # i0 - srlg $i2,$s3,`8-3` # i1 sllg $i3,$s3,`0+3` # i2 srl $s3,`24-3` - nr $i1,$mask - nr $i2,$mask ngr $i3,$mask nr $s3,$mask - x $s0,3($i1,$tbl) # Td1[s3>>16] - x $s1,2($i2,$tbl) # Td2[s3>>8] - x $s2,1($i3,$tbl) # Td3[s3>>0] - l $s3,0($s3,$tbl) # Td0[s3>>24] - xr $s3,$t3 - la $key,16($key) + xr $s2,$t2 x $s0,0($key) x $s1,4($key) x $s2,8($key) - x $s3,12($key) + x $t3,12($key) + + x $s0,3($t1,$tbl) # Td1[s3>>16] + x $s1,2($ra,$tbl) # Td2[s3>>8] + x $s2,1($i3,$tbl) # Td3[s3>>0] + l $s3,0($s3,$tbl) # Td0[s3>>24] + xr $s3,$t3 brct $rounds,.Ldec_loop + .align 16 l $t1,`2048+0`($tbl) # prefetch Td4 - l $t2,`2048+32`($tbl) - l $t3,`2048+64`($tbl) - l $i1,`2048+96`($tbl) - l $i2,`2048+128`($tbl) - l $i3,`2048+160`($tbl) - l $t1,`2048+192`($tbl) - l $t2,`2048+224`($tbl) + l $t2,`2048+64`($tbl) + l $t3,`2048+128`($tbl) + l $i1,`2048+192`($tbl) llill $mask,0xff srlg $i3,$s0,24 # i0 - srlg $i1,$s0,16 - srlg $i2,$s0,8 + srlg $t1,$s0,16 + srlg $t2,$s0,8 nr $s0,$mask # i3 - nr $i1,$mask + nr $t1,$mask + + srlg $i1,$s1,24 + nr $t2,$mask + srlg $i2,$s1,16 + srlg $ra,$s1,8 + nr $s1,$mask # i0 nr $i2,$mask + nr $ra,$mask + llgc $i3,2048($i3,$tbl) # Td4[s0>>24] - llgc $t1,2048($i1,$tbl) # Td4[s0>>16] - llgc $t2,2048($i2,$tbl) # Td4[s0>>8] + llgc $t1,2048($t1,$tbl) # Td4[s0>>16] + llgc $t2,2048($t2,$tbl) # Td4[s0>>8] + sll $t1,16 llgc $t3,2048($s0,$tbl) # Td4[s0>>0] sllg $s0,$i3,24 - sll $t1,16 sll $t2,8 - srlg $i1,$s1,24 - srlg $i2,$s1,16 - srlg $i3,$s1,8 - nr $s1,$mask # i0 - nr $i2,$mask - nr $i3,$mask llgc $s1,2048($s1,$tbl) # Td4[s1>>0] llgc $i1,2048($i1,$tbl) # Td4[s1>>24] llgc $i2,2048($i2,$tbl) # Td4[s1>>16] - llgc $i3,2048($i3,$tbl) # Td4[s1>>8] sll $i1,24 + llgc $i3,2048($ra,$tbl) # Td4[s1>>8] 
sll $i2,16 sll $i3,8 or $s0,$s1 @@ -673,9 +747,8 @@ _s390x_AES_decrypt: llgc $i3,2048($i3,$tbl) # Td4[s2>>16] sll $i1,8 sll $i2,24 - sll $i3,16 or $s0,$i1 - or $s1,$t1 + sll $i3,16 or $t2,$i2 or $t3,$i3 @@ -685,11 +758,17 @@ _s390x_AES_decrypt: nr $s3,$mask # i2 nr $i1,$mask nr $i2,$mask + + l${g} $ra,15*$SIZE_T($sp) + or $s1,$t1 + l $t0,16($key) + l $t1,20($key) + llgc $i1,2048($i1,$tbl) # Td4[s3>>16] llgc $i2,2048($i2,$tbl) # Td4[s3>>8] + sll $i1,16 llgc $s2,2048($s3,$tbl) # Td4[s3>>0] llgc $s3,2048($i3,$tbl) # Td4[s3>>24] - sll $i1,16 sll $i2,8 sll $s3,24 or $s0,$i1 @@ -697,67 +776,81 @@ _s390x_AES_decrypt: or $s2,$t2 or $s3,$t3 - x $s0,16($key) - x $s1,20($key) + xr $s0,$t0 + xr $s1,$t1 x $s2,24($key) x $s3,28($key) - br $ra + br $ra .size _s390x_AES_decrypt,.-_s390x_AES_decrypt +___ +$code.=<<___; # void AES_set_encrypt_key(const unsigned char *in, int bits, # AES_KEY *key) { .globl AES_set_encrypt_key .type AES_set_encrypt_key,\@function .align 16 AES_set_encrypt_key: - lghi $t1,0 - clgr $inp,$t1 +_s390x_AES_set_encrypt_key: + lghi $t0,0 + cl${g}r $inp,$t0 je .Lminus1 - clgr $key,$t1 + cl${g}r $key,$t0 je .Lminus1 - lghi $t1,128 - clr $bits,$t1 - je .Lproceed128 - lghi $t1,192 - clr $bits,$t1 - je .Lekey_internal - lghi $t1,256 - clr $bits,$t1 - je .Lekey_internal + lghi $t0,128 + clr $bits,$t0 + je .Lproceed + lghi $t0,192 + clr $bits,$t0 + je .Lproceed + lghi $t0,256 + clr $bits,$t0 + je .Lproceed lghi %r2,-2 br %r14 -.align 4 -.Lproceed128: - lghi %r0,0 # query capability vector - la %r1,16($sp) - .long 0xb92e0042 # km %r4,%r2 - lg %r0,16($sp) - tmhl %r0,`0x8000>>2` +.align 16 +.Lproceed: +___ +$code.=<<___ if (!$softonly); + # convert bits to km(c) code, [128,192,256]->[18,19,20] + lhi %r5,-128 + lhi %r0,18 + ar %r5,$bits + srl %r5,6 + ar %r5,%r0 + + larl %r1,OPENSSL_s390xcap_P + llihh %r0,0x8000 + srlg %r0,%r0,0(%r5) + ng %r0,S390X_KM(%r1) # check availability of both km... + ng %r0,S390X_KMC(%r1) # ...and kmc support for given key length jz .Lekey_internal - l $t1,0($inp) # just copy 128 bits... - l $t2,4($inp) - l $bits,8($inp) - l $inp,12($inp) - st $t1,0($key) - st $t2,4($key) - st $bits,8($key) - st $inp,12($key) - lghi $t1,10 - st $t1,236($key) # ... postpone key setup - st $t1,240($key) + lmg %r0,%r1,0($inp) # just copy 128 bits... + stmg %r0,%r1,0($key) + lhi %r0,192 + cr $bits,%r0 + jl 1f + lg %r1,16($inp) + stg %r1,16($key) + je 1f + lg %r1,24($inp) + stg %r1,24($key) +1: st $bits,236($key) # save bits [for debugging purposes] + lgr $t0,%r5 + st %r5,240($key) # save km(c) code lghi %r2,0 br %r14 - +___ +$code.=<<___; .align 16 .Lekey_internal: - stmg %r6,%r13,48($sp) # all volatile regs, but $ra! + stm${g} %r4,%r13,4*$SIZE_T($sp) # all non-volatile regs and $key - bras $tbl,1f -1: aghi $tbl,AES_Te+2048-. 
+ larl $tbl,AES_Te+2048 llgf $s0,0($inp) llgf $s1,4($inp) @@ -767,18 +860,15 @@ AES_set_encrypt_key: st $s1,4($key) st $s2,8($key) st $s3,12($key) - lghi $t1,128 - cr $bits,$t1 + lghi $t0,128 + cr $bits,$t0 jne .Lnot128 llill $mask,0xff lghi $t3,0 # i=0 lghi $rounds,10 - st $t3,236($key) # mark as set up st $rounds,240($key) -.align 8 -.L128_loop: llgfr $t2,$s3 # temp=rk[3] srlg $i1,$s3,8 srlg $i2,$s3,16 @@ -786,6 +876,9 @@ AES_set_encrypt_key: nr $t2,$mask nr $i1,$mask nr $i2,$mask + +.align 16 +.L128_loop: la $t2,0($t2,$tbl) la $i1,0($i1,$tbl) la $i2,0($i2,$tbl) @@ -799,6 +892,15 @@ AES_set_encrypt_key: xr $s1,$s0 # rk[5]=rk[1]^rk[4] xr $s2,$s1 # rk[6]=rk[2]^rk[5] xr $s3,$s2 # rk[7]=rk[3]^rk[6] + + llgfr $t2,$s3 # temp=rk[3] + srlg $i1,$s3,8 + srlg $i2,$s3,16 + nr $t2,$mask + nr $i1,$mask + srlg $i3,$s3,24 + nr $i2,$mask + st $s0,16($key) st $s1,20($key) st $s2,24($key) @@ -806,18 +908,19 @@ AES_set_encrypt_key: la $key,16($key) # key+=4 la $t3,4($t3) # i++ brct $rounds,.L128_loop + lghi $t0,10 lghi %r2,0 - lmg %r6,%r13,48($sp) + lm${g} %r4,%r13,4*$SIZE_T($sp) br $ra -.align 4 +.align 16 .Lnot128: - llgf $t1,16($inp) - llgf $t2,20($inp) - st $t1,16($key) - st $t2,20($key) - lghi $t1,192 - cr $bits,$t1 + llgf $t0,16($inp) + llgf $t1,20($inp) + st $t0,16($key) + st $t1,20($key) + lghi $t0,192 + cr $bits,$t0 jne .Lnot192 llill $mask,0xff @@ -826,76 +929,89 @@ AES_set_encrypt_key: st $rounds,240($key) lghi $rounds,8 -.align 8 -.L192_loop: - srlg $i1,$t2,8 - srlg $i2,$t2,16 - srlg $i3,$t2,24 - nr $t2,$mask + srlg $i1,$t1,8 + srlg $i2,$t1,16 + srlg $i3,$t1,24 + nr $t1,$mask nr $i1,$mask nr $i2,$mask - la $t2,0($t2,$tbl) + +.align 16 +.L192_loop: + la $t1,0($t1,$tbl) la $i1,0($i1,$tbl) la $i2,0($i2,$tbl) la $i3,0($i3,$tbl) - icm $t2,2,0($t2) # Te4[rk[5]>>0]<<8 - icm $t2,4,0($i1) # Te4[rk[5]>>8]<<16 - icm $t2,8,0($i2) # Te4[rk[5]>>16]<<24 - icm $t2,1,0($i3) # Te4[rk[5]>>24] - x $t2,256($t3,$tbl) # rcon[i] - xr $s0,$t2 # rk[6]=rk[0]^... + icm $t1,2,0($t1) # Te4[rk[5]>>0]<<8 + icm $t1,4,0($i1) # Te4[rk[5]>>8]<<16 + icm $t1,8,0($i2) # Te4[rk[5]>>16]<<24 + icm $t1,1,0($i3) # Te4[rk[5]>>24] + x $t1,256($t3,$tbl) # rcon[i] + xr $s0,$t1 # rk[6]=rk[0]^... 
xr $s1,$s0 # rk[7]=rk[1]^rk[6] xr $s2,$s1 # rk[8]=rk[2]^rk[7] xr $s3,$s2 # rk[9]=rk[3]^rk[8] + st $s0,24($key) st $s1,28($key) st $s2,32($key) st $s3,36($key) brct $rounds,.L192_continue + lghi $t0,12 lghi %r2,0 - lmg %r6,%r13,48($sp) + lm${g} %r4,%r13,4*$SIZE_T($sp) br $ra -.align 4 + +.align 16 .L192_continue: - lgr $t2,$s3 - x $t2,16($key) # rk[10]=rk[4]^rk[9] - st $t2,40($key) - x $t2,20($key) # rk[11]=rk[5]^rk[10] - st $t2,44($key) + lgr $t1,$s3 + x $t1,16($key) # rk[10]=rk[4]^rk[9] + st $t1,40($key) + x $t1,20($key) # rk[11]=rk[5]^rk[10] + st $t1,44($key) + + srlg $i1,$t1,8 + srlg $i2,$t1,16 + srlg $i3,$t1,24 + nr $t1,$mask + nr $i1,$mask + nr $i2,$mask + la $key,24($key) # key+=6 la $t3,4($t3) # i++ j .L192_loop -.align 4 +.align 16 .Lnot192: - llgf $t1,24($inp) - llgf $t2,28($inp) - st $t1,24($key) - st $t2,28($key) + llgf $t0,24($inp) + llgf $t1,28($inp) + st $t0,24($key) + st $t1,28($key) llill $mask,0xff lghi $t3,0 # i=0 lghi $rounds,14 st $rounds,240($key) lghi $rounds,7 -.align 8 -.L256_loop: - srlg $i1,$t2,8 - srlg $i2,$t2,16 - srlg $i3,$t2,24 - nr $t2,$mask + srlg $i1,$t1,8 + srlg $i2,$t1,16 + srlg $i3,$t1,24 + nr $t1,$mask nr $i1,$mask nr $i2,$mask - la $t2,0($t2,$tbl) + +.align 16 +.L256_loop: + la $t1,0($t1,$tbl) la $i1,0($i1,$tbl) la $i2,0($i2,$tbl) la $i3,0($i3,$tbl) - icm $t2,2,0($t2) # Te4[rk[7]>>0]<<8 - icm $t2,4,0($i1) # Te4[rk[7]>>8]<<16 - icm $t2,8,0($i2) # Te4[rk[7]>>16]<<24 - icm $t2,1,0($i3) # Te4[rk[7]>>24] - x $t2,256($t3,$tbl) # rcon[i] - xr $s0,$t2 # rk[8]=rk[0]^... + icm $t1,2,0($t1) # Te4[rk[7]>>0]<<8 + icm $t1,4,0($i1) # Te4[rk[7]>>8]<<16 + icm $t1,8,0($i2) # Te4[rk[7]>>16]<<24 + icm $t1,1,0($i3) # Te4[rk[7]>>24] + x $t1,256($t3,$tbl) # rcon[i] + xr $s0,$t1 # rk[8]=rk[0]^... xr $s1,$s0 # rk[9]=rk[1]^rk[8] xr $s2,$s1 # rk[10]=rk[2]^rk[9] xr $s3,$s2 # rk[11]=rk[3]^rk[10] @@ -904,42 +1020,51 @@ AES_set_encrypt_key: st $s2,40($key) st $s3,44($key) brct $rounds,.L256_continue + lghi $t0,14 lghi %r2,0 - lmg %r6,%r13,48($sp) + lm${g} %r4,%r13,4*$SIZE_T($sp) br $ra -.align 4 + +.align 16 .L256_continue: - lgr $t2,$s3 # temp=rk[11] + lgr $t1,$s3 # temp=rk[11] srlg $i1,$s3,8 srlg $i2,$s3,16 srlg $i3,$s3,24 - nr $t2,$mask + nr $t1,$mask nr $i1,$mask nr $i2,$mask - la $t2,0($t2,$tbl) + la $t1,0($t1,$tbl) la $i1,0($i1,$tbl) la $i2,0($i2,$tbl) la $i3,0($i3,$tbl) - icm $t2,1,0($t2) # Te4[rk[11]>>0] - icm $t2,2,0($i1) # Te4[rk[11]>>8]<<8 - icm $t2,4,0($i2) # Te4[rk[11]>>16]<<16 - icm $t2,8,0($i3) # Te4[rk[11]>>24]<<24 - x $t2,16($key) # rk[12]=rk[4]^... - st $t2,48($key) - x $t2,20($key) # rk[13]=rk[5]^rk[12] - st $t2,52($key) - x $t2,24($key) # rk[14]=rk[6]^rk[13] - st $t2,56($key) - x $t2,28($key) # rk[15]=rk[7]^rk[14] - st $t2,60($key) + llgc $t1,0($t1) # Te4[rk[11]>>0] + icm $t1,2,0($i1) # Te4[rk[11]>>8]<<8 + icm $t1,4,0($i2) # Te4[rk[11]>>16]<<16 + icm $t1,8,0($i3) # Te4[rk[11]>>24]<<24 + x $t1,16($key) # rk[12]=rk[4]^... 
+ st $t1,48($key) + x $t1,20($key) # rk[13]=rk[5]^rk[12] + st $t1,52($key) + x $t1,24($key) # rk[14]=rk[6]^rk[13] + st $t1,56($key) + x $t1,28($key) # rk[15]=rk[7]^rk[14] + st $t1,60($key) + + srlg $i1,$t1,8 + srlg $i2,$t1,16 + srlg $i3,$t1,24 + nr $t1,$mask + nr $i1,$mask + nr $i2,$mask la $key,32($key) # key+=8 la $t3,4($t3) # i++ j .L256_loop -.align 4 + .Lminus1: lghi %r2,-1 - br %r14 + br $ra .size AES_set_encrypt_key,.-AES_set_encrypt_key # void AES_set_decrypt_key(const unsigned char *in, int bits, @@ -948,64 +1073,39 @@ AES_set_encrypt_key: .type AES_set_decrypt_key,\@function .align 16 AES_set_decrypt_key: - stg $key,32($sp) # I rely on AES_set_encrypt_key to - stg $ra,112($sp) # save [other] volatile registers! - bras $ra,AES_set_encrypt_key - lg $key,32($sp) - lg $ra,112($sp) + #st${g} $key,4*$SIZE_T($sp) # I rely on AES_set_encrypt_key to + st${g} $ra,14*$SIZE_T($sp) # save non-volatile registers and $key! + bras $ra,_s390x_AES_set_encrypt_key + #l${g} $key,4*$SIZE_T($sp) + l${g} $ra,14*$SIZE_T($sp) ltgr %r2,%r2 bnzr $ra - - lghi $t1,10 - c $t1,240($key) - jne .Lgo - lghi $t1,0 - c $t1,236($key) - je .Lgo - - l $t1,0($key) # just copy 128 bits otherwise - l $t2,4($key) - l $t3,8($key) - l $bits,12($key) - st $t1,160($key) - st $t2,164($key) - st $t3,168($key) - st $bits,172($key) - lghi %r2,0 +___ +$code.=<<___ if (!$softonly); + #l $t0,240($key) + lhi $t1,16 + cr $t0,$t1 + jl .Lgo + oill $t0,0x80 # set "decrypt" bit + st $t0,240($key) br $ra - +___ +$code.=<<___; .align 16 -.Ldkey_internal: - stg $key,32($sp) - stg $ra,40($sp) - bras $ra,.Lekey_internal - lg $key,32($sp) - lg $ra,40($sp) - -.Lgo: llgf $rounds,240($key) - lghi $i1,0 +.Lgo: lgr $rounds,$t0 #llgf $rounds,240($key) + la $i1,0($key) sllg $i2,$rounds,4 + la $i2,0($i2,$key) srl $rounds,1 + lghi $t1,-16 -.align 8 -.Linv: l $s0,0($i1,$key) - l $s1,4($i1,$key) - l $s2,8($i1,$key) - l $s3,12($i1,$key) - l $t1,0($i2,$key) - l $t2,4($i2,$key) - l $t3,8($i2,$key) - l $i3,12($i2,$key) - st $s0,0($i2,$key) - st $s1,4($i2,$key) - st $s2,8($i2,$key) - st $s3,12($i2,$key) - st $t1,0($i1,$key) - st $t2,4($i1,$key) - st $t3,8($i1,$key) - st $i3,12($i1,$key) - aghi $i1,16 - aghi $i2,-16 +.align 16 +.Linv: lmg $s0,$s1,0($i1) + lmg $s2,$s3,0($i2) + stmg $s0,$s1,0($i2) + stmg $s2,$s3,0($i1) + la $i1,16($i1) + la $i2,0($t1,$i2) brct $rounds,.Linv ___ $mask80=$i1; @@ -1016,13 +1116,13 @@ $code.=<<___; aghi $rounds,-1 sll $rounds,2 # (rounds-1)*4 llilh $mask80,0x8080 - oill $mask80,0x8080 llilh $mask1b,0x1b1b - oill $mask1b,0x1b1b llilh $maskfe,0xfefe + oill $mask80,0x8080 + oill $mask1b,0x1b1b oill $maskfe,0xfefe -.align 8 +.align 16 .Lmix: l $s0,16($key) # tp1 lr $s1,$s0 ngr $s1,$mask80 @@ -1054,28 +1154,1075 @@ $code.=<<___; xr $s1,$s0 # tp2^tp1 xr $s2,$s0 # tp4^tp1 rll $s0,$s0,24 # = ROTATE(tp1,8) + xr $s2,$s3 # ^=tp8 xr $s0,$s1 # ^=tp2^tp1 - xr $s0,$s2 # ^=tp4^tp1 - xr $s0,$s3 # ^= tp8[^(tp4^tp1)^(tp2^tp1)=tp4^tp2] xr $s1,$s3 # tp2^tp1^tp8 + xr $s0,$s2 # ^=tp4^tp1^tp8 rll $s1,$s1,8 - xr $s0,$s1 # ^= ROTATE(tp8^tp2^tp1,24) - xr $s2,$s3 # tp4^tp1^tp8 rll $s2,$s2,16 - xr $s0,$s2 # ^= ROTATE(tp8^tp4^tp1,16) + xr $s0,$s1 # ^= ROTATE(tp8^tp2^tp1,24) rll $s3,$s3,24 + xr $s0,$s2 # ^= ROTATE(tp8^tp4^tp1,16) xr $s0,$s3 # ^= ROTATE(tp8,8) st $s0,16($key) la $key,4($key) brct $rounds,.Lmix - lmg %r6,%r13,48($sp)# this was saved by AES_set_encrypt_key! + lm${g} %r6,%r13,6*$SIZE_T($sp)# as was saved by AES_set_encrypt_key! 
lghi %r2,0 br $ra .size AES_set_decrypt_key,.-AES_set_decrypt_key +___ + +######################################################################## +# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, +# size_t length, const AES_KEY *key, +# unsigned char *ivec, const int enc) +{ +my $inp="%r2"; +my $out="%r4"; # length and out are swapped +my $len="%r3"; +my $key="%r5"; +my $ivp="%r6"; + +$code.=<<___; +.globl AES_cbc_encrypt +.type AES_cbc_encrypt,\@function +.align 16 +AES_cbc_encrypt: + xgr %r3,%r4 # flip %r3 and %r4, out and len + xgr %r4,%r3 + xgr %r3,%r4 +___ +$code.=<<___ if (!$softonly); + lhi %r0,16 + cl %r0,240($key) + jh .Lcbc_software + + lg %r0,0($ivp) # copy ivec + lg %r1,8($ivp) + stmg %r0,%r1,16($sp) + lmg %r0,%r1,0($key) # copy key, cover 256 bit + stmg %r0,%r1,32($sp) + lmg %r0,%r1,16($key) + stmg %r0,%r1,48($sp) + l %r0,240($key) # load kmc code + lghi $key,15 # res=len%16, len-=res; + ngr $key,$len + sl${g}r $len,$key + la %r1,16($sp) # parameter block - ivec || key + jz .Lkmc_truncated + .long 0xb92f0042 # kmc %r4,%r2 + brc 1,.-4 # pay attention to "partial completion" + ltr $key,$key + jnz .Lkmc_truncated +.Lkmc_done: + lmg %r0,%r1,16($sp) # copy ivec to caller + stg %r0,0($ivp) + stg %r1,8($ivp) + br $ra +.align 16 +.Lkmc_truncated: + ahi $key,-1 # it's the way it's encoded in mvc + tmll %r0,0x80 + jnz .Lkmc_truncated_dec + lghi %r1,0 + stg %r1,16*$SIZE_T($sp) + stg %r1,16*$SIZE_T+8($sp) + bras %r1,1f + mvc 16*$SIZE_T(1,$sp),0($inp) +1: ex $key,0(%r1) + la %r1,16($sp) # restore parameter block + la $inp,16*$SIZE_T($sp) + lghi $len,16 + .long 0xb92f0042 # kmc %r4,%r2 + j .Lkmc_done +.align 16 +.Lkmc_truncated_dec: + st${g} $out,4*$SIZE_T($sp) + la $out,16*$SIZE_T($sp) + lghi $len,16 + .long 0xb92f0042 # kmc %r4,%r2 + l${g} $out,4*$SIZE_T($sp) + bras %r1,2f + mvc 0(1,$out),16*$SIZE_T($sp) +2: ex $key,0(%r1) + j .Lkmc_done +.align 16 +.Lcbc_software: +___ +$code.=<<___; + stm${g} $key,$ra,5*$SIZE_T($sp) + lhi %r0,0 + cl %r0,`$stdframe+$SIZE_T-4`($sp) + je .Lcbc_decrypt + + larl $tbl,AES_Te + + llgf $s0,0($ivp) + llgf $s1,4($ivp) + llgf $s2,8($ivp) + llgf $s3,12($ivp) + + lghi $t0,16 + sl${g}r $len,$t0 + brc 4,.Lcbc_enc_tail # if borrow +.Lcbc_enc_loop: + stm${g} $inp,$out,2*$SIZE_T($sp) + x $s0,0($inp) + x $s1,4($inp) + x $s2,8($inp) + x $s3,12($inp) + lgr %r4,$key + + bras $ra,_s390x_AES_encrypt + + lm${g} $inp,$key,2*$SIZE_T($sp) + st $s0,0($out) + st $s1,4($out) + st $s2,8($out) + st $s3,12($out) + + la $inp,16($inp) + la $out,16($out) + lghi $t0,16 + lt${g}r $len,$len + jz .Lcbc_enc_done + sl${g}r $len,$t0 + brc 4,.Lcbc_enc_tail # if borrow + j .Lcbc_enc_loop +.align 16 +.Lcbc_enc_done: + l${g} $ivp,6*$SIZE_T($sp) + st $s0,0($ivp) + st $s1,4($ivp) + st $s2,8($ivp) + st $s3,12($ivp) + + lm${g} %r7,$ra,7*$SIZE_T($sp) + br $ra + +.align 16 +.Lcbc_enc_tail: + aghi $len,15 + lghi $t0,0 + stg $t0,16*$SIZE_T($sp) + stg $t0,16*$SIZE_T+8($sp) + bras $t1,3f + mvc 16*$SIZE_T(1,$sp),0($inp) +3: ex $len,0($t1) + lghi $len,0 + la $inp,16*$SIZE_T($sp) + j .Lcbc_enc_loop + +.align 16 +.Lcbc_decrypt: + larl $tbl,AES_Td + + lg $t0,0($ivp) + lg $t1,8($ivp) + stmg $t0,$t1,16*$SIZE_T($sp) + +.Lcbc_dec_loop: + stm${g} $inp,$out,2*$SIZE_T($sp) + llgf $s0,0($inp) + llgf $s1,4($inp) + llgf $s2,8($inp) + llgf $s3,12($inp) + lgr %r4,$key + + bras $ra,_s390x_AES_decrypt + + lm${g} $inp,$key,2*$SIZE_T($sp) + sllg $s0,$s0,32 + sllg $s2,$s2,32 + lr $s0,$s1 + lr $s2,$s3 + + lg $t0,0($inp) + lg $t1,8($inp) + xg $s0,16*$SIZE_T($sp) + xg $s2,16*$SIZE_T+8($sp) + lghi $s1,16 + sl${g}r 
$len,$s1
+	brc	4,.Lcbc_dec_tail	# if borrow
+	brc	2,.Lcbc_dec_done	# if zero
+	stg	$s0,0($out)
+	stg	$s2,8($out)
+	stmg	$t0,$t1,16*$SIZE_T($sp)
+
+	la	$inp,16($inp)
+	la	$out,16($out)
+	j	.Lcbc_dec_loop
+
+.Lcbc_dec_done:
+	stg	$s0,0($out)
+	stg	$s2,8($out)
+.Lcbc_dec_exit:
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	stmg	$t0,$t1,0($ivp)
+
+	br	$ra
+
+.align	16
+.Lcbc_dec_tail:
+	aghi	$len,15
+	stg	$s0,16*$SIZE_T($sp)
+	stg	$s2,16*$SIZE_T+8($sp)
+	bras	$s1,4f
+	mvc	0(1,$out),16*$SIZE_T($sp)
+4:	ex	$len,0($s1)
+	j	.Lcbc_dec_exit
+.size	AES_cbc_encrypt,.-AES_cbc_encrypt
+___
+}
+########################################################################
+# void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+#			size_t blocks, const AES_KEY *key,
+#			const unsigned char *ivec)
+{
+my $inp="%r2";
+my $out="%r4";	# blocks and out are swapped
+my $len="%r3";
+my $key="%r5";	my $iv0="%r5";
+my $ivp="%r6";
+my $fp ="%r7";
+
+$code.=<<___;
+.globl	AES_ctr32_encrypt
+.type	AES_ctr32_encrypt,\@function
+.align	16
+AES_ctr32_encrypt:
+	xgr	%r3,%r4		# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+	llgfr	$len,$len	# safe in ctr32 subroutine even in 64-bit case
+___
+$code.=<<___ if (!$softonly);
+	l	%r0,240($key)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lctr32_software
+
+	stm${g}	%r6,$s3,6*$SIZE_T($sp)
+
+	slgr	$out,$inp
+	la	%r1,0($key)	# %r1 is permanent copy of $key
+	lg	$iv0,0($ivp)	# load ivec
+	lg	$ivp,8($ivp)
+
+	# prepare and allocate stack frame at the top of 4K page
+	# with 1K reserved for eventual signal handling
+	lghi	$s0,-1024-256-16# guarantee at least 256-bytes buffer
+	lghi	$s1,-4096
+	algr	$s0,$sp
+	lgr	$fp,$sp
+	ngr	$s0,$s1		# align at page boundary
+	slgr	$fp,$s0		# total buffer size
+	lgr	$s2,$sp
+	lghi	$s1,1024+16	# sl[g]fi is extended-immediate facility
+	slgr	$fp,$s1		# deduct reservation to get usable buffer size
+	# buffer size is at least 256 and at most 3072+256-16
+	la	$sp,1024($s0)	# alloca
+	srlg	$fp,$fp,4	# convert bytes to blocks, minimum 16
+	st${g}	$s2,0($sp)	# back-chain
+	st${g}	$fp,$SIZE_T($sp)
+
+	slgr	$len,$fp
+	brc	1,.Lctr32_hw_switch	# not zero, no borrow
+	algr	$fp,$len	# input is shorter than allocated buffer
+	lghi	$len,0
+	st${g}	$fp,$SIZE_T($sp)
+
+.Lctr32_hw_switch:
+___
+$code.=<<___ if (!$softonly && 0);# kmctr code was measured to be ~12% slower
+	llgfr	$s0,%r0
+	lgr	$s1,%r1
+	larl	%r1,OPENSSL_s390xcap_P
+	llihh	%r0,0x8000	# check if kmctr supports the function code
+	srlg	%r0,%r0,0($s0)
+	ng	%r0,S390X_KMCTR(%r1)	# check kmctr capability vector
+	lgr	%r0,$s0
+	lgr	%r1,$s1
+	jz	.Lctr32_km_loop
+
+####### kmctr code
+	algr	$out,$inp	# restore $out
+	lgr	$s1,$len	# $s1 undertakes $len
+	j	.Lctr32_kmctr_loop
+.align	16
+.Lctr32_kmctr_loop:
+	la	$s2,16($sp)
+	lgr	$s3,$fp
+.Lctr32_kmctr_prepare:
+	stg	$iv0,0($s2)
+	stg	$ivp,8($s2)
+	la	$s2,16($s2)
+	ahi	$ivp,1		# 32-bit increment, preserves upper half
+	brct	$s3,.Lctr32_kmctr_prepare
+
+	#la	$inp,0($inp)	# inp
+	sllg	$len,$fp,4	# len
+	#la	$out,0($out)	# out
+	la	$s2,16($sp)	# iv
+	.long	0xb92da042	# kmctr $out,$s2,$inp
+	brc	1,.-4		# pay attention to "partial completion"
+
+	slgr	$s1,$fp
+	brc	1,.Lctr32_kmctr_loop	# not zero, no borrow
+	algr	$fp,$s1
+	lghi	$s1,0
+	brc	4+1,.Lctr32_kmctr_loop	# not zero
+
+	l${g}	$sp,0($sp)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+___
+$code.=<<___ if (!$softonly);
+.Lctr32_km_loop:
+	la	$s2,16($sp)
+	lgr	$s3,$fp
+.Lctr32_km_prepare:
+	stg	$iv0,0($s2)
+	stg	$ivp,8($s2)
+	la	$s2,16($s2)
+	ahi	$ivp,1		# 32-bit increment, preserves upper half
+	brct 
$s3,.Lctr32_km_prepare + + la $s0,16($sp) # inp + sllg $s1,$fp,4 # len + la $s2,16($sp) # out + .long 0xb92e00a8 # km %r10,%r8 + brc 1,.-4 # pay attention to "partial completion" + + la $s2,16($sp) + lgr $s3,$fp + slgr $s2,$inp +.Lctr32_km_xor: + lg $s0,0($inp) + lg $s1,8($inp) + xg $s0,0($s2,$inp) + xg $s1,8($s2,$inp) + stg $s0,0($out,$inp) + stg $s1,8($out,$inp) + la $inp,16($inp) + brct $s3,.Lctr32_km_xor + + slgr $len,$fp + brc 1,.Lctr32_km_loop # not zero, no borrow + algr $fp,$len + lghi $len,0 + brc 4+1,.Lctr32_km_loop # not zero + + l${g} $s0,0($sp) + l${g} $s1,$SIZE_T($sp) + la $s2,16($sp) +.Lctr32_km_zap: + stg $s0,0($s2) + stg $s0,8($s2) + la $s2,16($s2) + brct $s1,.Lctr32_km_zap + + la $sp,0($s0) + lm${g} %r6,$s3,6*$SIZE_T($sp) + br $ra +.align 16 +.Lctr32_software: +___ +$code.=<<___; + stm${g} $key,$ra,5*$SIZE_T($sp) + sl${g}r $inp,$out + larl $tbl,AES_Te + llgf $t1,12($ivp) + +.Lctr32_loop: + stm${g} $inp,$out,2*$SIZE_T($sp) + llgf $s0,0($ivp) + llgf $s1,4($ivp) + llgf $s2,8($ivp) + lgr $s3,$t1 + st $t1,16*$SIZE_T($sp) + lgr %r4,$key + + bras $ra,_s390x_AES_encrypt + + lm${g} $inp,$ivp,2*$SIZE_T($sp) + llgf $t1,16*$SIZE_T($sp) + x $s0,0($inp,$out) + x $s1,4($inp,$out) + x $s2,8($inp,$out) + x $s3,12($inp,$out) + stm $s0,$s3,0($out) + + la $out,16($out) + ahi $t1,1 # 32-bit increment + brct $len,.Lctr32_loop + + lm${g} %r6,$ra,6*$SIZE_T($sp) + br $ra +.size AES_ctr32_encrypt,.-AES_ctr32_encrypt +___ +} + +######################################################################## +# void AES_xts_encrypt(const unsigned char *inp, unsigned char *out, +# size_t len, const AES_KEY *key1, const AES_KEY *key2, +# const unsigned char iv[16]); +# +{ +my $inp="%r2"; +my $out="%r4"; # len and out are swapped +my $len="%r3"; +my $key1="%r5"; # $i1 +my $key2="%r6"; # $i2 +my $fp="%r7"; # $i3 +my $tweak=16*$SIZE_T+16; # or $stdframe-16, bottom of the frame... + +$code.=<<___; +.type _s390x_xts_km,\@function +.align 16 +_s390x_xts_km: +___ +$code.=<<___ if(1); + llgfr $s0,%r0 # put aside the function code + lghi $s1,0x7f + nr $s1,%r0 + larl %r1,OPENSSL_s390xcap_P + llihh %r0,0x8000 + srlg %r0,%r0,32($s1) # check for 32+function code + ng %r0,S390X_KM(%r1) # check km capability vector + lgr %r0,$s0 # restore the function code + la %r1,0($key1) # restore $key1 + jz .Lxts_km_vanilla + + lmg $i2,$i3,$tweak($sp) # put aside the tweak value + algr $out,$inp + + oill %r0,32 # switch to xts function code + aghi $s1,-18 # + sllg $s1,$s1,3 # (function code - 18)*8, 0 or 16 + la %r1,$tweak-16($sp) + slgr %r1,$s1 # parameter block position + lmg $s0,$s3,0($key1) # load 256 bits of key material, + stmg $s0,$s3,0(%r1) # and copy it to parameter block. + # yes, it contains junk and overlaps + # with the tweak in 128-bit case. + # it's done to avoid conditional + # branch. 
+	stmg	$i2,$i3,$tweak($sp)	# "re-seat" the tweak value
+
+	.long	0xb92e0042	# km %r4,%r2
+	brc	1,.-4		# pay attention to "partial completion"
+
+	lrvg	$s0,$tweak+0($sp)	# load the last tweak
+	lrvg	$s1,$tweak+8($sp)
+	stmg	%r0,%r3,$tweak-32($sp)	# wipe copy of the key
+
+	nill	%r0,0xffdf	# switch back to original function code
+	la	%r1,0($key1)	# restore pointer to $key1
+	slgr	$out,$inp
+
+	llgc	$len,2*$SIZE_T-1($sp)
+	nill	$len,0x0f	# $len%=16
+	br	$ra
+
+.align	16
+.Lxts_km_vanilla:
+___
+$code.=<<___;
+	# prepare and allocate stack frame at the top of 4K page
+	# with 1K reserved for eventual signal handling
+	lghi	$s0,-1024-256-16# guarantee at least 256-bytes buffer
+	lghi	$s1,-4096
+	algr	$s0,$sp
+	lgr	$fp,$sp
+	ngr	$s0,$s1		# align at page boundary
+	slgr	$fp,$s0		# total buffer size
+	lgr	$s2,$sp
+	lghi	$s1,1024+16	# sl[g]fi is extended-immediate facility
+	slgr	$fp,$s1		# deduct reservation to get usable buffer size
+	# buffer size is at least 256 and at most 3072+256-16
+	la	$sp,1024($s0)	# alloca
+	nill	$fp,0xfff0	# round to 16*n
+	st${g}	$s2,0($sp)	# back-chain
+	nill	$len,0xfff0	# redundant
+	st${g}	$fp,$SIZE_T($sp)
+
+	slgr	$len,$fp
+	brc	1,.Lxts_km_go	# not zero, no borrow
+	algr	$fp,$len	# input is shorter than allocated buffer
+	lghi	$len,0
+	st${g}	$fp,$SIZE_T($sp)
+
+.Lxts_km_go:
+	lrvg	$s0,$tweak+0($s2)	# load the tweak value in little-endian
+	lrvg	$s1,$tweak+8($s2)
+
+	la	$s2,16($sp)	# vector of ascending tweak values
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
+	j	.Lxts_km_start
+
+.Lxts_km_loop:
+	la	$s2,16($sp)
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
+.Lxts_km_prepare:
+	lghi	$i1,0x87
+	srag	$i2,$s1,63	# broadcast upper bit
+	ngr	$i1,$i2		# rem
+	algr	$s0,$s0
+	alcgr	$s1,$s1
+	xgr	$s0,$i1
+.Lxts_km_start:
+	lrvgr	$i1,$s0		# flip byte order
+	lrvgr	$i2,$s1
+	stg	$i1,0($s2,$inp)
+	stg	$i2,8($s2,$inp)
+	xg	$i1,0($inp)
+	xg	$i2,8($inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$inp,16($inp)
+	brct	$s3,.Lxts_km_prepare
+
+	slgr	$inp,$fp	# rewind $inp
+	la	$s2,0($out,$inp)
+	lgr	$s3,$fp
+	.long	0xb92e00aa	# km $s2,$s2
+	brc	1,.-4		# pay attention to "partial completion"
+
+	la	$s2,16($sp)
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
+.Lxts_km_xor:
+	lg	$i1,0($out,$inp)
+	lg	$i2,8($out,$inp)
+	xg	$i1,0($s2,$inp)
+	xg	$i2,8($s2,$inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$inp,16($inp)
+	brct	$s3,.Lxts_km_xor
+
+	slgr	$len,$fp
+	brc	1,.Lxts_km_loop	# not zero, no borrow
+	algr	$fp,$len
+	lghi	$len,0
+	brc	4+1,.Lxts_km_loop	# not zero
+
+	l${g}	$i1,0($sp)		# back-chain
+	llgf	$fp,`2*$SIZE_T-4`($sp)	# bytes used
+	la	$i2,16($sp)
+	srlg	$fp,$fp,4
+.Lxts_km_zap:
+	stg	$i1,0($i2)
+	stg	$i1,8($i2)
+	la	$i2,16($i2)
+	brct	$fp,.Lxts_km_zap
+
+	la	$sp,0($i1)
+	llgc	$len,2*$SIZE_T-1($i1)
+	nill	$len,0x0f		# $len%=16
+	bzr	$ra
+
+	# generate one more tweak...
+	lghi	$i1,0x87
+	srag	$i2,$s1,63		# broadcast upper bit
+	ngr	$i1,$i2			# rem
+	algr	$s0,$s0
+	alcgr	$s1,$s1
+	xgr	$s0,$i1
+
+	ltr	$len,$len		# clear zero flag
+	br	$ra
+.size	_s390x_xts_km,.-_s390x_xts_km
+
+.globl	AES_xts_encrypt
+.type	AES_xts_encrypt,\@function
+.align	16
+AES_xts_encrypt:
+	xgr	%r3,%r4			# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+___
+$code.=<<___ if ($SIZE_T==4);
+	llgfr	$len,$len
+___
+$code.=<<___;
+	st${g}	$len,1*$SIZE_T($sp)	# save copy of $len
+	srag	$len,$len,4		# formally wrong, because it expands
+					# sign byte, but who can afford asking
+					# to process more than 2^63-1 bytes?
+					# I use it, because it sets condition
+					# code...
+	bcr	8,$ra			# abort if zero (i.e.
less than 16) +___ +$code.=<<___ if (!$softonly); + llgf %r0,240($key2) + lhi %r1,16 + clr %r0,%r1 + jl .Lxts_enc_software + + st${g} $ra,5*$SIZE_T($sp) + stm${g} %r6,$s3,6*$SIZE_T($sp) + + sllg $len,$len,4 # $len&=~15 + slgr $out,$inp + + # generate the tweak value + l${g} $s3,$stdframe($sp) # pointer to iv + la $s2,$tweak($sp) + lmg $s0,$s1,0($s3) + lghi $s3,16 + stmg $s0,$s1,0($s2) + la %r1,0($key2) # $key2 is not needed anymore + .long 0xb92e00aa # km $s2,$s2, generate the tweak + brc 1,.-4 # can this happen? + + l %r0,240($key1) + la %r1,0($key1) # $key1 is not needed anymore + bras $ra,_s390x_xts_km + jz .Lxts_enc_km_done + + aghi $inp,-16 # take one step back + la $i3,0($out,$inp) # put aside real $out +.Lxts_enc_km_steal: + llgc $i1,16($inp) + llgc $i2,0($out,$inp) + stc $i1,0($out,$inp) + stc $i2,16($out,$inp) + la $inp,1($inp) + brct $len,.Lxts_enc_km_steal + + la $s2,0($i3) + lghi $s3,16 + lrvgr $i1,$s0 # flip byte order + lrvgr $i2,$s1 + xg $i1,0($s2) + xg $i2,8($s2) + stg $i1,0($s2) + stg $i2,8($s2) + .long 0xb92e00aa # km $s2,$s2 + brc 1,.-4 # can this happen? + lrvgr $i1,$s0 # flip byte order + lrvgr $i2,$s1 + xg $i1,0($i3) + xg $i2,8($i3) + stg $i1,0($i3) + stg $i2,8($i3) + +.Lxts_enc_km_done: + stg $sp,$tweak+0($sp) # wipe tweak + stg $sp,$tweak+8($sp) + l${g} $ra,5*$SIZE_T($sp) + lm${g} %r6,$s3,6*$SIZE_T($sp) + br $ra +.align 16 +.Lxts_enc_software: +___ +$code.=<<___; + stm${g} %r6,$ra,6*$SIZE_T($sp) + + slgr $out,$inp + + l${g} $s3,$stdframe($sp) # ivp + llgf $s0,0($s3) # load iv + llgf $s1,4($s3) + llgf $s2,8($s3) + llgf $s3,12($s3) + stm${g} %r2,%r5,2*$SIZE_T($sp) + la $key,0($key2) + larl $tbl,AES_Te + bras $ra,_s390x_AES_encrypt # generate the tweak + lm${g} %r2,%r5,2*$SIZE_T($sp) + stm $s0,$s3,$tweak($sp) # save the tweak + j .Lxts_enc_enter + +.align 16 +.Lxts_enc_loop: + lrvg $s1,$tweak+0($sp) # load the tweak in little-endian + lrvg $s3,$tweak+8($sp) + lghi %r1,0x87 + srag %r0,$s3,63 # broadcast upper bit + ngr %r1,%r0 # rem + algr $s1,$s1 + alcgr $s3,$s3 + xgr $s1,%r1 + lrvgr $s1,$s1 # flip byte order + lrvgr $s3,$s3 + srlg $s0,$s1,32 # smash the tweak to 4x32-bits + stg $s1,$tweak+0($sp) # save the tweak + llgfr $s1,$s1 + srlg $s2,$s3,32 + stg $s3,$tweak+8($sp) + llgfr $s3,$s3 + la $inp,16($inp) # $inp+=16 +.Lxts_enc_enter: + x $s0,0($inp) # ^=*($inp) + x $s1,4($inp) + x $s2,8($inp) + x $s3,12($inp) + stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing + la $key,0($key1) + bras $ra,_s390x_AES_encrypt + lm${g} %r2,%r5,2*$SIZE_T($sp) + x $s0,$tweak+0($sp) # ^=tweak + x $s1,$tweak+4($sp) + x $s2,$tweak+8($sp) + x $s3,$tweak+12($sp) + st $s0,0($out,$inp) + st $s1,4($out,$inp) + st $s2,8($out,$inp) + st $s3,12($out,$inp) + brct${g} $len,.Lxts_enc_loop + + llgc $len,`2*$SIZE_T-1`($sp) + nill $len,0x0f # $len%16 + jz .Lxts_enc_done + + la $i3,0($inp,$out) # put aside real $out +.Lxts_enc_steal: + llgc %r0,16($inp) + llgc %r1,0($out,$inp) + stc %r0,0($out,$inp) + stc %r1,16($out,$inp) + la $inp,1($inp) + brct $len,.Lxts_enc_steal + la $out,0($i3) # restore real $out + + # generate last tweak... 
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	algr	$s1,$s1
+	alcgr	$s3,$s3
+	xgr	$s1,%r1
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits
+	stg	$s1,$tweak+0($sp)	# save the tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak+8($sp)
+	llgfr	$s3,$s3
+
+	x	$s0,0($out)		# ^=*(inp)|stolen cipher-text
+	x	$s1,4($out)
+	x	$s2,8($out)
+	x	$s3,12($out)
+	st${g}	$out,4*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_encrypt
+	l${g}	$out,4*$SIZE_T($sp)
+	x	$s0,`$tweak+0`($sp)	# ^=tweak
+	x	$s1,`$tweak+4`($sp)
+	x	$s2,`$tweak+8`($sp)
+	x	$s3,`$tweak+12`($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+.Lxts_enc_done:
+	stg	$sp,$tweak+0($sp)	# wipe tweak
+	stg	$sp,$tweak+8($sp)
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_xts_encrypt,.-AES_xts_encrypt
+___
+# void AES_xts_decrypt(const unsigned char *inp, unsigned char *out,
+#	size_t len, const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
+$code.=<<___;
+.globl	AES_xts_decrypt
+.type	AES_xts_decrypt,\@function
+.align	16
+AES_xts_decrypt:
+	xgr	%r3,%r4			# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+___
+$code.=<<___ if ($SIZE_T==4);
+	llgfr	$len,$len
+___
+$code.=<<___;
+	st${g}	$len,1*$SIZE_T($sp)	# save copy of $len
+	aghi	$len,-16
+	bcr	4,$ra			# abort if less than zero. formally
+					# wrong, because $len is unsigned,
+					# but who can afford asking to
+					# process more than 2^63-1 bytes?
+	tmll	$len,0x0f
+	jnz	.Lxts_dec_proceed
+	aghi	$len,16
+.Lxts_dec_proceed:
+___
+$code.=<<___ if (!$softonly);
+	llgf	%r0,240($key2)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lxts_dec_software
+
+	st${g}	$ra,5*$SIZE_T($sp)
+	stm${g}	%r6,$s3,6*$SIZE_T($sp)
+
+	nill	$len,0xfff0		# $len&=~15
+	slgr	$out,$inp
+
+	# generate the tweak value
+	l${g}	$s3,$stdframe($sp)	# pointer to iv
+	la	$s2,$tweak($sp)
+	lmg	$s0,$s1,0($s3)
+	lghi	$s3,16
+	stmg	$s0,$s1,0($s2)
+	la	%r1,0($key2)		# $key2 is not needed past this point
+	.long	0xb92e00aa		# km $s2,$s2, generate the tweak
+	brc	1,.-4			# can this happen?
+
+	l	%r0,240($key1)
+	la	%r1,0($key1)		# $key1 is not needed anymore
+
+	ltgr	$len,$len
+	jz	.Lxts_dec_km_short
+	bras	$ra,_s390x_xts_km
+	jz	.Lxts_dec_km_done
+
+	lrvgr	$s2,$s0			# make copy in reverse byte order
+	lrvgr	$s3,$s1
+	j	.Lxts_dec_km_2ndtweak
+
+.Lxts_dec_km_short:
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%=16
+	lrvg	$s0,$tweak+0($sp)	# load the tweak
+	lrvg	$s1,$tweak+8($sp)
+	lrvgr	$s2,$s0			# make copy in reverse byte order
+	lrvgr	$s3,$s1
+
+.Lxts_dec_km_2ndtweak:
+	lghi	$i1,0x87
+	srag	$i2,$s1,63		# broadcast upper bit
+	ngr	$i1,$i2			# rem
+	algr	$s0,$s0
+	alcgr	$s1,$s1
+	xgr	$s0,$i1
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+
+	xg	$i1,0($inp)
+	xg	$i2,8($inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$i2,0($out,$inp)
+	lghi	$i3,16
+	.long	0xb92e0066		# km $i2,$i2
+	brc	1,.-4			# can this happen?
+	lrvgr	$i1,$s0
+	lrvgr	$i2,$s1
+	xg	$i1,0($out,$inp)
+	xg	$i2,8($out,$inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+
+	la	$i3,0($out,$inp)	# put aside real $out
+.Lxts_dec_km_steal:
+	llgc	$i1,16($inp)
+	llgc	$i2,0($out,$inp)
+	stc	$i1,0($out,$inp)
+	stc	$i2,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_dec_km_steal
+
+	lgr	$s0,$s2
+	lgr	$s1,$s3
+	xg	$s0,0($i3)
+	xg	$s1,8($i3)
+	stg	$s0,0($i3)
+	stg	$s1,8($i3)
+	la	$s0,0($i3)
+	lghi	$s1,16
+	.long	0xb92e0088		# km $s0,$s0
+	brc	1,.-4			# can this happen?
+ xg $s2,0($i3) + xg $s3,8($i3) + stg $s2,0($i3) + stg $s3,8($i3) +.Lxts_dec_km_done: + stg $sp,$tweak+0($sp) # wipe tweak + stg $sp,$tweak+8($sp) + l${g} $ra,5*$SIZE_T($sp) + lm${g} %r6,$s3,6*$SIZE_T($sp) + br $ra +.align 16 +.Lxts_dec_software: +___ +$code.=<<___; + stm${g} %r6,$ra,6*$SIZE_T($sp) + + srlg $len,$len,4 + slgr $out,$inp + + l${g} $s3,$stdframe($sp) # ivp + llgf $s0,0($s3) # load iv + llgf $s1,4($s3) + llgf $s2,8($s3) + llgf $s3,12($s3) + stm${g} %r2,%r5,2*$SIZE_T($sp) + la $key,0($key2) + larl $tbl,AES_Te + bras $ra,_s390x_AES_encrypt # generate the tweak + lm${g} %r2,%r5,2*$SIZE_T($sp) + larl $tbl,AES_Td + lt${g}r $len,$len + stm $s0,$s3,$tweak($sp) # save the tweak + jz .Lxts_dec_short + j .Lxts_dec_enter + +.align 16 +.Lxts_dec_loop: + lrvg $s1,$tweak+0($sp) # load the tweak in little-endian + lrvg $s3,$tweak+8($sp) + lghi %r1,0x87 + srag %r0,$s3,63 # broadcast upper bit + ngr %r1,%r0 # rem + algr $s1,$s1 + alcgr $s3,$s3 + xgr $s1,%r1 + lrvgr $s1,$s1 # flip byte order + lrvgr $s3,$s3 + srlg $s0,$s1,32 # smash the tweak to 4x32-bits + stg $s1,$tweak+0($sp) # save the tweak + llgfr $s1,$s1 + srlg $s2,$s3,32 + stg $s3,$tweak+8($sp) + llgfr $s3,$s3 +.Lxts_dec_enter: + x $s0,0($inp) # tweak^=*(inp) + x $s1,4($inp) + x $s2,8($inp) + x $s3,12($inp) + stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing + la $key,0($key1) + bras $ra,_s390x_AES_decrypt + lm${g} %r2,%r5,2*$SIZE_T($sp) + x $s0,$tweak+0($sp) # ^=tweak + x $s1,$tweak+4($sp) + x $s2,$tweak+8($sp) + x $s3,$tweak+12($sp) + st $s0,0($out,$inp) + st $s1,4($out,$inp) + st $s2,8($out,$inp) + st $s3,12($out,$inp) + la $inp,16($inp) + brct${g} $len,.Lxts_dec_loop + + llgc $len,`2*$SIZE_T-1`($sp) + nill $len,0x0f # $len%16 + jz .Lxts_dec_done + + # generate pair of tweaks... 
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	algr	$s1,$s1
+	alcgr	$s3,$s3
+	xgr	$s1,%r1
+	lrvgr	$i2,$s1			# flip byte order
+	lrvgr	$i3,$s3
+	stmg	$i2,$i3,$tweak($sp)	# save the 1st tweak
+	j	.Lxts_dec_2ndtweak
+
+.align	16
+.Lxts_dec_short:
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%16
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+.Lxts_dec_2ndtweak:
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	algr	$s1,$s1
+	alcgr	$s3,$s3
+	xgr	$s1,%r1
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits
+	stg	$s1,$tweak-16+0($sp)	# save the 2nd tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak-16+8($sp)
+	llgfr	$s3,$s3
+
+	x	$s0,0($inp)		# tweak_the_2nd^=*(inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	stm${g}	%r2,%r3,2*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_decrypt
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	x	$s0,$tweak-16+0($sp)	# ^=tweak_the_2nd
+	x	$s1,$tweak-16+4($sp)
+	x	$s2,$tweak-16+8($sp)
+	x	$s3,$tweak-16+12($sp)
+	st	$s0,0($out,$inp)
+	st	$s1,4($out,$inp)
+	st	$s2,8($out,$inp)
+	st	$s3,12($out,$inp)
+
+	la	$i3,0($out,$inp)	# put aside real $out
+.Lxts_dec_steal:
+	llgc	%r0,16($inp)
+	llgc	%r1,0($out,$inp)
+	stc	%r0,0($out,$inp)
+	stc	%r1,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_dec_steal
+	la	$out,0($i3)		# restore real $out
+
+	lm	$s0,$s3,$tweak($sp)	# load the 1st tweak
+	x	$s0,0($out)		# tweak^=*(inp)|stolen cipher-text
+	x	$s1,4($out)
+	x	$s2,8($out)
+	x	$s3,12($out)
+	st${g}	$out,4*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_decrypt
+	l${g}	$out,4*$SIZE_T($sp)
+	x	$s0,$tweak+0($sp)	# ^=tweak
+	x	$s1,$tweak+4($sp)
+	x	$s2,$tweak+8($sp)
+	x	$s3,$tweak+12($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+	stg	$sp,$tweak-16+0($sp)	# wipe 2nd tweak
+	stg	$sp,$tweak-16+8($sp)
+.Lxts_dec_done:
+	stg	$sp,$tweak+0($sp)	# wipe tweak
+	stg	$sp,$tweak+8($sp)
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_xts_decrypt,.-AES_xts_decrypt
+___
+}
+$code.=<<___;
 .string	"AES for s390x, CRYPTOGAMS by "
 ___
 
 $code =~ s/\`([^\`]*)\`/eval $1/gem;
 print $code;
 close STDOUT;			# force flush
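For reference, the .Lmix loop in AES_set_decrypt_key above applies InvMixColumns to each round-key word: the $mask80/$mask1b/$maskfe constants implement a four-bytes-at-once xtime, and the rll/xr tail combines tp1/tp2/tp4/tp8 exactly as its comments annotate. A C rendering of the same arithmetic follows; xtime32 and inv_mix_word are illustrative names, not identifiers from the module.

#include <stdint.h>

#define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

/* GF(2^8) doubling of all four bytes of a word at once, using the
 * same 0x8080.../0xfefe.../0x1b1b... masks that .Lmix loads */
static uint32_t xtime32(uint32_t x)
{
    uint32_t hi = x & 0x80808080u;      /* $mask80: per-byte high bits */
    return ((x << 1) & 0xfefefefeu)     /* $maskfe: per-byte left shift */
         ^ ((hi >> 7) * 0x1b);          /* $mask1b: fold in poly 0x1b */
}

/* InvMixColumns on one round-key word, combining tp1/tp2/tp4/tp8
 * with the same rotations as the .Lmix tail */
static uint32_t inv_mix_word(uint32_t tp1)
{
    uint32_t tp2 = xtime32(tp1);
    uint32_t tp4 = xtime32(tp2);
    uint32_t tp8 = xtime32(tp4);

    return (tp2 ^ tp4 ^ tp8)
         ^ ROTL32(tp8 ^ tp1, 24)
         ^ ROTL32(tp8 ^ tp2 ^ tp1, 8)
         ^ ROTL32(tp8 ^ tp4 ^ tp1, 16);
}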
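AES_cbc_encrypt, as prototyped above, processes whole 16-byte blocks and updates the ivec buffer in place, so consecutive calls continue the same chain; the kmc path keeps the chaining value in its parameter block for the duration of one call and copies it back to the caller at .Lkmc_done. A minimal round-trip sketch against that public interface, with key, IVs and plaintext as arbitrary placeholders:

#include <string.h>
#include <openssl/aes.h>

int cbc_roundtrip(void)
{
    static const unsigned char key_bytes[16] = "0123456789abcdef";
    unsigned char iv_enc[16] = {0}, iv_dec[16] = {0};
    unsigned char pt[32] = "two blocks of data go in here..";
    unsigned char ct[32], back[32];
    AES_KEY enc, dec;

    AES_set_encrypt_key(key_bytes, 128, &enc);
    AES_set_decrypt_key(key_bytes, 128, &dec);

    /* last argument selects direction; ivec is updated in place */
    AES_cbc_encrypt(pt, ct, sizeof(pt), &enc, iv_enc, AES_ENCRYPT);
    AES_cbc_encrypt(ct, back, sizeof(ct), &dec, iv_dec, AES_DECRYPT);

    return memcmp(pt, back, sizeof(pt)) == 0;   /* 1 on success */
}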
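Finally, the tweak-update step that recurs throughout the XTS code above (lghi 0x87, srag/ngr to "broadcast upper bit" and form "rem", then algr/alcgr/xgr) is doubling of the 128-bit tweak in GF(2^128) with reduction polynomial x^128+x^7+x^2+x+1, carried out on the byte-reversed halves that lrvg loads. A C sketch of one update, with t[0] holding the low 64 bits and t[1] the high 64 bits, matching $s0/$s1 in _s390x_xts_km:

#include <stdint.h>

/* multiply the 128-bit XTS tweak by x in GF(2^128) */
static void xts_tweak_double(uint64_t t[2])
{
    /* srag/ngr: replicate the top bit, mask it with 0x87 ("rem") */
    uint64_t rem = (uint64_t)((int64_t)t[1] >> 63) & 0x87;

    t[1] = (t[1] << 1) | (t[0] >> 63);  /* algr/alcgr: 128-bit shift left */
    t[0] = (t[0] << 1) ^ rem;           /* xgr: fold in the reduction */
}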