.type _x86_64_AES_encrypt_compact,\@abi-omnipotent
.align 16
_x86_64_AES_encrypt_compact:
+.cfi_startproc
lea 128($sbox),$inp # size optimization
mov 0-128($inp),$acc1 # prefetch Te4
mov 32-128($inp),$acc2
xor 8($key),$s2
xor 12($key),$s3
.byte 0xf3,0xc3 # rep ret
+.cfi_endproc
.size _x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
___
.type _x86_64_AES_decrypt_compact,\@abi-omnipotent
.align 16
_x86_64_AES_decrypt_compact:
+.cfi_startproc
lea 128($sbox),$inp # size optimization
mov 0-128($inp),$acc1 # prefetch Td4
mov 32-128($inp),$acc2
xor 8($key),$s2
xor 12($key),$s3
.byte 0xf3,0xc3 # rep ret
+.cfi_endproc
.size _x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
___
.type _x86_64_AES_set_encrypt_key,\@abi-omnipotent
.align 16
_x86_64_AES_set_encrypt_key:
+.cfi_startproc
mov %esi,%ecx # %ecx=bits
mov %rdi,%rsi # %rsi=userKey
mov %rdx,%rdi # %rdi=key
mov \$-1,%rax
.Lexit:
.byte 0xf3,0xc3 # rep ret
+.cfi_endproc
.size _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
___
cmp \$0,%rdx # check length
je .Lcbc_epilogue
pushfq
-.cfi_push 49 # %rflags
+# This could be .cfi_push 49, but libunwind fails on registers it does not
+# recognize. See https://bugzilla.redhat.com/show_bug.cgi?id=217087.
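+# (DWARF register 49 is %rflags in the x86_64 psABI numbering; bumping the
+# CFA offset by 8 still accounts for the quadword pushfq leaves on the stack,
+# it just does not record where %rflags could be restored from.)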
+.cfi_adjust_cfa_offset 8
push %rbx
.cfi_push %rbx
push %rbp
cmp \$0,%r9
cmoveq %r10,$sbox
+.cfi_remember_state
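+# snapshot the unwind state here: the fast path re-bases the CFA for its own
+# frame, while .Lcbc_slow_prologue resumes from this point via .cfi_restore_state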
mov OPENSSL_ia32cap_P(%rip),%r10d
cmp \$$speed_limit,%rdx
jb .Lcbc_slow_prologue
#--------------------------- SLOW ROUTINE ---------------------------#
.align 16
.Lcbc_slow_prologue:
+.cfi_restore_state
# allocate aligned stack frame...
lea -88(%rsp),%rbp
and \$-64,%rbp
sub %r10,%rbp
xchg %rsp,%rbp
+.cfi_def_cfa_register %rbp
#add \$8,%rsp # reserve for return address!
mov %rbp,$_rsp # save %rsp
+.cfi_cfa_expression $_rsp,deref,+64
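+# ($_rsp holds %rsp as it was after the return address, %rflags and the six
+# saved registers were pushed, so CFA = *($_rsp) + 8*8)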
.Lcbc_slow_body:
#mov %rdi,$_inp # save copy of inp
#mov %rsi,$_out # save copy of out
.cfi_def_cfa %rsp,16
.Lcbc_popfq:
popfq
-.cfi_pop 49 # %rflags
+# This could be .cfi_pop 49, but libunwind fails on registers it does not
+# recognize. See https://bugzilla.redhat.com/show_bug.cgi?id=217087.
+.cfi_adjust_cfa_offset -8
.Lcbc_epilogue:
ret
.cfi_endproc
.type ${PREFIX}_encrypt,\@abi-omnipotent
.align 16
${PREFIX}_encrypt:
+.cfi_startproc
movups ($inp),$inout0 # load input
mov 240($key),$rounds # key->rounds
___
movups $inout0,($out) # output
pxor $inout0,$inout0
ret
+.cfi_endproc
.size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
.globl ${PREFIX}_decrypt
.type ${PREFIX}_decrypt,\@abi-omnipotent
.align 16
${PREFIX}_decrypt:
+.cfi_startproc
movups ($inp),$inout0 # load input
mov 240($key),$rounds # key->rounds
___
movups $inout0,($out) # output
pxor $inout0,$inout0
ret
+.cfi_endproc
.size ${PREFIX}_decrypt,.-${PREFIX}_decrypt
___
}
.type _aesni_${dir}rypt2,\@abi-omnipotent
.align 16
_aesni_${dir}rypt2:
+.cfi_startproc
$movkey ($key),$rndkey0
shl \$4,$rounds
$movkey 16($key),$rndkey1
aes${dir}last $rndkey0,$inout0
aes${dir}last $rndkey0,$inout1
ret
+.cfi_endproc
.size _aesni_${dir}rypt2,.-_aesni_${dir}rypt2
___
}
.type _aesni_${dir}rypt3,\@abi-omnipotent
.align 16
_aesni_${dir}rypt3:
+.cfi_startproc
$movkey ($key),$rndkey0
shl \$4,$rounds
$movkey 16($key),$rndkey1
aes${dir}last $rndkey0,$inout1
aes${dir}last $rndkey0,$inout2
ret
+.cfi_endproc
.size _aesni_${dir}rypt3,.-_aesni_${dir}rypt3
___
}
.type _aesni_${dir}rypt4,\@abi-omnipotent
.align 16
_aesni_${dir}rypt4:
+.cfi_startproc
$movkey ($key),$rndkey0
shl \$4,$rounds
$movkey 16($key),$rndkey1
aes${dir}last $rndkey0,$inout2
aes${dir}last $rndkey0,$inout3
ret
+.cfi_endproc
.size _aesni_${dir}rypt4,.-_aesni_${dir}rypt4
___
}
.type _aesni_${dir}rypt6,\@abi-omnipotent
.align 16
_aesni_${dir}rypt6:
+.cfi_startproc
$movkey ($key),$rndkey0
shl \$4,$rounds
$movkey 16($key),$rndkey1
aes${dir}last $rndkey0,$inout4
aes${dir}last $rndkey0,$inout5
ret
+.cfi_endproc
.size _aesni_${dir}rypt6,.-_aesni_${dir}rypt6
___
}
.type _aesni_${dir}rypt8,\@abi-omnipotent
.align 16
_aesni_${dir}rypt8:
+.cfi_startproc
$movkey ($key),$rndkey0
shl \$4,$rounds
$movkey 16($key),$rndkey1
aes${dir}last $rndkey0,$inout6
aes${dir}last $rndkey0,$inout7
ret
+.cfi_endproc
.size _aesni_${dir}rypt8,.-_aesni_${dir}rypt8
___
}
.type aesni_ecb_encrypt,\@function,5
.align 16
aesni_ecb_encrypt:
+.cfi_startproc
___
$code.=<<___ if ($win64);
lea -0x58(%rsp),%rsp
___
$code.=<<___;
ret
+.cfi_endproc
.size aesni_ecb_encrypt,.-aesni_ecb_encrypt
___
\f
.type _bsaes_encrypt8,\@abi-omnipotent
.align 64
_bsaes_encrypt8:
+.cfi_startproc
lea .LBS0(%rip), $const # constants table
movdqa ($key), @XMM[9] # round 0 key
pxor @XMM[8], @XMM[0]
pxor @XMM[8], @XMM[1]
ret
+.cfi_endproc
.size _bsaes_encrypt8,.-_bsaes_encrypt8
.type _bsaes_decrypt8,\@abi-omnipotent
.align 64
_bsaes_decrypt8:
+.cfi_startproc
lea .LBS0(%rip), $const # constants table
movdqa ($key), @XMM[9] # round 0 key
pxor @XMM[8], @XMM[0]
pxor @XMM[8], @XMM[1]
ret
+.cfi_endproc
.size _bsaes_decrypt8,.-_bsaes_decrypt8
___
}
.type _bsaes_key_convert,\@abi-omnipotent
.align 16
_bsaes_key_convert:
+.cfi_startproc
lea .Lmasks(%rip), $const
movdqu ($inp), %xmm7 # load round 0 key
lea 0x10($inp), $inp
movdqa 0x50($const), %xmm7 # .L63
#movdqa %xmm6, ($out) # don't save last round key
ret
+.cfi_endproc
.size _bsaes_key_convert,.-_bsaes_key_convert
___
}
.type _vpaes_encrypt_core,\@abi-omnipotent
.align 16
_vpaes_encrypt_core:
+.cfi_startproc
mov %rdx, %r9
mov \$16, %r11
mov 240(%rdx),%eax
pxor %xmm4, %xmm0 # 0 = A
pshufb %xmm1, %xmm0
ret
+.cfi_endproc
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
##
.type _vpaes_decrypt_core,\@abi-omnipotent
.align 16
_vpaes_decrypt_core:
+.cfi_startproc
mov %rdx, %r9 # load key
mov 240(%rdx),%eax
movdqa %xmm9, %xmm1
pxor %xmm4, %xmm0 # 0 = A
pshufb %xmm2, %xmm0
ret
+.cfi_endproc
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
########################################################
.type _vpaes_schedule_core,\@abi-omnipotent
.align 16
_vpaes_schedule_core:
+.cfi_startproc
# rdi = key
# rsi = size in bits
# rdx = buffer
pxor %xmm6, %xmm6
pxor %xmm7, %xmm7
ret
+.cfi_endproc
.size _vpaes_schedule_core,.-_vpaes_schedule_core
##
.type _vpaes_schedule_192_smear,\@abi-omnipotent
.align 16
_vpaes_schedule_192_smear:
+.cfi_startproc
pshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
pshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
pxor %xmm1, %xmm6 # -> c+d c 0 0
movdqa %xmm6, %xmm0
movhlps %xmm1, %xmm6 # clobber low side with zeros
ret
+.cfi_endproc
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
##
.type _vpaes_schedule_round,\@abi-omnipotent
.align 16
_vpaes_schedule_round:
+.cfi_startproc
# extract rcon from xmm8
pxor %xmm1, %xmm1
palignr \$15, %xmm8, %xmm1
pxor %xmm7, %xmm0
movdqa %xmm0, %xmm7
ret
+.cfi_endproc
.size _vpaes_schedule_round,.-_vpaes_schedule_round
##
.type _vpaes_schedule_transform,\@abi-omnipotent
.align 16
_vpaes_schedule_transform:
+.cfi_startproc
movdqa %xmm9, %xmm1
pandn %xmm0, %xmm1
psrld \$4, %xmm1
pshufb %xmm1, %xmm0
pxor %xmm2, %xmm0
ret
+.cfi_endproc
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
##
.type _vpaes_schedule_mangle,\@abi-omnipotent
.align 16
_vpaes_schedule_mangle:
+.cfi_startproc
movdqa %xmm0, %xmm4 # save xmm0 for later
movdqa .Lk_mc_forward(%rip),%xmm5
test %rcx, %rcx
and \$0x30, %r8
movdqu %xmm3, (%rdx)
ret
+.cfi_endproc
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
#
.type ${PREFIX}_set_encrypt_key,\@function,3
.align 16
${PREFIX}_set_encrypt_key:
+.cfi_startproc
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
$code.=<<___;
xor %eax,%eax
ret
+.cfi_endproc
.size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
.globl ${PREFIX}_set_decrypt_key
.type ${PREFIX}_set_decrypt_key,\@function,3
.align 16
${PREFIX}_set_decrypt_key:
+.cfi_startproc
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
$code.=<<___;
xor %eax,%eax
ret
+.cfi_endproc
.size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
.globl ${PREFIX}_encrypt
.type ${PREFIX}_encrypt,\@function,3
.align 16
${PREFIX}_encrypt:
+.cfi_startproc
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
___
$code.=<<___;
ret
+.cfi_endproc
.size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
.globl ${PREFIX}_decrypt
.type ${PREFIX}_decrypt,\@function,3
.align 16
${PREFIX}_decrypt:
+.cfi_startproc
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
___
$code.=<<___;
ret
+.cfi_endproc
.size ${PREFIX}_decrypt,.-${PREFIX}_decrypt
___
{
.type ${PREFIX}_cbc_encrypt,\@function,6
.align 16
${PREFIX}_cbc_encrypt:
+.cfi_startproc
xchg $key,$len
___
($len,$key)=($key,$len);
$code.=<<___;
.Lcbc_abort:
ret
+.cfi_endproc
.size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
___
}
.type _vpaes_preheat,\@abi-omnipotent
.align 16
_vpaes_preheat:
+.cfi_startproc
lea .Lk_s0F(%rip), %r10
movdqa -0x20(%r10), %xmm10 # .Lk_inv
movdqa -0x10(%r10), %xmm11 # .Lk_inv+16
movdqa 0x50(%r10), %xmm15 # .Lk_sb2
movdqa 0x60(%r10), %xmm14 # .Lk_sb2+16
ret
+.cfi_endproc
.size _vpaes_preheat,.-_vpaes_preheat
########################################################
## ##
.type rsaz_1024_red2norm_avx2,\@abi-omnipotent
.align 32
rsaz_1024_red2norm_avx2:
+.cfi_startproc
sub \$-128,$inp # size optimization
xor %rax,%rax
___
}
$code.=<<___;
ret
+.cfi_endproc
.size rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
.globl rsaz_1024_norm2red_avx2
.type rsaz_1024_norm2red_avx2,\@abi-omnipotent
.align 32
rsaz_1024_norm2red_avx2:
+.cfi_startproc
sub \$-128,$out # size optimization
mov ($inp),@T[0]
mov \$0x1fffffff,%eax
mov @T[0],`8*($j+2)-128`($out)
mov @T[0],`8*($j+3)-128`($out)
ret
+.cfi_endproc
.size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
___
}
.type rsaz_1024_scatter5_avx2,\@abi-omnipotent
.align 32
rsaz_1024_scatter5_avx2:
+.cfi_startproc
vzeroupper
vmovdqu .Lscatter_permd(%rip),%ymm5
shl \$4,$power
vzeroupper
ret
+.cfi_endproc
.size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
.globl rsaz_1024_gather5_avx2
.align 32
bn_sqrx8x_internal:
__bn_sqrx8x_internal:
+.cfi_startproc
##################################################################
# Squaring part:
#
cmp 8+8(%rsp),%r8 # end of t[]?
jb .Lsqrx8x_reduction_loop
ret
+.cfi_endproc
.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
___
}\f
.type __ecp_nistz256_mul_montq,\@abi-omnipotent
.align 32
__ecp_nistz256_mul_montq:
+.cfi_startproc
########################################################################
# Multiply a by b[0]
mov %rax, $t1
mov $acc1, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
################################################################################
.type __ecp_nistz256_sqr_montq,\@abi-omnipotent
.align 32
__ecp_nistz256_sqr_montq:
+.cfi_startproc
mov %rax, $acc5
mulq $acc6 # a[1]*a[0]
mov %rax, $acc1
mov $acc7, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
___
.type __ecp_nistz256_mul_montx,\@abi-omnipotent
.align 32
__ecp_nistz256_mul_montx:
+.cfi_startproc
########################################################################
# Multiply by b[0]
mulx $acc1, $acc0, $acc1
mov $acc1, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
.type __ecp_nistz256_sqr_montx,\@abi-omnipotent
.align 32
__ecp_nistz256_sqr_montx:
+.cfi_startproc
mulx $acc6, $acc1, $acc2 # a[0]*a[1]
mulx $acc7, $t0, $acc3 # a[0]*a[2]
xor %eax, %eax
mov $acc7, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
___
}
.type ecp_nistz256_gather_w5,\@abi-omnipotent
.align 32
ecp_nistz256_gather_w5:
+.cfi_startproc
___
$code.=<<___ if ($avx>1);
mov OPENSSL_ia32cap_P+8(%rip), %eax
___
$code.=<<___;
ret
+.cfi_endproc
.LSEH_end_ecp_nistz256_gather_w5:
.size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
.type ecp_nistz256_gather_w7,\@abi-omnipotent
.align 32
ecp_nistz256_gather_w7:
+.cfi_startproc
___
$code.=<<___ if ($avx>1);
mov OPENSSL_ia32cap_P+8(%rip), %eax
___
$code.=<<___;
ret
+.cfi_endproc
.LSEH_end_ecp_nistz256_gather_w7:
.size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
___
.type ecp_nistz256_avx2_gather_w5,\@abi-omnipotent
.align 32
ecp_nistz256_avx2_gather_w5:
+.cfi_startproc
.Lavx2_gather_w5:
vzeroupper
___
___
$code.=<<___;
ret
+.cfi_endproc
.LSEH_end_ecp_nistz256_avx2_gather_w5:
.size ecp_nistz256_avx2_gather_w5,.-ecp_nistz256_avx2_gather_w5
___
.type ecp_nistz256_avx2_gather_w7,\@abi-omnipotent
.align 32
ecp_nistz256_avx2_gather_w7:
+.cfi_startproc
.Lavx2_gather_w7:
vzeroupper
___
___
$code.=<<___;
ret
+.cfi_endproc
.LSEH_end_ecp_nistz256_avx2_gather_w7:
.size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
___
.type __ecp_nistz256_add_toq,\@abi-omnipotent
.align 32
__ecp_nistz256_add_toq:
+.cfi_startproc
xor $t4,$t4
add 8*0($b_ptr), $a0
adc 8*1($b_ptr), $a1
mov $a3, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
.type __ecp_nistz256_sub_fromq,\@abi-omnipotent
.align 32
__ecp_nistz256_sub_fromq:
+.cfi_startproc
sub 8*0($b_ptr), $a0
sbb 8*1($b_ptr), $a1
mov $a0, $t0
mov $a3, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
.type __ecp_nistz256_subq,\@abi-omnipotent
.align 32
__ecp_nistz256_subq:
+.cfi_startproc
sub $a0, $t0
sbb $a1, $t1
mov $t0, $a0
cmovnz $t3, $a3
ret
+.cfi_endproc
.size __ecp_nistz256_subq,.-__ecp_nistz256_subq
.type __ecp_nistz256_mul_by_2q,\@abi-omnipotent
.align 32
__ecp_nistz256_mul_by_2q:
+.cfi_startproc
xor $t4, $t4
add $a0, $a0 # a0:a3+a0:a3
adc $a1, $a1
mov $a3, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
___
}
movq %xmm1, $a_ptr # restore $a_ptr
movq %xmm0, $r_ptr # restore $r_ptr
add \$`32*(18-5)`, %rsp # difference in frame sizes
+.cfi_adjust_cfa_offset `-32*(18-5)`
jmp .Lpoint_double_shortcut$x
+.cfi_adjust_cfa_offset `32*(18-5)`
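+# (the jmp above enters point_double's smaller frame; the code at
+# .Ladd_proceed$x below still runs with the full frame, so put the CFA
+# offset back after the jmp)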
.align 32
.Ladd_proceed$x:
.type __ecp_nistz256_add_tox,\@abi-omnipotent
.align 32
__ecp_nistz256_add_tox:
+.cfi_startproc
xor $t4, $t4
adc 8*0($b_ptr), $a0
adc 8*1($b_ptr), $a1
mov $a3, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
.type __ecp_nistz256_sub_fromx,\@abi-omnipotent
.align 32
__ecp_nistz256_sub_fromx:
+.cfi_startproc
xor $t4, $t4
sbb 8*0($b_ptr), $a0
sbb 8*1($b_ptr), $a1
mov $a3, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
.type __ecp_nistz256_subx,\@abi-omnipotent
.align 32
__ecp_nistz256_subx:
+.cfi_startproc
xor $t4, $t4
sbb $a0, $t0
sbb $a1, $t1
cmovc $t3, $a3
ret
+.cfi_endproc
.size __ecp_nistz256_subx,.-__ecp_nistz256_subx
.type __ecp_nistz256_mul_by_2x,\@abi-omnipotent
.align 32
__ecp_nistz256_mul_by_2x:
+.cfi_startproc
xor $t4, $t4
adc $a0, $a0 # a0:a3+a0:a3
adc $a1, $a1
mov $a3, 8*3($r_ptr)
ret
+.cfi_endproc
.size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
___
}
.type gcm_init_clmul,\@abi-omnipotent
.align 16
gcm_init_clmul:
+.cfi_startproc
.L_init_clmul:
___
$code.=<<___ if ($win64);
___
$code.=<<___;
ret
+.cfi_endproc
.size gcm_init_clmul,.-gcm_init_clmul
___
}
.type gcm_gmult_clmul,\@abi-omnipotent
.align 16
gcm_gmult_clmul:
+.cfi_startproc
.L_gmult_clmul:
movdqu ($Xip),$Xi
movdqa .Lbswap_mask(%rip),$T3
pshufb $T3,$Xi
movdqu $Xi,($Xip)
ret
+.cfi_endproc
.size gcm_gmult_clmul,.-gcm_gmult_clmul
___
}
.type gcm_ghash_clmul,\@abi-omnipotent
.align 32
gcm_ghash_clmul:
+.cfi_startproc
.L_ghash_clmul:
___
$code.=<<___ if ($win64);
___
$code.=<<___;
ret
+.cfi_endproc
.size gcm_ghash_clmul,.-gcm_ghash_clmul
___
}
.type gcm_init_avx,\@abi-omnipotent
.align 32
gcm_init_avx:
+.cfi_startproc
___
if ($avx) {
my ($Htbl,$Xip)=@_4args;
___
$code.=<<___;
ret
+.cfi_endproc
.size gcm_init_avx,.-gcm_init_avx
___
} else {
.type gcm_gmult_avx,\@abi-omnipotent
.align 32
gcm_gmult_avx:
+.cfi_startproc
jmp .L_gmult_clmul
+.cfi_endproc
.size gcm_gmult_avx,.-gcm_gmult_avx
___
\f
.type gcm_ghash_avx,\@abi-omnipotent
.align 32
gcm_ghash_avx:
+.cfi_startproc
___
if ($avx) {
my ($Xip,$Htbl,$inp,$len)=@_4args;
___
$code.=<<___;
ret
+.cfi_endproc
.size gcm_ghash_avx,.-gcm_ghash_avx
___
} else {
);
my ($cfa_reg, $cfa_rsp);
+ my @cfa_stack;
# [us]leb128 format is variable-length integer representation base
# 2^128, with most significant bit of each byte being 0 denoting
cfa_expression($$line)));
last;
};
+ /remember_state/
+ && do { push @cfa_stack, [$cfa_reg, $cfa_rsp];
+ last;
+ };
+ /restore_state/
+ && do { ($cfa_reg, $cfa_rsp) = @{pop @cfa_stack};
+ last;
+ };
}
$self->{value} = ".cfi_$dir\t$$line" if ($dir);
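
The remember/restore handler above only snapshots and reinstates the
($cfa_reg, $cfa_rsp) pair the translator tracks. A minimal standalone Perl
sketch of that push/pop behaviour, using hypothetical names that are not part
of x86_64-xlate.pl:

#!/usr/bin/env perl
# Illustrative sketch only: models how .cfi_remember_state/.cfi_restore_state
# save and reinstate the tracked CFA description with a simple stack.
use strict;
use warnings;

my ($cfa_reg, $cfa_rsp) = ("%rsp", -16);   # stand-ins for the tracked state
my @cfa_stack;

sub remember_state { push @cfa_stack, [$cfa_reg, $cfa_rsp]; }
sub restore_state  { ($cfa_reg, $cfa_rsp) = @{pop @cfa_stack}; }

remember_state();                          # .cfi_remember_state before the branch
($cfa_reg, $cfa_rsp) = ("%rbp", -64);      # fast path re-bases the CFA
restore_state();                           # .cfi_restore_state at the slow path
print "$cfa_reg $cfa_rsp\n";               # back to "%rsp -16"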