crypto/x86_64cpuid.pl: move extended feature detection upwards.
#!/usr/bin/env perl

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

($arg1,$arg2,$arg3,$arg4)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order
                                 ("%rdi","%rsi","%rdx","%rcx"); # Unix order

print<<___;
.extern         OPENSSL_cpuid_setup
.hidden         OPENSSL_cpuid_setup
.section        .init
        call    OPENSSL_cpuid_setup

.hidden OPENSSL_ia32cap_P
.comm   OPENSSL_ia32cap_P,16,4

.text

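# OPENSSL_atomic_add: atomically adds the second argument to the 32-bit
# value at the first argument using a lock'ed cmpxchg retry loop and
# returns the new value, sign-extended to 64 bits.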
.globl  OPENSSL_atomic_add
.type   OPENSSL_atomic_add,\@abi-omnipotent
.align  16
OPENSSL_atomic_add:
        movl    ($arg1),%eax
.Lspin: leaq    ($arg2,%rax),%r8
        .byte   0xf0            # lock
        cmpxchgl        %r8d,($arg1)
        jne     .Lspin
        movl    %r8d,%eax
        .byte   0x48,0x98       # cltq/cdqe
        ret
.size   OPENSSL_atomic_add,.-OPENSSL_atomic_add

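# OPENSSL_rdtsc: returns the 64-bit time-stamp counter, combining the
# EDX:EAX halves delivered by the rdtsc instruction into %rax.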
.globl  OPENSSL_rdtsc
.type   OPENSSL_rdtsc,\@abi-omnipotent
.align  16
OPENSSL_rdtsc:
        rdtsc
        shl     \$32,%rdx
        or      %rdx,%rax
        ret
.size   OPENSSL_rdtsc,.-OPENSSL_rdtsc

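# OPENSSL_ia32_cpuid: probes CPUID, stores the leaf-7 extended feature
# bits (EBX) into the third dword of the capability vector passed as the
# first argument, and returns the adjusted leaf-1 ECX:EDX feature bits
# as a single 64-bit value.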
.globl  OPENSSL_ia32_cpuid
.type   OPENSSL_ia32_cpuid,\@function,1
.align  16
OPENSSL_ia32_cpuid:
        mov     %rbx,%r8                # save %rbx

        xor     %eax,%eax
        mov     %eax,8(%rdi)            # clear 3rd word
        cpuid
        mov     %eax,%r11d              # max value for standard query level

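        # If leaf 7 is supported, query the structured extended feature
        # flags (sub-leaf 0) and record EBX in the third dword of the
        # capability vector.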
        cmp     \$7,%eax
        jb      .Lno_extended_info

        mov     \$7,%eax
        xor     %ecx,%ecx
        cpuid
        mov     %ebx,8(%rdi)

.Lno_extended_info:

        xor     %eax,%eax
        cmp     \$0x756e6547,%ebx       # "Genu"
        setne   %al
        mov     %eax,%r9d
        cmp     \$0x49656e69,%edx       # "ineI"
        setne   %al
        or      %eax,%r9d
        cmp     \$0x6c65746e,%ecx       # "ntel"
        setne   %al
        or      %eax,%r9d               # 0 indicates Intel CPU
        jz      .Lintel

        cmp     \$0x68747541,%ebx       # "Auth"
        setne   %al
        mov     %eax,%r10d
        cmp     \$0x69746E65,%edx       # "enti"
        setne   %al
        or      %eax,%r10d
        cmp     \$0x444D4163,%ecx       # "cAMD"
        setne   %al
        or      %eax,%r10d              # 0 indicates AMD CPU
        jnz     .Lintel

        # AMD specific
        mov     \$0x80000000,%eax
        cpuid
        cmp     \$0x80000001,%eax
        jb      .Lintel
        mov     %eax,%r10d
        mov     \$0x80000001,%eax
        cpuid
        or      %ecx,%r9d
        and     \$0x00000801,%r9d       # isolate AMD XOP bit, 1<<11

        cmp     \$0x80000008,%r10d
        jb      .Lintel

        mov     \$0x80000008,%eax
        cpuid
        movzb   %cl,%r10                # number of cores - 1
        inc     %r10                    # number of cores

        mov     \$1,%eax
        cpuid
        bt      \$28,%edx               # test hyper-threading bit
        jnc     .Lgeneric
        shr     \$16,%ebx               # number of logical processors
        cmp     %r10b,%bl
        ja      .Lgeneric
        and     \$0xefffffff,%edx       # ~(1<<28)
        jmp     .Lgeneric

.Lintel:
        cmp     \$4,%r11d
        mov     \$-1,%r10d
        jb      .Lnocacheinfo

        mov     \$4,%eax
        mov     \$0,%ecx                # query L1D
        cpuid
        mov     %eax,%r10d
        shr     \$14,%r10d
        and     \$0xfff,%r10d           # number of cores - 1 per L1D

.Lnocacheinfo:
        mov     \$1,%eax
        cpuid
        and     \$0xbfefffff,%edx       # force reserved bits to 0
        cmp     \$0,%r9d
        jne     .Lnotintel
        or      \$0x40000000,%edx       # set reserved bit#30 on Intel CPUs
        and     \$15,%ah
        cmp     \$15,%ah                # examine Family ID
        jne     .Lnotintel
        or      \$0x00100000,%edx       # set reserved bit#20 to engage RC4_CHAR
.Lnotintel:
        bt      \$28,%edx               # test hyper-threading bit
        jnc     .Lgeneric
        and     \$0xefffffff,%edx       # ~(1<<28)
        cmp     \$0,%r10d
        je      .Lgeneric

        or      \$0x10000000,%edx       # 1<<28
        shr     \$16,%ebx
        cmp     \$1,%bl                 # see if cache is shared
        ja      .Lgeneric
        and     \$0xefffffff,%edx       # ~(1<<28)
.Lgeneric:
        and     \$0x00000800,%r9d       # isolate AMD XOP flag
        and     \$0xfffff7ff,%ecx
        or      %ecx,%r9d               # merge AMD XOP flag

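        # Only trust the AVX/FMA/XOP bits if OSXSAVE is set and XCR0
        # reports that the OS preserves both XMM and YMM state;
        # otherwise clear them below.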
        mov     %edx,%r10d              # %r9d:%r10d is copy of %ecx:%edx
        bt      \$27,%r9d               # check OSXSAVE bit
        jnc     .Lclear_avx
        xor     %ecx,%ecx               # XCR0
        .byte   0x0f,0x01,0xd0          # xgetbv
        and     \$6,%eax                # isolate XMM and YMM state support
        cmp     \$6,%eax
        je      .Ldone
.Lclear_avx:
        mov     \$0xefffe7ff,%eax       # ~(1<<28|1<<12|1<<11)
        and     %eax,%r9d               # clear AVX, FMA and AMD XOP bits
        andl    \$0xffffffdf,8(%rdi)    # clear AVX2, ~(1<<5)
.Ldone:
        shl     \$32,%r9
        mov     %r10d,%eax
        mov     %r8,%rbx                # restore %rbx
        or      %r9,%rax
        ret
.size   OPENSSL_ia32_cpuid,.-OPENSSL_ia32_cpuid

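# OPENSSL_cleanse: zeroes the buffer given by the first two arguments
# (pointer, length), byte by byte for short or unaligned stretches and
# eight bytes at a time once the pointer is 8-byte aligned.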
.globl  OPENSSL_cleanse
.type   OPENSSL_cleanse,\@abi-omnipotent
.align  16
OPENSSL_cleanse:
        xor     %rax,%rax
        cmp     \$15,$arg2
        jae     .Lot
        cmp     \$0,$arg2
        je      .Lret
.Little:
        mov     %al,($arg1)
        sub     \$1,$arg2
        lea     1($arg1),$arg1
        jnz     .Little
.Lret:
        ret
.align  16
.Lot:
        test    \$7,$arg1
        jz      .Laligned
        mov     %al,($arg1)
        lea     -1($arg2),$arg2
        lea     1($arg1),$arg1
        jmp     .Lot
.Laligned:
        mov     %rax,($arg1)
        lea     -8($arg2),$arg2
        test    \$-8,$arg2
        lea     8($arg1),$arg1
        jnz     .Laligned
        cmp     \$0,$arg2
        jne     .Little
        ret
.size   OPENSSL_cleanse,.-OPENSSL_cleanse
___

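# OPENSSL_wipe_cpu clears the volatile (caller-clobbered) registers and
# returns the caller's stack pointer in %rax; two variants follow, one per
# calling convention, since Unix and Win64 differ in which XMM and
# general-purpose registers are volatile.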
print<<___ if (!$win64);
.globl  OPENSSL_wipe_cpu
.type   OPENSSL_wipe_cpu,\@abi-omnipotent
.align  16
OPENSSL_wipe_cpu:
        pxor    %xmm0,%xmm0
        pxor    %xmm1,%xmm1
        pxor    %xmm2,%xmm2
        pxor    %xmm3,%xmm3
        pxor    %xmm4,%xmm4
        pxor    %xmm5,%xmm5
        pxor    %xmm6,%xmm6
        pxor    %xmm7,%xmm7
        pxor    %xmm8,%xmm8
        pxor    %xmm9,%xmm9
        pxor    %xmm10,%xmm10
        pxor    %xmm11,%xmm11
        pxor    %xmm12,%xmm12
        pxor    %xmm13,%xmm13
        pxor    %xmm14,%xmm14
        pxor    %xmm15,%xmm15
        xorq    %rcx,%rcx
        xorq    %rdx,%rdx
        xorq    %rsi,%rsi
        xorq    %rdi,%rdi
        xorq    %r8,%r8
        xorq    %r9,%r9
        xorq    %r10,%r10
        xorq    %r11,%r11
        leaq    8(%rsp),%rax
        ret
.size   OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu
___
print<<___ if ($win64);
.globl  OPENSSL_wipe_cpu
.type   OPENSSL_wipe_cpu,\@abi-omnipotent
.align  16
OPENSSL_wipe_cpu:
        pxor    %xmm0,%xmm0
        pxor    %xmm1,%xmm1
        pxor    %xmm2,%xmm2
        pxor    %xmm3,%xmm3
        pxor    %xmm4,%xmm4
        pxor    %xmm5,%xmm5
        xorq    %rcx,%rcx
        xorq    %rdx,%rdx
        xorq    %r8,%r8
        xorq    %r9,%r9
        xorq    %r10,%r10
        xorq    %r11,%r11
        leaq    8(%rsp),%rax
        ret
.size   OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu
___

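# OPENSSL_ia32_rdrand and OPENSSL_ia32_rdseed retry their instruction up
# to 8 times; on success the value is returned, with a genuine zero
# remapped to the remaining loop count so that a zero return always
# signals failure.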
print<<___;
.globl  OPENSSL_ia32_rdrand
.type   OPENSSL_ia32_rdrand,\@abi-omnipotent
.align  16
OPENSSL_ia32_rdrand:
        mov     \$8,%ecx
.Loop_rdrand:
        rdrand  %rax
        jc      .Lbreak_rdrand
        loop    .Loop_rdrand
.Lbreak_rdrand:
        cmp     \$0,%rax
        cmove   %rcx,%rax
        ret
.size   OPENSSL_ia32_rdrand,.-OPENSSL_ia32_rdrand

.globl  OPENSSL_ia32_rdseed
.type   OPENSSL_ia32_rdseed,\@abi-omnipotent
.align  16
OPENSSL_ia32_rdseed:
        mov     \$8,%ecx
.Loop_rdseed:
        rdseed  %rax
        jc      .Lbreak_rdseed
        loop    .Loop_rdseed
.Lbreak_rdseed:
        cmp     \$0,%rax
        cmove   %rcx,%rax
        ret
.size   OPENSSL_ia32_rdseed,.-OPENSSL_ia32_rdseed
___

close STDOUT;   # flush