2 * Copyright 2010, Google Inc.
4 * Brought in from coreboot uldivmod.S
6 * SPDX-License-Identifier: GPL-2.0
9 #include <linux/linkage.h>
10 #include <asm/assembler.h>
13 * A, Q = r0 + (r1 << 32)
14 * B, R = r2 + (r3 << 32)
36 ENTRY(__aeabi_uldivmod)
@ NOTE(review): this view of the file is elided — the embedded original
@ line numbers jump, so the A_*/B_*/C_*/D_*/Q_*/TMP register aliases,
@ the labels, and most branches live on lines not visible here.
@ Per the visible "A, Q = r0 + (r1 << 32)" / "B, R = r2 + (r3 << 32)"
@ comments, this is the AEABI unsigned 64-bit divide/modulo helper:
@ presumably returns the quotient in r0:r1 and the remainder in r2:r3
@ (Q/R alias A/B) — TODO confirm against the full file.
37 stmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) lr} @ save work regs (+TMP in Thumb) and lr
39 orrs ip, B_0, B_1 @ Z set -> B == 0
41 @ Test if B is power of 2: (B & (B - 1)) == 0
47 @ Test if A_1 == B_1 == 0
52 /* CLZ only exists in ARM architecture version 5 and above. */
66 @ if clz B - clz A > 0
69 @ B <<= (clz B - clz A)
@ 64-bit left shift of B; the MI/PL conditions are presumably set by an
@ elided subtraction testing whether the shift count is < 32 (D_0, D_1
@ and ip hold the shift and its complement, set up off-screen — verify).
72 movmi B_1, B_1, lsl D_0
73 ARM( orrmi B_1, B_1, B_0, lsr ip ) @ carry B_0's high bits into B_1
74 THUMB( lsrmi TMP, B_0, ip ) @ Thumb-2: no shifted second operand, use TMP
75 THUMB( orrmi B_1, B_1, TMP )
76 movpl B_1, B_0, lsl D_1 @ shift >= 32: high word comes wholly from B_0
78 @ C = 1 << (clz B - clz A)
@ Same 64-bit left-shift idiom applied to the current-bit mask C.
79 movmi C_1, C_1, lsl D_0
80 ARM( orrmi C_1, C_1, C_0, lsr ip )
81 THUMB( lsrmi TMP, C_0, ip )
82 THUMB( orrmi C_1, C_1, TMP )
83 movpl C_1, C_0, lsl D_1
88 @ C: current bit; D: result
90 @ C: current bit; D: result
@ The lsr #28 / lsr #31 lines below are the high-word carry halves of
@ 64-bit left shifts of B and C (by 4 and by 1; the matching low-word
@ shift instructions are in elided lines).
102 orr B_1, B_1, B_0, lsr #28
106 orr C_1, C_1, C_0, lsr #28
116 orr B_1, B_1, B_0, lsr #31
120 orr C_1, C_1, C_0, lsr #31
@ Main subtract-and-shift division loop (mostly elided): C and B move
@ right one bit per iteration; movs sets flags so an elided branch can
@ detect the mask reaching zero.
140 movs C_1, C_1, lsr #1
146 movs B_1, B_1, lsr #1
150 @ Note: A, B & Q, R are aliases
155 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} @ restore and return (pop into pc)
158 @ Note: A_0 & r0 are aliases
165 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} @ restore and return
169 @ Note: A, B and Q, R are aliases
174 @ Note: B must not be 0 here!
@ Power-of-two divisor path (context elided): quotient is A >> log2(B).
181 mov A_0, A_1, lsr D_0
@ 64-bit right shift of A by D_0; PL presumably means D_0 < 32 with D_1
@ the complementary count (both computed off-screen — confirm).
184 movpl A_0, A_0, lsr D_0
185 ARM( orrpl A_0, A_0, A_1, lsl D_1 ) @ pull A_1's low bits into A_0
186 THUMB( lslpl TMP, A_1, D_1 ) @ Thumb-2 split of the shifted orr above
187 THUMB( orrpl A_0, A_0, TMP )
188 mov A_1, A_1, lsr D_0
192 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} @ restore and return
194 @ Note: A, B and Q, R are aliases
199 @ Note: B must not be 0 here!
200 @ Count the leading zeroes in B.
203 @ If B is greater than 1 << 31, divide A and B by 1 << 32.
207 @ Count the remaining leading zeroes in B.
@ Software clz (pre-ARMv5 fallback): binary search over half-word, byte,
@ nibble, bit-pair; the interleaved shift-count accumulation is elided.
208 movs B_1, B_0, lsl #16
210 moveq B_0, B_0, lsr #16
213 moveq B_0, B_0, lsr #8
216 moveq B_0, B_0, lsr #4
219 moveq B_0, B_0, lsr #2
222 @ Shift A to the right by the appropriate amount.
224 mov Q_0, A_0, lsr D_0
225 ARM( orr Q_0, Q_0, A_1, lsl D_1 ) @ pull A_1's low bits into Q_0
226 THUMB( lsl A_1, D_1 ) @ Thumb-2 split; A_1 is dead after this path
227 THUMB( orr Q_0, A_1 )
228 mov Q_1, A_1, lsr D_0
232 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} @ restore and return
@ Division by zero: the AEABI leaves the result unspecified, hence:
237 @ As wrong as it could be
242 ldmfd sp!, {r4, r5, r6, r7, THUMB(TMP,) pc} @ restore and return
243 ENDPROC(__aeabi_uldivmod)