#if LDBL_MANT_DIG==64 && LDBL_MAX_EXP==16384
/* exact add, assumes exponent_x >= exponent_y */
static void add(long double *hi, long double *lo, long double x, long double y)
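{
	/*
	 * Hedged sketch of the elided body (a reconstruction, not verified
	 * against the original source): the classic Fast2Sum step, which is
	 * exact under the assumption above that x's exponent is >= y's.
	 */
	long double r;

	r = x + y;          /* rounded sum */
	*hi = r;
	*lo = y - (r - x);  /* exact rounding error of the sum */
}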
/* exact mul, assumes no over/underflow */
static void mul(long double *hi, long double *lo, long double x, long double y)
{
	static const long double c = 1.0 + 0x1p32L;
	long double cx, xh, xl, cy, yh, yl;
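	/*
	 * Hedged reconstruction of the elided lines: split x and y into
	 * 32-bit halves with the constant c above (Dekker/Veltkamp splitting)
	 * and form the rounded product; the statement below then recovers the
	 * exact low part.
	 */
	cx = c*x;
	xh = x - cx + cx;
	xl = x - xh;
	cy = c*y;
	yh = y - cy + cy;
	yl = y - yh;
	*hi = x*y;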
	*lo = (xh*yh - *hi) + xh*yl + xl*yh + xl*yl;
/*
assume (long double)(hi+lo) == hi
return an adjusted hi so that rounding it to double (or less) precision is correct
*/
static long double adjust(long double hi, long double lo)
	if (uhi.bits.m & 0x3ff)
		return hi;
	if (uhi.bits.s == ulo.bits.s)
		uhi.bits.m++;
	else {
		uhi.bits.m--;
		/* handle underflow and take care of ld80 implicit msb */
		if (uhi.bits.m == (uint64_t)-1/2) {
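			/*
			 * Hedged sketch of the elided fix-up: (uint64_t)-1/2 is
			 * 0x7fffffffffffffff, i.e. the decrement above has borrowed away
			 * the explicit integer bit of the ld80 mantissa, so shift the
			 * fraction back up and drop the exponent by one to renormalize.
			 * The "e" field name is an assumption, chosen to match the
			 * "s"/"m" members used above.
			 */
			uhi.bits.m *= 2;
			uhi.bits.e--;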
/* adjusted add so the result is correct when rounded to double (or less) precision */
static long double dadd(long double x, long double y)
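{
	/*
	 * Hedged sketch of the elided body: exact add (which assumes its
	 * first argument has the larger exponent), then nudge the result so
	 * that a second rounding to double (or less) precision cannot go the
	 * wrong way.
	 */
	add(&x, &y, x, y);
	return adjust(x, y);
}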
/* adjusted mul so the result is correct when rounded to double (or less) precision */
static long double dmul(long double x, long double y)
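{
	/* Hedged sketch of the elided body, by analogy with dadd() above:
	 * exact product, then the same rounding adjustment. */
	mul(&x, &y, x, y);
	return adjust(x, y);
}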
static int getexp(long double x)

double fma(double x, double y, double z)
{
	long double hi, lo1, lo2, xy;

	/* handle +-inf,nan */
	if (!isfinite(x) || !isfinite(y))
		return x*y + z;
	if (x == 0.0 || y == 0.0)
		return x*y + z;
	round = fegetround();
	if (round == FE_TONEAREST)

	/* exact mul and add require nearest rounding */
	/* spurious inexact exceptions may be raised */
	fesetround(FE_TONEAREST);
	mul(&xy, &lo1, x, y);
		add(&hi, &lo2, z, xy);
	} else if (ez > exy - 12) {
		add(&hi, &lo2, xy, z);
			/* make sure that the sign of 0 is correct */
			return (xy + z) + lo1;
	/*
	the 12 extra bits (1 guard, 11 round+sticky) are needed so with
	elo <= ehi - 11, and we use the last 10 bits in adjust so
	gives correct result when rounded to double
	*/
	if (round == FE_TONEAREST)
		return dadd(hi, dadd(lo1, lo2));
	return hi + (lo1 + lo2);
/* origin: FreeBSD /usr/src/lib/msun/src/s_fma.c */
/*-
 * Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * A struct dd represents a floating-point number with twice the precision
 * of a double. We maintain the invariant that "hi" stores the 53 high-order
 * bits of the result.
 */
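/*
 * The definition itself is elided in this excerpt; presumably it is the
 * obvious pair of doubles (sketch):
 */
struct dd {
	double hi;
	double lo;
};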
/*
 * Compute a+b exactly, returning the exact result in a struct dd. We assume
 * that both a and b are finite, but make no assumptions about their relative
 * magnitudes.
 */
static inline struct dd dd_add(double a, double b)
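{
	/*
	 * Hedged reconstruction of the elided setup: the branch-free 2Sum.
	 * ret.hi is the rounded sum and s recovers the part of b that was
	 * actually absorbed into it, so the statement below yields the exact
	 * error term.
	 */
	struct dd ret;
	double s;

	ret.hi = a + b;
	s = ret.hi - a;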
	ret.lo = (a - (ret.hi - s)) + (b - s);
/*
 * Compute a+b, with a small tweak: The least significant bit of the
 * result is adjusted into a sticky bit summarizing all the bits that
 * were lost to rounding. This adjustment negates the effects of double
 * rounding when the result is added to another number with a higher
 * exponent. For an explanation of round and sticky bits, see any reference
 * on FPU design, e.g.,
 *
 * J. Coonen. An Implementation Guide to a Proposed Standard for
 * Floating-Point Arithmetic. Computer, vol. 13, no. 1, Jan 1980.
 */
static inline double add_adjusted(double a, double b)
{
	struct dd sum;
	uint64_t hibits, lobits;

	sum = dd_add(a, b);
	if (sum.lo != 0) {
		EXTRACT_WORD64(hibits, sum.hi);
		if ((hibits & 1) == 0) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			EXTRACT_WORD64(lobits, sum.lo);
			hibits += 1 - ((hibits ^ lobits) >> 62 & 2);
			INSERT_WORD64(sum.hi, hibits);
/*
 * Compute ldexp(a+b, scale) with a single rounding error. It is assumed
 * that the result will be subnormal, and care is taken to ensure that
 * double rounding does not occur.
 */
static inline double add_and_denormalize(double a, double b, int scale)
{
	struct dd sum;
	uint64_t hibits, lobits;
	int bits_lost;

	sum = dd_add(a, b);

	/*
	 * If we are losing at least two bits of accuracy to denormalization,
	 * then the first lost bit becomes a round bit, and we adjust the
	 * lowest bit of sum.hi to make it a sticky bit summarizing all the
	 * bits in sum.lo. With the sticky bit adjusted, the hardware will
	 * break any ties in the correct direction.
	 *
	 * If we are losing only one bit to denormalization, however, we must
	 * break the ties manually.
	 */
	EXTRACT_WORD64(hibits, sum.hi);
	bits_lost = -((int)(hibits >> 52) & 0x7ff) - scale + 1;
	if ((bits_lost != 1) ^ (int)(hibits & 1)) {
		/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
		EXTRACT_WORD64(lobits, sum.lo);
		hibits += 1 - (((hibits ^ lobits) >> 62) & 2);
		INSERT_WORD64(sum.hi, hibits);
	}
	return scalbn(sum.hi, scale);
/*
 * Compute a*b exactly, returning the exact result in a struct dd. We assume
 * that both a and b are normalized, so no underflow or overflow will occur.
 * The current rounding mode must be round-to-nearest.
 */
static inline struct dd dd_mul(double a, double b)
{
	static const double split = 0x1p27 + 1.0;
	struct dd ret;
	double ha, hb, la, lb, p, q;
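	/*
	 * Hedged reconstruction of the elided splitting step: Dekker's split
	 * of each factor into halves of at most 26 significant bits using the
	 * constant above, followed by the exact high product. The assignment
	 * ret.hi = p + q (also elided) precedes the ret.lo statement below.
	 */
	p = a * split;
	ha = a - p;
	ha += p;
	la = a - ha;

	p = b * split;
	hb = b - p;
	hb += p;
	lb = b - hb;

	p = ha * hb;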
	q = ha * lb + la * hb;
	ret.lo = p - ret.hi + q + la * lb;
/*
 * Fused multiply-add: Compute x * y + z with a single rounding error.
 *
 * We use scaling to avoid overflow/underflow, along with the
 * canonical precision-doubling technique adapted from:
 *
 * Dekker, T. A Floating-Point Technique for Extending the
 * Available Precision. Numer. Math. 18, 224-242 (1971).
 *
 * This algorithm is sensitive to the rounding precision. FPUs such
 * as the i387 must be set in double-precision mode if variables are
 * to be stored in FP registers in order to avoid incorrect results.
 * This is the default on FreeBSD, but not on many other systems.
 *
 * Hardware instructions should be used on architectures that support it,
 * since this implementation will likely be several times slower.
 */
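/*
 * Editorial illustration (an addition, not part of the original source): a
 * correctly rounded fma() recovers the rounding error of a product exactly,
 * which is the same hi/lo decomposition the precision-doubling code below
 * builds by hand:
 *
 *	double p = a * b;
 *	double e = fma(a, b, -p);   now a*b == p + e exactly, barring over/underflow
 */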
double fma(double x, double y, double z)
{
	double xs, ys, zs, adj;

	/*
	 * Handle special cases. The order of operations and the particular
	 * return values here are crucial in handling special cases involving
	 * infinities, NaNs, overflows, and signed zeroes correctly.
	 */
	if (!isfinite(x) || !isfinite(y))
		return (x * y + z);
	if (x == 0.0 || y == 0.0)
		return (x * y + z);

	oround = fegetround();
	spread = ex + ey - ez;

	/*
	 * If x * y and z are many orders of magnitude apart, the scaling
	 * will overflow, so we handle these cases specially. Rounding
	 * modes other than FE_TONEAREST are painful.
	 */
	if (spread < -DBL_MANT_DIG) {
		feraiseexcept(FE_INEXACT);
			feraiseexcept(FE_UNDERFLOW);
		default: /* FE_TONEAREST */
			return (z);
			if (x > 0.0 ^ y < 0.0 ^ z < 0.0)
				return (z);
			else
				return (nextafter(z, 0));
			if (x > 0.0 ^ y < 0.0)
				return (z);
			else
				return (nextafter(z, -INFINITY));
			if (x > 0.0 ^ y < 0.0)
				return (nextafter(z, INFINITY));

	if (spread <= DBL_MANT_DIG * 2)
		zs = scalbn(zs, -spread);
	else
		zs = copysign(DBL_MIN, zs);

	fesetround(FE_TONEAREST);

	/*
	 * Basic approach for round-to-nearest:
	 *
	 * (xy.hi, xy.lo) = x * y (exact)
	 * (r.hi, r.lo) = xy.hi + z (exact)
	 * adj = xy.lo + r.lo (inexact; low bit is sticky)
	 * result = r.hi + adj (correctly rounded)
	 */
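	/*
	 * The exact-product step of the outline above is elided in this
	 * excerpt; presumably something like xy = dd_mul(xs, ys), with xy a
	 * struct dd local, precedes the exact add below.
	 */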
	r = dd_add(xy.hi, zs);

		/*
		 * When the addends cancel to 0, ensure that the result has
		 * the correct sign.
		 */
		volatile double vzs = zs; /* XXX gcc CSE bug workaround */
		return xy.hi + vzs + scalbn(xy.lo, spread);

	if (oround != FE_TONEAREST) {
		/*
		 * There is no need to worry about double rounding in directed
		 * rounding modes.
		 */
		return scalbn(r.hi + adj, spread);
	}

	adj = add_adjusted(r.lo, xy.lo);
	if (spread + ilogb(r.hi) > -1023)
		return scalbn(r.hi + adj, spread);
	else
		return add_and_denormalize(r.hi, adj, spread);
}