#include <stdint.h>
#include <float.h>
#include <math.h>
#include "atomic.h"

static inline uint64_t asuint64(double x)
{
	union {double f; uint64_t i;} u = {x};
	return u.i;
}

static inline double asdouble(uint64_t x)
{
	union {uint64_t i; double f;} u = {x};
	return u.f;
}

struct num { uint64_t m; int e; int sign; };

/* unpack a double into sign, mantissa m and exponent e such that the
   value is m * 2^e; subnormals are renormalized with the leading zero
   count so all finite inputs satisfy the same invariants */
static struct num normalize(uint64_t x)
{
	int e = x>>52;
	int sign = e & 1<<11;
	e &= (1<<11)-1;
	x &= (1ull<<52)-1;
	if (!e) {
		int k = a_clz_64(x);
		x <<= k-11;
		e = -k+12;
	}
	x |= 1ull<<52;
	x <<= 1;
	e -= 0x3ff + 52 + 1;
	return (struct num){x,e,sign};
}

/* full 64x64 -> 128 bit multiply via 32x32 -> 64 bit partial products;
   the cross-term sum t2 cannot overflow because the normalized
   mantissas leave the top 10 bits clear, and (t1 > *lo) is the carry
   out of the low-word addition */
static void mul(uint64_t *hi, uint64_t *lo, uint64_t x, uint64_t y)
{
	uint64_t t1,t2,t3;
	uint64_t xlo = (uint32_t)x, xhi = x>>32;
	uint64_t ylo = (uint32_t)y, yhi = y>>32;

	t1 = xlo*ylo;
	t2 = xlo*yhi + xhi*ylo;
	t3 = xhi*yhi;
	*lo = t1 + (t2<<32);
	*hi = t3 + (t2>>32) + (t1 > *lo);
}

/* non-zero if x is +-0, +-inf or nan: doubling drops the sign bit and
   the -1 wraps +-0 around past the infinity threshold */
static int zeroinfnan(uint64_t x)
{
	return 2*x-1 >= 2*asuint64(INFINITY)-1;
}

double fma(double x, double y, double z)
{
	#pragma STDC FENV_ACCESS ON
	uint64_t ix = asuint64(x);
	uint64_t iy = asuint64(y);
	uint64_t iz = asuint64(z);

	if (zeroinfnan(ix) || zeroinfnan(iy))
		return x*y + z;
	if (zeroinfnan(iz)) {
		if (z == 0)
			return x*y + z;
		return z;
	}

	/* normalize so top 10 bits and last bit are 0 */
	struct num nx, ny, nz;
	nx = normalize(ix);
	ny = normalize(iy);
	nz = normalize(iz);

	/* mul: r = x*y */
	uint64_t rhi, rlo, zhi, zlo;
	mul(&rhi, &rlo, nx.m, ny.m);
	/* either top 20 or 21 bits of rhi and last 2 bits of rlo are 0 */

	/* align exponents */
	int e = nx.e + ny.e;
	int d = nz.e - e;
	/* shift bits z<<=kz, r>>=kr, so kz+kr == d, set e = e+kr (== ez-kz) */
	if (d > 0) {
		if (d < 64) {
			zlo = nz.m<<d;
			zhi = nz.m>>64-d;
		} else {
			zlo = 0;
			zhi = nz.m;
			e = nz.e - 64;
			d -= 64;
			if (d == 0) {
			} else if (d < 64) {
				rlo = rhi<<64-d | rlo>>d | !!(rlo<<64-d);
				rhi = rhi>>d;
			} else {
				rlo = 1;
				rhi = 0;
			}
		}
	} else {
		zhi = 0;
		d = -d;
		if (d == 0) {
			zlo = nz.m;
		} else if (d < 64) {
			zlo = nz.m>>d | !!(nz.m<<64-d);
		} else {
			zlo = 1;
		}
	}

	/* add */
	int sign = nx.sign^ny.sign;
	int samesign = !(sign^nz.sign);
	int nonzero = 1;
	if (samesign) {
		/* r += z */
		rlo += zlo;
		rhi += zhi + (rlo < zlo);
	} else {
		/* r -= z */
		uint64_t t = rlo;
		rlo -= zlo;
		rhi = rhi - zhi - (t < rlo);
		if (rhi>>63) {
			rlo = -rlo;
			rhi = -rhi-!!rlo;
			sign = !sign;
		}
		nonzero = !!rhi;
	}

	/* set rhi to top 63bit of the result (last bit is sticky) */
	if (nonzero) {
		e += 64;
		d = a_clz_64(rhi)-1;
		/* note: d > 0 */
		rhi = rhi<<d | rlo>>64-d | !!(rlo<<d);
	} else if (rlo) {
		d = a_clz_64(rlo)-1;
		if (d < 0)
			rhi = rlo>>1 | (rlo&1);
		else
			rhi = rlo<<d;
	} else {
		/* exact +-0 */
		return x*y + z;
	}
	e -= d;

	/* convert to double */
	int64_t i = rhi; /* in [1<<62,(1<<63)-1] */
	if (sign)
		i = -i;
	double r = i; /* in [0x1p62,0x1p63] */

	if (e < -1022-62) {
		/* result is subnormal before rounding */
		if (e == -1022-63) {
			double c = 0x1p63;
			if (sign)
				c = -c;
			if (r == c) {
				/* min normal after rounding, underflow depends
				   on arch behaviour which can be imitated by
				   a double to float conversion */
				float fltmin = 0x0.ffffff8p-63*FLT_MIN*r;
				return DBL_MIN/FLT_MIN*fltmin;
			}
			/* one bit is lost when scaled, add another top bit to
			   only round once at conversion if it is inexact */
			if (rhi << 53) {
				i = rhi>>1 | (rhi&1) | 1ull<<62;
				if (sign)
					i = -i;
				r = i;
				r = 2*r - c; /* remove top bit */

				/* raise underflow portably such that it
				   cannot be optimized away */
				volatile double uflow = DBL_MIN/FLT_MIN;
				uflow *= uflow;
			}
		} else {
			/* only round once when scaled */
			d = 10;
			i = ( rhi>>d | !!(rhi<<64-d) ) << d;
			if (sign)
				i = -i;
			r = i;
		}
	}
	return scalbn(r, e);
}
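
/*
 * Illustrative self-test, not part of the original source: a minimal
 * sketch of why a correctly rounded fma cannot be replaced by a
 * separate multiply and add. The FMA_DEMO guard and the standalone
 * build are assumptions -- compiling this file outside its tree needs
 * a stand-in for the internal "atomic.h" (only a_clz_64 is used).
 */
#ifdef FMA_DEMO
#include <stdio.h>

int main(void)
{
	/* exact product x*y = 1 + 3*2^-53 + 2^-105; rounding the product
	   on its own loses the 2^-105 tie-break and pushes it up to
	   1 + 2^-51, so multiply-then-add rounds twice while fma rounds
	   the full sum exactly once (ties-to-even gives 0x1.8p-52) */
	double x = 1 + 0x1p-52;
	double y = 1 + 0x1p-53;
	double z = -1;

	volatile double p = x*y; /* volatile blocks contraction into fma */
	printf("mul+add: %a\n", p + z);        /* 0x1p-51 */
	printf("fma:     %a\n", fma(x, y, z)); /* 0x1.8p-52 */
	return 0;
}
#endif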