Diffstat (limited to 'lib/mesa/src/compiler/glsl')
-rw-r--r--  lib/mesa/src/compiler/glsl/float64.glsl  1740
-rw-r--r--  lib/mesa/src/compiler/glsl/float64_glsl.h  1743
-rw-r--r--  lib/mesa/src/compiler/glsl/gl_nir_linker.c  4
-rw-r--r--  lib/mesa/src/compiler/glsl/gl_nir_lower_atomics.c  4
-rw-r--r--  lib/mesa/src/compiler/glsl/gl_nir_lower_samplers.c  75
-rw-r--r--  lib/mesa/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c  69
-rw-r--r--  lib/mesa/src/compiler/glsl/glcpp/glcpp-parse.h  5
-rw-r--r--  lib/mesa/src/compiler/glsl/glcpp/meson.build  1
-rw-r--r--  lib/mesa/src/compiler/glsl/ir_array_refcount.cpp  3
-rw-r--r--  lib/mesa/src/compiler/glsl/ir_function_detect_recursion.cpp  3
-rw-r--r--  lib/mesa/src/compiler/glsl/ir_variable_refcount.cpp  3
-rw-r--r--  lib/mesa/src/compiler/glsl/link_functions.cpp  6
-rw-r--r--  lib/mesa/src/compiler/glsl/loop_analysis.cpp  3
-rw-r--r--  lib/mesa/src/compiler/glsl/meson.build  10
-rw-r--r--  lib/mesa/src/compiler/glsl/opt_constant_variable.cpp  3
-rw-r--r--  lib/mesa/src/compiler/glsl/tests/meson.build  18
-rw-r--r--  lib/mesa/src/compiler/glsl/tests/warnings/031-__-in-function-name.vert  22
-rw-r--r--  lib/mesa/src/compiler/glsl/tests/warnings/031-__-in-function-name.vert.expected  3
-rw-r--r--  lib/mesa/src/compiler/glsl/tests/warnings/032-__-in-function-name-pragma-disable.vert  24
-rw-r--r--  lib/mesa/src/compiler/glsl/tests/warnings/032-__-in-function-name-pragma-disable.vert.expected  2
-rw-r--r--  lib/mesa/src/compiler/glsl/xxd.py  111
21 files changed, 3752 insertions, 100 deletions
diff --git a/lib/mesa/src/compiler/glsl/float64.glsl b/lib/mesa/src/compiler/glsl/float64.glsl
new file mode 100644
index 000000000..415dde390
--- /dev/null
+++ b/lib/mesa/src/compiler/glsl/float64.glsl
@@ -0,0 +1,1740 @@
+/*
+ * The implementations contained in this file are heavily based on the
+ * implementations found in the Berkeley SoftFloat library. As such, they are
+ * licensed under the same 3-clause BSD license:
+ *
+ * License for Berkeley SoftFloat Release 3e
+ *
+ * John R. Hauser
+ * 2018 January 20
+ *
+ * The following applies to the whole of SoftFloat Release 3e as well as to
+ * each source file individually.
+ *
+ * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the
+ * University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions, and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions, and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#version 430
+#extension GL_ARB_gpu_shader_int64 : enable
+#extension GL_ARB_shader_bit_encoding : enable
+#extension GL_EXT_shader_integer_mix : enable
+#extension GL_MESA_shader_integer_functions : enable
+
+#pragma warning(off)
+
+/* Software IEEE floating-point rounding mode.
+ * GLSL spec section "4.7.1 Range and Precision":
+ * The rounding mode cannot be set and is undefined.
+ * Here, however, the rounding mode can be chosen at compile time.
+ */
+#define FLOAT_ROUND_NEAREST_EVEN 0
+#define FLOAT_ROUND_TO_ZERO 1
+#define FLOAT_ROUND_DOWN 2
+#define FLOAT_ROUND_UP 3
+#define FLOAT_ROUNDING_MODE FLOAT_ROUND_NEAREST_EVEN
+
+/* Absolute value of a Float64:
+ * Clear the sign bit
+ */
+uint64_t
+__fabs64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ a.y &= 0x7FFFFFFFu;
+ return packUint2x32(a);
+}
+
+/* Returns true if the double-precision floating-point value `a' is a NaN;
+ * otherwise returns false.
+ */
+bool
+__is_nan(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ return (0xFFE00000u <= (a.y<<1)) &&
+ ((a.x != 0u) || ((a.y & 0x000FFFFFu) != 0u));
+}
+
+/* Negate a Float64:
+ * Toggle the sign bit
+ */
+uint64_t
+__fneg64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uint t = a.y;
+
+ t ^= (1u << 31);
+ a.y = mix(t, a.y, __is_nan(__a));
+ return packUint2x32(a);
+}
+
+uint64_t
+__fsign64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 retval;
+ retval.x = 0u;
+ retval.y = mix((a.y & 0x80000000u) | 0x3FF00000u, 0u, (a.y << 1 | a.x) == 0u);
+ return packUint2x32(retval);
+}
+
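+/* Illustrative self-test (editorial sketch, not part of the upstream file;
+ * the __test_* name is hypothetical): the three sign-bit helpers above work
+ * purely on bit patterns, e.g. 0x4000000000000000UL is 2.0 and
+ * 0xBFF0000000000000UL is -1.0.
+ */
+bool
+__test_sign_helpers()
+{
+ return __fabs64(0xC000000000000000UL) == 0x4000000000000000UL && /* |-2.0| */
+ __fneg64(0x4000000000000000UL) == 0xC000000000000000UL && /* -(2.0) */
+ __fsign64(0xC000000000000000UL) == 0xBFF0000000000000UL; /* sign(-2.0) */
+}
+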
+/* Returns the fraction bits of the double-precision floating-point value `a'.*/
+uint
+__extractFloat64FracLo(uint64_t a)
+{
+ return unpackUint2x32(a).x;
+}
+
+uint
+__extractFloat64FracHi(uint64_t a)
+{
+ return unpackUint2x32(a).y & 0x000FFFFFu;
+}
+
+/* Returns the exponent bits of the double-precision floating-point value `a'.*/
+int
+__extractFloat64Exp(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ return int((a.y>>20) & 0x7FFu);
+}
+
+bool
+__feq64_nonnan(uint64_t __a, uint64_t __b)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 b = unpackUint2x32(__b);
+ return (a.x == b.x) &&
+ ((a.y == b.y) || ((a.x == 0u) && (((a.y | b.y)<<1) == 0u)));
+}
+
+/* Returns true if the double-precision floating-point value `a' is equal to the
+ * corresponding value `b', and false otherwise. The comparison is performed
+ * according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+bool
+__feq64(uint64_t a, uint64_t b)
+{
+ if (__is_nan(a) || __is_nan(b))
+ return false;
+
+ return __feq64_nonnan(a, b);
+}
+
+/* Returns true if the double-precision floating-point value `a' is not equal
+ * to the corresponding value `b', and false otherwise. The comparison is
+ * performed according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+bool
+__fne64(uint64_t a, uint64_t b)
+{
+ if (__is_nan(a) || __is_nan(b))
+ return true;
+
+ return !__feq64_nonnan(a, b);
+}
+
+/* Returns the sign bit of the double-precision floating-point value `a'.*/
+uint
+__extractFloat64Sign(uint64_t a)
+{
+ return unpackUint2x32(a).y >> 31;
+}
+
+/* Returns true if the 64-bit value formed by concatenating `a0' and `a1' is less
+ * than the 64-bit value formed by concatenating `b0' and `b1'. Otherwise,
+ * returns false.
+ */
+bool
+lt64(uint a0, uint a1, uint b0, uint b1)
+{
+ return (a0 < b0) || ((a0 == b0) && (a1 < b1));
+}
+
+bool
+__flt64_nonnan(uint64_t __a, uint64_t __b)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 b = unpackUint2x32(__b);
+ uint aSign = __extractFloat64Sign(__a);
+ uint bSign = __extractFloat64Sign(__b);
+ if (aSign != bSign)
+ return (aSign != 0u) && ((((a.y | b.y)<<1) | a.x | b.x) != 0u);
+
+ return mix(lt64(a.y, a.x, b.y, b.x), lt64(b.y, b.x, a.y, a.x), aSign != 0u);
+}
+
+/* Returns true if the double-precision floating-point value `a' is less than
+ * the corresponding value `b', and false otherwise. The comparison is performed
+ * according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+bool
+__flt64(uint64_t a, uint64_t b)
+{
+ if (__is_nan(a) || __is_nan(b))
+ return false;
+
+ return __flt64_nonnan(a, b);
+}
+
+/* Returns true if the double-precision floating-point value `a' is greater
+ * than or equal to the corresponding value `b', and false otherwise. The
+ * comparison is performed according to the IEEE Standard for Floating-Point
+ * Arithmetic.
+ */
+bool
+__fge64(uint64_t a, uint64_t b)
+{
+ if (__is_nan(a) || __is_nan(b))
+ return false;
+
+ return !__flt64_nonnan(a, b);
+}
+
+/* Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit
+ * value formed by concatenating `b0' and `b1'. Addition is modulo 2^64, so
+ * any carry out is lost. The result is broken into two 32-bit pieces which
+ * are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__add64(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ uint z1 = a1 + b1;
+ z1Ptr = z1;
+ z0Ptr = a0 + b0 + uint(z1 < a1);
+}
+
+
+/* Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the
+ * 64-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo
+ * 2^64, so any borrow out (carry out) is lost. The result is broken into two
+ * 32-bit pieces which are stored at the locations pointed to by `z0Ptr' and
+ * `z1Ptr'.
+ */
+void
+__sub64(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ z1Ptr = a1 - b1;
+ z0Ptr = a0 - b0 - uint(a1 < b1);
+}
+
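+/* Worked example (editorial sketch, not part of the upstream file): the
+ * carry out of the low word is detected by the unsigned wrap-around test
+ * `z1 < a1', so adding 1 to 0x00000000_FFFFFFFF carries into the high word
+ * and yields 0x00000001_00000000.
+ */
+void
+__example_add64_carry(out uint hi, out uint lo)
+{
+ __add64(0u, 0xFFFFFFFFu, 0u, 1u, hi, lo); /* hi = 1u, lo = 0u */
+}
+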
+/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
+ * number of bits given in `count'. If any nonzero bits are shifted off, they
+ * are "jammed" into the least significant bit of the result by setting the
+ * least significant bit to 1. The value of `count' can be arbitrarily large;
+ * in particular, if `count' is greater than 64, the result will be either 0
+ * or 1, depending on whether the concatenation of `a0' and `a1' is zero or
+ * nonzero. The result is broken into two 32-bit pieces which are stored at
+ * the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__shift64RightJamming(uint a0,
+ uint a1,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ uint z0;
+ uint z1;
+ int negCount = (-count) & 31;
+
+ z0 = mix(0u, a0, count == 0);
+ z0 = mix(z0, (a0 >> count), count < 32);
+
+ z1 = uint((a0 | a1) != 0u); /* count >= 64 */
+ uint z1_lt64 = (a0>>(count & 31)) | uint(((a0<<negCount) | a1) != 0u);
+ z1 = mix(z1, z1_lt64, count < 64);
+ z1 = mix(z1, (a0 | uint(a1 != 0u)), count == 32);
+ uint z1_lt32 = (a0<<negCount) | (a1>>count) | uint((a1<<negCount) != 0u);
+ z1 = mix(z1, z1_lt32, count < 32);
+ z1 = mix(z1, a1, count == 0);
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
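+/* Worked example (editorial sketch, not part of the upstream file): jamming
+ * keeps the "stickiness" of discarded bits so a later rounding step still
+ * sees that something nonzero was shifted out. A plain right shift of
+ * 0x00000000_00000001 by one bit would give zero; the jammed variant gives 1.
+ */
+void
+__example_jamming(out uint hi, out uint lo)
+{
+ __shift64RightJamming(0u, 1u, 1, hi, lo); /* hi = 0u, lo = 1u */
+}
+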
+/* Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right
+ * by 32 _plus_ the number of bits given in `count'. The shifted result is
+ * at most 64 nonzero bits; these are broken into two 32-bit pieces which are
+ * stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted
+ * off form a third 32-bit result as follows: The _last_ bit shifted off is
+ * the most-significant bit of the extra result, and the other 31 bits of the
+ * extra result are all zero if and only if _all_but_the_last_ bits shifted off
+ * were all zero. This extra result is stored in the location pointed to by
+ * `z2Ptr'. The value of `count' can be arbitrarily large.
+ * (This routine makes more sense if `a0', `a1', and `a2' are considered
+ * to form a fixed-point value with binary point between `a1' and `a2'. This
+ * fixed-point value is shifted right by the number of bits given in `count',
+ * and the integer part of the result is returned at the locations pointed to
+ * by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly
+ * corrupted as described above, and is returned at the location pointed to by
+ * `z2Ptr'.)
+ */
+void
+__shift64ExtraRightJamming(uint a0, uint a1, uint a2,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr)
+{
+ uint z0 = 0u;
+ uint z1;
+ uint z2;
+ int negCount = (-count) & 31;
+
+ z2 = mix(uint(a0 != 0u), a0, count == 64);
+ z2 = mix(z2, a0 << negCount, count < 64);
+ z2 = mix(z2, a1 << negCount, count < 32);
+
+ z1 = mix(0u, (a0 >> (count & 31)), count < 64);
+ z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
+
+ a2 = mix(a2 | a1, a2, count < 32);
+ z0 = mix(z0, a0 >> count, count < 32);
+ z2 |= uint(a2 != 0u);
+
+ z0 = mix(z0, 0u, (count == 32));
+ z1 = mix(z1, a0, (count == 32));
+ z2 = mix(z2, a1, (count == 32));
+ z0 = mix(z0, a0, (count == 0));
+ z1 = mix(z1, a1, (count == 0));
+ z2 = mix(z2, a2, (count == 0));
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
+/* Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the
+ * number of bits given in `count'. Any bits shifted off are lost. The value
+ * of `count' must be less than 32. The result is broken into two 32-bit
+ * pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__shortShift64Left(uint a0, uint a1,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ z1Ptr = a1<<count;
+ z0Ptr = mix((a0 << count | (a1 >> ((-count) & 31))), a0, count == 0);
+}
+
+/* Packs the sign `zSign', the exponent `zExp', and the significand formed by
+ * the concatenation of `zFrac0' and `zFrac1' into a double-precision floating-
+ * point value, returning the result. After being shifted into the proper
+ * positions, the three fields `zSign', `zExp', and `zFrac0' are simply added
+ * together to form the most significant 32 bits of the result. This means
+ * that any integer portion of `zFrac0' will be added into the exponent. Since
+ * a properly normalized significand will have an integer portion equal to 1,
+ * the `zExp' input should be 1 less than the desired result exponent whenever
+ * `zFrac0' and `zFrac1' concatenated form a complete, normalized significand.
+ */
+uint64_t
+__packFloat64(uint zSign, int zExp, uint zFrac0, uint zFrac1)
+{
+ uvec2 z;
+
+ z.y = (zSign << 31) + (uint(zExp) << 20) + zFrac0;
+ z.x = zFrac1;
+ return packUint2x32(z);
+}
+
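+/* Illustrative check (editorial sketch, not part of the upstream file):
+ * packing sign 0, biased exponent 0x3FF (1023) and an all-zero fraction
+ * produces 0x3FF0000000000000UL, the bit pattern of 1.0.
+ */
+uint64_t
+__example_pack_one()
+{
+ return __packFloat64(0u, 0x3FF, 0u, 0u);
+}
+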
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and extended significand formed by the concatenation of `zFrac0', `zFrac1',
+ * and `zFrac2', and returns the proper double-precision floating-point value
+ * corresponding to the abstract input. Ordinarily, the abstract value is
+ * simply rounded and packed into the double-precision format, with the inexact
+ * exception raised if the abstract input cannot be represented exactly.
+ * However, if the abstract value is too large, the overflow and inexact
+ * exceptions are raised and an infinity or maximal finite value is returned.
+ * If the abstract value is too small, the input value is rounded to a
+ * subnormal number, and the underflow and inexact exceptions are raised if the
+ * abstract input cannot be represented exactly as a subnormal double-precision
+ * floating-point number.
+ * The input significand must be normalized or smaller. If the input
+ * significand is not normalized, `zExp' must be 0; in that case, the result
+ * returned is a subnormal number, and it must not require rounding. In the
+ * usual case that the input significand is normalized, `zExp' must be 1 less
+ * than the "true" floating-point exponent. The handling of underflow and
+ * overflow follows the IEEE Standard for Floating-Point Arithmetic.
+ */
+uint64_t
+__roundAndPackFloat64(uint zSign,
+ int zExp,
+ uint zFrac0,
+ uint zFrac1,
+ uint zFrac2)
+{
+ bool roundNearestEven;
+ bool increment;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+ increment = int(zFrac2) < 0;
+ if (!roundNearestEven) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
+ increment = false;
+ } else {
+ if (zSign != 0u) {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
+ (zFrac2 != 0u);
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+ }
+ if (0x7FD <= zExp) {
+ if ((0x7FD < zExp) ||
+ ((zExp == 0x7FD) &&
+ (0x001FFFFFu == zFrac0 && 0xFFFFFFFFu == zFrac1) &&
+ increment)) {
+ if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) ||
+ ((zSign != 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)) ||
+ ((zSign == 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN))) {
+ return __packFloat64(zSign, 0x7FE, 0x000FFFFFu, 0xFFFFFFFFu);
+ }
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ if (zExp < 0) {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, -zExp, zFrac0, zFrac1, zFrac2);
+ zExp = 0;
+ if (roundNearestEven) {
+ increment = int(zFrac2) < 0; /* sign bit of zFrac2 */
+ } else {
+ if (zSign != 0u) {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
+ (zFrac2 != 0u);
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+ }
+ }
+ if (increment) {
+ __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
+ zFrac1 &= ~((zFrac2 + uint(zFrac2 == 0u)) & uint(roundNearestEven));
+ } else {
+ zExp = mix(zExp, 0, (zFrac0 | zFrac1) == 0u);
+ }
+ return __packFloat64(zSign, zExp, zFrac0, zFrac1);
+}
+
+uint64_t
+__roundAndPackUInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)
+{
+ bool roundNearestEven;
+ bool increment;
+ uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+
+ /* Round to nearest: round up when the discarded fraction bits are at
+ * least half a ULP, i.e. when the sign bit of zFrac2 is set.
+ */
+ increment = int(zFrac2) < 0;
+
+ if (!roundNearestEven) {
+ if (zSign != 0u) {
+ if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) && (zFrac2 != 0u)) {
+ increment = false;
+ }
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+
+ if (increment) {
+ __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
+ if ((zFrac0 | zFrac1) != 0u)
+ zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);
+ }
+ return mix(packUint2x32(uvec2(zFrac1, zFrac0)), default_nan,
+ (zSign != 0u && (zFrac0 | zFrac1) != 0u));
+}
+
+int64_t
+__roundAndPackInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)
+{
+ bool roundNearestEven;
+ bool increment;
+ int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
+ int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+
+ /* Round to nearest: round up when the discarded fraction bits are at
+ * least half a ULP, i.e. when the sign bit of zFrac2 is set.
+ */
+ increment = int(zFrac2) < 0;
+
+ if (!roundNearestEven) {
+ if (zSign != 0u) {
+ increment = ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
+ (zFrac2 != 0u));
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+
+ if (increment) {
+ __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
+ if ((zFrac0 | zFrac1) != 0u)
+ zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);
+ }
+
+ int64_t absZ = mix(int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),
+ -int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),
+ (zSign != 0u));
+ int64_t nan = mix(default_PosNaN, default_NegNaN, bool(zSign));
+ return mix(absZ, nan, bool(zSign ^ uint(absZ < 0)) && bool(absZ));
+}
+
+/* Returns the number of leading 0 bits before the most-significant 1 bit of
+ * `a'. If `a' is zero, 32 is returned.
+ */
+int
+__countLeadingZeros32(uint a)
+{
+ int shiftCount;
+ shiftCount = mix(31 - findMSB(a), 32, a == 0u);
+ return shiftCount;
+}
+
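+/* Quick self-check (editorial sketch, not part of the upstream file):
+ * findMSB(1u) is 0, so a lone trailing 1 has 31 leading zeros, and the
+ * mix() fallback maps an input of zero to 32.
+ */
+bool
+__test_clz32()
+{
+ return __countLeadingZeros32(1u) == 31 &&
+ __countLeadingZeros32(0x80000000u) == 0 &&
+ __countLeadingZeros32(0u) == 32;
+}
+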
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and significand formed by the concatenation of `zSig0' and `zSig1', and
+ * returns the proper double-precision floating-point value corresponding
+ * to the abstract input. This routine is just like `__roundAndPackFloat64'
+ * except that the input significand has fewer bits and does not have to be
+ * normalized. In all cases, `zExp' must be 1 less than the "true" floating-
+ * point exponent.
+ */
+uint64_t
+__normalizeRoundAndPackFloat64(uint zSign,
+ int zExp,
+ uint zFrac0,
+ uint zFrac1)
+{
+ int shiftCount;
+ uint zFrac2;
+
+ if (zFrac0 == 0u) {
+ zExp -= 32;
+ zFrac0 = zFrac1;
+ zFrac1 = 0u;
+ }
+
+ shiftCount = __countLeadingZeros32(zFrac0) - 11;
+ if (0 <= shiftCount) {
+ zFrac2 = 0u;
+ __shortShift64Left(zFrac0, zFrac1, shiftCount, zFrac0, zFrac1);
+ } else {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, 0u, -shiftCount, zFrac0, zFrac1, zFrac2);
+ }
+ zExp -= shiftCount;
+ return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
+}
+
+/* Takes two double-precision floating-point values `a' and `b', one of which
+ * is a NaN, and returns the appropriate NaN result.
+ */
+uint64_t
+__propagateFloat64NaN(uint64_t __a, uint64_t __b)
+{
+ bool aIsNaN = __is_nan(__a);
+ bool bIsNaN = __is_nan(__b);
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 b = unpackUint2x32(__b);
+ a.y |= 0x00080000u;
+ b.y |= 0x00080000u;
+
+ return packUint2x32(mix(b, mix(a, b, bvec2(bIsNaN, bIsNaN)), bvec2(aIsNaN, aIsNaN)));
+}
+
+/* Returns the result of adding the double-precision floating-point values
+ * `a' and `b'. The operation is performed according to the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+uint64_t
+__fadd64(uint64_t a, uint64_t b)
+{
+ uint aSign = __extractFloat64Sign(a);
+ uint bSign = __extractFloat64Sign(b);
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ uint bFracLo = __extractFloat64FracLo(b);
+ uint bFracHi = __extractFloat64FracHi(b);
+ int aExp = __extractFloat64Exp(a);
+ int bExp = __extractFloat64Exp(b);
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ int expDiff = aExp - bExp;
+ if (aSign == bSign) {
+ uint zFrac2 = 0u;
+ int zExp;
+ bool orig_exp_diff_is_zero = (expDiff == 0);
+
+ if (orig_exp_diff_is_zero) {
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;
+ return mix(a, __propagateFloat64NaN(a, b), propagate);
+ }
+ __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ if (aExp == 0)
+ return __packFloat64(aSign, 0, zFrac0, zFrac1);
+ zFrac2 = 0u;
+ zFrac0 |= 0x00200000u;
+ zExp = aExp;
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ } else if (0 < expDiff) {
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo) != 0u;
+ return mix(a, __propagateFloat64NaN(a, b), propagate);
+ }
+
+ expDiff = mix(expDiff, expDiff - 1, bExp == 0);
+ bFracHi = mix(bFracHi | 0x00100000u, bFracHi, bExp == 0);
+ __shift64ExtraRightJamming(
+ bFracHi, bFracLo, 0u, expDiff, bFracHi, bFracLo, zFrac2);
+ zExp = aExp;
+ } else if (expDiff < 0) {
+ if (bExp == 0x7FF) {
+ bool propagate = (bFracHi | bFracLo) != 0u;
+ return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
+ }
+ expDiff = mix(expDiff, expDiff + 1, aExp == 0);
+ aFracHi = mix(aFracHi | 0x00100000u, aFracHi, aExp == 0);
+ __shift64ExtraRightJamming(
+ aFracHi, aFracLo, 0u, -expDiff, aFracHi, aFracLo, zFrac2);
+ zExp = bExp;
+ }
+ if (!orig_exp_diff_is_zero) {
+ aFracHi |= 0x00100000u;
+ __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ --zExp;
+ if (!(zFrac0 < 0x00200000u)) {
+ __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ ++zExp;
+ }
+ }
+ return __roundAndPackFloat64(aSign, zExp, zFrac0, zFrac1, zFrac2);
+
+ } else {
+ int zExp;
+
+ __shortShift64Left(aFracHi, aFracLo, 10, aFracHi, aFracLo);
+ __shortShift64Left(bFracHi, bFracLo, 10, bFracHi, bFracLo);
+ if (0 < expDiff) {
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo) != 0u;
+ return mix(a, __propagateFloat64NaN(a, b), propagate);
+ }
+ expDiff = mix(expDiff, expDiff - 1, bExp == 0);
+ bFracHi = mix(bFracHi | 0x40000000u, bFracHi, bExp == 0);
+ __shift64RightJamming(bFracHi, bFracLo, expDiff, bFracHi, bFracLo);
+ aFracHi |= 0x40000000u;
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ zExp = aExp;
+ --zExp;
+ return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
+ }
+ if (expDiff < 0) {
+ if (bExp == 0x7FF) {
+ bool propagate = (bFracHi | bFracLo) != 0u;
+ return mix(__packFloat64(aSign ^ 1u, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
+ }
+ expDiff = mix(expDiff, expDiff + 1, aExp == 0);
+ aFracHi = mix(aFracHi | 0x40000000u, aFracHi, aExp == 0);
+ __shift64RightJamming(aFracHi, aFracLo, -expDiff, aFracHi, aFracLo);
+ bFracHi |= 0x40000000u;
+ __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
+ zExp = bExp;
+ aSign ^= 1u;
+ --zExp;
+ return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
+ }
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;
+ return mix(0xFFFFFFFFFFFFFFFFUL, __propagateFloat64NaN(a, b), propagate);
+ }
+ bExp = mix(bExp, 1, aExp == 0);
+ aExp = mix(aExp, 1, aExp == 0);
+ bool zexp_normal = false;
+ bool blta = true;
+ if (bFracHi < aFracHi) {
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ zexp_normal = true;
+ }
+ else if (aFracHi < bFracHi) {
+ __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
+ blta = false;
+ zexp_normal = true;
+ }
+ else if (bFracLo < aFracLo) {
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ zexp_normal = true;
+ }
+ else if (aFracLo < bFracLo) {
+ __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
+ blta = false;
+ zexp_normal = true;
+ }
+ zExp = mix(bExp, aExp, blta);
+ aSign = mix(aSign ^ 1u, aSign, blta);
+ uint64_t retval_0 = __packFloat64(uint(FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN), 0, 0u, 0u);
+ uint64_t retval_1 = __normalizeRoundAndPackFloat64(aSign, zExp - 11, zFrac0, zFrac1);
+ return mix(retval_0, retval_1, zexp_normal);
+ }
+}
+
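+/* End-to-end check (editorial sketch, not part of the upstream file):
+ * adding the bit patterns of 1.0 and 1.0 takes the equal-exponent path
+ * above and yields the bit pattern of 2.0.
+ */
+bool
+__test_fadd64()
+{
+ return __fadd64(0x3FF0000000000000UL, 0x3FF0000000000000UL) ==
+ 0x4000000000000000UL;
+}
+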
+/* Multiplies `a' by `b' to obtain a 64-bit product. The product is broken
+ * into two 32-bit pieces which are stored at the locations pointed to by
+ * `z0Ptr' and `z1Ptr'.
+ */
+void
+__mul32To64(uint a, uint b, out uint z0Ptr, out uint z1Ptr)
+{
+ uint aLow = a & 0x0000FFFFu;
+ uint aHigh = a>>16;
+ uint bLow = b & 0x0000FFFFu;
+ uint bHigh = b>>16;
+ uint z1 = aLow * bLow;
+ uint zMiddleA = aLow * bHigh;
+ uint zMiddleB = aHigh * bLow;
+ uint z0 = aHigh * bHigh;
+ zMiddleA += zMiddleB;
+ z0 += ((uint(zMiddleA < zMiddleB)) << 16) + (zMiddleA >> 16);
+ zMiddleA <<= 16;
+ z1 += zMiddleA;
+ z0 += uint(z1 < zMiddleA);
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
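+/* Worked example (editorial sketch, not part of the upstream file): the
+ * 16-bit partial-product scheme reproduces the full 64-bit product, e.g.
+ * 0xFFFFFFFF * 0xFFFFFFFF = 0xFFFFFFFE_00000001.
+ */
+void
+__example_mul32To64(out uint hi, out uint lo)
+{
+ __mul32To64(0xFFFFFFFFu, 0xFFFFFFFFu, hi, lo); /* hi = 0xFFFFFFFEu, lo = 1u */
+}
+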
+/* Multiplies the 64-bit value formed by concatenating `a0' and `a1' to the
+ * 64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit
+ * product. The product is broken into four 32-bit pieces which are stored at
+ * the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
+ */
+void
+__mul64To128(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr,
+ out uint z3Ptr)
+{
+ uint z0 = 0u;
+ uint z1 = 0u;
+ uint z2 = 0u;
+ uint z3 = 0u;
+ uint more1 = 0u;
+ uint more2 = 0u;
+
+ __mul32To64(a1, b1, z2, z3);
+ __mul32To64(a1, b0, z1, more2);
+ __add64(z1, more2, 0u, z2, z1, z2);
+ __mul32To64(a0, b0, z0, more1);
+ __add64(z0, more1, 0u, z1, z0, z1);
+ __mul32To64(a0, b1, more1, more2);
+ __add64(more1, more2, 0u, z2, more1, z2);
+ __add64(z0, z1, 0u, more1, z0, z1);
+ z3Ptr = z3;
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
+/* Normalizes the subnormal double-precision floating-point value represented
+ * by the denormalized significand formed by the concatenation of `aFrac0' and
+ * `aFrac1'. The normalized exponent is stored at the location pointed to by
+ * `zExpPtr'. The most significant 21 bits of the normalized significand are
+ * stored at the location pointed to by `zFrac0Ptr', and the least significant
+ * 32 bits of the normalized significand are stored at the location pointed to
+ * by `zFrac1Ptr'.
+ */
+void
+__normalizeFloat64Subnormal(uint aFrac0, uint aFrac1,
+ out int zExpPtr,
+ out uint zFrac0Ptr,
+ out uint zFrac1Ptr)
+{
+ int shiftCount;
+ uint temp_zfrac0, temp_zfrac1;
+ shiftCount = __countLeadingZeros32(mix(aFrac0, aFrac1, aFrac0 == 0u)) - 11;
+ zExpPtr = mix(1 - shiftCount, -shiftCount - 31, aFrac0 == 0u);
+
+ temp_zfrac0 = mix(aFrac1<<shiftCount, aFrac1>>(-shiftCount), shiftCount < 0);
+ temp_zfrac1 = mix(0u, aFrac1<<(shiftCount & 31), shiftCount < 0);
+
+ __shortShift64Left(aFrac0, aFrac1, shiftCount, zFrac0Ptr, zFrac1Ptr);
+
+ zFrac0Ptr = mix(zFrac0Ptr, temp_zfrac0, aFrac0 == 0);
+ zFrac1Ptr = mix(zFrac1Ptr, temp_zfrac1, aFrac0 == 0);
+}
+
+/* Returns the result of multiplying the double-precision floating-point values
+ * `a' and `b'. The operation is performed according to the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+uint64_t
+__fmul64(uint64_t a, uint64_t b)
+{
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ uint zFrac3 = 0u;
+ int zExp;
+
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ uint bFracLo = __extractFloat64FracLo(b);
+ uint bFracHi = __extractFloat64FracHi(b);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ int bExp = __extractFloat64Exp(b);
+ uint bSign = __extractFloat64Sign(b);
+ uint zSign = aSign ^ bSign;
+ if (aExp == 0x7FF) {
+ if (((aFracHi | aFracLo) != 0u) ||
+ ((bExp == 0x7FF) && ((bFracHi | bFracLo) != 0u))) {
+ return __propagateFloat64NaN(a, b);
+ }
+ if ((uint(bExp) | bFracHi | bFracLo) == 0u)
+ return 0xFFFFFFFFFFFFFFFFUL;
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ if (bExp == 0x7FF) {
+ if ((bFracHi | bFracLo) != 0u)
+ return __propagateFloat64NaN(a, b);
+ if ((uint(aExp) | aFracHi | aFracLo) == 0u)
+ return 0xFFFFFFFFFFFFFFFFUL;
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ if (aExp == 0) {
+ if ((aFracHi | aFracLo) == 0u)
+ return __packFloat64(zSign, 0, 0u, 0u);
+ __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
+ }
+ if (bExp == 0) {
+ if ((bFracHi | bFracLo) == 0u)
+ return __packFloat64(zSign, 0, 0u, 0u);
+ __normalizeFloat64Subnormal(bFracHi, bFracLo, bExp, bFracHi, bFracLo);
+ }
+ zExp = aExp + bExp - 0x400;
+ aFracHi |= 0x00100000u;
+ __shortShift64Left(bFracHi, bFracLo, 12, bFracHi, bFracLo);
+ __mul64To128(
+ aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1, zFrac2, zFrac3);
+ __add64(zFrac0, zFrac1, aFracHi, aFracLo, zFrac0, zFrac1);
+ zFrac2 |= uint(zFrac3 != 0u);
+ if (0x00200000u <= zFrac0) {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ ++zExp;
+ }
+ return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
+}
+
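+/* End-to-end check (editorial sketch, not part of the upstream file):
+ * 2.0 * 3.0 exercises the multiply/normalize/round path above and yields
+ * 0x4018000000000000UL, the bit pattern of 6.0.
+ */
+bool
+__test_fmul64()
+{
+ return __fmul64(0x4000000000000000UL, 0x4008000000000000UL) ==
+ 0x4018000000000000UL;
+}
+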
+uint64_t
+__ffma64(uint64_t a, uint64_t b, uint64_t c)
+{
+ return __fadd64(__fmul64(a, b), c);
+}
+
+/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
+ * number of bits given in `count'. Any bits shifted off are lost. The value
+ * of `count' can be arbitrarily large; in particular, if `count' is greater
+ * than 64, the result will be 0. The result is broken into two 32-bit pieces
+ * which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__shift64Right(uint a0, uint a1,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ uint z0;
+ uint z1;
+ int negCount = (-count) & 31;
+
+ z0 = 0u;
+ z0 = mix(z0, (a0 >> count), count < 32);
+ z0 = mix(z0, a0, count == 0);
+
+ z1 = mix(0u, (a0 >> (count & 31)), count < 64);
+ z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
+ z1 = mix(z1, a0, count == 0);
+
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
+/* Returns the result of converting the double-precision floating-point value
+ * `a' to the unsigned integer format. The conversion is performed according
+ * to the IEEE Standard for Floating-Point Arithmetic.
+ */
+uint
+__fp64_to_uint(uint64_t a)
+{
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+
+ if ((aExp == 0x7FF) && ((aFracHi | aFracLo) != 0u))
+ return 0xFFFFFFFFu;
+
+ aFracHi |= mix(0u, 0x00100000u, aExp != 0);
+
+ int shiftDist = 0x427 - aExp;
+ if (0 < shiftDist)
+ __shift64RightJamming(aFracHi, aFracLo, shiftDist, aFracHi, aFracLo);
+
+ if ((aFracHi & 0xFFFFF000u) != 0u)
+ return mix(~0u, 0u, (aSign != 0u));
+
+ uint z = 0u;
+ uint zero = 0u;
+ __shift64Right(aFracHi, aFracLo, 12, zero, z);
+
+ uint expt = mix(~0u, 0u, (aSign != 0u));
+
+ return mix(z, expt, (aSign != 0u) && (z != 0u));
+}
+
+uint64_t
+__uint_to_fp64(uint a)
+{
+ if (a == 0u)
+ return 0ul;
+
+ int shiftDist = __countLeadingZeros32(a) + 21;
+
+ uint aHigh = 0u;
+ uint aLow = 0u;
+ int negCount = (-shiftDist) & 31;
+
+ aHigh = mix(0u, a << (shiftDist - 32), shiftDist < 64);
+ aLow = 0u;
+ aHigh = mix(aHigh, 0u, shiftDist == 0);
+ aLow = mix(aLow, a, shiftDist == 0);
+ aHigh = mix(aHigh, a >> negCount, shiftDist < 32);
+ aLow = mix(aLow, a << shiftDist, shiftDist < 32);
+
+ return __packFloat64(0u, 0x432 - shiftDist, aHigh, aLow);
+}
+
+uint64_t
+__uint64_to_fp64(uint64_t a)
+{
+ if (a == 0u)
+ return 0ul;
+
+ uvec2 aFrac = unpackUint2x32(a);
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+
+ if ((aFracHi & 0x80000000u) != 0u) {
+ __shift64RightJamming(aFracHi, aFracLo, 1, aFracHi, aFracLo);
+ return __roundAndPackFloat64(0, 0x433, aFracHi, aFracLo, 0u);
+ } else {
+ return __normalizeRoundAndPackFloat64(0, 0x432, aFrac.y, aFrac.x);
+ }
+}
+
+uint64_t
+__fp64_to_uint64(uint64_t a)
+{
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ uint zFrac2 = 0u;
+ uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
+
+ aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);
+ int shiftCount = 0x433 - aExp;
+
+ if (shiftCount <= 0) {
+ if (shiftCount < -11 && aExp == 0x7FF) {
+ if ((aFracHi | aFracLo) != 0u)
+ return __propagateFloat64NaN(a, a);
+ return mix(default_nan, a, aSign == 0u);
+ }
+ __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);
+ } else {
+ __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,
+ aFracHi, aFracLo, zFrac2);
+ }
+ return __roundAndPackUInt64(aSign, aFracHi, aFracLo, zFrac2);
+}
+
+int64_t
+__fp64_to_int64(uint64_t a)
+{
+ uint zFrac2 = 0u;
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
+ int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;
+
+ aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);
+ int shiftCount = 0x433 - aExp;
+
+ if (shiftCount <= 0) {
+ if (shiftCount < -11 && aExp == 0x7FF) {
+ if ((aFracHi | aFracLo) != 0u)
+ return default_NegNaN;
+ return mix(default_NegNaN, default_PosNaN, aSign == 0u);
+ }
+ __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);
+ } else {
+ __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,
+ aFracHi, aFracLo, zFrac2);
+ }
+
+ return __roundAndPackInt64(aSign, aFracHi, aFracLo, zFrac2);
+}
+
+uint64_t
+__fp32_to_uint64(float f)
+{
+ uint a = floatBitsToUint(f);
+ uint aFrac = a & 0x007FFFFFu;
+ int aExp = int((a>>23) & 0xFFu);
+ uint aSign = a>>31;
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
+ int shiftCount = 0xBE - aExp;
+
+ if (shiftCount < 0) {
+ if (aExp == 0xFF)
+ return default_nan;
+ }
+
+ aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);
+ __shortShift64Left(aFrac, 0, 40, zFrac0, zFrac1);
+
+ if (shiftCount != 0) {
+ __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,
+ zFrac0, zFrac1, zFrac2);
+ }
+
+ return __roundAndPackUInt64(aSign, zFrac0, zFrac1, zFrac2);
+}
+
+int64_t
+__fp32_to_int64(float f)
+{
+ uint a = floatBitsToUint(f);
+ uint aFrac = a & 0x007FFFFFu;
+ int aExp = int((a>>23) & 0xFFu);
+ uint aSign = a>>31;
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
+ int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;
+ int shiftCount = 0xBE - aExp;
+
+ if (shiftCount < 0) {
+ if (aExp == 0xFF && aFrac != 0u)
+ return default_NegNaN;
+ return mix(default_NegNaN, default_PosNaN, aSign == 0u);
+ }
+
+ aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);
+ __shortShift64Left(aFrac, 0, 40, zFrac0, zFrac1);
+
+ if (shiftCount != 0) {
+ __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,
+ zFrac0, zFrac1, zFrac2);
+ }
+
+ return __roundAndPackInt64(aSign, zFrac0, zFrac1, zFrac2);
+}
+
+uint64_t
+__int64_to_fp64(int64_t a)
+{
+ if (a == 0)
+ return 0ul;
+
+ uint64_t absA = mix(uint64_t(a), uint64_t(-a), a < 0);
+ uint aFracHi = __extractFloat64FracHi(absA);
+ uvec2 aFrac = unpackUint2x32(absA);
+ uint zSign = uint(a < 0);
+
+ if ((aFracHi & 0x80000000u) != 0u) {
+ return mix(0ul, __packFloat64(1, 0x434, 0u, 0u), a < 0);
+ }
+
+ return __normalizeRoundAndPackFloat64(zSign, 0x432, aFrac.y, aFrac.x);
+}
+
+/* Returns the result of converting the double-precision floating-point value
+ * `a' to the 32-bit two's complement integer format. The conversion is
+ * performed according to the IEEE Standard for Floating-Point Arithmetic---
+ * which means in particular that the conversion is rounded according to the
+ * current rounding mode. If `a' is a NaN, the largest positive integer is
+ * returned. Otherwise, if the conversion overflows, the largest integer with
+ * the same sign as `a' is returned.
+ */
+int
+__fp64_to_int(uint64_t a)
+{
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+
+ uint absZ = 0u;
+ uint aFracExtra = 0u;
+ int shiftCount = aExp - 0x413;
+
+ if (0 <= shiftCount) {
+ if (0x41E < aExp) {
+ if ((aExp == 0x7FF) && bool(aFracHi | aFracLo))
+ aSign = 0u;
+ return mix(0x7FFFFFFF, 0x80000000, bool(aSign));
+ }
+ __shortShift64Left(aFracHi | 0x00100000u, aFracLo, shiftCount, absZ, aFracExtra);
+ } else {
+ if (aExp < 0x3FF)
+ return 0;
+
+ aFracHi |= 0x00100000u;
+ aFracExtra = (aFracHi << (shiftCount & 31)) | aFracLo;
+ absZ = aFracHi >> (-shiftCount);
+ }
+
+ int z = mix(int(absZ), -int(absZ), (aSign != 0u));
+ int nan = mix(0x7FFFFFFF, 0x80000000, bool(aSign));
+ return mix(z, nan, bool(aSign ^ uint(z < 0)) && bool(z));
+}
+
+/* Returns the result of converting the 32-bit two's complement integer `a'
+ * to the double-precision floating-point format. The conversion is performed
+ * according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+uint64_t
+__int_to_fp64(int a)
+{
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ if (a == 0)
+ return __packFloat64(0u, 0, 0u, 0u);
+ uint zSign = uint(a < 0);
+ uint absA = mix(uint(a), uint(-a), a < 0);
+ int shiftCount = __countLeadingZeros32(absA) - 11;
+ if (0 <= shiftCount) {
+ zFrac0 = absA << shiftCount;
+ zFrac1 = 0u;
+ } else {
+ __shift64Right(absA, 0u, -shiftCount, zFrac0, zFrac1);
+ }
+ return __packFloat64(zSign, 0x412 - shiftCount, zFrac0, zFrac1);
+}
+
+bool
+__fp64_to_bool(uint64_t a)
+{
+ return !__feq64_nonnan(__fabs64(a), 0ul);
+}
+
+uint64_t
+__bool_to_fp64(bool a)
+{
+ return __int_to_fp64(int(a));
+}
+
+/* Packs the sign `zSign', exponent `zExp', and significand `zFrac' into a
+ * single-precision floating-point value, returning the result. After being
+ * shifted into the proper positions, the three fields are simply added
+ * together to form the result. This means that any integer portion of `zSig'
+ * will be added into the exponent. Since a properly normalized significand
+ * will have an integer portion equal to 1, the `zExp' input should be 1 less
+ * than the desired result exponent whenever `zFrac' is a complete, normalized
+ * significand.
+ */
+float
+__packFloat32(uint zSign, int zExp, uint zFrac)
+{
+ return uintBitsToFloat((zSign<<31) + (uint(zExp)<<23) + zFrac);
+}
+
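+/* Illustrative check (editorial sketch, not part of the upstream file):
+ * the single-precision analogue of __packFloat64; sign 0, biased exponent
+ * 0x7F (127) and a zero fraction give 0x3F800000u, i.e. 1.0f.
+ */
+float
+__example_pack_one32()
+{
+ return __packFloat32(0u, 0x7F, 0u);
+}
+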
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and significand `zFrac', and returns the proper single-precision floating-
+ * point value corresponding to the abstract input. Ordinarily, the abstract
+ * value is simply rounded and packed into the single-precision format, with
+ * the inexact exception raised if the abstract input cannot be represented
+ * exactly. However, if the abstract value is too large, the overflow and
+ * inexact exceptions are raised and an infinity or maximal finite value is
+ * returned. If the abstract value is too small, the input value is rounded to
+ * a subnormal number, and the underflow and inexact exceptions are raised if
+ * the abstract input cannot be represented exactly as a subnormal single-
+ * precision floating-point number.
+ * The input significand `zFrac' has its binary point between bits 30
+ * and 29, which is 7 bits to the left of the usual location. This shifted
+ * significand must be normalized or smaller. If `zFrac' is not normalized,
+ * `zExp' must be 0; in that case, the result returned is a subnormal number,
+ * and it must not require rounding. In the usual case that `zFrac' is
+ * normalized, `zExp' must be 1 less than the "true" floating-point exponent.
+ * The handling of underflow and overflow follows the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+float
+__roundAndPackFloat32(uint zSign, int zExp, uint zFrac)
+{
+ bool roundNearestEven;
+ int roundIncrement;
+ int roundBits;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+ roundIncrement = 0x40;
+ if (!roundNearestEven) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
+ roundIncrement = 0;
+ } else {
+ roundIncrement = 0x7F;
+ if (zSign != 0u) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)
+ roundIncrement = 0;
+ } else {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN)
+ roundIncrement = 0;
+ }
+ }
+ }
+ roundBits = int(zFrac & 0x7Fu);
+ if (0xFDu <= uint(zExp)) {
+ if ((0xFD < zExp) || ((zExp == 0xFD) && (int(zFrac) + roundIncrement) < 0))
+ return __packFloat32(zSign, 0xFF, 0u) - float(roundIncrement == 0);
+ int count = -zExp;
+ bool zexp_lt0 = zExp < 0;
+ uint zFrac_lt0 = mix(uint(zFrac != 0u), (zFrac>>count) | uint((zFrac<<((-count) & 31)) != 0u), (-zExp) < 32);
+ zFrac = mix(zFrac, zFrac_lt0, zexp_lt0);
+ roundBits = mix(roundBits, int(zFrac) & 0x7f, zexp_lt0);
+ zExp = mix(zExp, 0, zexp_lt0);
+ }
+ zFrac = (zFrac + uint(roundIncrement))>>7;
+ zFrac &= ~uint(((roundBits ^ 0x40) == 0) && roundNearestEven);
+
+ return __packFloat32(zSign, mix(zExp, 0, zFrac == 0u), zFrac);
+}
+
+/* Returns the result of converting the double-precision floating-point value
+ * `a' to the single-precision floating-point format. The conversion is
+ * performed according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+float
+__fp64_to_fp32(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uint zFrac = 0u;
+ uint allZero = 0u;
+
+ uint aFracLo = __extractFloat64FracLo(__a);
+ uint aFracHi = __extractFloat64FracHi(__a);
+ int aExp = __extractFloat64Exp(__a);
+ uint aSign = __extractFloat64Sign(__a);
+ if (aExp == 0x7FF) {
+ __shortShift64Left(a.y, a.x, 12, a.y, a.x);
+ float rval = uintBitsToFloat((aSign<<31) | 0x7FC00000u | (a.y>>9));
+ rval = mix(__packFloat32(aSign, 0xFF, 0u), rval, (aFracHi | aFracLo) != 0u);
+ return rval;
+ }
+ __shift64RightJamming(aFracHi, aFracLo, 22, allZero, zFrac);
+ zFrac = mix(zFrac, zFrac | 0x40000000u, aExp != 0);
+ return __roundAndPackFloat32(aSign, aExp - 0x381, zFrac);
+}
+
+float
+__uint64_to_fp32(uint64_t __a)
+{
+ uint zFrac = 0u;
+ uvec2 aFrac = unpackUint2x32(__a);
+ int shiftCount = __countLeadingZeros32(mix(aFrac.y, aFrac.x, aFrac.y == 0u));
+ shiftCount -= mix(40, 8, aFrac.y == 0u);
+
+ if (0 <= shiftCount) {
+ __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);
+ bool is_zero = (aFrac.y | aFrac.x) == 0u;
+ return mix(__packFloat32(0u, 0x95 - shiftCount, aFrac.x), 0.0, is_zero);
+ }
+
+ shiftCount += 7;
+ __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);
+ zFrac = mix(aFrac.x<<shiftCount, aFrac.x, shiftCount < 0);
+ return __roundAndPackFloat32(0u, 0x9C - shiftCount, zFrac);
+}
+
+float
+__int64_to_fp32(int64_t __a)
+{
+ uint zFrac = 0u;
+ uint aSign = uint(__a < 0);
+ uint64_t absA = mix(uint64_t(__a), uint64_t(-__a), __a < 0);
+ uvec2 aFrac = unpackUint2x32(absA);
+ int shiftCount = __countLeadingZeros32(mix(aFrac.y, aFrac.x, aFrac.y == 0u));
+ shiftCount -= mix(40, 8, aFrac.y == 0u);
+
+ if (0 <= shiftCount) {
+ __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);
+ bool is_zero = (aFrac.y | aFrac.x) == 0u;
+ return mix(__packFloat32(aSign, 0x95 - shiftCount, aFrac.x), 0.0, is_zero);
+ }
+
+ shiftCount += 7;
+ __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);
+ zFrac = mix(aFrac.x<<shiftCount, aFrac.x, shiftCount < 0);
+ return __roundAndPackFloat32(aSign, 0x9C - shiftCount, zFrac);
+}
+
+/* Returns the result of converting the single-precision floating-point value
+ * `a' to the double-precision floating-point format.
+ */
+uint64_t
+__fp32_to_fp64(float f)
+{
+ uint a = floatBitsToUint(f);
+ uint aFrac = a & 0x007FFFFFu;
+ int aExp = int((a>>23) & 0xFFu);
+ uint aSign = a>>31;
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+
+ if (aExp == 0xFF) {
+ if (aFrac != 0u) {
+ uint nanLo = 0u;
+ uint nanHi = a<<9;
+ __shift64Right(nanHi, nanLo, 12, nanHi, nanLo);
+ nanHi |= ((aSign<<31) | 0x7FF80000u);
+ return packUint2x32(uvec2(nanLo, nanHi));
+ }
+ return __packFloat64(aSign, 0x7FF, 0u, 0u);
+ }
+
+ if (aExp == 0) {
+ if (aFrac == 0u)
+ return __packFloat64(aSign, 0, 0u, 0u);
+ /* Normalize subnormal */
+ int shiftCount = __countLeadingZeros32(aFrac) - 8;
+ aFrac <<= shiftCount;
+ aExp = 1 - shiftCount;
+ --aExp;
+ }
+
+ __shift64Right(aFrac, 0u, 3, zFrac0, zFrac1);
+ return __packFloat64(aSign, aExp + 0x380, zFrac0, zFrac1);
+}
+
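+/* Bias check (editorial sketch, not part of the upstream file): re-biasing
+ * by 0x380 (1023 - 127) maps the float exponent straight onto the double
+ * exponent, so 1.0f converts to the canonical 1.0 bit pattern.
+ */
+bool
+__test_fp32_to_fp64()
+{
+ return __fp32_to_fp64(1.0) == 0x3FF0000000000000UL;
+}
+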
+/* Adds the 96-bit value formed by concatenating `a0', `a1', and `a2' to the
+ * 96-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
+ * modulo 2^96, so any carry out is lost. The result is broken into three
+ * 32-bit pieces which are stored at the locations pointed to by `z0Ptr',
+ * `z1Ptr', and `z2Ptr'.
+ */
+void
+__add96(uint a0, uint a1, uint a2,
+ uint b0, uint b1, uint b2,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr)
+{
+ uint z2 = a2 + b2;
+ uint carry1 = uint(z2 < a2);
+ uint z1 = a1 + b1;
+ uint carry0 = uint(z1 < a1);
+ uint z0 = a0 + b0;
+ z1 += carry1;
+ z0 += uint(z1 < carry1);
+ z0 += carry0;
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
+/* Subtracts the 96-bit value formed by concatenating `b0', `b1', and `b2' from
+ * the 96-bit value formed by concatenating `a0', `a1', and `a2'. Subtraction
+ * is modulo 2^96, so any borrow out (carry out) is lost. The result is broken
+ * into three 32-bit pieces which are stored at the locations pointed to by
+ * `z0Ptr', `z1Ptr', and `z2Ptr'.
+ */
+void
+__sub96(uint a0, uint a1, uint a2,
+ uint b0, uint b1, uint b2,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr)
+{
+ uint z2 = a2 - b2;
+ uint borrow1 = uint(a2 < b2);
+ uint z1 = a1 - b1;
+ uint borrow0 = uint(a1 < b1);
+ uint z0 = a0 - b0;
+ z0 -= uint(z1 < borrow1);
+ z1 -= borrow1;
+ z0 -= borrow0;
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
+/* Returns an approximation to the 32-bit integer quotient obtained by dividing
+ * `b' into the 64-bit value formed by concatenating `a0' and `a1'. The
+ * divisor `b' must be at least 2^31. If q is the exact quotient truncated
+ * toward zero, the approximation returned lies between q and q + 2 inclusive.
+ * If the exact quotient q is larger than 32 bits, the maximum positive 32-bit
+ * unsigned integer is returned.
+ */
+uint
+__estimateDiv64To32(uint a0, uint a1, uint b)
+{
+ uint b0;
+ uint b1;
+ uint rem0 = 0u;
+ uint rem1 = 0u;
+ uint term0 = 0u;
+ uint term1 = 0u;
+ uint z;
+
+ if (b <= a0)
+ return 0xFFFFFFFFu;
+ b0 = b>>16;
+ z = (b0<<16 <= a0) ? 0xFFFF0000u : (a0 / b0)<<16;
+ __mul32To64(b, z, term0, term1);
+ __sub64(a0, a1, term0, term1, rem0, rem1);
+ while (int(rem0) < 0) {
+ z -= 0x10000u;
+ b1 = b<<16;
+ __add64(rem0, rem1, b0, b1, rem0, rem1);
+ }
+ rem0 = (rem0<<16) | (rem1>>16);
+ z |= (b0<<16 <= rem0) ? 0xFFFFu : rem0 / b0;
+ return z;
+}
+
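+/* Usage note (editorial sketch, not part of the upstream file): with a
+ * normalized divisor (MSB set) the estimate is within +2 of the truncated
+ * quotient; for 0x40000000_00000000 / 0x80000000 it is exact.
+ */
+uint
+__example_estimateDiv()
+{
+ return __estimateDiv64To32(0x40000000u, 0u, 0x80000000u); /* 0x80000000u */
+}
+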
+uint
+__sqrtOddAdjustments(int index)
+{
+ uint res = 0u;
+ if (index == 0)
+ res = 0x0004u;
+ if (index == 1)
+ res = 0x0022u;
+ if (index == 2)
+ res = 0x005Du;
+ if (index == 3)
+ res = 0x00B1u;
+ if (index == 4)
+ res = 0x011Du;
+ if (index == 5)
+ res = 0x019Fu;
+ if (index == 6)
+ res = 0x0236u;
+ if (index == 7)
+ res = 0x02E0u;
+ if (index == 8)
+ res = 0x039Cu;
+ if (index == 9)
+ res = 0x0468u;
+ if (index == 10)
+ res = 0x0545u;
+ if (index == 11)
+ res = 0x631u;
+ if (index == 12)
+ res = 0x072Bu;
+ if (index == 13)
+ res = 0x0832u;
+ if (index == 14)
+ res = 0x0946u;
+ if (index == 15)
+ res = 0x0A67u;
+
+ return res;
+}
+
+uint
+__sqrtEvenAdjustments(int index)
+{
+ uint res = 0u;
+ if (index == 0)
+ res = 0x0A2Du;
+ if (index == 1)
+ res = 0x08AFu;
+ if (index == 2)
+ res = 0x075Au;
+ if (index == 3)
+ res = 0x0629u;
+ if (index == 4)
+ res = 0x051Au;
+ if (index == 5)
+ res = 0x0429u;
+ if (index == 6)
+ res = 0x0356u;
+ if (index == 7)
+ res = 0x029Eu;
+ if (index == 8)
+ res = 0x0200u;
+ if (index == 9)
+ res = 0x0179u;
+ if (index == 10)
+ res = 0x0109u;
+ if (index == 11)
+ res = 0x00AFu;
+ if (index == 12)
+ res = 0x0068u;
+ if (index == 13)
+ res = 0x0034u;
+ if (index == 14)
+ res = 0x0012u;
+ if (index == 15)
+ res = 0x0002u;
+
+ return res;
+}
+
+/* Returns an approximation to the square root of the 32-bit significand given
+ * by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
+ * `aExp' (the least significant bit) is 1, the integer returned approximates
+ * 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp'
+ * is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either
+ * case, the approximation returned lies strictly within +/-2 of the exact
+ * value.
+ */
+uint
+__estimateSqrt32(int aExp, uint a)
+{
+ uint z;
+
+ int index = int(a>>27 & 15u);
+ if ((aExp & 1) != 0) {
+ z = 0x4000u + (a>>17) - __sqrtOddAdjustments(index);
+ z = ((a / z)<<14) + (z<<15);
+ a >>= 1;
+ } else {
+ z = 0x8000u + (a>>17) - __sqrtEvenAdjustments(index);
+ z = a / z + z;
+ z = (0x20000u <= z) ? 0xFFFF8000u : (z<<15);
+ if (z <= a)
+ return uint(int(a)>>1);
+ }
+ return ((__estimateDiv64To32(a, 0u, z))>>1) + (z>>1);
+}
+
+/* Returns the square root of the double-precision floating-point value `a'.
+ * The operation is performed according to the IEEE Standard for Floating-Point
+ * Arithmetic.
+ */
+uint64_t
+__fsqrt64(uint64_t a)
+{
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ uint doubleZFrac0 = 0u;
+ uint rem0 = 0u;
+ uint rem1 = 0u;
+ uint rem2 = 0u;
+ uint rem3 = 0u;
+ uint term0 = 0u;
+ uint term1 = 0u;
+ uint term2 = 0u;
+ uint term3 = 0u;
+ uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
+
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ if (aExp == 0x7FF) {
+ if ((aFracHi | aFracLo) != 0u)
+ return __propagateFloat64NaN(a, a);
+ if (aSign == 0u)
+ return a;
+ return default_nan;
+ }
+ if (aSign != 0u) {
+ if ((uint(aExp) | aFracHi | aFracLo) == 0u)
+ return a;
+ return default_nan;
+ }
+ if (aExp == 0) {
+ if ((aFracHi | aFracLo) == 0u)
+ return __packFloat64(0u, 0, 0u, 0u);
+ __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
+ }
+ int zExp = ((aExp - 0x3FF)>>1) + 0x3FE;
+ aFracHi |= 0x00100000u;
+ __shortShift64Left(aFracHi, aFracLo, 11, term0, term1);
+ zFrac0 = (__estimateSqrt32(aExp, term0)>>1) + 1u;
+ if (zFrac0 == 0u)
+ zFrac0 = 0x7FFFFFFFu;
+ doubleZFrac0 = zFrac0 + zFrac0;
+ __shortShift64Left(aFracHi, aFracLo, 9 - (aExp & 1), aFracHi, aFracLo);
+ __mul32To64(zFrac0, zFrac0, term0, term1);
+ __sub64(aFracHi, aFracLo, term0, term1, rem0, rem1);
+ while (int(rem0) < 0) {
+ --zFrac0;
+ doubleZFrac0 -= 2u;
+ __add64(rem0, rem1, 0u, doubleZFrac0 | 1u, rem0, rem1);
+ }
+ zFrac1 = __estimateDiv64To32(rem1, 0u, doubleZFrac0);
+ if ((zFrac1 & 0x1FFu) <= 5u) {
+ if (zFrac1 == 0u)
+ zFrac1 = 1u;
+ __mul32To64(doubleZFrac0, zFrac1, term1, term2);
+ __sub64(rem1, 0u, term1, term2, rem1, rem2);
+ __mul32To64(zFrac1, zFrac1, term2, term3);
+ __sub96(rem1, rem2, 0u, 0u, term2, term3, rem1, rem2, rem3);
+ while (int(rem1) < 0) {
+ --zFrac1;
+ __shortShift64Left(0u, zFrac1, 1, term2, term3);
+ term3 |= 1u;
+ term2 |= doubleZFrac0;
+ __add96(rem1, rem2, rem3, 0u, term2, term3, rem1, rem2, rem3);
+ }
+ zFrac1 |= uint((rem1 | rem2 | rem3) != 0u);
+ }
+ __shift64ExtraRightJamming(zFrac0, zFrac1, 0u, 10, zFrac0, zFrac1, zFrac2);
+ return __roundAndPackFloat64(0u, zExp, zFrac0, zFrac1, zFrac2);
+}
+
+uint64_t
+__ftrunc64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ int aExp = __extractFloat64Exp(__a);
+ uint zLo;
+ uint zHi;
+
+ int unbiasedExp = aExp - 1023;
+ int fracBits = 52 - unbiasedExp;
+ uint maskLo = mix(~0u << fracBits, 0u, fracBits >= 32);
+ uint maskHi = mix(~0u << (fracBits - 32), ~0u, fracBits < 33);
+ zLo = maskLo & a.x;
+ zHi = maskHi & a.y;
+
+ zLo = mix(zLo, 0u, unbiasedExp < 0);
+ zHi = mix(zHi, 0u, unbiasedExp < 0);
+ zLo = mix(zLo, a.x, unbiasedExp > 52);
+ zHi = mix(zHi, a.y, unbiasedExp > 52);
+ return packUint2x32(uvec2(zLo, zHi));
+}
+
+uint64_t
+__ffloor64(uint64_t a)
+{
+ bool is_positive = __fge64(a, 0ul);
+ uint64_t tr = __ftrunc64(a);
+
+ if (is_positive || __feq64(tr, a)) {
+ return tr;
+ } else {
+ return __fadd64(tr, 0xbff0000000000000ul /* -1.0 */);
+ }
+}
+
+uint64_t
+__fround64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ int unbiasedExp = __extractFloat64Exp(__a) - 1023;
+ uint aHi = a.y;
+ uint aLo = a.x;
+
+ if (unbiasedExp < 20) {
+ if (unbiasedExp < 0) {
+ aHi &= 0x80000000u;
+ if (unbiasedExp == -1 && aLo != 0u)
+ aHi |= (1023u << 20);
+ aLo = 0u;
+ } else {
+ uint maskExp = 0x000FFFFFu >> unbiasedExp;
+ /* a is an integral value */
+ if (((aHi & maskExp) == 0u) && (aLo == 0u))
+ return __a;
+
+ aHi += 0x00080000u >> unbiasedExp;
+ aHi &= ~maskExp;
+ aLo = 0u;
+ }
+ } else if (unbiasedExp > 51 || unbiasedExp == 1024) {
+ return __a;
+ } else {
+ uint maskExp = 0xFFFFFFFFu >> (unbiasedExp - 20);
+ if ((aLo & maskExp) == 0u)
+ return __a;
+ uint tmp = aLo + (1u << (51 - unbiasedExp));
+ if (tmp < aLo)
+ aHi += 1u;
+ aLo = tmp;
+ aLo &= ~maskExp;
+ }
+
+ a.x = aLo;
+ a.y = aHi;
+ return packUint2x32(a);
+}
+
+uint64_t
+__fmin64(uint64_t a, uint64_t b)
+{
+ if (__is_nan(a)) return b;
+ if (__is_nan(b)) return a;
+
+ if (__flt64_nonnan(a, b)) return a;
+ return b;
+}
+
+uint64_t
+__fmax64(uint64_t a, uint64_t b)
+{
+ if (__is_nan(a)) return b;
+ if (__is_nan(b)) return a;
+
+ if (__flt64_nonnan(a, b)) return b;
+ return a;
+}
+
+uint64_t
+__ffract64(uint64_t a)
+{
+ return __fadd64(a, __fneg64(__ffloor64(a)));
+}
diff --git a/lib/mesa/src/compiler/glsl/float64_glsl.h b/lib/mesa/src/compiler/glsl/float64_glsl.h
new file mode 100644
index 000000000..58b67d321
--- /dev/null
+++ b/lib/mesa/src/compiler/glsl/float64_glsl.h
@@ -0,0 +1,1743 @@
+static const char float64_source[] =
+ "/*\n"
+ " * The implementations contained in this file are heavily based on the\n"
+ " * implementations found in the Berkeley SoftFloat library. As such, they are\n"
+ " * licensed under the same 3-clause BSD license:\n"
+ " *\n"
+ " * License for Berkeley SoftFloat Release 3e\n"
+ " *\n"
+ " * John R. Hauser\n"
+ " * 2018 January 20\n"
+ " *\n"
+ " * The following applies to the whole of SoftFloat Release 3e as well as to\n"
+ " * each source file individually.\n"
+ " *\n"
+ " * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the\n"
+ " * University of California. All rights reserved.\n"
+ " *\n"
+ " * Redistribution and use in source and binary forms, with or without\n"
+ " * modification, are permitted provided that the following conditions are met:\n"
+ " *\n"
+ " * 1. Redistributions of source code must retain the above copyright notice,\n"
+ " * this list of conditions, and the following disclaimer.\n"
+ " *\n"
+ " * 2. Redistributions in binary form must reproduce the above copyright\n"
+ " * notice, this list of conditions, and the following disclaimer in the\n"
+ " * documentation and/or other materials provided with the distribution.\n"
+ " *\n"
+ " * 3. Neither the name of the University nor the names of its contributors\n"
+ " * may be used to endorse or promote products derived from this software\n"
+ " * without specific prior written permission.\n"
+ " *\n"
+ " * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS \"AS IS\", AND ANY\n"
+ " * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"
+ " * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE\n"
+ " * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY\n"
+ " * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n"
+ " * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n"
+ " * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n"
+ " * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
+ " * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n"
+ " * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
+ "*/\n"
+ "\n"
+ "#version 430\n"
+ "#extension GL_ARB_gpu_shader_int64 : enable\n"
+ "#extension GL_ARB_shader_bit_encoding : enable\n"
+ "#extension GL_EXT_shader_integer_mix : enable\n"
+ "#extension GL_MESA_shader_integer_functions : enable\n"
+ "\n"
+ "#pragma warning(off)\n"
+ "\n"
+ "/* Software IEEE floating-point rounding mode.\n"
+ " * GLSL spec section \"4.7.1 Range and Precision\":\n"
+ " * The rounding mode cannot be set and is undefined.\n"
+ " * But here, we are able to define the rounding mode at the compilation time.\n"
+ " */\n"
+ "#define FLOAT_ROUND_NEAREST_EVEN 0\n"
+ "#define FLOAT_ROUND_TO_ZERO 1\n"
+ "#define FLOAT_ROUND_DOWN 2\n"
+ "#define FLOAT_ROUND_UP 3\n"
+ "#define FLOAT_ROUNDING_MODE FLOAT_ROUND_NEAREST_EVEN\n"
+ "\n"
+ "/* Absolute value of a Float64 :\n"
+ " * Clear the sign bit\n"
+ " */\n"
+ "uint64_t\n"
+ "__fabs64(uint64_t __a)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " a.y &= 0x7FFFFFFFu;\n"
+ " return packUint2x32(a);\n"
+ "}\n"
+ "\n"
+ "/* Returns 1 if the double-precision floating-point value `a' is a NaN;\n"
+ " * otherwise returns 0.\n"
+ " */\n"
+ "bool\n"
+ "__is_nan(uint64_t __a)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " return (0xFFE00000u <= (a.y<<1)) &&\n"
+ " ((a.x != 0u) || ((a.y & 0x000FFFFFu) != 0u));\n"
+ "}\n"
+ "\n"
+ "/* Negate value of a Float64 :\n"
+ " * Toggle the sign bit\n"
+ " */\n"
+ "uint64_t\n"
+ "__fneg64(uint64_t __a)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " uint t = a.y;\n"
+ "\n"
+ " t ^= (1u << 31);\n"
+ " a.y = mix(t, a.y, __is_nan(__a));\n"
+ " return packUint2x32(a);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__fsign64(uint64_t __a)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " uvec2 retval;\n"
+ " retval.x = 0u;\n"
+ " retval.y = mix((a.y & 0x80000000u) | 0x3FF00000u, 0u, (a.y << 1 | a.x) == 0u);\n"
+ " return packUint2x32(retval);\n"
+ "}\n"
+ "\n"
+ "/* Returns the fraction bits of the double-precision floating-point value `a'.*/\n"
+ "uint\n"
+ "__extractFloat64FracLo(uint64_t a)\n"
+ "{\n"
+ " return unpackUint2x32(a).x;\n"
+ "}\n"
+ "\n"
+ "uint\n"
+ "__extractFloat64FracHi(uint64_t a)\n"
+ "{\n"
+ " return unpackUint2x32(a).y & 0x000FFFFFu;\n"
+ "}\n"
+ "\n"
+ "/* Returns the exponent bits of the double-precision floating-point value `a'.*/\n"
+ "int\n"
+ "__extractFloat64Exp(uint64_t __a)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " return int((a.y>>20) & 0x7FFu);\n"
+ "}\n"
+ "\n"
+ "bool\n"
+ "__feq64_nonnan(uint64_t __a, uint64_t __b)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " uvec2 b = unpackUint2x32(__b);\n"
+ " return (a.x == b.x) &&\n"
+ " ((a.y == b.y) || ((a.x == 0u) && (((a.y | b.y)<<1) == 0u)));\n"
+ "}\n"
+ "\n"
+ "/* Returns true if the double-precision floating-point value `a' is equal to the\n"
+ " * corresponding value `b', and false otherwise. The comparison is performed\n"
+ " * according to the IEEE Standard for Floating-Point Arithmetic.\n"
+ " */\n"
+ "bool\n"
+ "__feq64(uint64_t a, uint64_t b)\n"
+ "{\n"
+ " if (__is_nan(a) || __is_nan(b))\n"
+ " return false;\n"
+ "\n"
+ " return __feq64_nonnan(a, b);\n"
+ "}\n"
+ "\n"
+ "/* Returns true if the double-precision floating-point value `a' is not equal\n"
+ " * to the corresponding value `b', and false otherwise. The comparison is\n"
+ " * performed according to the IEEE Standard for Floating-Point Arithmetic.\n"
+ " */\n"
+ "bool\n"
+ "__fne64(uint64_t a, uint64_t b)\n"
+ "{\n"
+ " if (__is_nan(a) || __is_nan(b))\n"
+ " return true;\n"
+ "\n"
+ " return !__feq64_nonnan(a, b);\n"
+ "}\n"
+ "\n"
+ "/* Returns the sign bit of the double-precision floating-point value `a'.*/\n"
+ "uint\n"
+ "__extractFloat64Sign(uint64_t a)\n"
+ "{\n"
+ " return unpackUint2x32(a).y >> 31;\n"
+ "}\n"
+ "\n"
+ "/* Returns true if the 64-bit value formed by concatenating `a0' and `a1' is less\n"
+ " * than the 64-bit value formed by concatenating `b0' and `b1'. Otherwise,\n"
+ " * returns false.\n"
+ " */\n"
+ "bool\n"
+ "lt64(uint a0, uint a1, uint b0, uint b1)\n"
+ "{\n"
+ " return (a0 < b0) || ((a0 == b0) && (a1 < b1));\n"
+ "}\n"
+ "\n"
+ "bool\n"
+ "__flt64_nonnan(uint64_t __a, uint64_t __b)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " uvec2 b = unpackUint2x32(__b);\n"
+ " uint aSign = __extractFloat64Sign(__a);\n"
+ " uint bSign = __extractFloat64Sign(__b);\n"
+ " if (aSign != bSign)\n"
+ " return (aSign != 0u) && ((((a.y | b.y)<<1) | a.x | b.x) != 0u);\n"
+ "\n"
+ " return mix(lt64(a.y, a.x, b.y, b.x), lt64(b.y, b.x, a.y, a.x), aSign != 0u);\n"
+ "}\n"
+ "\n"
+ "/* Returns true if the double-precision floating-point value `a' is less than\n"
+ " * the corresponding value `b', and false otherwise. The comparison is performed\n"
+ " * according to the IEEE Standard for Floating-Point Arithmetic.\n"
+ " */\n"
+ "bool\n"
+ "__flt64(uint64_t a, uint64_t b)\n"
+ "{\n"
+ " if (__is_nan(a) || __is_nan(b))\n"
+ " return false;\n"
+ "\n"
+ " return __flt64_nonnan(a, b);\n"
+ "}\n"
+ "\n"
+ "/* Returns true if the double-precision floating-point value `a' is greater\n"
+ " * than or equal to * the corresponding value `b', and false otherwise. The\n"
+ " * comparison is performed * according to the IEEE Standard for Floating-Point\n"
+ " * Arithmetic.\n"
+ " */\n"
+ "bool\n"
+ "__fge64(uint64_t a, uint64_t b)\n"
+ "{\n"
+ " if (__is_nan(a) || __is_nan(b))\n"
+ " return false;\n"
+ "\n"
+ " return !__flt64_nonnan(a, b);\n"
+ "}\n"
+ "\n"
+ "/* Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit\n"
+ " * value formed by concatenating `b0' and `b1'. Addition is modulo 2^64, so\n"
+ " * any carry out is lost. The result is broken into two 32-bit pieces which\n"
+ " * are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__add64(uint a0, uint a1, uint b0, uint b1,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr)\n"
+ "{\n"
+ " uint z1 = a1 + b1;\n"
+ " z1Ptr = z1;\n"
+ " z0Ptr = a0 + b0 + uint(z1 < a1);\n"
+ "}\n"
+ "\n"
+ "\n"
+ "/* Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the\n"
+ " * 64-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo\n"
+ " * 2^64, so any borrow out (carry out) is lost. The result is broken into two\n"
+ " * 32-bit pieces which are stored at the locations pointed to by `z0Ptr' and\n"
+ " * `z1Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__sub64(uint a0, uint a1, uint b0, uint b1,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr)\n"
+ "{\n"
+ " z1Ptr = a1 - b1;\n"
+ " z0Ptr = a0 - b0 - uint(a1 < b1);\n"
+ "}\n"
+ "\n"
+ "/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the\n"
+ " * number of bits given in `count'. If any nonzero bits are shifted off, they\n"
+ " * are \"jammed\" into the least significant bit of the result by setting the\n"
+ " * least significant bit to 1. The value of `count' can be arbitrarily large;\n"
+ " * in particular, if `count' is greater than 64, the result will be either 0\n"
+ " * or 1, depending on whether the concatenation of `a0' and `a1' is zero or\n"
+ " * nonzero. The result is broken into two 32-bit pieces which are stored at\n"
+ " * the locations pointed to by `z0Ptr' and `z1Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__shift64RightJamming(uint a0,\n"
+ " uint a1,\n"
+ " int count,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr)\n"
+ "{\n"
+ " uint z0;\n"
+ " uint z1;\n"
+ " int negCount = (-count) & 31;\n"
+ "\n"
+ " z0 = mix(0u, a0, count == 0);\n"
+ " z0 = mix(z0, (a0 >> count), count < 32);\n"
+ "\n"
+ " z1 = uint((a0 | a1) != 0u); /* count >= 64 */\n"
+ " uint z1_lt64 = (a0>>(count & 31)) | uint(((a0<<negCount) | a1) != 0u);\n"
+ " z1 = mix(z1, z1_lt64, count < 64);\n"
+ " z1 = mix(z1, (a0 | uint(a1 != 0u)), count == 32);\n"
+ " uint z1_lt32 = (a0<<negCount) | (a1>>count) | uint ((a1<<negCount) != 0u);\n"
+ " z1 = mix(z1, z1_lt32, count < 32);\n"
+ " z1 = mix(z1, a1, count == 0);\n"
+ " z1Ptr = z1;\n"
+ " z0Ptr = z0;\n"
+ "}\n"
+ "\n"
+ "/* Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right\n"
+ " * by 32 _plus_ the number of bits given in `count'. The shifted result is\n"
+ " * at most 64 nonzero bits; these are broken into two 32-bit pieces which are\n"
+ " * stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted\n"
+ " * off form a third 32-bit result as follows: The _last_ bit shifted off is\n"
+ " * the most-significant bit of the extra result, and the other 31 bits of the\n"
+ " * extra result are all zero if and only if _all_but_the_last_ bits shifted off\n"
+ " * were all zero. This extra result is stored in the location pointed to by\n"
+ " * `z2Ptr'. The value of `count' can be arbitrarily large.\n"
+ " * (This routine makes more sense if `a0', `a1', and `a2' are considered\n"
+ " * to form a fixed-point value with binary point between `a1' and `a2'. This\n"
+ " * fixed-point value is shifted right by the number of bits given in `count',\n"
+ " * and the integer part of the result is returned at the locations pointed to\n"
+ " * by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly\n"
+ " * corrupted as described above, and is returned at the location pointed to by\n"
+ " * `z2Ptr'.)\n"
+ " */\n"
+ "void\n"
+ "__shift64ExtraRightJamming(uint a0, uint a1, uint a2,\n"
+ " int count,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr,\n"
+ " out uint z2Ptr)\n"
+ "{\n"
+ " uint z0 = 0u;\n"
+ " uint z1;\n"
+ " uint z2;\n"
+ " int negCount = (-count) & 31;\n"
+ "\n"
+ " z2 = mix(uint(a0 != 0u), a0, count == 64);\n"
+ " z2 = mix(z2, a0 << negCount, count < 64);\n"
+ " z2 = mix(z2, a1 << negCount, count < 32);\n"
+ "\n"
+ " z1 = mix(0u, (a0 >> (count & 31)), count < 64);\n"
+ " z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);\n"
+ "\n"
+ " a2 = mix(a2 | a1, a2, count < 32);\n"
+ " z0 = mix(z0, a0 >> count, count < 32);\n"
+ " z2 |= uint(a2 != 0u);\n"
+ "\n"
+ " z0 = mix(z0, 0u, (count == 32));\n"
+ " z1 = mix(z1, a0, (count == 32));\n"
+ " z2 = mix(z2, a1, (count == 32));\n"
+ " z0 = mix(z0, a0, (count == 0));\n"
+ " z1 = mix(z1, a1, (count == 0));\n"
+ " z2 = mix(z2, a2, (count == 0));\n"
+ " z2Ptr = z2;\n"
+ " z1Ptr = z1;\n"
+ " z0Ptr = z0;\n"
+ "}\n"
+ "\n"
+ "/* Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the\n"
+ " * number of bits given in `count'. Any bits shifted off are lost. The value\n"
+ " * of `count' must be less than 32. The result is broken into two 32-bit\n"
+ " * pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__shortShift64Left(uint a0, uint a1,\n"
+ " int count,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr)\n"
+ "{\n"
+ " z1Ptr = a1<<count;\n"
+ " z0Ptr = mix((a0 << count | (a1 >> ((-count) & 31))), a0, count == 0);\n"
+ "}\n"
+ "\n"
+ "/* Packs the sign `zSign', the exponent `zExp', and the significand formed by\n"
+ " * the concatenation of `zFrac0' and `zFrac1' into a double-precision floating-\n"
+ " * point value, returning the result. After being shifted into the proper\n"
+ " * positions, the three fields `zSign', `zExp', and `zFrac0' are simply added\n"
+ " * together to form the most significant 32 bits of the result. This means\n"
+ " * that any integer portion of `zFrac0' will be added into the exponent. Since\n"
+ " * a properly normalized significand will have an integer portion equal to 1,\n"
+ " * the `zExp' input should be 1 less than the desired result exponent whenever\n"
+ " * `zFrac0' and `zFrac1' concatenated form a complete, normalized significand.\n"
+ " */\n"
+ "uint64_t\n"
+ "__packFloat64(uint zSign, int zExp, uint zFrac0, uint zFrac1)\n"
+ "{\n"
+ " uvec2 z;\n"
+ "\n"
+ " z.y = (zSign << 31) + (uint(zExp) << 20) + zFrac0;\n"
+ " z.x = zFrac1;\n"
+ " return packUint2x32(z);\n"
+ "}\n"
+ "\n"
+ "/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',\n"
+ " * and extended significand formed by the concatenation of `zFrac0', `zFrac1',\n"
+ " * and `zFrac2', and returns the proper double-precision floating-point value\n"
+ " * corresponding to the abstract input. Ordinarily, the abstract value is\n"
+ " * simply rounded and packed into the double-precision format, with the inexact\n"
+ " * exception raised if the abstract input cannot be represented exactly.\n"
+ " * However, if the abstract value is too large, the overflow and inexact\n"
+ " * exceptions are raised and an infinity or maximal finite value is returned.\n"
+ " * If the abstract value is too small, the input value is rounded to a\n"
+ " * subnormal number, and the underflow and inexact exceptions are raised if the\n"
+ " * abstract input cannot be represented exactly as a subnormal double-precision\n"
+ " * floating-point number.\n"
+ " * The input significand must be normalized or smaller. If the input\n"
+ " * significand is not normalized, `zExp' must be 0; in that case, the result\n"
+ " * returned is a subnormal number, and it must not require rounding. In the\n"
+ " * usual case that the input significand is normalized, `zExp' must be 1 less\n"
+ " * than the \"true\" floating-point exponent. The handling of underflow and\n"
+ " * overflow follows the IEEE Standard for Floating-Point Arithmetic.\n"
+ " */\n"
+ "uint64_t\n"
+ "__roundAndPackFloat64(uint zSign,\n"
+ " int zExp,\n"
+ " uint zFrac0,\n"
+ " uint zFrac1,\n"
+ " uint zFrac2)\n"
+ "{\n"
+ " bool roundNearestEven;\n"
+ " bool increment;\n"
+ "\n"
+ " roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;\n"
+ " increment = int(zFrac2) < 0;\n"
+ " if (!roundNearestEven) {\n"
+ " if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {\n"
+ " increment = false;\n"
+ " } else {\n"
+ " if (zSign != 0u) {\n"
+ " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&\n"
+ " (zFrac2 != 0u);\n"
+ " } else {\n"
+ " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&\n"
+ " (zFrac2 != 0u);\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " if (0x7FD <= zExp) {\n"
+ " if ((0x7FD < zExp) ||\n"
+ " ((zExp == 0x7FD) &&\n"
+ " (0x001FFFFFu == zFrac0 && 0xFFFFFFFFu == zFrac1) &&\n"
+ " increment)) {\n"
+ " if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) ||\n"
+ " ((zSign != 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)) ||\n"
+ " ((zSign == 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN))) {\n"
+ " return __packFloat64(zSign, 0x7FE, 0x000FFFFFu, 0xFFFFFFFFu);\n"
+ " }\n"
+ " return __packFloat64(zSign, 0x7FF, 0u, 0u);\n"
+ " }\n"
+ " if (zExp < 0) {\n"
+ " __shift64ExtraRightJamming(\n"
+ " zFrac0, zFrac1, zFrac2, -zExp, zFrac0, zFrac1, zFrac2);\n"
+ " zExp = 0;\n"
+ " if (roundNearestEven) {\n"
+ " increment = zFrac2 < 0u;\n"
+ " } else {\n"
+ " if (zSign != 0u) {\n"
+ " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&\n"
+ " (zFrac2 != 0u);\n"
+ " } else {\n"
+ " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&\n"
+ " (zFrac2 != 0u);\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " if (increment) {\n"
+ " __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);\n"
+ " zFrac1 &= ~((zFrac2 + uint(zFrac2 == 0u)) & uint(roundNearestEven));\n"
+ " } else {\n"
+ " zExp = mix(zExp, 0, (zFrac0 | zFrac1) == 0u);\n"
+ " }\n"
+ " return __packFloat64(zSign, zExp, zFrac0, zFrac1);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__roundAndPackUInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)\n"
+ "{\n"
+ " bool roundNearestEven;\n"
+ " bool increment;\n"
+ " uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;\n"
+ "\n"
+ " roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;\n"
+ "\n"
+ " if (zFrac2 >= 0x80000000u)\n"
+ " increment = false;\n"
+ "\n"
+ " if (!roundNearestEven) {\n"
+ " if (zSign != 0u) {\n"
+ " if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) && (zFrac2 != 0u)) {\n"
+ " increment = false;\n"
+ " }\n"
+ " } else {\n"
+ " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&\n"
+ " (zFrac2 != 0u);\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " if (increment) {\n"
+ " __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);\n"
+ " if ((zFrac0 | zFrac1) != 0u)\n"
+ " zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);\n"
+ " }\n"
+ " return mix(packUint2x32(uvec2(zFrac1, zFrac0)), default_nan,\n"
+ " (zSign !=0u && (zFrac0 | zFrac1) != 0u));\n"
+ "}\n"
+ "\n"
+ "int64_t\n"
+ "__roundAndPackInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)\n"
+ "{\n"
+ " bool roundNearestEven;\n"
+ " bool increment;\n"
+ " int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;\n"
+ " int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;\n"
+ "\n"
+ " roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;\n"
+ "\n"
+ " if (zFrac2 >= 0x80000000u)\n"
+ " increment = false;\n"
+ "\n"
+ " if (!roundNearestEven) {\n"
+ " if (zSign != 0u) {\n"
+ " increment = ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&\n"
+ " (zFrac2 != 0u));\n"
+ " } else {\n"
+ " increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&\n"
+ " (zFrac2 != 0u);\n"
+ " }\n"
+ " }\n"
+ "\n"
+ " if (increment) {\n"
+ " __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);\n"
+ " if ((zFrac0 | zFrac1) != 0u)\n"
+ " zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);\n"
+ " }\n"
+ "\n"
+ " int64_t absZ = mix(int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),\n"
+ " -int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),\n"
+ " (zSign != 0u));\n"
+ " int64_t nan = mix(default_PosNaN, default_NegNaN, bool(zSign));\n"
+ " return mix(absZ, nan, bool(zSign ^ uint(absZ < 0)) && bool(absZ));\n"
+ "}\n"
+ "\n"
+ "/* Returns the number of leading 0 bits before the most-significant 1 bit of\n"
+ " * `a'. If `a' is zero, 32 is returned.\n"
+ " */\n"
+ "int\n"
+ "__countLeadingZeros32(uint a)\n"
+ "{\n"
+ " int shiftCount;\n"
+ " shiftCount = mix(31 - findMSB(a), 32, a == 0u);\n"
+ " return shiftCount;\n"
+ "}\n"
+ "\n"
+ "/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',\n"
+ " * and significand formed by the concatenation of `zSig0' and `zSig1', and\n"
+ " * returns the proper double-precision floating-point value corresponding\n"
+ " * to the abstract input. This routine is just like `__roundAndPackFloat64'\n"
+ " * except that the input significand has fewer bits and does not have to be\n"
+ " * normalized. In all cases, `zExp' must be 1 less than the \"true\" floating-\n"
+ " * point exponent.\n"
+ " */\n"
+ "uint64_t\n"
+ "__normalizeRoundAndPackFloat64(uint zSign,\n"
+ " int zExp,\n"
+ " uint zFrac0,\n"
+ " uint zFrac1)\n"
+ "{\n"
+ " int shiftCount;\n"
+ " uint zFrac2;\n"
+ "\n"
+ " if (zFrac0 == 0u) {\n"
+ " zExp -= 32;\n"
+ " zFrac0 = zFrac1;\n"
+ " zFrac1 = 0u;\n"
+ " }\n"
+ "\n"
+ " shiftCount = __countLeadingZeros32(zFrac0) - 11;\n"
+ " if (0 <= shiftCount) {\n"
+ " zFrac2 = 0u;\n"
+ " __shortShift64Left(zFrac0, zFrac1, shiftCount, zFrac0, zFrac1);\n"
+ " } else {\n"
+ " __shift64ExtraRightJamming(\n"
+ " zFrac0, zFrac1, 0u, -shiftCount, zFrac0, zFrac1, zFrac2);\n"
+ " }\n"
+ " zExp -= shiftCount;\n"
+ " return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);\n"
+ "}\n"
+ "\n"
+ "/* Takes two double-precision floating-point values `a' and `b', one of which\n"
+ " * is a NaN, and returns the appropriate NaN result.\n"
+ " */\n"
+ "uint64_t\n"
+ "__propagateFloat64NaN(uint64_t __a, uint64_t __b)\n"
+ "{\n"
+ " bool aIsNaN = __is_nan(__a);\n"
+ " bool bIsNaN = __is_nan(__b);\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " uvec2 b = unpackUint2x32(__b);\n"
+ " a.y |= 0x00080000u;\n"
+ " b.y |= 0x00080000u;\n"
+ "\n"
+ " return packUint2x32(mix(b, mix(a, b, bvec2(bIsNaN, bIsNaN)), bvec2(aIsNaN, aIsNaN)));\n"
+ "}\n"
+ "\n"
+ "/* Returns the result of adding the double-precision floating-point values\n"
+ " * `a' and `b'. The operation is performed according to the IEEE Standard for\n"
+ " * Floating-Point Arithmetic.\n"
+ " */\n"
+ "uint64_t\n"
+ "__fadd64(uint64_t a, uint64_t b)\n"
+ "{\n"
+ " uint aSign = __extractFloat64Sign(a);\n"
+ " uint bSign = __extractFloat64Sign(b);\n"
+ " uint aFracLo = __extractFloat64FracLo(a);\n"
+ " uint aFracHi = __extractFloat64FracHi(a);\n"
+ " uint bFracLo = __extractFloat64FracLo(b);\n"
+ " uint bFracHi = __extractFloat64FracHi(b);\n"
+ " int aExp = __extractFloat64Exp(a);\n"
+ " int bExp = __extractFloat64Exp(b);\n"
+ " uint zFrac0 = 0u;\n"
+ " uint zFrac1 = 0u;\n"
+ " int expDiff = aExp - bExp;\n"
+ " if (aSign == bSign) {\n"
+ " uint zFrac2 = 0u;\n"
+ " int zExp;\n"
+ " bool orig_exp_diff_is_zero = (expDiff == 0);\n"
+ "\n"
+ " if (orig_exp_diff_is_zero) {\n"
+ " if (aExp == 0x7FF) {\n"
+ " bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;\n"
+ " return mix(a, __propagateFloat64NaN(a, b), propagate);\n"
+ " }\n"
+ " __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n"
+ " if (aExp == 0)\n"
+ " return __packFloat64(aSign, 0, zFrac0, zFrac1);\n"
+ " zFrac2 = 0u;\n"
+ " zFrac0 |= 0x00200000u;\n"
+ " zExp = aExp;\n"
+ " __shift64ExtraRightJamming(\n"
+ " zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);\n"
+ " } else if (0 < expDiff) {\n"
+ " if (aExp == 0x7FF) {\n"
+ " bool propagate = (aFracHi | aFracLo) != 0u;\n"
+ " return mix(a, __propagateFloat64NaN(a, b), propagate);\n"
+ " }\n"
+ "\n"
+ " expDiff = mix(expDiff, expDiff - 1, bExp == 0);\n"
+ " bFracHi = mix(bFracHi | 0x00100000u, bFracHi, bExp == 0);\n"
+ " __shift64ExtraRightJamming(\n"
+ " bFracHi, bFracLo, 0u, expDiff, bFracHi, bFracLo, zFrac2);\n"
+ " zExp = aExp;\n"
+ " } else if (expDiff < 0) {\n"
+ " if (bExp == 0x7FF) {\n"
+ " bool propagate = (bFracHi | bFracLo) != 0u;\n"
+ " return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);\n"
+ " }\n"
+ " expDiff = mix(expDiff, expDiff + 1, aExp == 0);\n"
+ " aFracHi = mix(aFracHi | 0x00100000u, aFracHi, aExp == 0);\n"
+ " __shift64ExtraRightJamming(\n"
+ " aFracHi, aFracLo, 0u, - expDiff, aFracHi, aFracLo, zFrac2);\n"
+ " zExp = bExp;\n"
+ " }\n"
+ " if (!orig_exp_diff_is_zero) {\n"
+ " aFracHi |= 0x00100000u;\n"
+ " __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n"
+ " --zExp;\n"
+ " if (!(zFrac0 < 0x00200000u)) {\n"
+ " __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);\n"
+ " ++zExp;\n"
+ " }\n"
+ " }\n"
+ " return __roundAndPackFloat64(aSign, zExp, zFrac0, zFrac1, zFrac2);\n"
+ "\n"
+ " } else {\n"
+ " int zExp;\n"
+ "\n"
+ " __shortShift64Left(aFracHi, aFracLo, 10, aFracHi, aFracLo);\n"
+ " __shortShift64Left(bFracHi, bFracLo, 10, bFracHi, bFracLo);\n"
+ " if (0 < expDiff) {\n"
+ " if (aExp == 0x7FF) {\n"
+ " bool propagate = (aFracHi | aFracLo) != 0u;\n"
+ " return mix(a, __propagateFloat64NaN(a, b), propagate);\n"
+ " }\n"
+ " expDiff = mix(expDiff, expDiff - 1, bExp == 0);\n"
+ " bFracHi = mix(bFracHi | 0x40000000u, bFracHi, bExp == 0);\n"
+ " __shift64RightJamming(bFracHi, bFracLo, expDiff, bFracHi, bFracLo);\n"
+ " aFracHi |= 0x40000000u;\n"
+ " __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n"
+ " zExp = aExp;\n"
+ " --zExp;\n"
+ " return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);\n"
+ " }\n"
+ " if (expDiff < 0) {\n"
+ " if (bExp == 0x7FF) {\n"
+ " bool propagate = (bFracHi | bFracLo) != 0u;\n"
+ " return mix(__packFloat64(aSign ^ 1u, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);\n"
+ " }\n"
+ " expDiff = mix(expDiff, expDiff + 1, aExp == 0);\n"
+ " aFracHi = mix(aFracHi | 0x40000000u, aFracHi, aExp == 0);\n"
+ " __shift64RightJamming(aFracHi, aFracLo, - expDiff, aFracHi, aFracLo);\n"
+ " bFracHi |= 0x40000000u;\n"
+ " __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);\n"
+ " zExp = bExp;\n"
+ " aSign ^= 1u;\n"
+ " --zExp;\n"
+ " return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);\n"
+ " }\n"
+ " if (aExp == 0x7FF) {\n"
+ " bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;\n"
+ " return mix(0xFFFFFFFFFFFFFFFFUL, __propagateFloat64NaN(a, b), propagate);\n"
+ " }\n"
+ " bExp = mix(bExp, 1, aExp == 0);\n"
+ " aExp = mix(aExp, 1, aExp == 0);\n"
+ " bool zexp_normal = false;\n"
+ " bool blta = true;\n"
+ " if (bFracHi < aFracHi) {\n"
+ " __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n"
+ " zexp_normal = true;\n"
+ " }\n"
+ " else if (aFracHi < bFracHi) {\n"
+ " __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);\n"
+ " blta = false;\n"
+ " zexp_normal = true;\n"
+ " }\n"
+ " else if (bFracLo < aFracLo) {\n"
+ " __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);\n"
+ " zexp_normal = true;\n"
+ " }\n"
+ " else if (aFracLo < bFracLo) {\n"
+ " __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);\n"
+ " blta = false;\n"
+ " zexp_normal = true;\n"
+ " }\n"
+ " zExp = mix(bExp, aExp, blta);\n"
+ " aSign = mix(aSign ^ 1u, aSign, blta);\n"
+ " uint64_t retval_0 = __packFloat64(uint(FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN), 0, 0u, 0u);\n"
+ " uint64_t retval_1 = __normalizeRoundAndPackFloat64(aSign, zExp - 11, zFrac0, zFrac1);\n"
+ " return mix(retval_0, retval_1, zexp_normal);\n"
+ " }\n"
+ "}\n"
+ "\n"
+ "/* Multiplies `a' by `b' to obtain a 64-bit product. The product is broken\n"
+ " * into two 32-bit pieces which are stored at the locations pointed to by\n"
+ " * `z0Ptr' and `z1Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__mul32To64(uint a, uint b, out uint z0Ptr, out uint z1Ptr)\n"
+ "{\n"
+ " uint aLow = a & 0x0000FFFFu;\n"
+ " uint aHigh = a>>16;\n"
+ " uint bLow = b & 0x0000FFFFu;\n"
+ " uint bHigh = b>>16;\n"
+ " uint z1 = aLow * bLow;\n"
+ " uint zMiddleA = aLow * bHigh;\n"
+ " uint zMiddleB = aHigh * bLow;\n"
+ " uint z0 = aHigh * bHigh;\n"
+ " zMiddleA += zMiddleB;\n"
+ " z0 += ((uint(zMiddleA < zMiddleB)) << 16) + (zMiddleA >> 16);\n"
+ " zMiddleA <<= 16;\n"
+ " z1 += zMiddleA;\n"
+ " z0 += uint(z1 < zMiddleA);\n"
+ " z1Ptr = z1;\n"
+ " z0Ptr = z0;\n"
+ "}\n"
+ "\n"
+ "/* Multiplies the 64-bit value formed by concatenating `a0' and `a1' to the\n"
+ " * 64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit\n"
+ " * product. The product is broken into four 32-bit pieces which are stored at\n"
+ " * the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__mul64To128(uint a0, uint a1, uint b0, uint b1,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr,\n"
+ " out uint z2Ptr,\n"
+ " out uint z3Ptr)\n"
+ "{\n"
+ " uint z0 = 0u;\n"
+ " uint z1 = 0u;\n"
+ " uint z2 = 0u;\n"
+ " uint z3 = 0u;\n"
+ " uint more1 = 0u;\n"
+ " uint more2 = 0u;\n"
+ "\n"
+ " __mul32To64(a1, b1, z2, z3);\n"
+ " __mul32To64(a1, b0, z1, more2);\n"
+ " __add64(z1, more2, 0u, z2, z1, z2);\n"
+ " __mul32To64(a0, b0, z0, more1);\n"
+ " __add64(z0, more1, 0u, z1, z0, z1);\n"
+ " __mul32To64(a0, b1, more1, more2);\n"
+ " __add64(more1, more2, 0u, z2, more1, z2);\n"
+ " __add64(z0, z1, 0u, more1, z0, z1);\n"
+ " z3Ptr = z3;\n"
+ " z2Ptr = z2;\n"
+ " z1Ptr = z1;\n"
+ " z0Ptr = z0;\n"
+ "}\n"
+ "\n"
+ "/* Normalizes the subnormal double-precision floating-point value represented\n"
+ " * by the denormalized significand formed by the concatenation of `aFrac0' and\n"
+ " * `aFrac1'. The normalized exponent is stored at the location pointed to by\n"
+ " * `zExpPtr'. The most significant 21 bits of the normalized significand are\n"
+ " * stored at the location pointed to by `zFrac0Ptr', and the least significant\n"
+ " * 32 bits of the normalized significand are stored at the location pointed to\n"
+ " * by `zFrac1Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__normalizeFloat64Subnormal(uint aFrac0, uint aFrac1,\n"
+ " out int zExpPtr,\n"
+ " out uint zFrac0Ptr,\n"
+ " out uint zFrac1Ptr)\n"
+ "{\n"
+ " int shiftCount;\n"
+ " uint temp_zfrac0, temp_zfrac1;\n"
+ " shiftCount = __countLeadingZeros32(mix(aFrac0, aFrac1, aFrac0 == 0u)) - 11;\n"
+ " zExpPtr = mix(1 - shiftCount, -shiftCount - 31, aFrac0 == 0u);\n"
+ "\n"
+ " temp_zfrac0 = mix(aFrac1<<shiftCount, aFrac1>>(-shiftCount), shiftCount < 0);\n"
+ " temp_zfrac1 = mix(0u, aFrac1<<(shiftCount & 31), shiftCount < 0);\n"
+ "\n"
+ " __shortShift64Left(aFrac0, aFrac1, shiftCount, zFrac0Ptr, zFrac1Ptr);\n"
+ "\n"
+ " zFrac0Ptr = mix(zFrac0Ptr, temp_zfrac0, aFrac0 == 0);\n"
+ " zFrac1Ptr = mix(zFrac1Ptr, temp_zfrac1, aFrac0 == 0);\n"
+ "}\n"
+ "\n"
+ "/* Returns the result of multiplying the double-precision floating-point values\n"
+ " * `a' and `b'. The operation is performed according to the IEEE Standard for\n"
+ " * Floating-Point Arithmetic.\n"
+ " */\n"
+ "uint64_t\n"
+ "__fmul64(uint64_t a, uint64_t b)\n"
+ "{\n"
+ " uint zFrac0 = 0u;\n"
+ " uint zFrac1 = 0u;\n"
+ " uint zFrac2 = 0u;\n"
+ " uint zFrac3 = 0u;\n"
+ " int zExp;\n"
+ "\n"
+ " uint aFracLo = __extractFloat64FracLo(a);\n"
+ " uint aFracHi = __extractFloat64FracHi(a);\n"
+ " uint bFracLo = __extractFloat64FracLo(b);\n"
+ " uint bFracHi = __extractFloat64FracHi(b);\n"
+ " int aExp = __extractFloat64Exp(a);\n"
+ " uint aSign = __extractFloat64Sign(a);\n"
+ " int bExp = __extractFloat64Exp(b);\n"
+ " uint bSign = __extractFloat64Sign(b);\n"
+ " uint zSign = aSign ^ bSign;\n"
+ " if (aExp == 0x7FF) {\n"
+ " if (((aFracHi | aFracLo) != 0u) ||\n"
+ " ((bExp == 0x7FF) && ((bFracHi | bFracLo) != 0u))) {\n"
+ " return __propagateFloat64NaN(a, b);\n"
+ " }\n"
+ " if ((uint(bExp) | bFracHi | bFracLo) == 0u)\n"
+ " return 0xFFFFFFFFFFFFFFFFUL;\n"
+ " return __packFloat64(zSign, 0x7FF, 0u, 0u);\n"
+ " }\n"
+ " if (bExp == 0x7FF) {\n"
+ " if ((bFracHi | bFracLo) != 0u)\n"
+ " return __propagateFloat64NaN(a, b);\n"
+ " if ((uint(aExp) | aFracHi | aFracLo) == 0u)\n"
+ " return 0xFFFFFFFFFFFFFFFFUL;\n"
+ " return __packFloat64(zSign, 0x7FF, 0u, 0u);\n"
+ " }\n"
+ " if (aExp == 0) {\n"
+ " if ((aFracHi | aFracLo) == 0u)\n"
+ " return __packFloat64(zSign, 0, 0u, 0u);\n"
+ " __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);\n"
+ " }\n"
+ " if (bExp == 0) {\n"
+ " if ((bFracHi | bFracLo) == 0u)\n"
+ " return __packFloat64(zSign, 0, 0u, 0u);\n"
+ " __normalizeFloat64Subnormal(bFracHi, bFracLo, bExp, bFracHi, bFracLo);\n"
+ " }\n"
+ " zExp = aExp + bExp - 0x400;\n"
+ " aFracHi |= 0x00100000u;\n"
+ " __shortShift64Left(bFracHi, bFracLo, 12, bFracHi, bFracLo);\n"
+ " __mul64To128(\n"
+ " aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1, zFrac2, zFrac3);\n"
+ " __add64(zFrac0, zFrac1, aFracHi, aFracLo, zFrac0, zFrac1);\n"
+ " zFrac2 |= uint(zFrac3 != 0u);\n"
+ " if (0x00200000u <= zFrac0) {\n"
+ " __shift64ExtraRightJamming(\n"
+ " zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);\n"
+ " ++zExp;\n"
+ " }\n"
+ " return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__ffma64(uint64_t a, uint64_t b, uint64_t c)\n"
+ "{\n"
+ " return __fadd64(__fmul64(a, b), c);\n"
+ "}\n"
+ "\n"
+ "/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the\n"
+ " * number of bits given in `count'. Any bits shifted off are lost. The value\n"
+ " * of `count' can be arbitrarily large; in particular, if `count' is greater\n"
+ " * than 64, the result will be 0. The result is broken into two 32-bit pieces\n"
+ " * which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__shift64Right(uint a0, uint a1,\n"
+ " int count,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr)\n"
+ "{\n"
+ " uint z0;\n"
+ " uint z1;\n"
+ " int negCount = (-count) & 31;\n"
+ "\n"
+ " z0 = 0u;\n"
+ " z0 = mix(z0, (a0 >> count), count < 32);\n"
+ " z0 = mix(z0, a0, count == 0);\n"
+ "\n"
+ " z1 = mix(0u, (a0 >> (count & 31)), count < 64);\n"
+ " z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);\n"
+ " z1 = mix(z1, a0, count == 0);\n"
+ "\n"
+ " z1Ptr = z1;\n"
+ " z0Ptr = z0;\n"
+ "}\n"
+ "\n"
+ "/* Returns the result of converting the double-precision floating-point value\n"
+ " * `a' to the unsigned integer format. The conversion is performed according\n"
+ " * to the IEEE Standard for Floating-Point Arithmetic.\n"
+ " */\n"
+ "uint\n"
+ "__fp64_to_uint(uint64_t a)\n"
+ "{\n"
+ " uint aFracLo = __extractFloat64FracLo(a);\n"
+ " uint aFracHi = __extractFloat64FracHi(a);\n"
+ " int aExp = __extractFloat64Exp(a);\n"
+ " uint aSign = __extractFloat64Sign(a);\n"
+ "\n"
+ " if ((aExp == 0x7FF) && ((aFracHi | aFracLo) != 0u))\n"
+ " return 0xFFFFFFFFu;\n"
+ "\n"
+ " aFracHi |= mix(0u, 0x00100000u, aExp != 0);\n"
+ "\n"
+ " int shiftDist = 0x427 - aExp;\n"
+ " if (0 < shiftDist)\n"
+ " __shift64RightJamming(aFracHi, aFracLo, shiftDist, aFracHi, aFracLo);\n"
+ "\n"
+ " if ((aFracHi & 0xFFFFF000u) != 0u)\n"
+ " return mix(~0u, 0u, (aSign != 0u));\n"
+ "\n"
+ " uint z = 0u;\n"
+ " uint zero = 0u;\n"
+ " __shift64Right(aFracHi, aFracLo, 12, zero, z);\n"
+ "\n"
+ " uint expt = mix(~0u, 0u, (aSign != 0u));\n"
+ "\n"
+ " return mix(z, expt, (aSign != 0u) && (z != 0u));\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__uint_to_fp64(uint a)\n"
+ "{\n"
+ " if (a == 0u)\n"
+ " return 0ul;\n"
+ "\n"
+ " int shiftDist = __countLeadingZeros32(a) + 21;\n"
+ "\n"
+ " uint aHigh = 0u;\n"
+ " uint aLow = 0u;\n"
+ " int negCount = (- shiftDist) & 31;\n"
+ "\n"
+ " aHigh = mix(0u, a<< shiftDist - 32, shiftDist < 64);\n"
+ " aLow = 0u;\n"
+ " aHigh = mix(aHigh, 0u, shiftDist == 0);\n"
+ " aLow = mix(aLow, a, shiftDist ==0);\n"
+ " aHigh = mix(aHigh, a >> negCount, shiftDist < 32);\n"
+ " aLow = mix(aLow, a << shiftDist, shiftDist < 32);\n"
+ "\n"
+ " return __packFloat64(0u, 0x432 - shiftDist, aHigh, aLow);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__uint64_to_fp64(uint64_t a)\n"
+ "{\n"
+ " if (a == 0u)\n"
+ " return 0ul;\n"
+ "\n"
+ " uvec2 aFrac = unpackUint2x32(a);\n"
+ " uint aFracLo = __extractFloat64FracLo(a);\n"
+ " uint aFracHi = __extractFloat64FracHi(a);\n"
+ "\n"
+ " if ((aFracHi & 0x80000000u) != 0u) {\n"
+ " __shift64RightJamming(aFracHi, aFracLo, 1, aFracHi, aFracLo);\n"
+ " return __roundAndPackFloat64(0, 0x433, aFracHi, aFracLo, 0u);\n"
+ " } else {\n"
+ " return __normalizeRoundAndPackFloat64(0, 0x432, aFrac.y, aFrac.x);\n"
+ " }\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__fp64_to_uint64(uint64_t a)\n"
+ "{\n"
+ " uint aFracLo = __extractFloat64FracLo(a);\n"
+ " uint aFracHi = __extractFloat64FracHi(a);\n"
+ " int aExp = __extractFloat64Exp(a);\n"
+ " uint aSign = __extractFloat64Sign(a);\n"
+ " uint zFrac2 = 0u;\n"
+ " uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;\n"
+ "\n"
+ " aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);\n"
+ " int shiftCount = 0x433 - aExp;\n"
+ "\n"
+ " if ( shiftCount <= 0 ) {\n"
+ " if (shiftCount < -11 && aExp == 0x7FF) {\n"
+ " if ((aFracHi | aFracLo) != 0u)\n"
+ " return __propagateFloat64NaN(a, a);\n"
+ " return mix(default_nan, a, aSign == 0u);\n"
+ " }\n"
+ " __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);\n"
+ " } else {\n"
+ " __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,\n"
+ " aFracHi, aFracLo, zFrac2);\n"
+ " }\n"
+ " return __roundAndPackUInt64(aSign, aFracHi, aFracLo, zFrac2);\n"
+ "}\n"
+ "\n"
+ "int64_t\n"
+ "__fp64_to_int64(uint64_t a)\n"
+ "{\n"
+ " uint zFrac2 = 0u;\n"
+ " uint aFracLo = __extractFloat64FracLo(a);\n"
+ " uint aFracHi = __extractFloat64FracHi(a);\n"
+ " int aExp = __extractFloat64Exp(a);\n"
+ " uint aSign = __extractFloat64Sign(a);\n"
+ " int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;\n"
+ " int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;\n"
+ "\n"
+ " aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);\n"
+ " int shiftCount = 0x433 - aExp;\n"
+ "\n"
+ " if (shiftCount <= 0) {\n"
+ " if (shiftCount < -11 && aExp == 0x7FF) {\n"
+ " if ((aFracHi | aFracLo) != 0u)\n"
+ " return default_NegNaN;\n"
+ " return mix(default_NegNaN, default_PosNaN, aSign == 0u);\n"
+ " }\n"
+ " __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);\n"
+ " } else {\n"
+ " __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,\n"
+ " aFracHi, aFracLo, zFrac2);\n"
+ " }\n"
+ "\n"
+ " return __roundAndPackInt64(aSign, aFracHi, aFracLo, zFrac2);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__fp32_to_uint64(float f)\n"
+ "{\n"
+ " uint a = floatBitsToUint(f);\n"
+ " uint aFrac = a & 0x007FFFFFu;\n"
+ " int aExp = int((a>>23) & 0xFFu);\n"
+ " uint aSign = a>>31;\n"
+ " uint zFrac0 = 0u;\n"
+ " uint zFrac1 = 0u;\n"
+ " uint zFrac2 = 0u;\n"
+ " uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;\n"
+ " int shiftCount = 0xBE - aExp;\n"
+ "\n"
+ " if (shiftCount <0) {\n"
+ " if (aExp == 0xFF)\n"
+ " return default_nan;\n"
+ " }\n"
+ "\n"
+ " aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);\n"
+ " __shortShift64Left(aFrac, 0, 40, zFrac0, zFrac1);\n"
+ "\n"
+ " if (shiftCount != 0) {\n"
+ " __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,\n"
+ " zFrac0, zFrac1, zFrac2);\n"
+ " }\n"
+ "\n"
+ " return __roundAndPackUInt64(aSign, zFrac0, zFrac1, zFrac2);\n"
+ "}\n"
+ "\n"
+ "int64_t\n"
+ "__fp32_to_int64(float f)\n"
+ "{\n"
+ " uint a = floatBitsToUint(f);\n"
+ " uint aFrac = a & 0x007FFFFFu;\n"
+ " int aExp = int((a>>23) & 0xFFu);\n"
+ " uint aSign = a>>31;\n"
+ " uint zFrac0 = 0u;\n"
+ " uint zFrac1 = 0u;\n"
+ " uint zFrac2 = 0u;\n"
+ " int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;\n"
+ " int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;\n"
+ " int shiftCount = 0xBE - aExp;\n"
+ "\n"
+ " if (shiftCount <0) {\n"
+ " if (aExp == 0xFF && aFrac != 0u)\n"
+ " return default_NegNaN;\n"
+ " return mix(default_NegNaN, default_PosNaN, aSign == 0u);\n"
+ " }\n"
+ "\n"
+ " aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);\n"
+ " __shortShift64Left(aFrac, 0, 40, zFrac0, zFrac1);\n"
+ "\n"
+ " if (shiftCount != 0) {\n"
+ " __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,\n"
+ " zFrac0, zFrac1, zFrac2);\n"
+ " }\n"
+ "\n"
+ " return __roundAndPackInt64(aSign, zFrac0, zFrac1, zFrac2);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__int64_to_fp64(int64_t a)\n"
+ "{\n"
+ " if (a==0)\n"
+ " return 0ul;\n"
+ "\n"
+ " uint64_t absA = mix(uint64_t(a), uint64_t(-a), a < 0);\n"
+ " uint aFracHi = __extractFloat64FracHi(absA);\n"
+ " uvec2 aFrac = unpackUint2x32(absA);\n"
+ " uint zSign = uint(a < 0);\n"
+ "\n"
+ " if ((aFracHi & 0x80000000u) != 0u) {\n"
+ " return mix(0ul, __packFloat64(1, 0x434, 0u, 0u), a < 0);\n"
+ " }\n"
+ "\n"
+ " return __normalizeRoundAndPackFloat64(zSign, 0x432, aFrac.y, aFrac.x);\n"
+ "}\n"
+ "\n"
+ "/* Returns the result of converting the double-precision floating-point value\n"
+ " * `a' to the 32-bit two's complement integer format. The conversion is\n"
+ " * performed according to the IEEE Standard for Floating-Point Arithmetic---\n"
+ " * which means in particular that the conversion is rounded according to the\n"
+ " * current rounding mode. If `a' is a NaN, the largest positive integer is\n"
+ " * returned. Otherwise, if the conversion overflows, the largest integer with\n"
+ " * the same sign as `a' is returned.\n"
+ " */\n"
+ "int\n"
+ "__fp64_to_int(uint64_t a)\n"
+ "{\n"
+ " uint aFracLo = __extractFloat64FracLo(a);\n"
+ " uint aFracHi = __extractFloat64FracHi(a);\n"
+ " int aExp = __extractFloat64Exp(a);\n"
+ " uint aSign = __extractFloat64Sign(a);\n"
+ "\n"
+ " uint absZ = 0u;\n"
+ " uint aFracExtra = 0u;\n"
+ " int shiftCount = aExp - 0x413;\n"
+ "\n"
+ " if (0 <= shiftCount) {\n"
+ " if (0x41E < aExp) {\n"
+ " if ((aExp == 0x7FF) && bool(aFracHi | aFracLo))\n"
+ " aSign = 0u;\n"
+ " return mix(0x7FFFFFFF, 0x80000000, bool(aSign));\n"
+ " }\n"
+ " __shortShift64Left(aFracHi | 0x00100000u, aFracLo, shiftCount, absZ, aFracExtra);\n"
+ " } else {\n"
+ " if (aExp < 0x3FF)\n"
+ " return 0;\n"
+ "\n"
+ " aFracHi |= 0x00100000u;\n"
+ " aFracExtra = ( aFracHi << (shiftCount & 31)) | aFracLo;\n"
+ " absZ = aFracHi >> (- shiftCount);\n"
+ " }\n"
+ "\n"
+ " int z = mix(int(absZ), -int(absZ), (aSign != 0u));\n"
+ " int nan = mix(0x7FFFFFFF, 0x80000000, bool(aSign));\n"
+ " return mix(z, nan, bool(aSign ^ uint(z < 0)) && bool(z));\n"
+ "}\n"
+ "\n"
+ "/* Returns the result of converting the 32-bit two's complement integer `a'\n"
+ " * to the double-precision floating-point format. The conversion is performed\n"
+ " * according to the IEEE Standard for Floating-Point Arithmetic.\n"
+ " */\n"
+ "uint64_t\n"
+ "__int_to_fp64(int a)\n"
+ "{\n"
+ " uint zFrac0 = 0u;\n"
+ " uint zFrac1 = 0u;\n"
+ " if (a==0)\n"
+ " return __packFloat64(0u, 0, 0u, 0u);\n"
+ " uint zSign = uint(a < 0);\n"
+ " uint absA = mix(uint(a), uint(-a), a < 0);\n"
+ " int shiftCount = __countLeadingZeros32(absA) - 11;\n"
+ " if (0 <= shiftCount) {\n"
+ " zFrac0 = absA << shiftCount;\n"
+ " zFrac1 = 0u;\n"
+ " } else {\n"
+ " __shift64Right(absA, 0u, -shiftCount, zFrac0, zFrac1);\n"
+ " }\n"
+ " return __packFloat64(zSign, 0x412 - shiftCount, zFrac0, zFrac1);\n"
+ "}\n"
+ "\n"
+ "bool\n"
+ "__fp64_to_bool(uint64_t a)\n"
+ "{\n"
+ " return !__feq64_nonnan(__fabs64(a), 0ul);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__bool_to_fp64(bool a)\n"
+ "{\n"
+ " return __int_to_fp64(int(a));\n"
+ "}\n"
+ "\n"
+ "/* Packs the sign `zSign', exponent `zExp', and significand `zFrac' into a\n"
+ " * single-precision floating-point value, returning the result. After being\n"
+ " * shifted into the proper positions, the three fields are simply added\n"
+ " * together to form the result. This means that any integer portion of `zSig'\n"
+ " * will be added into the exponent. Since a properly normalized significand\n"
+ " * will have an integer portion equal to 1, the `zExp' input should be 1 less\n"
+ " * than the desired result exponent whenever `zFrac' is a complete, normalized\n"
+ " * significand.\n"
+ " */\n"
+ "float\n"
+ "__packFloat32(uint zSign, int zExp, uint zFrac)\n"
+ "{\n"
+ " return uintBitsToFloat((zSign<<31) + (uint(zExp)<<23) + zFrac);\n"
+ "}\n"
+ "\n"
+ "/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',\n"
+ " * and significand `zFrac', and returns the proper single-precision floating-\n"
+ " * point value corresponding to the abstract input. Ordinarily, the abstract\n"
+ " * value is simply rounded and packed into the single-precision format, with\n"
+ " * the inexact exception raised if the abstract input cannot be represented\n"
+ " * exactly. However, if the abstract value is too large, the overflow and\n"
+ " * inexact exceptions are raised and an infinity or maximal finite value is\n"
+ " * returned. If the abstract value is too small, the input value is rounded to\n"
+ " * a subnormal number, and the underflow and inexact exceptions are raised if\n"
+ " * the abstract input cannot be represented exactly as a subnormal single-\n"
+ " * precision floating-point number.\n"
+ " * The input significand `zFrac' has its binary point between bits 30\n"
+ " * and 29, which is 7 bits to the left of the usual location. This shifted\n"
+ " * significand must be normalized or smaller. If `zFrac' is not normalized,\n"
+ " * `zExp' must be 0; in that case, the result returned is a subnormal number,\n"
+ " * and it must not require rounding. In the usual case that `zFrac' is\n"
+ " * normalized, `zExp' must be 1 less than the \"true\" floating-point exponent.\n"
+ " * The handling of underflow and overflow follows the IEEE Standard for\n"
+ " * Floating-Point Arithmetic.\n"
+ " */\n"
+ "float\n"
+ "__roundAndPackFloat32(uint zSign, int zExp, uint zFrac)\n"
+ "{\n"
+ " bool roundNearestEven;\n"
+ " int roundIncrement;\n"
+ " int roundBits;\n"
+ "\n"
+ " roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;\n"
+ " roundIncrement = 0x40;\n"
+ " if (!roundNearestEven) {\n"
+ " if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {\n"
+ " roundIncrement = 0;\n"
+ " } else {\n"
+ " roundIncrement = 0x7F;\n"
+ " if (zSign != 0u) {\n"
+ " if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)\n"
+ " roundIncrement = 0;\n"
+ " } else {\n"
+ " if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN)\n"
+ " roundIncrement = 0;\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ " roundBits = int(zFrac & 0x7Fu);\n"
+ " if (0xFDu <= uint(zExp)) {\n"
+ " if ((0xFD < zExp) || ((zExp == 0xFD) && (int(zFrac) + roundIncrement) < 0))\n"
+ " return __packFloat32(zSign, 0xFF, 0u) - float(roundIncrement == 0);\n"
+ " int count = -zExp;\n"
+ " bool zexp_lt0 = zExp < 0;\n"
+ " uint zFrac_lt0 = mix(uint(zFrac != 0u), (zFrac>>count) | uint((zFrac<<((-count) & 31)) != 0u), (-zExp) < 32);\n"
+ " zFrac = mix(zFrac, zFrac_lt0, zexp_lt0);\n"
+ " roundBits = mix(roundBits, int(zFrac) & 0x7f, zexp_lt0);\n"
+ " zExp = mix(zExp, 0, zexp_lt0);\n"
+ " }\n"
+ " zFrac = (zFrac + uint(roundIncrement))>>7;\n"
+ " zFrac &= ~uint(((roundBits ^ 0x40) == 0) && roundNearestEven);\n"
+ "\n"
+ " return __packFloat32(zSign, mix(zExp, 0, zFrac == 0u), zFrac);\n"
+ "}\n"
+ "\n"
+ "/* Returns the result of converting the double-precision floating-point value\n"
+ " * `a' to the single-precision floating-point format. The conversion is\n"
+ " * performed according to the IEEE Standard for Floating-Point Arithmetic.\n"
+ " */\n"
+ "float\n"
+ "__fp64_to_fp32(uint64_t __a)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " uint zFrac = 0u;\n"
+ " uint allZero = 0u;\n"
+ "\n"
+ " uint aFracLo = __extractFloat64FracLo(__a);\n"
+ " uint aFracHi = __extractFloat64FracHi(__a);\n"
+ " int aExp = __extractFloat64Exp(__a);\n"
+ " uint aSign = __extractFloat64Sign(__a);\n"
+ " if (aExp == 0x7FF) {\n"
+ " __shortShift64Left(a.y, a.x, 12, a.y, a.x);\n"
+ " float rval = uintBitsToFloat((aSign<<31) | 0x7FC00000u | (a.y>>9));\n"
+ " rval = mix(__packFloat32(aSign, 0xFF, 0u), rval, (aFracHi | aFracLo) != 0u);\n"
+ " return rval;\n"
+ " }\n"
+ " __shift64RightJamming(aFracHi, aFracLo, 22, allZero, zFrac);\n"
+ " zFrac = mix(zFrac, zFrac | 0x40000000u, aExp != 0);\n"
+ " return __roundAndPackFloat32(aSign, aExp - 0x381, zFrac);\n"
+ "}\n"
+ "\n"
+ "float\n"
+ "__uint64_to_fp32(uint64_t __a)\n"
+ "{\n"
+ " uint zFrac = 0u;\n"
+ " uvec2 aFrac = unpackUint2x32(__a);\n"
+ " int shiftCount = __countLeadingZeros32(mix(aFrac.y, aFrac.x, aFrac.y == 0u));\n"
+ " shiftCount -= mix(40, 8, aFrac.y == 0u);\n"
+ "\n"
+ " if (0 <= shiftCount) {\n"
+ " __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);\n"
+ " bool is_zero = (aFrac.y | aFrac.x) == 0u;\n"
+ " return mix(__packFloat32(0u, 0x95 - shiftCount, aFrac.x), 0, is_zero);\n"
+ " }\n"
+ "\n"
+ " shiftCount += 7;\n"
+ " __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);\n"
+ " zFrac = mix(aFrac.x<<shiftCount, aFrac.x, shiftCount < 0);\n"
+ " return __roundAndPackFloat32(0u, 0x9C - shiftCount, zFrac);\n"
+ "}\n"
+ "\n"
+ "float\n"
+ "__int64_to_fp32(int64_t __a)\n"
+ "{\n"
+ " uint zFrac = 0u;\n"
+ " uint aSign = uint(__a < 0);\n"
+ " uint64_t absA = mix(uint64_t(__a), uint64_t(-__a), __a < 0);\n"
+ " uvec2 aFrac = unpackUint2x32(absA);\n"
+ " int shiftCount = __countLeadingZeros32(mix(aFrac.y, aFrac.x, aFrac.y == 0u));\n"
+ " shiftCount -= mix(40, 8, aFrac.y == 0u);\n"
+ "\n"
+ " if (0 <= shiftCount) {\n"
+ " __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);\n"
+ " bool is_zero = (aFrac.y | aFrac.x) == 0u;\n"
+ " return mix(__packFloat32(aSign, 0x95 - shiftCount, aFrac.x), 0, absA == 0u);\n"
+ " }\n"
+ "\n"
+ " shiftCount += 7;\n"
+ " __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);\n"
+ " zFrac = mix(aFrac.x<<shiftCount, aFrac.x, shiftCount < 0);\n"
+ " return __roundAndPackFloat32(aSign, 0x9C - shiftCount, zFrac);\n"
+ "}\n"
+ "\n"
+ "/* Returns the result of converting the single-precision floating-point value\n"
+ " * `a' to the double-precision floating-point format.\n"
+ " */\n"
+ "uint64_t\n"
+ "__fp32_to_fp64(float f)\n"
+ "{\n"
+ " uint a = floatBitsToUint(f);\n"
+ " uint aFrac = a & 0x007FFFFFu;\n"
+ " int aExp = int((a>>23) & 0xFFu);\n"
+ " uint aSign = a>>31;\n"
+ " uint zFrac0 = 0u;\n"
+ " uint zFrac1 = 0u;\n"
+ "\n"
+ " if (aExp == 0xFF) {\n"
+ " if (aFrac != 0u) {\n"
+ " uint nanLo = 0u;\n"
+ " uint nanHi = a<<9;\n"
+ " __shift64Right(nanHi, nanLo, 12, nanHi, nanLo);\n"
+ " nanHi |= ((aSign<<31) | 0x7FF80000u);\n"
+ " return packUint2x32(uvec2(nanLo, nanHi));\n"
+ " }\n"
+ " return __packFloat64(aSign, 0x7FF, 0u, 0u);\n"
+ " }\n"
+ "\n"
+ " if (aExp == 0) {\n"
+ " if (aFrac == 0u)\n"
+ " return __packFloat64(aSign, 0, 0u, 0u);\n"
+ " /* Normalize subnormal */\n"
+ " int shiftCount = __countLeadingZeros32(aFrac) - 8;\n"
+ " aFrac <<= shiftCount;\n"
+ " aExp = 1 - shiftCount;\n"
+ " --aExp;\n"
+ " }\n"
+ "\n"
+ " __shift64Right(aFrac, 0u, 3, zFrac0, zFrac1);\n"
+ " return __packFloat64(aSign, aExp + 0x380, zFrac0, zFrac1);\n"
+ "}\n"
+ "\n"
+ "/* Adds the 96-bit value formed by concatenating `a0', `a1', and `a2' to the\n"
+ " * 96-bit value formed by concatenating `b0', `b1', and `b2'. Addition is\n"
+ " * modulo 2^96, so any carry out is lost. The result is broken into three\n"
+ " * 32-bit pieces which are stored at the locations pointed to by `z0Ptr',\n"
+ " * `z1Ptr', and `z2Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__add96(uint a0, uint a1, uint a2,\n"
+ " uint b0, uint b1, uint b2,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr,\n"
+ " out uint z2Ptr)\n"
+ "{\n"
+ " uint z2 = a2 + b2;\n"
+ " uint carry1 = uint(z2 < a2);\n"
+ " uint z1 = a1 + b1;\n"
+ " uint carry0 = uint(z1 < a1);\n"
+ " uint z0 = a0 + b0;\n"
+ " z1 += carry1;\n"
+ " z0 += uint(z1 < carry1);\n"
+ " z0 += carry0;\n"
+ " z2Ptr = z2;\n"
+ " z1Ptr = z1;\n"
+ " z0Ptr = z0;\n"
+ "}\n"
+ "\n"
+ "/* Subtracts the 96-bit value formed by concatenating `b0', `b1', and `b2' from\n"
+ " * the 96-bit value formed by concatenating `a0', `a1', and `a2'. Subtraction\n"
+ " * is modulo 2^96, so any borrow out (carry out) is lost. The result is broken\n"
+ " * into three 32-bit pieces which are stored at the locations pointed to by\n"
+ " * `z0Ptr', `z1Ptr', and `z2Ptr'.\n"
+ " */\n"
+ "void\n"
+ "__sub96(uint a0, uint a1, uint a2,\n"
+ " uint b0, uint b1, uint b2,\n"
+ " out uint z0Ptr,\n"
+ " out uint z1Ptr,\n"
+ " out uint z2Ptr)\n"
+ "{\n"
+ " uint z2 = a2 - b2;\n"
+ " uint borrow1 = uint(a2 < b2);\n"
+ " uint z1 = a1 - b1;\n"
+ " uint borrow0 = uint(a1 < b1);\n"
+ " uint z0 = a0 - b0;\n"
+ " z0 -= uint(z1 < borrow1);\n"
+ " z1 -= borrow1;\n"
+ " z0 -= borrow0;\n"
+ " z2Ptr = z2;\n"
+ " z1Ptr = z1;\n"
+ " z0Ptr = z0;\n"
+ "}\n"
+ "\n"
+ "/* Returns an approximation to the 32-bit integer quotient obtained by dividing\n"
+ " * `b' into the 64-bit value formed by concatenating `a0' and `a1'. The\n"
+ " * divisor `b' must be at least 2^31. If q is the exact quotient truncated\n"
+ " * toward zero, the approximation returned lies between q and q + 2 inclusive.\n"
+ " * If the exact quotient q is larger than 32 bits, the maximum positive 32-bit\n"
+ " * unsigned integer is returned.\n"
+ " */\n"
+ "uint\n"
+ "__estimateDiv64To32(uint a0, uint a1, uint b)\n"
+ "{\n"
+ " uint b0;\n"
+ " uint b1;\n"
+ " uint rem0 = 0u;\n"
+ " uint rem1 = 0u;\n"
+ " uint term0 = 0u;\n"
+ " uint term1 = 0u;\n"
+ " uint z;\n"
+ "\n"
+ " if (b <= a0)\n"
+ " return 0xFFFFFFFFu;\n"
+ " b0 = b>>16;\n"
+ " z = (b0<<16 <= a0) ? 0xFFFF0000u : (a0 / b0)<<16;\n"
+ " __mul32To64(b, z, term0, term1);\n"
+ " __sub64(a0, a1, term0, term1, rem0, rem1);\n"
+ " while (int(rem0) < 0) {\n"
+ " z -= 0x10000u;\n"
+ " b1 = b<<16;\n"
+ " __add64(rem0, rem1, b0, b1, rem0, rem1);\n"
+ " }\n"
+ " rem0 = (rem0<<16) | (rem1>>16);\n"
+ " z |= (b0<<16 <= rem0) ? 0xFFFFu : rem0 / b0;\n"
+ " return z;\n"
+ "}\n"
+ "\n"
+ "uint\n"
+ "__sqrtOddAdjustments(int index)\n"
+ "{\n"
+ " uint res = 0u;\n"
+ " if (index == 0)\n"
+ " res = 0x0004u;\n"
+ " if (index == 1)\n"
+ " res = 0x0022u;\n"
+ " if (index == 2)\n"
+ " res = 0x005Du;\n"
+ " if (index == 3)\n"
+ " res = 0x00B1u;\n"
+ " if (index == 4)\n"
+ " res = 0x011Du;\n"
+ " if (index == 5)\n"
+ " res = 0x019Fu;\n"
+ " if (index == 6)\n"
+ " res = 0x0236u;\n"
+ " if (index == 7)\n"
+ " res = 0x02E0u;\n"
+ " if (index == 8)\n"
+ " res = 0x039Cu;\n"
+ " if (index == 9)\n"
+ " res = 0x0468u;\n"
+ " if (index == 10)\n"
+ " res = 0x0545u;\n"
+ " if (index == 11)\n"
+ " res = 0x631u;\n"
+ " if (index == 12)\n"
+ " res = 0x072Bu;\n"
+ " if (index == 13)\n"
+ " res = 0x0832u;\n"
+ " if (index == 14)\n"
+ " res = 0x0946u;\n"
+ " if (index == 15)\n"
+ " res = 0x0A67u;\n"
+ "\n"
+ " return res;\n"
+ "}\n"
+ "\n"
+ "uint\n"
+ "__sqrtEvenAdjustments(int index)\n"
+ "{\n"
+ " uint res = 0u;\n"
+ " if (index == 0)\n"
+ " res = 0x0A2Du;\n"
+ " if (index == 1)\n"
+ " res = 0x08AFu;\n"
+ " if (index == 2)\n"
+ " res = 0x075Au;\n"
+ " if (index == 3)\n"
+ " res = 0x0629u;\n"
+ " if (index == 4)\n"
+ " res = 0x051Au;\n"
+ " if (index == 5)\n"
+ " res = 0x0429u;\n"
+ " if (index == 6)\n"
+ " res = 0x0356u;\n"
+ " if (index == 7)\n"
+ " res = 0x029Eu;\n"
+ " if (index == 8)\n"
+ " res = 0x0200u;\n"
+ " if (index == 9)\n"
+ " res = 0x0179u;\n"
+ " if (index == 10)\n"
+ " res = 0x0109u;\n"
+ " if (index == 11)\n"
+ " res = 0x00AFu;\n"
+ " if (index == 12)\n"
+ " res = 0x0068u;\n"
+ " if (index == 13)\n"
+ " res = 0x0034u;\n"
+ " if (index == 14)\n"
+ " res = 0x0012u;\n"
+ " if (index == 15)\n"
+ " res = 0x0002u;\n"
+ "\n"
+ " return res;\n"
+ "}\n"
+ "\n"
+ "/* Returns an approximation to the square root of the 32-bit significand given\n"
+ " * by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of\n"
+ " * `aExp' (the least significant bit) is 1, the integer returned approximates\n"
+ " * 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp'\n"
+ " * is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either\n"
+ " * case, the approximation returned lies strictly within +/-2 of the exact\n"
+ " * value.\n"
+ " */\n"
+ "uint\n"
+ "__estimateSqrt32(int aExp, uint a)\n"
+ "{\n"
+ " uint z;\n"
+ "\n"
+ " int index = int(a>>27 & 15u);\n"
+ " if ((aExp & 1) != 0) {\n"
+ " z = 0x4000u + (a>>17) - __sqrtOddAdjustments(index);\n"
+ " z = ((a / z)<<14) + (z<<15);\n"
+ " a >>= 1;\n"
+ " } else {\n"
+ " z = 0x8000u + (a>>17) - __sqrtEvenAdjustments(index);\n"
+ " z = a / z + z;\n"
+ " z = (0x20000u <= z) ? 0xFFFF8000u : (z<<15);\n"
+ " if (z <= a)\n"
+ " return uint(int(a)>>1);\n"
+ " }\n"
+ " return ((__estimateDiv64To32(a, 0u, z))>>1) + (z>>1);\n"
+ "}\n"
+ "\n"
+ "/* Returns the square root of the double-precision floating-point value `a'.\n"
+ " * The operation is performed according to the IEEE Standard for Floating-Point\n"
+ " * Arithmetic.\n"
+ " */\n"
+ "uint64_t\n"
+ "__fsqrt64(uint64_t a)\n"
+ "{\n"
+ " uint zFrac0 = 0u;\n"
+ " uint zFrac1 = 0u;\n"
+ " uint zFrac2 = 0u;\n"
+ " uint doubleZFrac0 = 0u;\n"
+ " uint rem0 = 0u;\n"
+ " uint rem1 = 0u;\n"
+ " uint rem2 = 0u;\n"
+ " uint rem3 = 0u;\n"
+ " uint term0 = 0u;\n"
+ " uint term1 = 0u;\n"
+ " uint term2 = 0u;\n"
+ " uint term3 = 0u;\n"
+ " uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;\n"
+ "\n"
+ " uint aFracLo = __extractFloat64FracLo(a);\n"
+ " uint aFracHi = __extractFloat64FracHi(a);\n"
+ " int aExp = __extractFloat64Exp(a);\n"
+ " uint aSign = __extractFloat64Sign(a);\n"
+ " if (aExp == 0x7FF) {\n"
+ " if ((aFracHi | aFracLo) != 0u)\n"
+ " return __propagateFloat64NaN(a, a);\n"
+ " if (aSign == 0u)\n"
+ " return a;\n"
+ " return default_nan;\n"
+ " }\n"
+ " if (aSign != 0u) {\n"
+ " if ((uint(aExp) | aFracHi | aFracLo) == 0u)\n"
+ " return a;\n"
+ " return default_nan;\n"
+ " }\n"
+ " if (aExp == 0) {\n"
+ " if ((aFracHi | aFracLo) == 0u)\n"
+ " return __packFloat64(0u, 0, 0u, 0u);\n"
+ " __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);\n"
+ " }\n"
+ " int zExp = ((aExp - 0x3FF)>>1) + 0x3FE;\n"
+ " aFracHi |= 0x00100000u;\n"
+ " __shortShift64Left(aFracHi, aFracLo, 11, term0, term1);\n"
+ " zFrac0 = (__estimateSqrt32(aExp, term0)>>1) + 1u;\n"
+ " if (zFrac0 == 0u)\n"
+ " zFrac0 = 0x7FFFFFFFu;\n"
+ " doubleZFrac0 = zFrac0 + zFrac0;\n"
+ " __shortShift64Left(aFracHi, aFracLo, 9 - (aExp & 1), aFracHi, aFracLo);\n"
+ " __mul32To64(zFrac0, zFrac0, term0, term1);\n"
+ " __sub64(aFracHi, aFracLo, term0, term1, rem0, rem1);\n"
+ " while (int(rem0) < 0) {\n"
+ " --zFrac0;\n"
+ " doubleZFrac0 -= 2u;\n"
+ " __add64(rem0, rem1, 0u, doubleZFrac0 | 1u, rem0, rem1);\n"
+ " }\n"
+ " zFrac1 = __estimateDiv64To32(rem1, 0u, doubleZFrac0);\n"
+ " if ((zFrac1 & 0x1FFu) <= 5u) {\n"
+ " if (zFrac1 == 0u)\n"
+ " zFrac1 = 1u;\n"
+ " __mul32To64(doubleZFrac0, zFrac1, term1, term2);\n"
+ " __sub64(rem1, 0u, term1, term2, rem1, rem2);\n"
+ " __mul32To64(zFrac1, zFrac1, term2, term3);\n"
+ " __sub96(rem1, rem2, 0u, 0u, term2, term3, rem1, rem2, rem3);\n"
+ " while (int(rem1) < 0) {\n"
+ " --zFrac1;\n"
+ " __shortShift64Left(0u, zFrac1, 1, term2, term3);\n"
+ " term3 |= 1u;\n"
+ " term2 |= doubleZFrac0;\n"
+ " __add96(rem1, rem2, rem3, 0u, term2, term3, rem1, rem2, rem3);\n"
+ " }\n"
+ " zFrac1 |= uint((rem1 | rem2 | rem3) != 0u);\n"
+ " }\n"
+ " __shift64ExtraRightJamming(zFrac0, zFrac1, 0u, 10, zFrac0, zFrac1, zFrac2);\n"
+ " return __roundAndPackFloat64(0u, zExp, zFrac0, zFrac1, zFrac2);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__ftrunc64(uint64_t __a)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " int aExp = __extractFloat64Exp(__a);\n"
+ " uint zLo;\n"
+ " uint zHi;\n"
+ "\n"
+ " int unbiasedExp = aExp - 1023;\n"
+ " int fracBits = 52 - unbiasedExp;\n"
+ " uint maskLo = mix(~0u << fracBits, 0u, fracBits >= 32);\n"
+ " uint maskHi = mix(~0u << (fracBits - 32), ~0u, fracBits < 33);\n"
+ " zLo = maskLo & a.x;\n"
+ " zHi = maskHi & a.y;\n"
+ "\n"
+ " zLo = mix(zLo, 0u, unbiasedExp < 0);\n"
+ " zHi = mix(zHi, 0u, unbiasedExp < 0);\n"
+ " zLo = mix(zLo, a.x, unbiasedExp > 52);\n"
+ " zHi = mix(zHi, a.y, unbiasedExp > 52);\n"
+ " return packUint2x32(uvec2(zLo, zHi));\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__ffloor64(uint64_t a)\n"
+ "{\n"
+ " bool is_positive = __fge64(a, 0ul);\n"
+ " uint64_t tr = __ftrunc64(a);\n"
+ "\n"
+ " if (is_positive || __feq64(tr, a)) {\n"
+ " return tr;\n"
+ " } else {\n"
+ " return __fadd64(tr, 0xbff0000000000000ul /* -1.0 */);\n"
+ " }\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__fround64(uint64_t __a)\n"
+ "{\n"
+ " uvec2 a = unpackUint2x32(__a);\n"
+ " int unbiasedExp = __extractFloat64Exp(__a) - 1023;\n"
+ " uint aHi = a.y;\n"
+ " uint aLo = a.x;\n"
+ "\n"
+ " if (unbiasedExp < 20) {\n"
+ " if (unbiasedExp < 0) {\n"
+ " aHi &= 0x80000000u;\n"
+ " if (unbiasedExp == -1 && aLo != 0u)\n"
+ " aHi |= (1023u << 20);\n"
+ " aLo = 0u;\n"
+ " } else {\n"
+ " uint maskExp = 0x000FFFFFu >> unbiasedExp;\n"
+ " /* a is an integral value */\n"
+ " if (((aHi & maskExp) == 0u) && (aLo == 0u))\n"
+ " return __a;\n"
+ "\n"
+ " aHi += 0x00080000u >> unbiasedExp;\n"
+ " aHi &= ~maskExp;\n"
+ " aLo = 0u;\n"
+ " }\n"
+ " } else if (unbiasedExp > 51 || unbiasedExp == 1024) {\n"
+ " return __a;\n"
+ " } else {\n"
+ " uint maskExp = 0xFFFFFFFFu >> (unbiasedExp - 20);\n"
+ " if ((aLo & maskExp) == 0u)\n"
+ " return __a;\n"
+ " uint tmp = aLo + (1u << (51 - unbiasedExp));\n"
+ " if(tmp < aLo)\n"
+ " aHi += 1u;\n"
+ " aLo = tmp;\n"
+ " aLo &= ~maskExp;\n"
+ " }\n"
+ "\n"
+ " a.x = aLo;\n"
+ " a.y = aHi;\n"
+ " return packUint2x32(a);\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__fmin64(uint64_t a, uint64_t b)\n"
+ "{\n"
+ " if (__is_nan(a)) return b;\n"
+ " if (__is_nan(b)) return a;\n"
+ "\n"
+ " if (__flt64_nonnan(a, b)) return a;\n"
+ " return b;\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__fmax64(uint64_t a, uint64_t b)\n"
+ "{\n"
+ " if (__is_nan(a)) return b;\n"
+ " if (__is_nan(b)) return a;\n"
+ "\n"
+ " if (__flt64_nonnan(a, b)) return b;\n"
+ " return a;\n"
+ "}\n"
+ "\n"
+ "uint64_t\n"
+ "__ffract64(uint64_t a)\n"
+ "{\n"
+ " return __fadd64(a, __fneg64(__ffloor64(a)));\n"
+ "}\n"
+ ""
+ ;
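
The trunc/floor pair above rests on two facts: clearing the low (52 - unbiasedExp) fraction bits of an IEEE-754 double truncates it toward zero, and floor is then trunc minus one for negative non-integral inputs. A minimal standalone C sketch of both identities, checked against native libm; every name here is local to the sketch, not a Mesa identifier:

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    /* Truncate by masking fraction bits, as __ftrunc64 does on uvec2 halves. */
    static double trunc_by_mask(double x)
    {
       uint64_t bits;
       memcpy(&bits, &x, sizeof bits);
       int unbiasedExp = (int)((bits >> 52) & 0x7ffu) - 1023;
       if (unbiasedExp < 0)
          return copysign(0.0, x);     /* |x| < 1: every bit is fraction */
       if (unbiasedExp > 51)
          return x;                    /* no fraction bits remain */
       bits &= ~0ull << (52 - unbiasedExp);
       memcpy(&x, &bits, sizeof x);
       return x;
    }

    int main(void)
    {
       const double xs[] = { 2.75, -2.75, -3.0, 1e17, -0.25 };
       for (int i = 0; i < 5; i++) {
          double a = xs[i];
          double t = trunc_by_mask(a);
          assert(t == trunc(a));
          /* floor from trunc, mirroring __ffloor64 */
          double f = (a >= 0.0 || t == a) ? t : t - 1.0;
          assert(f == floor(a));
       }
       return 0;
    }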
diff --git a/lib/mesa/src/compiler/glsl/gl_nir_linker.c b/lib/mesa/src/compiler/glsl/gl_nir_linker.c
index 547549bc4..92e90714e 100644
--- a/lib/mesa/src/compiler/glsl/gl_nir_linker.c
+++ b/lib/mesa/src/compiler/glsl/gl_nir_linker.c
@@ -44,9 +44,7 @@ nir_build_program_resource_list(struct gl_context *ctx,
prog->data->NumProgramResourceList = 0;
}
- struct set *resource_set = _mesa_set_create(NULL,
- _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ struct set *resource_set = _mesa_pointer_set_create(NULL);
/* Add uniforms
*
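
The helper introduced here is pure shorthand; a sketch of the equivalence, assuming Mesa's util/set.h (the wrapper name and the spelled-out arguments come straight from the hunk above):

    #include "util/set.h"

    static struct set *
    make_pointer_set(void *mem_ctx)
    {
       /* _mesa_pointer_set_create(mem_ctx) is equivalent to the longer
        * form this series removes across the tree:
        *
        *    _mesa_set_create(mem_ctx, _mesa_hash_pointer,
        *                     _mesa_key_pointer_equal);
        */
       return _mesa_pointer_set_create(mem_ctx);
    }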
diff --git a/lib/mesa/src/compiler/glsl/gl_nir_lower_atomics.c b/lib/mesa/src/compiler/glsl/gl_nir_lower_atomics.c
index 36e273c45..60642e713 100644
--- a/lib/mesa/src/compiler/glsl/gl_nir_lower_atomics.c
+++ b/lib/mesa/src/compiler/glsl/gl_nir_lower_atomics.c
@@ -101,8 +101,8 @@ lower_deref_instr(nir_builder *b, nir_intrinsic_instr *instr,
nir_variable *var = nir_deref_instr_get_variable(deref);
if (var->data.mode != nir_var_uniform &&
- var->data.mode != nir_var_shader_storage &&
- var->data.mode != nir_var_shared)
+ var->data.mode != nir_var_mem_ssbo &&
+ var->data.mode != nir_var_mem_shared)
return false; /* atomics passed as function arguments can't be lowered */
const unsigned uniform_loc = var->data.location;
diff --git a/lib/mesa/src/compiler/glsl/gl_nir_lower_samplers.c b/lib/mesa/src/compiler/glsl/gl_nir_lower_samplers.c
index 1ee075cfd..85535c074 100644
--- a/lib/mesa/src/compiler/glsl/gl_nir_lower_samplers.c
+++ b/lib/mesa/src/compiler/glsl/gl_nir_lower_samplers.c
@@ -33,15 +33,13 @@
static void
lower_tex_src_to_offset(nir_builder *b,
- nir_tex_instr *instr, unsigned src_idx,
- const struct gl_shader_program *shader_program)
+ nir_tex_instr *instr, unsigned src_idx)
{
nir_ssa_def *index = NULL;
unsigned base_index = 0;
unsigned array_elements = 1;
nir_tex_src *src = &instr->src[src_idx];
bool is_sampler = src->src_type == nir_tex_src_sampler_deref;
- unsigned location = 0;
/* We compute first the offsets */
nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
@@ -50,35 +48,24 @@ lower_tex_src_to_offset(nir_builder *b,
nir_deref_instr *parent =
nir_instr_as_deref(deref->parent.ssa->parent_instr);
- switch (deref->deref_type) {
- case nir_deref_type_struct:
- location += glsl_get_record_location_offset(parent->type,
- deref->strct.index);
- break;
-
- case nir_deref_type_array: {
- if (nir_src_is_const(deref->arr.index) && index == NULL) {
- /* We're still building a direct index */
- base_index += nir_src_as_uint(deref->arr.index) * array_elements;
- } else {
- if (index == NULL) {
- /* We used to be direct but not anymore */
- index = nir_imm_int(b, base_index);
- base_index = 0;
- }
-
- index = nir_iadd(b, index,
- nir_imul(b, nir_imm_int(b, array_elements),
- nir_ssa_for_src(b, deref->arr.index, 1)));
+ assert(deref->deref_type == nir_deref_type_array);
+
+ if (nir_src_is_const(deref->arr.index) && index == NULL) {
+ /* We're still building a direct index */
+ base_index += nir_src_as_uint(deref->arr.index) * array_elements;
+ } else {
+ if (index == NULL) {
+ /* We used to be direct but not anymore */
+ index = nir_imm_int(b, base_index);
+ base_index = 0;
}
- array_elements *= glsl_get_length(parent->type);
- break;
+ index = nir_iadd(b, index,
+ nir_imul(b, nir_imm_int(b, array_elements),
+ nir_ssa_for_src(b, deref->arr.index, 1)));
}
- default:
- unreachable("Invalid sampler deref type");
- }
+ array_elements *= glsl_get_length(parent->type);
deref = parent;
}
@@ -89,14 +76,7 @@ lower_tex_src_to_offset(nir_builder *b,
/* We hit the deref_var. This is the end of the line */
assert(deref->deref_type == nir_deref_type_var);
- location += deref->var->data.location;
-
- gl_shader_stage stage = b->shader->info.stage;
- assert(location < shader_program->data->NumUniformStorage &&
- shader_program->data->UniformStorage[location].opaque[stage].active);
-
- base_index +=
- shader_program->data->UniformStorage[location].opaque[stage].index;
+ base_index += deref->var->data.binding;
/* We have the offsets, we apply them, rewriting the source or removing
* instr if needed
@@ -123,8 +103,7 @@ lower_tex_src_to_offset(nir_builder *b,
}
static bool
-lower_sampler(nir_builder *b, nir_tex_instr *instr,
- const struct gl_shader_program *shader_program)
+lower_sampler(nir_builder *b, nir_tex_instr *instr)
{
int texture_idx =
nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
@@ -132,16 +111,14 @@ lower_sampler(nir_builder *b, nir_tex_instr *instr,
if (texture_idx >= 0) {
b->cursor = nir_before_instr(&instr->instr);
- lower_tex_src_to_offset(b, instr, texture_idx,
- shader_program);
+ lower_tex_src_to_offset(b, instr, texture_idx);
}
int sampler_idx =
nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);
if (sampler_idx >= 0) {
- lower_tex_src_to_offset(b, instr, sampler_idx,
- shader_program);
+ lower_tex_src_to_offset(b, instr, sampler_idx);
}
if (texture_idx < 0 && sampler_idx < 0)
@@ -151,8 +128,7 @@ lower_sampler(nir_builder *b, nir_tex_instr *instr,
}
static bool
-lower_impl(nir_function_impl *impl,
- const struct gl_shader_program *shader_program)
+lower_impl(nir_function_impl *impl)
{
nir_builder b;
nir_builder_init(&b, impl);
@@ -161,8 +137,7 @@ lower_impl(nir_function_impl *impl,
nir_foreach_block(block, impl) {
nir_foreach_instr(instr, block) {
if (instr->type == nir_instr_type_tex)
- progress |= lower_sampler(&b, nir_instr_as_tex(instr),
- shader_program);
+ progress |= lower_sampler(&b, nir_instr_as_tex(instr));
}
}
@@ -175,9 +150,15 @@ gl_nir_lower_samplers(nir_shader *shader,
{
bool progress = false;
+ /* First, use gl_nir_lower_samplers_as_deref to set var->data.binding
+ * based on the uniforms, and split structures to simplify derefs.
+ */
+ gl_nir_lower_samplers_as_deref(shader, shader_program);
+
+ /* Next, lower derefs to offsets. */
nir_foreach_function(function, shader) {
if (function->impl)
- progress |= lower_impl(function->impl, shader_program);
+ progress |= lower_impl(function->impl);
}
return progress;
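
The deref walk in lower_tex_src_to_offset() folds constant array indices into base_index and accumulates dynamic ones into a single runtime index scaled by the array sizes seen so far. A small self-contained C sketch of that arithmetic; the array shape and indices are hypothetical, not taken from the patch:

    #include <stdio.h>

    int main(void)
    {
       /* Hypothetical sampler2D s[4][3], access s[i][2] with dynamic outer
        * index i; the walk visits the innermost deref first, exactly like
        * the loop above. */
       unsigned array_elements = 1, base_index = 0;

       /* inner deref: constant index 2 folds into base_index */
       base_index += 2u * array_elements;
       array_elements *= 3;             /* glsl_get_length(inner type) */

       /* outer deref: dynamic, so it contributes i * array_elements at
        * runtime; here we just print the scale factor. */
       printf("offset = %u + i * %u\n", base_index, array_elements);
       return 0;
    }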
diff --git a/lib/mesa/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c b/lib/mesa/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c
index 9ff5708f5..87718112d 100644
--- a/lib/mesa/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c
+++ b/lib/mesa/src/compiler/glsl/gl_nir_lower_samplers_as_deref.c
@@ -99,7 +99,7 @@ remove_struct_derefs_prep(nir_deref_instr **p, char **name,
remove_struct_derefs_prep(&p[1], name, location, type);
- *type = glsl_get_array_instance(*type, length);
+ *type = glsl_array_type(*type, length, glsl_get_explicit_stride(cur->type));
break;
}
@@ -109,9 +109,6 @@ remove_struct_derefs_prep(nir_deref_instr **p, char **name,
glsl_get_struct_elem_name(cur->type, next->strct.index));
remove_struct_derefs_prep(&p[1], name, location, type);
-
- /* skip over the struct type: */
- *type = next->type;
break;
}
@@ -150,10 +147,20 @@ lower_deref(nir_builder *b, struct lower_samplers_as_deref_state *state,
remove_struct_derefs_prep(path.path, &name, &location, &type);
- assert(location < state->shader_program->data->NumUniformStorage &&
- state->shader_program->data->UniformStorage[location].opaque[stage].active);
+ if (state->shader_program && var->data.how_declared != nir_var_hidden) {
+ /* For GLSL programs, look up the bindings in the uniform storage. */
+ assert(location < state->shader_program->data->NumUniformStorage &&
+ state->shader_program->data->UniformStorage[location].opaque[stage].active);
- binding = state->shader_program->data->UniformStorage[location].opaque[stage].index;
+ binding = state->shader_program->data->UniformStorage[location].opaque[stage].index;
+ } else {
+ /* For ARB programs, built-in shaders, or internally generated sampler
+ * variables in GLSL programs, assume that whoever created the shader
+ * set the bindings correctly already.
+ */
+ assert(var->data.explicit_binding);
+ binding = var->data.binding;
+ }
if (var->type == type) {
/* Fast path: We did not encounter any struct derefs. */
@@ -170,6 +177,14 @@ lower_deref(nir_builder *b, struct lower_samplers_as_deref_state *state,
} else {
var = nir_variable_create(state->shader, nir_var_uniform, type, name);
var->data.binding = binding;
+
+ /* Don't set var->data.location. The old structure location could be
+ * used to index into gl_uniform_storage, assuming the full structure
+ * was walked in order. With the new split variables, this invariant
+ * no longer holds and there's no meaningful way to start from a base
+ * location and access a particular array element. Just leave it 0.
+ */
+
_mesa_hash_table_insert_pre_hashed(state->remap_table, hash, name, var);
}
@@ -199,28 +214,30 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
int sampler_idx =
nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);
- if (texture_idx < 0)
- return false;
+ b->cursor = nir_before_instr(&instr->instr);
- assert(texture_idx >= 0 && sampler_idx >= 0);
- assert(instr->src[texture_idx].src.is_ssa);
- assert(instr->src[sampler_idx].src.is_ssa);
- assert(instr->src[texture_idx].src.ssa == instr->src[sampler_idx].src.ssa);
+ if (texture_idx >= 0) {
+ assert(instr->src[texture_idx].src.is_ssa);
- b->cursor = nir_before_instr(&instr->instr);
+ nir_deref_instr *texture_deref =
+ lower_deref(b, state, nir_src_as_deref(instr->src[texture_idx].src));
+ /* only lower non-bindless: */
+ if (texture_deref) {
+ nir_instr_rewrite_src(&instr->instr, &instr->src[texture_idx].src,
+ nir_src_for_ssa(&texture_deref->dest.ssa));
+ }
+ }
- nir_deref_instr *texture_deref =
- lower_deref(b, state, nir_src_as_deref(instr->src[texture_idx].src));
- /* don't lower bindless: */
- if (!texture_deref)
- return false;
- nir_instr_rewrite_src(&instr->instr, &instr->src[texture_idx].src,
- nir_src_for_ssa(&texture_deref->dest.ssa));
-
- nir_deref_instr *sampler_deref =
- lower_deref(b, state, nir_src_as_deref(instr->src[sampler_idx].src));
- nir_instr_rewrite_src(&instr->instr, &instr->src[sampler_idx].src,
- nir_src_for_ssa(&sampler_deref->dest.ssa));
+ if (sampler_idx >= 0) {
+ assert(instr->src[sampler_idx].src.is_ssa);
+ nir_deref_instr *sampler_deref =
+ lower_deref(b, state, nir_src_as_deref(instr->src[sampler_idx].src));
+ /* only lower non-bindless: */
+ if (sampler_deref) {
+ nir_instr_rewrite_src(&instr->instr, &instr->src[sampler_idx].src,
+ nir_src_for_ssa(&sampler_deref->dest.ssa));
+ }
+ }
return true;
}
diff --git a/lib/mesa/src/compiler/glsl/glcpp/glcpp-parse.h b/lib/mesa/src/compiler/glsl/glcpp/glcpp-parse.h
index 3a9160ccf..2cbeb5045 100644
--- a/lib/mesa/src/compiler/glsl/glcpp/glcpp-parse.h
+++ b/lib/mesa/src/compiler/glsl/glcpp/glcpp-parse.h
@@ -1,8 +1,9 @@
-/* A Bison parser, made by GNU Bison 3.2. */
+/* A Bison parser, made by GNU Bison 3.3.2. */
/* Bison interface for Yacc-like parsers in C
- Copyright (C) 1984, 1989-1990, 2000-2015, 2018 Free Software Foundation, Inc.
+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
+ Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/lib/mesa/src/compiler/glsl/glcpp/meson.build b/lib/mesa/src/compiler/glsl/glcpp/meson.build
index a03d589b3..677baf1df 100644
--- a/lib/mesa/src/compiler/glsl/glcpp/meson.build
+++ b/lib/mesa/src/compiler/glsl/glcpp/meson.build
@@ -70,6 +70,7 @@ if with_any_opengl and with_tests
glcpp, join_paths(meson.current_source_dir(), 'tests'),
'--@0@'.format(m),
],
+ suite : ['compiler', 'glcpp'],
)
endforeach
endif
diff --git a/lib/mesa/src/compiler/glsl/ir_array_refcount.cpp b/lib/mesa/src/compiler/glsl/ir_array_refcount.cpp
index 1a46b2185..c2295c96c 100644
--- a/lib/mesa/src/compiler/glsl/ir_array_refcount.cpp
+++ b/lib/mesa/src/compiler/glsl/ir_array_refcount.cpp
@@ -37,8 +37,7 @@ ir_array_refcount_visitor::ir_array_refcount_visitor()
: last_array_deref(0), derefs(0), num_derefs(0), derefs_size(0)
{
this->mem_ctx = ralloc_context(NULL);
- this->ht = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ this->ht = _mesa_pointer_hash_table_create(NULL);
}
static void
diff --git a/lib/mesa/src/compiler/glsl/ir_function_detect_recursion.cpp b/lib/mesa/src/compiler/glsl/ir_function_detect_recursion.cpp
index 5b05274d0..4a774f666 100644
--- a/lib/mesa/src/compiler/glsl/ir_function_detect_recursion.cpp
+++ b/lib/mesa/src/compiler/glsl/ir_function_detect_recursion.cpp
@@ -158,8 +158,7 @@ public:
{
progress = false;
this->mem_ctx = ralloc_context(NULL);
- this->function_hash = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ this->function_hash = _mesa_pointer_hash_table_create(NULL);
}
~has_recursion_visitor()
diff --git a/lib/mesa/src/compiler/glsl/ir_variable_refcount.cpp b/lib/mesa/src/compiler/glsl/ir_variable_refcount.cpp
index 8306be10b..47e9d0c89 100644
--- a/lib/mesa/src/compiler/glsl/ir_variable_refcount.cpp
+++ b/lib/mesa/src/compiler/glsl/ir_variable_refcount.cpp
@@ -38,8 +38,7 @@
ir_variable_refcount_visitor::ir_variable_refcount_visitor()
{
this->mem_ctx = ralloc_context(NULL);
- this->ht = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ this->ht = _mesa_pointer_hash_table_create(NULL);
}
static void
diff --git a/lib/mesa/src/compiler/glsl/link_functions.cpp b/lib/mesa/src/compiler/glsl/link_functions.cpp
index e73a72c86..4998d39dc 100644
--- a/lib/mesa/src/compiler/glsl/link_functions.cpp
+++ b/lib/mesa/src/compiler/glsl/link_functions.cpp
@@ -47,8 +47,7 @@ public:
this->success = true;
this->linked = linked;
- this->locals = _mesa_set_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ this->locals = _mesa_pointer_set_create(NULL);
}
~call_link_visitor()
@@ -148,8 +147,7 @@ public:
* replace signature stored in a function. One could easily be added,
* but this avoids the need.
*/
- struct hash_table *ht = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ struct hash_table *ht = _mesa_pointer_hash_table_create(NULL);
exec_list formal_parameters;
foreach_in_list(const ir_instruction, original, &sig->parameters) {
diff --git a/lib/mesa/src/compiler/glsl/loop_analysis.cpp b/lib/mesa/src/compiler/glsl/loop_analysis.cpp
index 0fb6e9feb..e90eb1ccd 100644
--- a/lib/mesa/src/compiler/glsl/loop_analysis.cpp
+++ b/lib/mesa/src/compiler/glsl/loop_analysis.cpp
@@ -252,8 +252,7 @@ loop_variable::record_reference(bool in_assignee,
loop_state::loop_state()
{
- this->ht = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ this->ht = _mesa_pointer_hash_table_create(NULL);
this->mem_ctx = ralloc_context(NULL);
this->loop_found = false;
}
diff --git a/lib/mesa/src/compiler/glsl/meson.build b/lib/mesa/src/compiler/glsl/meson.build
index 71b4c42e4..8096f2d04 100644
--- a/lib/mesa/src/compiler/glsl/meson.build
+++ b/lib/mesa/src/compiler/glsl/meson.build
@@ -53,6 +53,13 @@ ir_expression_operation_strings_h = custom_target(
capture : true,
)
+float64_glsl_h = custom_target(
+ 'float64_glsl.h',
+ input : ['xxd.py', 'float64.glsl'],
+ output : 'float64_glsl.h',
+ command : [prog_python, '@INPUT@', '@OUTPUT@', '-n', 'float64_source'],
+)
+
files_libglsl = files(
'ast.h',
'ast_array_index.cpp',
@@ -213,7 +220,8 @@ files_libglsl_standalone = files(
libglsl = static_library(
'glsl',
[files_libglsl, glsl_parser, glsl_lexer_cpp, ir_expression_operation_h,
- ir_expression_operation_strings_h, ir_expression_operation_constant_h],
+ ir_expression_operation_strings_h, ir_expression_operation_constant_h,
+ float64_glsl_h],
c_args : [c_vis_args, c_msvc_compat_args, no_override_init_args],
cpp_args : [cpp_vis_args, cpp_msvc_compat_args],
link_with : libglcpp,
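
The new custom target runs xxd.py over float64.glsl and feeds the result into libglsl; since the generated header defines a static string named float64_source (the -n argument above), exactly one C file can include it and use the shader text directly. A hedged consumer sketch, with the include path as an assumption:

    #include <stdio.h>
    #include <string.h>

    #include "float64_glsl.h"   /* defines: static const char float64_source[] */

    int main(void)
    {
       /* The string is the whole float64.glsl file, ready to hand to the
        * GLSL front end as a built-in software-fp64 library. */
       printf("float64_source: %zu bytes\n", strlen(float64_source));
       return 0;
    }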
diff --git a/lib/mesa/src/compiler/glsl/opt_constant_variable.cpp b/lib/mesa/src/compiler/glsl/opt_constant_variable.cpp
index 15d8cf7cd..a1fffd4a5 100644
--- a/lib/mesa/src/compiler/glsl/opt_constant_variable.cpp
+++ b/lib/mesa/src/compiler/glsl/opt_constant_variable.cpp
@@ -186,8 +186,7 @@ do_constant_variable(exec_list *instructions)
bool progress = false;
ir_constant_variable_visitor v;
- v.ht = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ v.ht = _mesa_pointer_hash_table_create(NULL);
v.run(instructions);
hash_table_foreach(v.ht, hte) {
diff --git a/lib/mesa/src/compiler/glsl/tests/meson.build b/lib/mesa/src/compiler/glsl/tests/meson.build
index 2a41e30a2..02f3355c4 100644
--- a/lib/mesa/src/compiler/glsl/tests/meson.build
+++ b/lib/mesa/src/compiler/glsl/tests/meson.build
@@ -26,7 +26,8 @@ test(
c_args : [c_vis_args, c_msvc_compat_args, no_override_init_args],
include_directories : [inc_common, inc_compiler],
link_with : [libglsl],
- )
+ ),
+ suite : ['compiler', 'glsl'],
)
test(
@@ -38,7 +39,8 @@ test(
include_directories : [inc_common, inc_glsl],
link_with : [libglsl],
dependencies : [dep_clock, dep_thread],
- )
+ ),
+ suite : ['compiler', 'glsl'],
)
@@ -54,7 +56,8 @@ test(
include_directories : [inc_common, inc_glsl],
link_with : [libglsl, libglsl_standalone, libglsl_util],
dependencies : [dep_clock, dep_thread, idep_gtest],
- )
+ ),
+ suite : ['compiler', 'glsl'],
)
test(
@@ -68,7 +71,8 @@ test(
include_directories : [inc_common, inc_glsl],
link_with : [libglsl, libglsl_util],
dependencies : [dep_thread, idep_gtest],
- )
+ ),
+ suite : ['compiler', 'glsl'],
)
test(
@@ -80,7 +84,8 @@ test(
include_directories : [inc_common, inc_glsl],
link_with : [libglsl, libglsl_util],
dependencies : [dep_thread, idep_gtest],
- )
+ ),
+ suite : ['compiler', 'glsl'],
)
test(
@@ -93,7 +98,9 @@ test(
meson.source_root(), 'src', 'compiler', 'glsl', 'tests', 'warnings'
),
],
+ suite : ['compiler', 'glsl'],
)
+
test(
'glsl optimization',
prog_python,
@@ -101,4 +108,5 @@ test(
join_paths(meson.current_source_dir(), 'optimization_test.py'),
'--test-runner', glsl_test
],
+ suite : ['compiler', 'glsl'],
)
diff --git a/lib/mesa/src/compiler/glsl/tests/warnings/031-__-in-function-name.vert b/lib/mesa/src/compiler/glsl/tests/warnings/031-__-in-function-name.vert
new file mode 100644
index 000000000..84d643d45
--- /dev/null
+++ b/lib/mesa/src/compiler/glsl/tests/warnings/031-__-in-function-name.vert
@@ -0,0 +1,22 @@
+#version 130
+
+float __foo(float x)
+{
+ return 6.0 * x;
+}
+
+float __bar(float x)
+{
+ return 3.0 * x;
+}
+
+float __blat(float x)
+{
+ return 2.0 * x;
+}
+
+void main()
+{
+ gl_Position = vec4(__foo(gl_Vertex.x), __bar(gl_Vertex.y), __blat(gl_Vertex.z), 1.0);
+}
+
diff --git a/lib/mesa/src/compiler/glsl/tests/warnings/031-__-in-function-name.vert.expected b/lib/mesa/src/compiler/glsl/tests/warnings/031-__-in-function-name.vert.expected
new file mode 100644
index 000000000..1111ee441
--- /dev/null
+++ b/lib/mesa/src/compiler/glsl/tests/warnings/031-__-in-function-name.vert.expected
@@ -0,0 +1,3 @@
+0:3(7): warning: identifier `__foo' uses reserved `__' string
+0:8(7): warning: identifier `__bar' uses reserved `__' string
+0:13(7): warning: identifier `__blat' uses reserved `__' string
diff --git a/lib/mesa/src/compiler/glsl/tests/warnings/032-__-in-function-name-pragma-disable.vert b/lib/mesa/src/compiler/glsl/tests/warnings/032-__-in-function-name-pragma-disable.vert
new file mode 100644
index 000000000..d0d8d915a
--- /dev/null
+++ b/lib/mesa/src/compiler/glsl/tests/warnings/032-__-in-function-name-pragma-disable.vert
@@ -0,0 +1,24 @@
+#version 130
+
+float __foo(float x)
+{
+ return 6.0 * x;
+}
+
+#pragma warning(off)
+float __bar(float x)
+{
+ return 3.0 * x;
+}
+#pragma warning(on)
+
+float __blat(float x)
+{
+ return 2.0 * x;
+}
+
+void main()
+{
+ gl_Position = vec4(__foo(gl_Vertex.x), __bar(gl_Vertex.y), __blat(gl_Vertex.z), 1.0);
+}
+
diff --git a/lib/mesa/src/compiler/glsl/tests/warnings/032-__-in-function-name-pragma-disable.vert.expected b/lib/mesa/src/compiler/glsl/tests/warnings/032-__-in-function-name-pragma-disable.vert.expected
new file mode 100644
index 000000000..4b490a9c8
--- /dev/null
+++ b/lib/mesa/src/compiler/glsl/tests/warnings/032-__-in-function-name-pragma-disable.vert.expected
@@ -0,0 +1,2 @@
+0:3(7): warning: identifier `__foo' uses reserved `__' string
+0:15(7): warning: identifier `__blat' uses reserved `__' string
diff --git a/lib/mesa/src/compiler/glsl/xxd.py b/lib/mesa/src/compiler/glsl/xxd.py
new file mode 100644
index 000000000..f8f57d771
--- /dev/null
+++ b/lib/mesa/src/compiler/glsl/xxd.py
@@ -0,0 +1,111 @@
+# encoding=utf-8
+# Copyright © 2018 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# Converts a file to a C/C++ #include containing a string
+
+from __future__ import unicode_literals
+import argparse
+import io
+import os
+import sys
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('input', help="Name of input file")
+ parser.add_argument('output', help="Name of output file")
+ parser.add_argument("-n", "--name",
+ help="Name of C variable")
+ args = parser.parse_args()
+ return args
+
+
+def filename_to_C_identifier(n):
+ if n[0] != '_' and not n[0].isalpha():
+ n = "_" + n[1:]
+
+ return "".join([c if c.isalnum() or c == "_" else "_" for c in n])
+
+
+def emit_byte(f, b):
+ if ord(b) == ord('\n'):
+ f.write(b"\\n\"\n \"")
+ return
+ elif ord(b) == ord('\r'):
+ f.write(b"\\r\"\n \"")
+ return
+ elif ord(b) == ord('\t'):
+ f.write(b"\\t")
+ return
+ elif ord(b) == ord('"'):
+ f.write(b"\\\"")
+ return
+ elif ord(b) == ord('\\'):
+ f.write(b"\\\\")
+ return
+
+ if ord(b) >= ord(' ') and ord(b) <= ord('~'):
+ f.write(b)
+ else:
+ hi = ord(b) >> 4
+ lo = ord(b) & 0x0f
+ f.write("\\x{:x}{:x}".format(hi, lo).encode('utf-8'))
+
+
+def process_file(args):
+ with io.open(args.input, "rb") as infile:
+ try:
+ with io.open(args.output, "wb") as outfile:
+ # If a name was not specified on the command line, pick one based on
+ # the name of the input file. The input argument is required, so there
+ # is always a filename to derive the identifier from.
+ if args.name is not None:
+ name = args.name
+ else:
+ name = filename_to_C_identifier(args.input)
+
+ outfile.write("static const char {}[] =\n \"".format(name).encode('utf-8'))
+
+ while True:
+ byte = infile.read(1)
+ if byte == b"":
+ break
+
+ emit_byte(outfile, byte)
+
+ outfile.write(b"\"\n ;\n")
+ except Exception:
+ # In the event that anything goes wrong, delete the output file,
+ # then re-raise the exception. Deleting the output file should
+ # ensure that the build system doesn't try to use the stale,
+ # half-generated file.
+ os.unlink(args.output)
+ raise
+
+
+def main():
+ args = get_args()
+ process_file(args)
+
+
+if __name__ == "__main__":
+ main()
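
For a concrete picture of the script's output: filename_to_C_identifier() maps, for example, float64.glsl to float64_glsl when no -n name is given, and process_file() wraps each input line in a quoted, escaped C string fragment (the same shape as the float64_glsl.h diff above). A hypothetical two-line input therefore produces roughly:

    /* python xxd.py input.glsl output.h -n demo_source   (illustrative) */
    static const char demo_source[] =
       "#version 130\n"
       "void main() {}\n"
       ""
       ;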