Extract repeated constants from f32 and f64 source

This will make it easier to keep `f16` and `f128` consistent as their
implementations get added.
This commit is contained in:
Trevor Gross 2024-06-16 03:09:58 -05:00
parent d49994b060
commit fce07a82c6
4 changed files with 160 additions and 101 deletions

View File

@ -490,6 +490,21 @@ impl f32 {
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const NEG_INFINITY: f32 = -1.0_f32 / 0.0_f32;
/// Sign bit (bit 31 of the IEEE 754 binary32 representation).
const SIGN_MASK: u32 = 0x8000_0000;
/// Exponent mask (bits 23..=30).
const EXP_MASK: u32 = 0x7f80_0000;
/// Mantissa (significand) mask (bits 0..=22).
const MAN_MASK: u32 = 0x007f_ffff;
/// Bit pattern of the minimum representable positive value (the smallest
/// positive subnormal).
const TINY_BITS: u32 = 0x1;
/// Bit pattern of the minimum-magnitude representable negative value (the
/// negative subnormal closest to zero): `TINY_BITS` with the sign bit set.
const NEG_TINY_BITS: u32 = Self::TINY_BITS | Self::SIGN_MASK;
/// Returns `true` if this value is NaN.
///
/// ```
@ -515,7 +530,7 @@ pub const fn is_nan(self) -> bool {
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub(crate) const fn abs_private(self) -> f32 {
// SAFETY: This transmutation is fine. Probably. For the reasons std is using it.
unsafe { mem::transmute::<u32, f32>(mem::transmute::<f32, u32>(self) & 0x7fff_ffff) }
unsafe { mem::transmute::<u32, f32>(mem::transmute::<f32, u32>(self) & !Self::SIGN_MASK) }
}
/// Returns `true` if this value is positive infinity or negative infinity, and
@ -682,12 +697,9 @@ pub const fn classify(self) -> FpCategory {
// runtime-deviating logic which may or may not be acceptable.
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
const unsafe fn partial_classify(self) -> FpCategory {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
// SAFETY: The caller is not asking questions for which this will tell lies.
let b = unsafe { mem::transmute::<f32, u32>(self) };
match (b & MAN_MASK, b & EXP_MASK) {
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
@ -699,12 +711,9 @@ pub const fn classify(self) -> FpCategory {
// plus a transmute. We do not live in a just world, but we can make it more so.
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
const fn classify_bits(b: u32) -> FpCategory {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
match (b & MAN_MASK, b & EXP_MASK) {
(0, EXP_MASK) => FpCategory::Infinite,
(_, EXP_MASK) => FpCategory::Nan,
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(_, Self::EXP_MASK) => FpCategory::Nan,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
@ -789,17 +798,14 @@ pub const fn is_sign_negative(self) -> bool {
pub const fn next_up(self) -> Self {
// We must use strictly integer arithmetic to prevent denormals from
// flushing to zero after an arithmetic operation on some platforms.
const TINY_BITS: u32 = 0x1; // Smallest positive f32.
const CLEAR_SIGN_MASK: u32 = 0x7fff_ffff;
let bits = self.to_bits();
if self.is_nan() || bits == Self::INFINITY.to_bits() {
return self;
}
let abs = bits & CLEAR_SIGN_MASK;
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
TINY_BITS
Self::TINY_BITS
} else if bits == abs {
bits + 1
} else {
@ -839,17 +845,14 @@ pub const fn next_up(self) -> Self {
pub const fn next_down(self) -> Self {
// We must use strictly integer arithmetic to prevent denormals from
// flushing to zero after an arithmetic operation on some platforms.
const NEG_TINY_BITS: u32 = 0x8000_0001; // Smallest (in magnitude) negative f32.
const CLEAR_SIGN_MASK: u32 = 0x7fff_ffff;
let bits = self.to_bits();
if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
return self;
}
let abs = bits & CLEAR_SIGN_MASK;
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
NEG_TINY_BITS
Self::NEG_TINY_BITS
} else if bits == abs {
bits - 1
} else {

View File

@ -489,6 +489,21 @@ impl f64 {
#[stable(feature = "assoc_int_consts", since = "1.43.0")]
pub const NEG_INFINITY: f64 = -1.0_f64 / 0.0_f64;
/// Sign bit (bit 63 of the IEEE 754 binary64 representation).
const SIGN_MASK: u64 = 0x8000_0000_0000_0000;
/// Exponent mask (bits 52..=62).
const EXP_MASK: u64 = 0x7ff0_0000_0000_0000;
/// Mantissa (significand) mask (bits 0..=51).
const MAN_MASK: u64 = 0x000f_ffff_ffff_ffff;
/// Bit pattern of the minimum representable positive value (the smallest
/// positive subnormal).
const TINY_BITS: u64 = 0x1;
/// Bit pattern of the minimum-magnitude representable negative value (the
/// negative subnormal closest to zero): `TINY_BITS` with the sign bit set.
const NEG_TINY_BITS: u64 = Self::TINY_BITS | Self::SIGN_MASK;
/// Returns `true` if this value is NaN.
///
/// ```
@ -514,9 +529,7 @@ pub const fn is_nan(self) -> bool {
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
pub(crate) const fn abs_private(self) -> f64 {
// SAFETY: This transmutation is fine. Probably. For the reasons std is using it.
unsafe {
mem::transmute::<u64, f64>(mem::transmute::<f64, u64>(self) & 0x7fff_ffff_ffff_ffff)
}
unsafe { mem::transmute::<u64, f64>(mem::transmute::<f64, u64>(self) & !Self::SIGN_MASK) }
}
/// Returns `true` if this value is positive infinity or negative infinity, and
@ -673,13 +686,10 @@ pub const fn classify(self) -> FpCategory {
// and some normal floating point numbers truncated from an x87 FPU.
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
const unsafe fn partial_classify(self) -> FpCategory {
const EXP_MASK: u64 = 0x7ff0000000000000;
const MAN_MASK: u64 = 0x000fffffffffffff;
// SAFETY: The caller is not asking questions for which this will tell lies.
let b = unsafe { mem::transmute::<f64, u64>(self) };
match (b & MAN_MASK, b & EXP_MASK) {
(0, EXP_MASK) => FpCategory::Infinite,
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
@ -691,12 +701,9 @@ pub const fn classify(self) -> FpCategory {
// plus a transmute. We do not live in a just world, but we can make it more so.
#[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
const fn classify_bits(b: u64) -> FpCategory {
const EXP_MASK: u64 = 0x7ff0000000000000;
const MAN_MASK: u64 = 0x000fffffffffffff;
match (b & MAN_MASK, b & EXP_MASK) {
(0, EXP_MASK) => FpCategory::Infinite,
(_, EXP_MASK) => FpCategory::Nan,
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(_, Self::EXP_MASK) => FpCategory::Nan,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
@ -756,7 +763,7 @@ pub const fn is_sign_negative(self) -> bool {
// IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
// applies to zeros and NaNs as well.
// SAFETY: This is just transmuting to get the sign bit, it's fine.
unsafe { mem::transmute::<f64, u64>(self) & 0x8000_0000_0000_0000 != 0 }
unsafe { mem::transmute::<f64, u64>(self) & Self::SIGN_MASK != 0 }
}
#[must_use]
@ -799,17 +806,14 @@ pub fn is_negative(self) -> bool {
pub const fn next_up(self) -> Self {
// We must use strictly integer arithmetic to prevent denormals from
// flushing to zero after an arithmetic operation on some platforms.
const TINY_BITS: u64 = 0x1; // Smallest positive f64.
const CLEAR_SIGN_MASK: u64 = 0x7fff_ffff_ffff_ffff;
let bits = self.to_bits();
if self.is_nan() || bits == Self::INFINITY.to_bits() {
return self;
}
let abs = bits & CLEAR_SIGN_MASK;
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
TINY_BITS
Self::TINY_BITS
} else if bits == abs {
bits + 1
} else {
@ -849,17 +853,14 @@ pub const fn next_up(self) -> Self {
pub const fn next_down(self) -> Self {
// We must use strictly integer arithmetic to prevent denormals from
// flushing to zero after an arithmetic operation on some platforms.
const NEG_TINY_BITS: u64 = 0x8000_0000_0000_0001; // Smallest (in magnitude) negative f64.
const CLEAR_SIGN_MASK: u64 = 0x7fff_ffff_ffff_ffff;
let bits = self.to_bits();
if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
return self;
}
let abs = bits & CLEAR_SIGN_MASK;
let abs = bits & !Self::SIGN_MASK;
let next_bits = if abs == 0 {
NEG_TINY_BITS
Self::NEG_TINY_BITS
} else if bits == abs {
bits - 1
} else {

View File

@ -2,6 +2,45 @@
use crate::num::FpCategory as Fp;
use crate::num::*;
/// Bit pattern of the smallest positive number (minimum subnormal).
#[allow(dead_code)] // unused on x86
const TINY_BITS: u32 = 0x1;
/// Bit pattern of the next smallest positive number.
#[allow(dead_code)] // unused on x86
const TINY_UP_BITS: u32 = 0x2;
/// Exponent = 0b11...10, significand = 0b1111..10: the largest finite value
/// below `f32::MAX` (i.e. `MAX` minus one ULP).
#[allow(dead_code)] // unused on x86
const MAX_DOWN_BITS: u32 = 0x7f7f_fffe;
/// Zeroed exponent, full significand: the largest subnormal.
#[allow(dead_code)] // unused on x86
const LARGEST_SUBNORMAL_BITS: u32 = 0x007f_ffff;
/// Exponent = 0b1, zeroed significand: the smallest normal number.
#[allow(dead_code)] // unused on x86
const SMALLEST_NORMAL_BITS: u32 = 0x0080_0000;
/// First alternating bit pattern (0b1010...) over the mantissa.
#[allow(dead_code)] // unused on x86
const NAN_MASK1: u32 = 0x002a_aaaa;
/// Second alternating bit pattern (0b0101...) over the mantissa.
#[allow(dead_code)] // unused on x86
const NAN_MASK2: u32 = 0x0055_5555;
// Asserts that two f32 values have identical bit patterns by comparing
// `to_bits()` results, so it distinguishes 0.0 from -0.0 and checks NaN
// payloads — which `assert_eq!` on the float values would not.
#[allow(unused_macros)]
macro_rules! assert_f32_biteq {
($left : expr, $right : expr) => {
// Borrow so each input expression is evaluated exactly once.
let l: &f32 = &$left;
let r: &f32 = &$right;
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(lb, rb, "float {l} ({lb:#010x}) is not bitequal to {r} ({rb:#010x})");
};
}
#[test]
fn test_num_f32() {
test_num(10f32, 2f32);
@ -315,27 +354,16 @@ fn test_is_sign_negative() {
assert!((-f32::NAN).is_sign_negative());
}
#[allow(unused_macros)]
macro_rules! assert_f32_biteq {
($left : expr, $right : expr) => {
let l: &f32 = &$left;
let r: &f32 = &$right;
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(lb, rb, "float {} ({:#x}) is not equal to {} ({:#x})", *l, lb, *r, rb);
};
}
// Ignore test on x87 floating point, these platforms do not guarantee NaN
// payloads are preserved and flush denormals to zero, failing the tests.
#[cfg(not(target_arch = "x86"))]
#[test]
fn test_next_up() {
let tiny = f32::from_bits(1);
let tiny_up = f32::from_bits(2);
let max_down = f32::from_bits(0x7f7f_fffe);
let largest_subnormal = f32::from_bits(0x007f_ffff);
let smallest_normal = f32::from_bits(0x0080_0000);
let tiny = f32::from_bits(TINY_BITS);
let tiny_up = f32::from_bits(TINY_UP_BITS);
let max_down = f32::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f32::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f32::from_bits(SMALLEST_NORMAL_BITS);
assert_f32_biteq!(f32::NEG_INFINITY.next_up(), f32::MIN);
assert_f32_biteq!(f32::MIN.next_up(), -max_down);
assert_f32_biteq!((-1.0 - f32::EPSILON).next_up(), -1.0);
@ -352,8 +380,8 @@ fn test_next_up() {
// Check that NaNs roundtrip.
let nan0 = f32::NAN;
let nan1 = f32::from_bits(f32::NAN.to_bits() ^ 0x002a_aaaa);
let nan2 = f32::from_bits(f32::NAN.to_bits() ^ 0x0055_5555);
let nan1 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK2);
assert_f32_biteq!(nan0.next_up(), nan0);
assert_f32_biteq!(nan1.next_up(), nan1);
assert_f32_biteq!(nan2.next_up(), nan2);
@ -364,11 +392,11 @@ fn test_next_up() {
#[cfg(not(target_arch = "x86"))]
#[test]
fn test_next_down() {
let tiny = f32::from_bits(1);
let tiny_up = f32::from_bits(2);
let max_down = f32::from_bits(0x7f7f_fffe);
let largest_subnormal = f32::from_bits(0x007f_ffff);
let smallest_normal = f32::from_bits(0x0080_0000);
let tiny = f32::from_bits(TINY_BITS);
let tiny_up = f32::from_bits(TINY_UP_BITS);
let max_down = f32::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f32::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f32::from_bits(SMALLEST_NORMAL_BITS);
assert_f32_biteq!(f32::NEG_INFINITY.next_down(), f32::NEG_INFINITY);
assert_f32_biteq!(f32::MIN.next_down(), f32::NEG_INFINITY);
assert_f32_biteq!((-max_down).next_down(), f32::MIN);
@ -386,8 +414,8 @@ fn test_next_down() {
// Check that NaNs roundtrip.
let nan0 = f32::NAN;
let nan1 = f32::from_bits(f32::NAN.to_bits() ^ 0x002a_aaaa);
let nan2 = f32::from_bits(f32::NAN.to_bits() ^ 0x0055_5555);
let nan1 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f32::from_bits(f32::NAN.to_bits() ^ NAN_MASK2);
assert_f32_biteq!(nan0.next_down(), nan0);
assert_f32_biteq!(nan1.next_down(), nan1);
assert_f32_biteq!(nan2.next_down(), nan2);
@ -734,8 +762,8 @@ fn test_float_bits_conv() {
// Check that NaNs roundtrip their bits regardless of signaling-ness
// 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits
let masked_nan1 = f32::NAN.to_bits() ^ 0x002A_AAAA;
let masked_nan2 = f32::NAN.to_bits() ^ 0x0055_5555;
let masked_nan1 = f32::NAN.to_bits() ^ NAN_MASK1;
let masked_nan2 = f32::NAN.to_bits() ^ NAN_MASK2;
assert!(f32::from_bits(masked_nan1).is_nan());
assert!(f32::from_bits(masked_nan2).is_nan());

View File

@ -2,6 +2,45 @@
use crate::num::FpCategory as Fp;
use crate::num::*;
/// Bit pattern of the smallest positive number (minimum subnormal).
#[allow(dead_code)] // unused on x86
const TINY_BITS: u64 = 0x1;
/// Bit pattern of the next smallest positive number.
#[allow(dead_code)] // unused on x86
const TINY_UP_BITS: u64 = 0x2;
/// Exponent = 0b11...10, significand = 0b1111..10: the largest finite value
/// below `f64::MAX` (i.e. `MAX` minus one ULP).
#[allow(dead_code)] // unused on x86
const MAX_DOWN_BITS: u64 = 0x7fef_ffff_ffff_fffe;
/// Zeroed exponent, full significand: the largest subnormal.
#[allow(dead_code)] // unused on x86
const LARGEST_SUBNORMAL_BITS: u64 = 0x000f_ffff_ffff_ffff;
/// Exponent = 0b1, zeroed significand: the smallest normal number.
#[allow(dead_code)] // unused on x86
const SMALLEST_NORMAL_BITS: u64 = 0x0010_0000_0000_0000;
/// First alternating bit pattern (0b1010...) over the mantissa.
#[allow(dead_code)] // unused on x86
const NAN_MASK1: u64 = 0x000a_aaaa_aaaa_aaaa;
/// Second alternating bit pattern (0b0101...) over the mantissa.
#[allow(dead_code)] // unused on x86
const NAN_MASK2: u64 = 0x0005_5555_5555_5555;
// Asserts that two f64 values have identical bit patterns by comparing
// `to_bits()` results, so it distinguishes 0.0 from -0.0 and checks NaN
// payloads — which `assert_eq!` on the float values would not.
#[allow(unused_macros)]
macro_rules! assert_f64_biteq {
($left : expr, $right : expr) => {
// Borrow so each input expression is evaluated exactly once.
let l: &f64 = &$left;
let r: &f64 = &$right;
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(lb, rb, "float {l} ({lb:#018x}) is not bitequal to {r} ({rb:#018x})");
};
}
#[test]
fn test_num_f64() {
test_num(10f64, 2f64);
@ -305,27 +344,16 @@ fn test_is_sign_negative() {
assert!((-f64::NAN).is_sign_negative());
}
#[allow(unused_macros)]
macro_rules! assert_f64_biteq {
($left : expr, $right : expr) => {
let l: &f64 = &$left;
let r: &f64 = &$right;
let lb = l.to_bits();
let rb = r.to_bits();
assert_eq!(lb, rb, "float {} ({:#x}) is not equal to {} ({:#x})", *l, lb, *r, rb);
};
}
// Ignore test on x87 floating point, these platforms do not guarantee NaN
// payloads are preserved and flush denormals to zero, failing the tests.
#[cfg(not(target_arch = "x86"))]
#[test]
fn test_next_up() {
let tiny = f64::from_bits(1);
let tiny_up = f64::from_bits(2);
let max_down = f64::from_bits(0x7fef_ffff_ffff_fffe);
let largest_subnormal = f64::from_bits(0x000f_ffff_ffff_ffff);
let smallest_normal = f64::from_bits(0x0010_0000_0000_0000);
let tiny = f64::from_bits(TINY_BITS);
let tiny_up = f64::from_bits(TINY_UP_BITS);
let max_down = f64::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f64::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f64::from_bits(SMALLEST_NORMAL_BITS);
assert_f64_biteq!(f64::NEG_INFINITY.next_up(), f64::MIN);
assert_f64_biteq!(f64::MIN.next_up(), -max_down);
assert_f64_biteq!((-1.0 - f64::EPSILON).next_up(), -1.0);
@ -341,8 +369,8 @@ fn test_next_up() {
assert_f64_biteq!(f64::INFINITY.next_up(), f64::INFINITY);
let nan0 = f64::NAN;
let nan1 = f64::from_bits(f64::NAN.to_bits() ^ 0x000a_aaaa_aaaa_aaaa);
let nan2 = f64::from_bits(f64::NAN.to_bits() ^ 0x0005_5555_5555_5555);
let nan1 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK2);
assert_f64_biteq!(nan0.next_up(), nan0);
assert_f64_biteq!(nan1.next_up(), nan1);
assert_f64_biteq!(nan2.next_up(), nan2);
@ -353,11 +381,11 @@ fn test_next_up() {
#[cfg(not(target_arch = "x86"))]
#[test]
fn test_next_down() {
let tiny = f64::from_bits(1);
let tiny_up = f64::from_bits(2);
let max_down = f64::from_bits(0x7fef_ffff_ffff_fffe);
let largest_subnormal = f64::from_bits(0x000f_ffff_ffff_ffff);
let smallest_normal = f64::from_bits(0x0010_0000_0000_0000);
let tiny = f64::from_bits(TINY_BITS);
let tiny_up = f64::from_bits(TINY_UP_BITS);
let max_down = f64::from_bits(MAX_DOWN_BITS);
let largest_subnormal = f64::from_bits(LARGEST_SUBNORMAL_BITS);
let smallest_normal = f64::from_bits(SMALLEST_NORMAL_BITS);
assert_f64_biteq!(f64::NEG_INFINITY.next_down(), f64::NEG_INFINITY);
assert_f64_biteq!(f64::MIN.next_down(), f64::NEG_INFINITY);
assert_f64_biteq!((-max_down).next_down(), f64::MIN);
@ -374,8 +402,8 @@ fn test_next_down() {
assert_f64_biteq!(f64::INFINITY.next_down(), f64::MAX);
let nan0 = f64::NAN;
let nan1 = f64::from_bits(f64::NAN.to_bits() ^ 0x000a_aaaa_aaaa_aaaa);
let nan2 = f64::from_bits(f64::NAN.to_bits() ^ 0x0005_5555_5555_5555);
let nan1 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK1);
let nan2 = f64::from_bits(f64::NAN.to_bits() ^ NAN_MASK2);
assert_f64_biteq!(nan0.next_down(), nan0);
assert_f64_biteq!(nan1.next_down(), nan1);
assert_f64_biteq!(nan2.next_down(), nan2);
@ -715,9 +743,8 @@ fn test_float_bits_conv() {
assert_approx_eq!(f64::from_bits(0xc02c800000000000), -14.25);
// Check that NaNs roundtrip their bits regardless of signaling-ness
// 0xA is 0b1010; 0x5 is 0b0101 -- so these two together clobbers all the mantissa bits
let masked_nan1 = f64::NAN.to_bits() ^ 0x000A_AAAA_AAAA_AAAA;
let masked_nan2 = f64::NAN.to_bits() ^ 0x0005_5555_5555_5555;
let masked_nan1 = f64::NAN.to_bits() ^ NAN_MASK1;
let masked_nan2 = f64::NAN.to_bits() ^ NAN_MASK2;
assert!(f64::from_bits(masked_nan1).is_nan());
assert!(f64::from_bits(masked_nan2).is_nan());