diff --git a/src/lib.rs b/src/lib.rs index b0e431211..c4242fe12 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,6 +12,7 @@ #![allow(clippy::float_cmp)] #![allow(clippy::int_plus_one)] #![allow(clippy::many_single_char_names)] +#![allow(clippy::just_underscores_and_digits)] #![allow(clippy::mixed_case_hex_literals)] #![allow(clippy::needless_late_init)] #![allow(clippy::needless_return)] diff --git a/src/math/generic/fmod.rs b/src/math/generic/fmod.rs index c74b593d5..4d76ed6de 100644 --- a/src/math/generic/fmod.rs +++ b/src/math/generic/fmod.rs @@ -1,84 +1,63 @@ -/* SPDX-License-Identifier: MIT */ -/* origin: musl src/math/fmod.c. Ported to generic Rust algorithm in 2025, TG. */ - use super::super::{CastFrom, Float, Int, MinInt}; +/// Given the bits of a positive float, clamp the exponent field to [0,1] +fn collapse_exponent<F: Float>(bits: F::Int) -> F::Int { + let sig = bits & F::SIG_MASK; + if sig == bits { sig } else { sig | F::IMPLICIT_BIT } +} + +/// Computes (x << e) % y +fn reduction<I: Int>(mut x: I, e: u32, y: I) -> I { + x %= y; + for _ in 0..e { + x <<= 1; + x = x.checked_sub(y).unwrap_or(x); + } + x +} + #[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)] pub fn fmod<F: Float>(x: F, y: F) -> F { - let zero = F::Int::ZERO; - let one = F::Int::ONE; + let _1 = F::Int::ONE; let mut ix = x.to_bits(); let mut iy = y.to_bits(); - let mut ex = x.ex().signed(); - let mut ey = y.ex().signed(); - let sx = ix & F::SIGN_MASK; - - if iy << 1 == zero || y.is_nan() || ex == F::EXP_SAT as i32 { - return (x * y) / (x * y); - } - if ix << 1 <= iy << 1 { - if ix << 1 == iy << 1 { - return F::ZERO * x; - } - return x; - } + let sx = ix & F::SIGN_MASK; + ix &= !F::SIGN_MASK; + iy &= !F::SIGN_MASK; - /* normalize x and y */ - if ex == 0 { - let i = ix << F::EXP_BITS; - ex -= i.leading_zeros() as i32; - ix <<= -ex + 1; - } else { - ix &= F::Int::MAX >> F::EXP_BITS; - ix |= one << F::SIG_BITS; + if ix >= F::EXP_MASK { + // x is nan or inf + return F::NAN; } - if ey == 0 { - let i = iy << 
F::EXP_BITS; - ey -= i.leading_zeros() as i32; - iy <<= -ey + 1; - } else { - iy &= F::Int::MAX >> F::EXP_BITS; - iy |= one << F::SIG_BITS; + if iy.wrapping_sub(_1) >= F::EXP_MASK { + // y is nan or zero + return F::NAN; } - /* x mod y */ - while ex > ey { - let i = ix.wrapping_sub(iy); - if i >> (F::BITS - 1) == zero { - if i == zero { - return F::ZERO * x; - } - ix = i; - } - - ix <<= 1; - ex -= 1; - } + if ix < iy { + // |x| < |y| + return x; + }; - let i = ix.wrapping_sub(iy); - if i >> (F::BITS - 1) == zero { - if i == zero { - return F::ZERO * x; - } + let ex: u32 = x.ex().saturating_sub(1); + let ey: u32 = y.ex().saturating_sub(1); - ix = i; - } + let num = collapse_exponent::<F>(ix); + let div = collapse_exponent::<F>(iy); - let shift = ix.leading_zeros().saturating_sub(F::EXP_BITS); - ix <<= shift; - ex -= shift as i32; + let num = reduction(num, ex - ey, div); - /* scale result */ - if ex > 0 { - ix -= one << F::SIG_BITS; - ix |= F::Int::cast_from(ex) << F::SIG_BITS; + if num.is_zero() { + F::from_bits(sx) } else { - ix >>= -ex + 1; - } + let ilog = num.ilog2(); + let shift = (ey + ilog).min(F::SIG_BITS) - ilog; + let scale = (ey + ilog).saturating_sub(F::SIG_BITS); - ix |= sx; - - F::from_bits(ix) + let normalized = num << shift; + let scaled = normalized + (F::Int::cast_from(scale) << F::SIG_BITS); + F::from_bits(sx | scaled) + } } diff --git a/src/math/support/int_traits.rs b/src/math/support/int_traits.rs index 491adb1f2..3ec1faba1 100644 --- a/src/math/support/int_traits.rs +++ b/src/math/support/int_traits.rs @@ -40,6 +40,9 @@ pub trait Int: + PartialOrd + ops::AddAssign + ops::SubAssign + + ops::MulAssign + + ops::DivAssign + + ops::RemAssign + ops::BitAndAssign + ops::BitOrAssign + ops::BitXorAssign @@ -51,6 +54,7 @@ pub trait Int: + ops::Sub<Output = Self> + ops::Mul<Output = Self> + ops::Div<Output = Self> + + ops::Rem<Output = Self> + ops::Shl<i32, Output = Self> + ops::Shl<u32, Output = Self> + ops::Shr<i32, Output = Self>