#21817 tried to fix a type inference quirk for binary operators, but as @Byron reported, it is still not perfect:
```rust
use std::num::Float;

#[derive(Debug, PartialEq, Eq, Copy)]
pub struct Vector<T: Float> {
    x: T,
    y: T,
    z: T,
}

impl<T: Float> Vector<T> {
    #[inline(always)]
    fn mulfed(&self, m: T) -> Vector<T> {
        Vector { x: self.x * m, y: self.y * m, z: self.z * m }
    }

    fn dot(&self, r: &Vector<T>) -> T {
        self.x * r.x + self.y * r.y + self.z * r.z
    }

    fn len(&self) -> T {
        self.dot(self).sqrt()
    }

    // Divisor computed inline.
    fn normalized(&self) -> Vector<T> {
        self.mulfed(Float::one() / self.dot(self).sqrt())
    }

    // Same divisor, obtained through len().
    fn normalized_(&self) -> Vector<T> {
        self.mulfed(Float::one() / self.len())
    }
}

fn main() {}
```
Notice that there are two versions of `normalized`. Their bodies are semantically equivalent (`len()` is exactly `self.dot(self).sqrt()`), yet one compiles while the other does not. This is a bug.
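For readers on current Rust, where `std::num::Float` no longer exists, here is a minimal sketch of the inference pattern at issue: the type of a `one()`-style associated call has no intrinsic type of its own and is pinned only by the binary operator's trait bounds on the other operand. The `One` trait below is a hypothetical stand-in for the old `Float::one()`; it is not from the original report.

```rust
use std::ops::Div;

// Hypothetical stand-in for the pre-1.0 `Float::one()`.
trait One {
    fn one() -> Self;
}

impl One for f64 {
    fn one() -> f64 {
        1.0
    }
}

fn recip<T: One + Div<Output = T>>(x: T) -> T {
    // `T::one()` carries no type by itself; inference must pin it to `T`
    // through the `Div` bound, just as `Float::one() / self.len()` had to
    // pin the numerator to the return type of `len()`.
    T::one() / x
}

fn main() {
    println!("{}", recip(4.0_f64)); // prints 0.25
}
```

The bug in the report is that inference handled the two equivalent spellings of the divisor differently, even though both ultimately produce the same `T`.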