From fba4177d0fd07fc3f2d2202b39199ab913fb6164 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Mon, 23 Jun 2025 16:08:01 +0000 Subject: [PATCH 1/6] Simplify assignments. --- compiler/rustc_mir_transform/src/gvn.rs | 61 +++++++++++++------------ 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index bda71ceaa551a..b19323239b6f8 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -1755,7 +1755,7 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> { fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) { self.simplify_place_projection(place, location); - if context.is_mutating_use() && !place.projection.is_empty() { + if context.is_mutating_use() && place.is_indirect() { // Non-local mutation maybe invalidate deref. self.invalidate_derefs(); } @@ -1767,36 +1767,41 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> { self.super_operand(operand, location); } - fn visit_statement(&mut self, stmt: &mut Statement<'tcx>, location: Location) { - if let StatementKind::Assign(box (ref mut lhs, ref mut rvalue)) = stmt.kind { - self.simplify_place_projection(lhs, location); - - let value = self.simplify_rvalue(lhs, rvalue, location); - let value = if let Some(local) = lhs.as_local() - && self.ssa.is_ssa(local) - // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark - // `local` as reusable if we have an exact type match. - && self.local_decls[local].ty == rvalue.ty(self.local_decls, self.tcx) + fn visit_assign( + &mut self, + lhs: &mut Place<'tcx>, + rvalue: &mut Rvalue<'tcx>, + location: Location, + ) { + self.simplify_place_projection(lhs, location); + + let value = self.simplify_rvalue(lhs, rvalue, location); + if let Some(value) = value { + if let Some(const_) = self.try_as_constant(value) { + *rvalue = Rvalue::Use(Operand::Constant(Box::new(const_))); + } else if let Some(place) = self.try_as_place(value, location, false) + && *rvalue != Rvalue::Use(Operand::Move(place)) + && *rvalue != Rvalue::Use(Operand::Copy(place)) { - let value = value.unwrap_or_else(|| self.new_opaque()); - self.assign(local, value); - Some(value) - } else { - value - }; - if let Some(value) = value { - if let Some(const_) = self.try_as_constant(value) { - *rvalue = Rvalue::Use(Operand::Constant(Box::new(const_))); - } else if let Some(place) = self.try_as_place(value, location, false) - && *rvalue != Rvalue::Use(Operand::Move(place)) - && *rvalue != Rvalue::Use(Operand::Copy(place)) - { - *rvalue = Rvalue::Use(Operand::Copy(place)); - self.reused_locals.insert(place.local); - } + *rvalue = Rvalue::Use(Operand::Copy(place)); + self.reused_locals.insert(place.local); } } - self.super_statement(stmt, location); + + if lhs.is_indirect() { + // Non-local mutation maybe invalidate deref. + self.invalidate_derefs(); + } + + if let Some(local) = lhs.as_local() + && self.ssa.is_ssa(local) + // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark + // `local` as reusable if we have an exact type match. 
+ && self.local_decls[local].ty == rvalue.ty(self.local_decls, self.tcx) + { + let value = value.unwrap_or_else(|| self.new_opaque()); + self.assign(local, value); + } } fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) { From ac70dc85e74d789b82b8352a895335b18e1a569e Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Mon, 30 Jun 2025 18:26:37 +0000 Subject: [PATCH 2/6] Introduce Value::RawPtr as it behaves differently from other aggregates. --- compiler/rustc_mir_transform/src/gvn.rs | 122 +++++++++++++----------- 1 file changed, 67 insertions(+), 55 deletions(-) diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index b19323239b6f8..b59b73531129e 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -163,12 +163,6 @@ enum AggregateTy<'tcx> { Array, Tuple, Def(DefId, ty::GenericArgsRef<'tcx>), - RawPtr { - /// Needed for cast propagation. - data_pointer_ty: Ty<'tcx>, - /// The data pointer can be anything thin, so doesn't determine the output. - output_pointer_ty: Ty<'tcx>, - }, } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] @@ -194,6 +188,17 @@ enum Value<'tcx> { /// An aggregate value, either tuple/closure/struct/enum. /// This does not contain unions, as we cannot reason with the value. Aggregate(AggregateTy<'tcx>, VariantIdx, Vec), + /// A raw pointer aggregate built from a thin pointer and metadata. + RawPtr { + /// Thin pointer component. This is field 0 in MIR. + pointer: VnIndex, + /// Metadata component. This is field 1 in MIR. + metadata: VnIndex, + /// Needed for cast propagation. + data_pointer_ty: Ty<'tcx>, + /// The data pointer can be anything thin, so doesn't determine the output. + output_pointer_ty: Ty<'tcx>, + }, /// This corresponds to a `[value; count]` expression. Repeat(VnIndex, ty::Const<'tcx>), /// The address of a place. @@ -402,22 +407,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { AggregateTy::Def(def_id, args) => { self.tcx.type_of(def_id).instantiate(self.tcx, args) } - AggregateTy::RawPtr { output_pointer_ty, .. } => output_pointer_ty, }; let variant = if ty.is_enum() { Some(variant) } else { None }; let ty = self.ecx.layout_of(ty).ok()?; if ty.is_zst() { ImmTy::uninit(ty).into() - } else if matches!(kind, AggregateTy::RawPtr { .. }) { - // Pointers don't have fields, so don't `project_field` them. - let data = self.ecx.read_pointer(fields[0]).discard_err()?; - let meta = if fields[1].layout.is_zst() { - MemPlaceMeta::None - } else { - MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).discard_err()?) - }; - let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx); - ImmTy::from_immediate(ptr_imm, ty).into() } else if matches!( ty.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..) @@ -446,6 +440,22 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { return None; } } + RawPtr { pointer, metadata, output_pointer_ty, data_pointer_ty: _ } => { + let pointer = self.evaluated[pointer].as_ref()?; + let metadata = self.evaluated[metadata].as_ref()?; + let output_pointer_ty = self.ecx.layout_of(output_pointer_ty).ok()?; + debug_assert!(!output_pointer_ty.is_zst()); + + // Pointers don't have fields, so don't `project_field` them. + let data = self.ecx.read_pointer(pointer).discard_err()?; + let meta = if metadata.layout.is_zst() { + MemPlaceMeta::None + } else { + MemPlaceMeta::Meta(self.ecx.read_scalar(metadata).discard_err()?) 
+ }; + let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx); + ImmTy::from_immediate(ptr_imm, output_pointer_ty).into() + } Projection(base, elem) => { let value = self.evaluated[base].as_ref()?; @@ -1026,7 +1036,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } } - let (mut ty, variant_index) = match *kind { + let fields: Vec<_> = field_ops + .iter_mut() + .map(|op| self.simplify_operand(op, location).unwrap_or_else(|| self.new_opaque())) + .collect(); + + let (ty, variant_index) = match *kind { AggregateKind::Array(..) => { assert!(!field_ops.is_empty()); (AggregateTy::Array, FIRST_VARIANT) @@ -1045,41 +1060,41 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { AggregateKind::Adt(_, _, _, _, Some(_)) => return None, AggregateKind::RawPtr(pointee_ty, mtbl) => { assert_eq!(field_ops.len(), 2); - let data_pointer_ty = field_ops[FieldIdx::ZERO].ty(self.local_decls, self.tcx); + let mut data_pointer_ty = field_ops[FieldIdx::ZERO].ty(self.local_decls, self.tcx); let output_pointer_ty = Ty::new_ptr(self.tcx, pointee_ty, mtbl); - (AggregateTy::RawPtr { data_pointer_ty, output_pointer_ty }, FIRST_VARIANT) - } - }; - - let mut fields: Vec<_> = field_ops - .iter_mut() - .map(|op| self.simplify_operand(op, location).unwrap_or_else(|| self.new_opaque())) - .collect(); - if let AggregateTy::RawPtr { data_pointer_ty, output_pointer_ty } = &mut ty { - let mut was_updated = false; + let [mut pointer, metadata] = fields.try_into().unwrap(); + + // Any thin pointer of matching mutability is fine as the data pointer. + let mut was_updated = false; + while let Value::Cast { + kind: CastKind::PtrToPtr, + value: cast_value, + from: cast_from, + to: _, + } = self.get(pointer) + && let ty::RawPtr(from_pointee_ty, from_mtbl) = cast_from.kind() + && let ty::RawPtr(_, output_mtbl) = output_pointer_ty.kind() + && from_mtbl == output_mtbl + && from_pointee_ty.is_sized(self.tcx, self.typing_env()) + { + pointer = *cast_value; + data_pointer_ty = *cast_from; + was_updated = true; + } - // Any thin pointer of matching mutability is fine as the data pointer. - while let Value::Cast { - kind: CastKind::PtrToPtr, - value: cast_value, - from: cast_from, - to: _, - } = self.get(fields[0]) - && let ty::RawPtr(from_pointee_ty, from_mtbl) = cast_from.kind() - && let ty::RawPtr(_, output_mtbl) = output_pointer_ty.kind() - && from_mtbl == output_mtbl - && from_pointee_ty.is_sized(self.tcx, self.typing_env()) - { - fields[0] = *cast_value; - *data_pointer_ty = *cast_from; - was_updated = true; - } + if was_updated && let Some(op) = self.try_as_operand(pointer, location) { + field_ops[FieldIdx::ZERO] = op; + } - if was_updated && let Some(op) = self.try_as_operand(fields[0], location) { - field_ops[FieldIdx::ZERO] = op; + return Some(self.insert(Value::RawPtr { + output_pointer_ty, + data_pointer_ty, + pointer, + metadata, + })); } - } + }; if let AggregateTy::Array = ty && fields.len() > 4 @@ -1165,9 +1180,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { (UnOp::Not, Value::BinaryOp(BinOp::Ne, lhs, rhs)) => { Value::BinaryOp(BinOp::Eq, *lhs, *rhs) } - (UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => { - return Some(fields[1]); - } + (UnOp::PtrMetadata, Value::RawPtr { metadata, .. }) => return Some(*metadata), // We have an unsizing cast, which assigns the length to wide pointer metadata. 
( UnOp::PtrMetadata, @@ -1399,16 +1412,15 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // If a cast just casts away the metadata again, then we can get it by // casting the original thin pointer passed to `from_raw_parts` if let PtrToPtr = kind - && let Value::Aggregate(AggregateTy::RawPtr { data_pointer_ty, .. }, _, fields) = - self.get(value) + && let Value::RawPtr { data_pointer_ty, pointer, .. } = self.get(value) && let ty::RawPtr(to_pointee, _) = to.kind() && to_pointee.is_sized(self.tcx, self.typing_env()) { from = *data_pointer_ty; - value = fields[0]; + value = *pointer; was_updated_this_iteration = true; if *data_pointer_ty == to { - return Some(fields[0]); + return Some(*pointer); } } From be41333604c15acef2766619beac604bb3726538 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Mon, 30 Jun 2025 19:27:27 +0000 Subject: [PATCH 3/6] Store a full Ty with each Value. --- compiler/rustc_mir_transform/src/gvn.rs | 244 ++++++++---------- tests/mir-opt/const_prop/transmute.rs | 2 +- ...ransmute.unreachable_direct.GVN.32bit.diff | 2 +- ...ransmute.unreachable_direct.GVN.64bit.diff | 2 +- ...generic_cast_metadata.GVN.panic-abort.diff | 3 +- ...eneric_cast_metadata.GVN.panic-unwind.diff | 3 +- tests/mir-opt/gvn.rs | 2 +- 7 files changed, 122 insertions(+), 136 deletions(-) diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index b59b73531129e..3e4c05b355f6b 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -130,7 +130,7 @@ impl<'tcx> crate::MirPass<'tcx> for GVN { let mut state = VnState::new(tcx, body, typing_env, &ssa, dominators, &body.local_decls); for local in body.args_iter().filter(|&local| ssa.is_ssa(local)) { - let opaque = state.new_opaque(); + let opaque = state.new_opaque(body.local_decls[local].ty); state.assign(local, opaque); } @@ -238,7 +238,7 @@ struct VnState<'body, 'tcx> { /// Locals that are assigned that value. // This vector does not hold all the values of `VnIndex` that we create. rev_locals: IndexVec<VnIndex, SmallVec<[Local; 1]>>, - values: FxIndexSet<Value<'tcx>>, + values: FxIndexSet<(Value<'tcx>, Ty<'tcx>)>, /// Values evaluated as constants if possible. evaluated: IndexVec<VnIndex, Option<OpTy<'tcx>>>, /// Counter to generate different values. @@ -287,8 +287,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } #[instrument(level = "trace", skip(self), ret)] - fn insert(&mut self, value: Value<'tcx>) -> VnIndex { - let (index, new) = self.values.insert_full(value); + fn insert(&mut self, ty: Ty<'tcx>, value: Value<'tcx>) -> VnIndex { + let (index, new) = self.values.insert_full((value, ty)); let index = VnIndex::from_usize(index); if new { // Grow `evaluated` and `rev_locals` here to amortize the allocations. @@ -310,20 +310,33 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { /// Create a new `Value` for which we have no information at all, except that it is distinct /// from all the others. #[instrument(level = "trace", skip(self), ret)] - fn new_opaque(&mut self) -> VnIndex { + fn new_opaque(&mut self, ty: Ty<'tcx>) -> VnIndex { let value = Value::Opaque(self.next_opaque()); - self.insert(value) + self.insert(ty, value) } /// Create a new `Value::Address` distinct from all the others.
#[instrument(level = "trace", skip(self), ret)] fn new_pointer(&mut self, place: Place<'tcx>, kind: AddressKind) -> VnIndex { + let pty = place.ty(self.local_decls, self.tcx).ty; + let ty = match kind { + AddressKind::Ref(bk) => { + Ty::new_ref(self.tcx, self.tcx.lifetimes.re_erased, pty, bk.to_mutbl_lossy()) + } + AddressKind::Address(mutbl) => Ty::new_ptr(self.tcx, pty, mutbl.to_mutbl_lossy()), + }; let value = Value::Address { place, kind, provenance: self.next_opaque() }; - self.insert(value) + self.insert(ty, value) } + #[inline] fn get(&self, index: VnIndex) -> &Value<'tcx> { - self.values.get_index(index.as_usize()).unwrap() + &self.values.get_index(index.as_usize()).unwrap().0 + } + + #[inline] + fn ty(&self, index: VnIndex) -> Ty<'tcx> { + self.values.get_index(index.as_usize()).unwrap().1 } /// Record that `local` is assigned `value`. `local` must be SSA. @@ -346,29 +359,29 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { debug_assert_ne!(disambiguator, 0); disambiguator }; - self.insert(Value::Constant { value, disambiguator }) + self.insert(value.ty(), Value::Constant { value, disambiguator }) } fn insert_bool(&mut self, flag: bool) -> VnIndex { // Booleans are deterministic. let value = Const::from_bool(self.tcx, flag); debug_assert!(value.is_deterministic()); - self.insert(Value::Constant { value, disambiguator: 0 }) + self.insert(self.tcx.types.bool, Value::Constant { value, disambiguator: 0 }) } - fn insert_scalar(&mut self, scalar: Scalar, ty: Ty<'tcx>) -> VnIndex { + fn insert_scalar(&mut self, ty: Ty<'tcx>, scalar: Scalar) -> VnIndex { // Scalars are deterministic. let value = Const::from_scalar(self.tcx, scalar, ty); debug_assert!(value.is_deterministic()); - self.insert(Value::Constant { value, disambiguator: 0 }) + self.insert(ty, Value::Constant { value, disambiguator: 0 }) } - fn insert_tuple(&mut self, values: Vec<VnIndex>) -> VnIndex { - self.insert(Value::Aggregate(AggregateTy::Tuple, VariantIdx::ZERO, values)) + fn insert_tuple(&mut self, ty: Ty<'tcx>, values: Vec<VnIndex>) -> VnIndex { + self.insert(ty, Value::Aggregate(AggregateTy::Tuple, VariantIdx::ZERO, values)) } - fn insert_deref(&mut self, value: VnIndex) -> VnIndex { - let value = self.insert(Value::Projection(value, ProjectionElem::Deref)); + fn insert_deref(&mut self, ty: Ty<'tcx>, value: VnIndex) -> VnIndex { + let value = self.insert(ty, Value::Projection(value, ProjectionElem::Deref)); self.derefs.push(value); value } @@ -376,14 +389,18 @@ fn invalidate_derefs(&mut self) { for deref in std::mem::take(&mut self.derefs) { let opaque = self.next_opaque(); - *self.values.get_index_mut2(deref.index()).unwrap() = Value::Opaque(opaque); + self.values.get_index_mut2(deref.index()).unwrap().0 = Value::Opaque(opaque); } } #[instrument(level = "trace", skip(self), ret)] fn eval_to_const(&mut self, value: VnIndex) -> Option<OpTy<'tcx>> { use Value::*; + let ty = self.ty(value); + let ty = self.ecx.layout_of(ty).ok()?; let op = match *self.get(value) { + _ if ty.is_zst() => ImmTy::uninit(ty).into(), + Opaque(_) => return None, // Do not bother evaluating repeat expressions. This would uselessly consume memory. Repeat(..) => return None, @@ -391,31 +408,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { Constant { ref value, disambiguator: _ } => { self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()?
} - Aggregate(kind, variant, ref fields) => { + Aggregate(_, variant, ref fields) => { let fields = fields .iter() .map(|&f| self.evaluated[f].as_ref()) .collect::>>()?; - let ty = match kind { - AggregateTy::Array => { - assert!(fields.len() > 0); - Ty::new_array(self.tcx, fields[0].layout.ty, fields.len() as u64) - } - AggregateTy::Tuple => { - Ty::new_tup_from_iter(self.tcx, fields.iter().map(|f| f.layout.ty)) - } - AggregateTy::Def(def_id, args) => { - self.tcx.type_of(def_id).instantiate(self.tcx, args) - } - }; - let variant = if ty.is_enum() { Some(variant) } else { None }; - let ty = self.ecx.layout_of(ty).ok()?; - if ty.is_zst() { - ImmTy::uninit(ty).into() - } else if matches!( - ty.backend_repr, - BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..) - ) { + let variant = if ty.ty.is_enum() { Some(variant) } else { None }; + if matches!(ty.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) + { let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?; let variant_dest = if let Some(variant) = variant { self.ecx.project_downcast(&dest, variant).discard_err()? @@ -440,11 +440,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { return None; } } - RawPtr { pointer, metadata, output_pointer_ty, data_pointer_ty: _ } => { + RawPtr { pointer, metadata, output_pointer_ty: _, data_pointer_ty: _ } => { let pointer = self.evaluated[pointer].as_ref()?; let metadata = self.evaluated[metadata].as_ref()?; - let output_pointer_ty = self.ecx.layout_of(output_pointer_ty).ok()?; - debug_assert!(!output_pointer_ty.is_zst()); // Pointers don't have fields, so don't `project_field` them. let data = self.ecx.read_pointer(pointer).discard_err()?; @@ -454,7 +452,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { MemPlaceMeta::Meta(self.ecx.read_scalar(metadata).discard_err()?) }; let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx); - ImmTy::from_immediate(ptr_imm, output_pointer_ty).into() + ImmTy::from_immediate(ptr_imm, ty).into() } Projection(base, elem) => { @@ -481,7 +479,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { }; self.ecx.project(value, elem).discard_err()? 
} - Address { place, kind, provenance: _ } => { + Address { place, kind: _, provenance: _ } => { if !place.is_indirect_first_projection() { return None; } @@ -497,19 +495,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { mplace = self.ecx.project(&mplace, proj).discard_err()?; } let pointer = mplace.to_ref(&self.ecx); - let ty = match kind { - AddressKind::Ref(bk) => Ty::new_ref( - self.tcx, - self.tcx.lifetimes.re_erased, - mplace.layout.ty, - bk.to_mutbl_lossy(), - ), - AddressKind::Address(mutbl) => { - Ty::new_ptr(self.tcx, mplace.layout.ty, mutbl.to_mutbl_lossy()) - } - }; - let layout = self.ecx.layout_of(ty).ok()?; - ImmTy::from_immediate(pointer, layout).into() + ImmTy::from_immediate(pointer, ty).into() } Discriminant(base) => { @@ -521,32 +507,28 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } Len(slice) => { let slice = self.evaluated[slice].as_ref()?; - let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap(); let len = slice.len(&self.ecx).discard_err()?; - let imm = ImmTy::from_uint(len, usize_layout); - imm.into() + ImmTy::from_uint(len, ty).into() } - NullaryOp(null_op, ty) => { - let layout = self.ecx.layout_of(ty).ok()?; + NullaryOp(null_op, arg_ty) => { + let arg_layout = self.ecx.layout_of(arg_ty).ok()?; if let NullOp::SizeOf | NullOp::AlignOf = null_op - && layout.is_unsized() + && arg_layout.is_unsized() { return None; } let val = match null_op { - NullOp::SizeOf => layout.size.bytes(), - NullOp::AlignOf => layout.align.abi.bytes(), + NullOp::SizeOf => arg_layout.size.bytes(), + NullOp::AlignOf => arg_layout.align.abi.bytes(), NullOp::OffsetOf(fields) => self .ecx .tcx - .offset_of_subfield(self.typing_env(), layout, fields.iter()) + .offset_of_subfield(self.typing_env(), arg_layout, fields.iter()) .bytes(), NullOp::UbChecks => return None, NullOp::ContractChecks => return None, }; - let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap(); - let imm = ImmTy::from_uint(val, usize_layout); - imm.into() + ImmTy::from_uint(val, ty).into() } UnaryOp(un_op, operand) => { let operand = self.evaluated[operand].as_ref()?; @@ -562,30 +544,27 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let val = self.ecx.binary_op(bin_op, &lhs, &rhs).discard_err()?; val.into() } - Cast { kind, value, from: _, to } => match kind { + Cast { kind, value, from: _, to: _ } => match kind { CastKind::IntToInt | CastKind::IntToFloat => { let value = self.evaluated[value].as_ref()?; let value = self.ecx.read_immediate(value).discard_err()?; - let to = self.ecx.layout_of(to).ok()?; - let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?; + let res = self.ecx.int_to_int_or_float(&value, ty).discard_err()?; res.into() } CastKind::FloatToFloat | CastKind::FloatToInt => { let value = self.evaluated[value].as_ref()?; let value = self.ecx.read_immediate(value).discard_err()?; - let to = self.ecx.layout_of(to).ok()?; - let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?; + let res = self.ecx.float_to_float_or_int(&value, ty).discard_err()?; res.into() } CastKind::Transmute => { let value = self.evaluated[value].as_ref()?; - let to = self.ecx.layout_of(to).ok()?; // `offset` for immediates generally only supports projections that match the // type of the immediate. However, as a HACK, we exploit that it can also do // limited transmutes: it only works between types with the same layout, and // cannot transmute pointers to integers. 
if value.as_mplace_or_imm().is_right() { - let can_transmute = match (value.layout.backend_repr, to.backend_repr) { + let can_transmute = match (value.layout.backend_repr, ty.backend_repr) { (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => { s1.size(&self.ecx) == s2.size(&self.ecx) && !matches!(s1.primitive(), Primitive::Pointer(..)) @@ -605,13 +584,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { return None; } } - value.offset(Size::ZERO, to, &self.ecx).discard_err()? + value.offset(Size::ZERO, ty, &self.ecx).discard_err()? } CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) => { let src = self.evaluated[value].as_ref()?; - let to = self.ecx.layout_of(to).ok()?; - let dest = self.ecx.allocate(to, MemoryKind::Stack).discard_err()?; - self.ecx.unsize_into(src, to, &dest.clone().into()).discard_err()?; + let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?; + self.ecx.unsize_into(src, ty, &dest.clone().into()).discard_err()?; self.ecx .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id()) .discard_err()?; @@ -620,15 +598,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { CastKind::FnPtrToPtr | CastKind::PtrToPtr => { let src = self.evaluated[value].as_ref()?; let src = self.ecx.read_immediate(src).discard_err()?; - let to = self.ecx.layout_of(to).ok()?; - let ret = self.ecx.ptr_to_ptr(&src, to).discard_err()?; + let ret = self.ecx.ptr_to_ptr(&src, ty).discard_err()?; ret.into() } CastKind::PointerCoercion(ty::adjustment::PointerCoercion::UnsafeFnPointer, _) => { let src = self.evaluated[value].as_ref()?; let src = self.ecx.read_immediate(src).discard_err()?; - let to = self.ecx.layout_of(to).ok()?; - ImmTy::from_immediate(*src, to).into() + ImmTy::from_immediate(*src, ty).into() } _ => return None, }, @@ -638,21 +614,20 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { fn project( &mut self, - place: PlaceRef<'tcx>, + place_ty: PlaceTy<'tcx>, value: VnIndex, proj: PlaceElem<'tcx>, from_non_ssa_index: &mut bool, - ) -> Option { + ) -> Option<(PlaceTy<'tcx>, VnIndex)> { + let projection_ty = place_ty.projection_ty(self.tcx, proj); let proj = match proj { ProjectionElem::Deref => { - let ty = place.ty(self.local_decls, self.tcx).ty; - if let Some(Mutability::Not) = ty.ref_mutability() - && let Some(pointee_ty) = ty.builtin_deref(true) - && pointee_ty.is_freeze(self.tcx, self.typing_env()) + if let Some(Mutability::Not) = place_ty.ty.ref_mutability() + && projection_ty.ty.is_freeze(self.tcx, self.typing_env()) { // An immutable borrow `_x` always points to the same value for the // lifetime of the borrow, so we can merge all instances of `*_x`. - return Some(self.insert_deref(value)); + return Some((projection_ty, self.insert_deref(projection_ty.ty, value))); } else { return None; } @@ -660,7 +635,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { ProjectionElem::Downcast(name, index) => ProjectionElem::Downcast(name, index), ProjectionElem::Field(f, ty) => { if let Value::Aggregate(_, _, fields) = self.get(value) { - return Some(fields[f.as_usize()]); + return Some((projection_ty, fields[f.as_usize()])); } else if let Value::Projection(outer_value, ProjectionElem::Downcast(_, read_variant)) = self.get(value) && let Value::Aggregate(_, written_variant, fields) = self.get(*outer_value) // This pass is not aware of control-flow, so we do not know whether the @@ -680,14 +655,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // a downcast to an inactive variant. 
&& written_variant == read_variant { - return Some(fields[f.as_usize()]); + return Some((projection_ty, fields[f.as_usize()])); } ProjectionElem::Field(f, ty) } ProjectionElem::Index(idx) => { if let Value::Repeat(inner, _) = self.get(value) { *from_non_ssa_index |= self.locals[idx].is_none(); - return Some(*inner); + return Some((projection_ty, *inner)); } let idx = self.locals[idx]?; ProjectionElem::Index(idx) @@ -695,7 +670,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { ProjectionElem::ConstantIndex { offset, min_length, from_end } => { match self.get(value) { Value::Repeat(inner, _) => { - return Some(*inner); + return Some((projection_ty, *inner)); } Value::Aggregate(AggregateTy::Array, _, operands) => { let offset = if from_end { @@ -703,7 +678,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } else { offset as usize }; - return operands.get(offset).copied(); + let value = operands.get(offset).copied()?; + return Some((projection_ty, value)); } _ => {} }; @@ -717,7 +693,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { ProjectionElem::UnwrapUnsafeBinder(ty) => ProjectionElem::UnwrapUnsafeBinder(ty), }; - Some(self.insert(Value::Projection(value, proj))) + let value = self.insert(projection_ty.ty, Value::Projection(value, proj)); + Some((projection_ty, value)) } /// Simplify the projection chain if we know better. @@ -779,6 +756,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // Invariant: `value` holds the value up-to the `index`th projection excluded. let mut value = self.locals[place.local]?; + // Invariant: `value` has type `place_ty`, with optional downcast variant if needed. + let mut place_ty = PlaceTy::from_ty(self.local_decls[place.local].ty); let mut from_non_ssa_index = false; for (index, proj) in place.projection.iter().enumerate() { if let Value::Projection(pointer, ProjectionElem::Deref) = *self.get(value) @@ -796,8 +775,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { place_ref = PlaceRef { local, projection: &place.projection[index..] }; } - let base = PlaceRef { local: place.local, projection: &place.projection[..index] }; - value = self.project(base, value, proj, &mut from_non_ssa_index)?; + (place_ty, value) = self.project(place_ty, value, proj, &mut from_non_ssa_index)?; } if let Value::Projection(pointer, ProjectionElem::Deref) = *self.get(value) @@ -906,8 +884,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // Unsupported values. Rvalue::ThreadLocalRef(..) | Rvalue::ShallowInitBox(..) => return None, }; - debug!(?value); - Some(self.insert(value)) + let ty = rvalue.ty(self.local_decls, self.tcx); + Some(self.insert(ty, value)) } fn simplify_discriminant(&mut self, place: VnIndex) -> Option { @@ -917,7 +895,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { { let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args); let discr = self.ecx.discriminant_for_variant(enum_ty, variant).discard_err()?; - return Some(self.insert_scalar(discr.to_scalar(), discr.layout.ty)); + return Some(self.insert_scalar(discr.layout.ty, discr.to_scalar())); } None @@ -1014,9 +992,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { rvalue: &mut Rvalue<'tcx>, location: Location, ) -> Option { + let tcx = self.tcx; + let ty = rvalue.ty(self.local_decls, tcx); + let Rvalue::Aggregate(box ref kind, ref mut field_ops) = *rvalue else { bug!() }; - let tcx = self.tcx; if field_ops.is_empty() { let is_zst = match *kind { AggregateKind::Array(..) 
@@ -1031,17 +1011,19 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { }; if is_zst { - let ty = rvalue.ty(self.local_decls, tcx); return Some(self.insert_constant(Const::zero_sized(ty))); } } let fields: Vec<_> = field_ops .iter_mut() - .map(|op| self.simplify_operand(op, location).unwrap_or_else(|| self.new_opaque())) + .map(|op| { + self.simplify_operand(op, location) + .unwrap_or_else(|| self.new_opaque(op.ty(self.local_decls, self.tcx))) + }) .collect(); - let (ty, variant_index) = match *kind { + let (aty, variant_index) = match *kind { AggregateKind::Array(..) => { assert!(!field_ops.is_empty()); (AggregateTy::Array, FIRST_VARIANT) @@ -1058,10 +1040,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } // Do not track unions. AggregateKind::Adt(_, _, _, _, Some(_)) => return None, - AggregateKind::RawPtr(pointee_ty, mtbl) => { + AggregateKind::RawPtr(..) => { assert_eq!(field_ops.len(), 2); let mut data_pointer_ty = field_ops[FieldIdx::ZERO].ty(self.local_decls, self.tcx); - let output_pointer_ty = Ty::new_ptr(self.tcx, pointee_ty, mtbl); let [mut pointer, metadata] = fields.try_into().unwrap(); @@ -1074,7 +1055,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { to: _, } = self.get(pointer) && let ty::RawPtr(from_pointee_ty, from_mtbl) = cast_from.kind() - && let ty::RawPtr(_, output_mtbl) = output_pointer_ty.kind() + && let ty::RawPtr(_, output_mtbl) = ty.kind() && from_mtbl == output_mtbl && from_pointee_ty.is_sized(self.tcx, self.typing_env()) { @@ -1087,16 +1068,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { field_ops[FieldIdx::ZERO] = op; } - return Some(self.insert(Value::RawPtr { - output_pointer_ty, - data_pointer_ty, - pointer, - metadata, - })); + return Some(self.insert( + ty, + Value::RawPtr { output_pointer_ty: ty, data_pointer_ty, pointer, metadata }, + )); } }; - if let AggregateTy::Array = ty + if let AggregateTy::Array = aty && fields.len() > 4 { let first = fields[0]; @@ -1105,18 +1084,18 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { if let Some(op) = self.try_as_operand(first, location) { *rvalue = Rvalue::Repeat(op, len); } - return Some(self.insert(Value::Repeat(first, len))); + return Some(self.insert(ty, Value::Repeat(first, len))); } } - if let AggregateTy::Def(_, _) = ty + if let AggregateTy::Def(_, _) = aty && let Some(value) = self.simplify_aggregate_to_copy(lhs, rvalue, location, &fields, variant_index) { return Some(value); } - Some(self.insert(Value::Aggregate(ty, variant_index, fields))) + Some(self.insert(ty, Value::Aggregate(aty, variant_index, fields))) } #[instrument(level = "trace", skip(self), ret)] @@ -1197,7 +1176,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } _ => Value::UnaryOp(op, arg_index), }; - Some(self.insert(value)) + let ty = op.ty(self.tcx, self.ty(arg_index)); + Some(self.insert(ty, value)) } #[instrument(level = "trace", skip(self), ret)] @@ -1243,8 +1223,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { if let Some(value) = self.simplify_binary_inner(op, lhs_ty, lhs, rhs) { return Some(value); } + let ty = op.ty(self.tcx, self.ty(lhs), self.ty(rhs)); let value = Value::BinaryOp(op, lhs, rhs); - Some(self.insert(value)) + Some(self.insert(ty, value)) } fn simplify_binary_inner( @@ -1336,19 +1317,19 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { | BinOp::Shr, Left(0), _, - ) => self.insert_scalar(Scalar::from_uint(0u128, layout.size), lhs_ty), + ) => self.insert_scalar(lhs_ty, Scalar::from_uint(0u128, layout.size)), // Attempt to simplify `x | ALL_ONES` to `ALL_ONES`. 
(BinOp::BitOr, _, Left(ones)) | (BinOp::BitOr, Left(ones), _) if ones == layout.size.truncate(u128::MAX) || (layout.ty.is_bool() && ones == 1) => { - self.insert_scalar(Scalar::from_uint(ones, layout.size), lhs_ty) + self.insert_scalar(lhs_ty, Scalar::from_uint(ones, layout.size)) } // Sub/Xor with itself. (BinOp::Sub | BinOp::SubWithOverflow | BinOp::SubUnchecked | BinOp::BitXor, a, b) if a == b => { - self.insert_scalar(Scalar::from_uint(0u128, layout.size), lhs_ty) + self.insert_scalar(lhs_ty, Scalar::from_uint(0u128, layout.size)) } // Comparison: // - if both operands can be computed as bits, just compare the bits; @@ -1362,8 +1343,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { }; if op.is_overflowing() { + let ty = Ty::new_tup(self.tcx, &[self.ty(result), self.tcx.types.bool]); let false_val = self.insert_bool(false); - Some(self.insert_tuple(vec![result, false_val])) + Some(self.insert_tuple(ty, vec![result, false_val])) } else { Some(result) } @@ -1389,7 +1371,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { if let CastKind::PointerCoercion(ReifyFnPointer | ClosureFnPointer(_), _) = kind { // Each reification of a generic fn may get a different pointer. // Do not try to merge them. - return Some(self.new_opaque()); + return Some(self.new_opaque(to)); } let mut was_ever_updated = false; @@ -1497,7 +1479,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { *initial_kind = kind; } - Some(self.insert(Value::Cast { kind, value, from, to })) + Some(self.insert(to, Value::Cast { kind, value, from, to })) } fn simplify_len(&mut self, place: &mut Place<'tcx>, location: Location) -> Option { @@ -1530,7 +1512,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } // Fallback: a symbolic `Len`. - Some(self.insert(Value::Len(inner))) + Some(self.insert(self.tcx.types.usize, Value::Len(inner))) } fn pointers_have_same_metadata(&self, left_ptr_ty: Ty<'tcx>, right_ptr_ty: Ty<'tcx>) -> bool { @@ -1807,11 +1789,12 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> { if let Some(local) = lhs.as_local() && self.ssa.is_ssa(local) + && let rvalue_ty = rvalue.ty(self.local_decls, self.tcx) // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark // `local` as reusable if we have an exact type match. - && self.local_decls[local].ty == rvalue.ty(self.local_decls, self.tcx) + && self.local_decls[local].ty == rvalue_ty { - let value = value.unwrap_or_else(|| self.new_opaque()); + let value = value.unwrap_or_else(|| self.new_opaque(rvalue_ty)); self.assign(local, value); } } @@ -1821,7 +1804,8 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> { if let Some(local) = destination.as_local() && self.ssa.is_ssa(local) { - let opaque = self.new_opaque(); + let ty = self.local_decls[local].ty; + let opaque = self.new_opaque(ty); self.assign(local, opaque); } } diff --git a/tests/mir-opt/const_prop/transmute.rs b/tests/mir-opt/const_prop/transmute.rs index 892b91a5414c3..33cbefbf053f5 100644 --- a/tests/mir-opt/const_prop/transmute.rs +++ b/tests/mir-opt/const_prop/transmute.rs @@ -56,7 +56,7 @@ pub unsafe fn undef_union_as_integer() -> u32 { pub unsafe fn unreachable_direct() -> ! 
{ // CHECK-LABEL: fn unreachable_direct( // CHECK: = const (); - // CHECK: = const () as Never (Transmute); + // CHECK: = const ZeroSized: Never; let x: Never = unsafe { transmute(()) }; match x {} } diff --git a/tests/mir-opt/const_prop/transmute.unreachable_direct.GVN.32bit.diff b/tests/mir-opt/const_prop/transmute.unreachable_direct.GVN.32bit.diff index 3364782022dc7..5aeb55a5a6a53 100644 --- a/tests/mir-opt/const_prop/transmute.unreachable_direct.GVN.32bit.diff +++ b/tests/mir-opt/const_prop/transmute.unreachable_direct.GVN.32bit.diff @@ -15,7 +15,7 @@ - _2 = (); - _1 = move _2 as Never (Transmute); + _2 = const (); -+ _1 = const () as Never (Transmute); ++ _1 = const ZeroSized: Never; unreachable; } } diff --git a/tests/mir-opt/const_prop/transmute.unreachable_direct.GVN.64bit.diff b/tests/mir-opt/const_prop/transmute.unreachable_direct.GVN.64bit.diff index 3364782022dc7..5aeb55a5a6a53 100644 --- a/tests/mir-opt/const_prop/transmute.unreachable_direct.GVN.64bit.diff +++ b/tests/mir-opt/const_prop/transmute.unreachable_direct.GVN.64bit.diff @@ -15,7 +15,7 @@ - _2 = (); - _1 = move _2 as Never (Transmute); + _2 = const (); -+ _1 = const () as Never (Transmute); ++ _1 = const ZeroSized: Never; unreachable; } } diff --git a/tests/mir-opt/gvn.generic_cast_metadata.GVN.panic-abort.diff b/tests/mir-opt/gvn.generic_cast_metadata.GVN.panic-abort.diff index 770c673077533..ffe4a0272b4b1 100644 --- a/tests/mir-opt/gvn.generic_cast_metadata.GVN.panic-abort.diff +++ b/tests/mir-opt/gvn.generic_cast_metadata.GVN.panic-abort.diff @@ -18,7 +18,8 @@ bb0: { _4 = copy _1 as *const T (PtrToPtr); - _5 = PtrMetadata(copy _4); +- _5 = PtrMetadata(copy _4); ++ _5 = const (); _6 = copy _1 as *const (&A, [T]) (PtrToPtr); - _7 = PtrMetadata(copy _6); + _7 = PtrMetadata(copy _1); diff --git a/tests/mir-opt/gvn.generic_cast_metadata.GVN.panic-unwind.diff b/tests/mir-opt/gvn.generic_cast_metadata.GVN.panic-unwind.diff index 770c673077533..ffe4a0272b4b1 100644 --- a/tests/mir-opt/gvn.generic_cast_metadata.GVN.panic-unwind.diff +++ b/tests/mir-opt/gvn.generic_cast_metadata.GVN.panic-unwind.diff @@ -18,7 +18,8 @@ bb0: { _4 = copy _1 as *const T (PtrToPtr); - _5 = PtrMetadata(copy _4); +- _5 = PtrMetadata(copy _4); ++ _5 = const (); _6 = copy _1 as *const (&A, [T]) (PtrToPtr); - _7 = PtrMetadata(copy _6); + _7 = PtrMetadata(copy _1); diff --git a/tests/mir-opt/gvn.rs b/tests/mir-opt/gvn.rs index 6ef320c90de12..5d348bc3c1e40 100644 --- a/tests/mir-opt/gvn.rs +++ b/tests/mir-opt/gvn.rs @@ -869,7 +869,7 @@ fn generic_cast_metadata(ps: *const [T], pa: *const A, // Metadata usize -> (), do not optimize. // CHECK: [[T:_.+]] = copy _1 as - // CHECK-NEXT: PtrMetadata(copy [[T]]) + // CHECK-NEXT: const (); let t1 = CastPtrToPtr::<_, *const T>(ps); let m1 = PtrMetadata(t1); From 4750dff54eab52a308a1a1afd85b4614d652841a Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Mon, 30 Jun 2025 19:50:11 +0000 Subject: [PATCH 4/6] Remove extraneous types. 
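The previous commit made the interner store a `Ty` next to every `Value`, so the types embedded in the individual variants are now redundant: `AggregateTy` can be deleted outright, `Value::Cast` no longer needs its `from`/`to` fields, `Value::RawPtr` drops its pointer types, and projections erase their type parameter. As a rough, abridged sketch of only the variants this commit touches (the full enum lives in gvn.rs; the elided variants are unchanged):

    // Types now live beside the interned values, in
    // `values: FxIndexSet<(Value<'tcx>, Ty<'tcx>)>`, and are recovered with
    // `self.ty(index)` rather than being duplicated in each variant.
    enum Value<'tcx> {
        // ...
        Aggregate(VariantIdx, Vec<VnIndex>),
        RawPtr { pointer: VnIndex, metadata: VnIndex },
        Projection(VnIndex, ProjectionElem<VnIndex, ()>),
        Cast { kind: CastKind, value: VnIndex },
        // ...
    }

Call sites that previously read the embedded types now query the interner instead, e.g. `self.ty(*cast_value)` where the old code matched on the stored `from` type.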
--- compiler/rustc_mir_transform/src/gvn.rs | 209 +++++++----------- ...ted_loop.PreCodegen.after.panic-unwind.mir | 5 +- 2 files changed, 80 insertions(+), 134 deletions(-) diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 3e4c05b355f6b..e081d1fde93bf 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -105,7 +105,6 @@ use rustc_middle::mir::*; use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf}; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_span::DUMMY_SP; -use rustc_span::def_id::DefId; use smallvec::SmallVec; use tracing::{debug, instrument, trace}; @@ -155,16 +154,6 @@ newtype_index! { struct VnIndex {} } -/// Computing the aggregate's type can be quite slow, so we only keep the minimal amount of -/// information to reconstruct it when needed. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -enum AggregateTy<'tcx> { - /// Invariant: this must not be used for an empty array. - Array, - Tuple, - Def(DefId, ty::GenericArgsRef<'tcx>), -} - #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] enum AddressKind { Ref(BorrowKind), @@ -187,17 +176,13 @@ enum Value<'tcx> { }, /// An aggregate value, either tuple/closure/struct/enum. /// This does not contain unions, as we cannot reason with the value. - Aggregate(AggregateTy<'tcx>, VariantIdx, Vec<VnIndex>), + Aggregate(VariantIdx, Vec<VnIndex>), /// A raw pointer aggregate built from a thin pointer and metadata. RawPtr { /// Thin pointer component. This is field 0 in MIR. pointer: VnIndex, /// Metadata component. This is field 1 in MIR. metadata: VnIndex, - /// Needed for cast propagation. - data_pointer_ty: Ty<'tcx>, - /// The data pointer can be anything thin, so doesn't determine the output. - output_pointer_ty: Ty<'tcx>, }, /// This corresponds to a `[value; count]` expression. Repeat(VnIndex, ty::Const<'tcx>), /// The address of a place. @@ -211,7 +196,7 @@ enum Value<'tcx> { // Extractions. /// This is the *value* obtained by projecting another value. - Projection(VnIndex, ProjectionElem<VnIndex, Ty<'tcx>>), + Projection(VnIndex, ProjectionElem<VnIndex, ()>), /// Discriminant of the given value. Discriminant(VnIndex), /// Length of an array or slice. @@ -224,8 +209,6 @@ enum Value<'tcx> { Cast { kind: CastKind, value: VnIndex, - from: Ty<'tcx>, - to: Ty<'tcx>, }, } @@ -377,7 +360,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } fn insert_tuple(&mut self, ty: Ty<'tcx>, values: Vec<VnIndex>) -> VnIndex { - self.insert(ty, Value::Aggregate(AggregateTy::Tuple, VariantIdx::ZERO, values)) + self.insert(ty, Value::Aggregate(VariantIdx::ZERO, values)) } fn insert_deref(&mut self, ty: Ty<'tcx>, value: VnIndex) -> VnIndex { @@ -408,7 +391,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { Constant { ref value, disambiguator: _ } => { self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()?
} - Aggregate(_, variant, ref fields) => { + Aggregate(variant, ref fields) => { let fields = fields .iter() .map(|&f| self.evaluated[f].as_ref()) @@ -440,7 +423,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { return None; } } - RawPtr { pointer, metadata, output_pointer_ty: _, data_pointer_ty: _ } => { + RawPtr { pointer, metadata } => { let pointer = self.evaluated[pointer].as_ref()?; let metadata = self.evaluated[metadata].as_ref()?; @@ -456,28 +439,28 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } Projection(base, elem) => { - let value = self.evaluated[base].as_ref()?; + let base = self.evaluated[base].as_ref()?; let elem = match elem { ProjectionElem::Deref => ProjectionElem::Deref, ProjectionElem::Downcast(name, read_variant) => { ProjectionElem::Downcast(name, read_variant) } - ProjectionElem::Field(f, ty) => ProjectionElem::Field(f, ty), + ProjectionElem::Field(f, ()) => ProjectionElem::Field(f, ty.ty), ProjectionElem::ConstantIndex { offset, min_length, from_end } => { ProjectionElem::ConstantIndex { offset, min_length, from_end } } ProjectionElem::Subslice { from, to, from_end } => { ProjectionElem::Subslice { from, to, from_end } } - ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(ty), - ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(ty), - ProjectionElem::UnwrapUnsafeBinder(ty) => { - ProjectionElem::UnwrapUnsafeBinder(ty) + ProjectionElem::OpaqueCast(()) => ProjectionElem::OpaqueCast(ty.ty), + ProjectionElem::Subtype(()) => ProjectionElem::Subtype(ty.ty), + ProjectionElem::UnwrapUnsafeBinder(()) => { + ProjectionElem::UnwrapUnsafeBinder(ty.ty) } // This should have been replaced by a `ConstantIndex` earlier. ProjectionElem::Index(_) => return None, }; - self.ecx.project(value, elem).discard_err()? + self.ecx.project(base, elem).discard_err()? } Address { place, kind: _, provenance: _ } => { if !place.is_indirect_first_projection() { @@ -544,7 +527,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let val = self.ecx.binary_op(bin_op, &lhs, &rhs).discard_err()?; val.into() } - Cast { kind, value, from: _, to: _ } => match kind { + Cast { kind, value } => match kind { CastKind::IntToInt | CastKind::IntToFloat => { let value = self.evaluated[value].as_ref()?; let value = self.ecx.read_immediate(value).discard_err()?; @@ -633,11 +616,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } } ProjectionElem::Downcast(name, index) => ProjectionElem::Downcast(name, index), - ProjectionElem::Field(f, ty) => { - if let Value::Aggregate(_, _, fields) = self.get(value) { + ProjectionElem::Field(f, _) => { + if let Value::Aggregate(_, fields) = self.get(value) { return Some((projection_ty, fields[f.as_usize()])); } else if let Value::Projection(outer_value, ProjectionElem::Downcast(_, read_variant)) = self.get(value) - && let Value::Aggregate(_, written_variant, fields) = self.get(*outer_value) + && let Value::Aggregate(written_variant, fields) = self.get(*outer_value) // This pass is not aware of control-flow, so we do not know whether the // replacement we are doing is actually reachable. 
We could be in any arm of // ``` @@ -657,7 +640,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { { return Some((projection_ty, fields[f.as_usize()])); } - ProjectionElem::Field(f, ty) + ProjectionElem::Field(f, ()) } ProjectionElem::Index(idx) => { if let Value::Repeat(inner, _) = self.get(value) { @@ -672,7 +655,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { Value::Repeat(inner, _) => { return Some((projection_ty, *inner)); } - Value::Aggregate(AggregateTy::Array, _, operands) => { + Value::Aggregate(_, operands) => { let offset = if from_end { operands.len() - offset as usize } else { @@ -688,9 +671,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { ProjectionElem::Subslice { from, to, from_end } => { ProjectionElem::Subslice { from, to, from_end } } - ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(ty), - ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(ty), - ProjectionElem::UnwrapUnsafeBinder(ty) => ProjectionElem::UnwrapUnsafeBinder(ty), + ProjectionElem::OpaqueCast(_) => ProjectionElem::OpaqueCast(()), + ProjectionElem::Subtype(_) => ProjectionElem::Subtype(()), + ProjectionElem::UnwrapUnsafeBinder(_) => ProjectionElem::UnwrapUnsafeBinder(()), }; let value = self.insert(projection_ty.ty, Value::Projection(value, proj)); @@ -852,14 +835,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { self.simplify_place_projection(place, location); return Some(self.new_pointer(*place, AddressKind::Address(mutbl))); } - Rvalue::WrapUnsafeBinder(ref mut op, ty) => { + Rvalue::WrapUnsafeBinder(ref mut op, _) => { let value = self.simplify_operand(op, location)?; - Value::Cast { - kind: CastKind::Transmute, - value, - from: op.ty(self.local_decls, self.tcx), - to: ty, - } + Value::Cast { kind: CastKind::Transmute, value } } // Operations. @@ -889,11 +867,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } fn simplify_discriminant(&mut self, place: VnIndex) -> Option { - if let Value::Aggregate(enum_ty, variant, _) = *self.get(place) - && let AggregateTy::Def(enum_did, enum_args) = enum_ty - && let DefKind::Enum = self.tcx.def_kind(enum_did) + let enum_ty = self.ty(place); + if enum_ty.is_enum() + && let Value::Aggregate(variant, _) = *self.get(place) { - let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args); let discr = self.ecx.discriminant_for_variant(enum_ty, variant).discard_err()?; return Some(self.insert_scalar(discr.layout.ty, discr.to_scalar())); } @@ -903,12 +880,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { fn try_as_place_elem( &mut self, - proj: ProjectionElem>, + ty: Ty<'tcx>, + proj: ProjectionElem, loc: Location, ) -> Option> { Some(match proj { ProjectionElem::Deref => ProjectionElem::Deref, - ProjectionElem::Field(idx, ty) => ProjectionElem::Field(idx, ty), + ProjectionElem::Field(idx, ()) => ProjectionElem::Field(idx, ty), ProjectionElem::Index(idx) => { let Some(local) = self.try_as_local(idx, loc) else { return None; @@ -923,9 +901,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { ProjectionElem::Subslice { from, to, from_end } } ProjectionElem::Downcast(symbol, idx) => ProjectionElem::Downcast(symbol, idx), - ProjectionElem::OpaqueCast(idx) => ProjectionElem::OpaqueCast(idx), - ProjectionElem::Subtype(idx) => ProjectionElem::Subtype(idx), - ProjectionElem::UnwrapUnsafeBinder(ty) => ProjectionElem::UnwrapUnsafeBinder(ty), + ProjectionElem::OpaqueCast(()) => ProjectionElem::OpaqueCast(ty), + ProjectionElem::Subtype(()) => ProjectionElem::Subtype(ty), + ProjectionElem::UnwrapUnsafeBinder(()) => ProjectionElem::UnwrapUnsafeBinder(ty), }) } @@ -971,8 +949,8 @@ 
impl<'body, 'tcx> VnState<'body, 'tcx> { // Allow introducing places with non-constant offsets, as those are still better than // reconstructing an aggregate. - if let Some(place) = self.try_as_place(copy_from_local_value, location, true) - && rvalue.ty(self.local_decls, self.tcx) == place.ty(self.local_decls, self.tcx).ty + if self.ty(copy_from_local_value) == rvalue.ty(self.local_decls, self.tcx) + && let Some(place) = self.try_as_place(copy_from_local_value, location, true) { // Avoid creating `*a = copy (*b)`, as they might be aliases resulting in overlapping assignments. // FIXME: This also avoids any kind of projection, not just derefs. We can add allowed projections. @@ -1023,44 +1001,31 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { }) .collect(); - let (aty, variant_index) = match *kind { - AggregateKind::Array(..) => { + let variant_index = match *kind { + AggregateKind::Array(..) | AggregateKind::Tuple => { assert!(!field_ops.is_empty()); - (AggregateTy::Array, FIRST_VARIANT) - } - AggregateKind::Tuple => { - assert!(!field_ops.is_empty()); - (AggregateTy::Tuple, FIRST_VARIANT) - } - AggregateKind::Closure(did, args) - | AggregateKind::CoroutineClosure(did, args) - | AggregateKind::Coroutine(did, args) => (AggregateTy::Def(did, args), FIRST_VARIANT), - AggregateKind::Adt(did, variant_index, args, _, None) => { - (AggregateTy::Def(did, args), variant_index) + FIRST_VARIANT } + AggregateKind::Closure(..) + | AggregateKind::CoroutineClosure(..) + | AggregateKind::Coroutine(..) => FIRST_VARIANT, + AggregateKind::Adt(_, variant_index, _, _, None) => variant_index, // Do not track unions. AggregateKind::Adt(_, _, _, _, Some(_)) => return None, AggregateKind::RawPtr(..) => { assert_eq!(field_ops.len(), 2); - let mut data_pointer_ty = field_ops[FieldIdx::ZERO].ty(self.local_decls, self.tcx); - let [mut pointer, metadata] = fields.try_into().unwrap(); // Any thin pointer of matching mutability is fine as the data pointer. 
let mut was_updated = false; - while let Value::Cast { - kind: CastKind::PtrToPtr, - value: cast_value, - from: cast_from, - to: _, - } = self.get(pointer) - && let ty::RawPtr(from_pointee_ty, from_mtbl) = cast_from.kind() + while let Value::Cast { kind: CastKind::PtrToPtr, value: cast_value } = + self.get(pointer) + && let ty::RawPtr(from_pointee_ty, from_mtbl) = self.ty(*cast_value).kind() && let ty::RawPtr(_, output_mtbl) = ty.kind() && from_mtbl == output_mtbl && from_pointee_ty.is_sized(self.tcx, self.typing_env()) { pointer = *cast_value; - data_pointer_ty = *cast_from; was_updated = true; } @@ -1068,16 +1033,11 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { field_ops[FieldIdx::ZERO] = op; } - return Some(self.insert( - ty, - Value::RawPtr { output_pointer_ty: ty, data_pointer_ty, pointer, metadata }, - )); + return Some(self.insert(ty, Value::RawPtr { pointer, metadata })); } }; - if let AggregateTy::Array = aty - && fields.len() > 4 - { + if ty.is_array() && fields.len() > 4 { let first = fields[0]; if fields.iter().all(|&v| v == first) { let len = ty::Const::from_target_usize(self.tcx, fields.len().try_into().unwrap()); @@ -1088,14 +1048,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } } - if let AggregateTy::Def(_, _) = aty - && let Some(value) = - self.simplify_aggregate_to_copy(lhs, rvalue, location, &fields, variant_index) + if let Some(value) = + self.simplify_aggregate_to_copy(lhs, rvalue, location, &fields, variant_index) { return Some(value); } - Some(self.insert(ty, Value::Aggregate(aty, variant_index, fields))) + Some(self.insert(ty, Value::Aggregate(variant_index, fields))) } #[instrument(level = "trace", skip(self), ret)] @@ -1106,6 +1065,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { location: Location, ) -> Option { let mut arg_index = self.simplify_operand(arg_op, location)?; + let arg_ty = self.ty(arg_index); + let ret_ty = op.ty(self.tcx, arg_ty); // PtrMetadata doesn't care about *const vs *mut vs & vs &mut, // so start by removing those distinctions so we can update the `Operand` @@ -1121,8 +1082,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // we can't always know exactly what the metadata are. // To allow things like `*mut (?A, ?T)` <-> `*mut (?B, ?T)`, // it's fine to get a projection as the type. - Value::Cast { kind: CastKind::PtrToPtr, value: inner, from, to } - if self.pointers_have_same_metadata(*from, *to) => + Value::Cast { kind: CastKind::PtrToPtr, value: inner } + if self.pointers_have_same_metadata(self.ty(*inner), arg_ty) => { arg_index = *inner; was_updated = true; @@ -1165,19 +1126,16 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { UnOp::PtrMetadata, Value::Cast { kind: CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _), - from, - to, - .. + value: inner, }, - ) if let ty::Slice(..) = to.builtin_deref(true).unwrap().kind() - && let ty::Array(_, len) = from.builtin_deref(true).unwrap().kind() => + ) if let ty::Slice(..) 
= arg_ty.builtin_deref(true).unwrap().kind() + && let ty::Array(_, len) = self.ty(*inner).builtin_deref(true).unwrap().kind() => { return Some(self.insert_constant(Const::Ty(self.tcx.types.usize, *len))); } _ => Value::UnaryOp(op, arg_index), }; - let ty = op.ty(self.tcx, self.ty(arg_index)); - Some(self.insert(ty, value)) + Some(self.insert(ret_ty, value)) } #[instrument(level = "trace", skip(self), ret)] @@ -1190,25 +1148,23 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { ) -> Option { let lhs = self.simplify_operand(lhs_operand, location); let rhs = self.simplify_operand(rhs_operand, location); + // Only short-circuit options after we called `simplify_operand` // on both operands for side effect. let mut lhs = lhs?; let mut rhs = rhs?; - let lhs_ty = lhs_operand.ty(self.local_decls, self.tcx); + let lhs_ty = self.ty(lhs); // If we're comparing pointers, remove `PtrToPtr` casts if the from // types of both casts and the metadata all match. if let BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge = op && lhs_ty.is_any_ptr() - && let Value::Cast { - kind: CastKind::PtrToPtr, value: lhs_value, from: lhs_from, .. - } = self.get(lhs) - && let Value::Cast { - kind: CastKind::PtrToPtr, value: rhs_value, from: rhs_from, .. - } = self.get(rhs) - && lhs_from == rhs_from - && self.pointers_have_same_metadata(*lhs_from, lhs_ty) + && let Value::Cast { kind: CastKind::PtrToPtr, value: lhs_value } = self.get(lhs) + && let Value::Cast { kind: CastKind::PtrToPtr, value: rhs_value } = self.get(rhs) + && let lhs_from = self.ty(*lhs_value) + && lhs_from == self.ty(*rhs_value) + && self.pointers_have_same_metadata(lhs_from, lhs_ty) { lhs = *lhs_value; rhs = *rhs_value; @@ -1223,7 +1179,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { if let Some(value) = self.simplify_binary_inner(op, lhs_ty, lhs, rhs) { return Some(value); } - let ty = op.ty(self.tcx, self.ty(lhs), self.ty(rhs)); + let ty = op.ty(self.tcx, lhs_ty, self.ty(rhs)); let value = Value::BinaryOp(op, lhs, rhs); Some(self.insert(ty, value)) } @@ -1361,9 +1317,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { use CastKind::*; use rustc_middle::ty::adjustment::PointerCoercion::*; - let mut from = initial_operand.ty(self.local_decls, self.tcx); let mut kind = *initial_kind; let mut value = self.simplify_operand(initial_operand, location)?; + let mut from = self.ty(value); if from == to { return Some(value); } @@ -1394,14 +1350,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // If a cast just casts away the metadata again, then we can get it by // casting the original thin pointer passed to `from_raw_parts` if let PtrToPtr = kind - && let Value::RawPtr { data_pointer_ty, pointer, .. } = self.get(value) + && let Value::RawPtr { pointer, .. } = self.get(value) && let ty::RawPtr(to_pointee, _) = to.kind() && to_pointee.is_sized(self.tcx, self.typing_env()) { - from = *data_pointer_ty; + from = self.ty(*pointer); value = *pointer; was_updated_this_iteration = true; - if *data_pointer_ty == to { + if from == to { return Some(*pointer); } } @@ -1409,7 +1365,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // Aggregate-then-Transmute can just transmute the original field value, // so long as the bytes of a value from only from a single field. 
             if let Transmute = kind
-                && let Value::Aggregate(_aggregate_ty, variant_idx, field_values) = self.get(value)
+                && let Value::Aggregate(variant_idx, field_values) = self.get(value)
                 && let Some((field_idx, field_ty)) =
                     self.value_is_all_in_one_field(from, *variant_idx)
             {
@@ -1422,13 +1378,8 @@
             }
 
             // Various cast-then-cast cases can be simplified.
-            if let Value::Cast {
-                kind: inner_kind,
-                value: inner_value,
-                from: inner_from,
-                to: inner_to,
-            } = *self.get(value)
-            {
+            if let Value::Cast { kind: inner_kind, value: inner_value } = *self.get(value) {
+                let inner_from = self.ty(inner_value);
                 let new_kind = match (inner_kind, kind) {
                     // Even if there's a narrowing cast in here that's fine, because
                     // things like `*mut [i32] -> *mut i32 -> *const i32` and
@@ -1437,9 +1388,7 @@
                     // PtrToPtr-then-Transmute is fine so long as the pointer cast is identity:
                     // `*const T -> *mut T -> NonNull<T>` is fine, but we need to check for narrowing
                     // to skip things like `*const [i32] -> *const i32 -> NonNull<i32>`.
-                    (PtrToPtr, Transmute)
-                        if self.pointers_have_same_metadata(inner_from, inner_to) =>
-                    {
+                    (PtrToPtr, Transmute) if self.pointers_have_same_metadata(inner_from, from) => {
                         Some(Transmute)
                     }
                     // Similarly, for Transmute-then-PtrToPtr. Note that we need to check different
@@ -1450,7 +1399,7 @@
                     // It would be legal to always do this, but we don't want to hide information
                     // from the backend that it'd otherwise be able to use for optimizations.
                     (Transmute, Transmute)
-                        if !self.type_may_have_niche_of_interest_to_backend(inner_to) =>
+                        if !self.type_may_have_niche_of_interest_to_backend(from) =>
                     {
                         Some(Transmute)
                     }
@@ -1479,7 +1428,7 @@
             *initial_kind = kind;
         }
 
-        Some(self.insert(to, Value::Cast { kind, value, from, to }))
+        Some(self.insert(to, Value::Cast { kind, value }))
     }
 
     fn simplify_len(&mut self, place: &mut Place<'tcx>, location: Location) -> Option<VnIndex> {
@@ -1501,11 +1450,11 @@
         }
 
         // We have an unsizing cast, which assigns the length to wide pointer metadata.
-        if let Value::Cast { kind, from, to, .. } = self.get(inner)
+        if let Value::Cast { kind, value: from } = self.get(inner)
             && let CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) = kind
-            && let Some(from) = from.builtin_deref(true)
+            && let Some(from) = self.ty(*from).builtin_deref(true)
             && let ty::Array(_, len) = from.kind()
-            && let Some(to) = to.builtin_deref(true)
+            && let Some(to) = self.ty(inner).builtin_deref(true)
             && let ty::Slice(..) = to.kind()
         {
             return Some(self.insert_constant(Const::Ty(self.tcx.types.usize, *len)));
@@ -1721,7 +1670,7 @@ impl<'tcx> VnState<'_, 'tcx> {
             return Some(place);
         } else if let Value::Projection(pointer, proj) = *self.get(index)
             && (allow_complex_projection || proj.is_stable_offset())
-            && let Some(proj) = self.try_as_place_elem(proj, loc)
+            && let Some(proj) = self.try_as_place_elem(self.ty(index), proj, loc)
         {
             projection.push(proj);
             index = pointer;
diff --git a/tests/mir-opt/pre-codegen/slice_iter.enumerated_loop.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_iter.enumerated_loop.PreCodegen.after.panic-unwind.mir
index 3b58f1d61f4bb..8c5fbda63921f 100644
--- a/tests/mir-opt/pre-codegen/slice_iter.enumerated_loop.PreCodegen.after.panic-unwind.mir
+++ b/tests/mir-opt/pre-codegen/slice_iter.enumerated_loop.PreCodegen.after.panic-unwind.mir
@@ -109,7 +109,6 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () {
     }
 
     bb4: {
-        StorageLive(_15);
         _14 = &mut _13;
         _15 = <Enumerate<std::slice::Iter<'_, T>> as Iterator>::next(move _14) -> [return: bb5, unwind: bb11];
     }
@@ -120,7 +119,6 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () {
     }
 
     bb6: {
-        StorageDead(_15);
         StorageDead(_13);
         drop(_2) -> [return: bb7, unwind continue];
     }
@@ -135,14 +133,13 @@ fn enumerated_loop(_1: &[T], _2: impl Fn(usize, &T)) -> () {
         StorageLive(_19);
         _19 = &_2;
         StorageLive(_20);
-        _20 = (copy _17, copy _18);
+        _20 = copy ((_15 as Some).0: (usize, &T));
         _21 = <impl Fn(usize, &T) as Fn<(usize, &T)>>::call(move _19, move _20) -> [return: bb9, unwind: bb11];
     }
 
     bb9: {
         StorageDead(_20);
         StorageDead(_19);
-        StorageDead(_15);
         goto -> bb4;
     }
 
From cbc3cf716e16c20ba8fb652f8a41dc1d62f03f66 Mon Sep 17 00:00:00 2001
From: Camille GILLOT
Date: Tue, 1 Jul 2025 12:06:39 +0000
Subject: [PATCH 5/6] Avoid computing layouts inside coroutines.

---
 compiler/rustc_mir_transform/src/gvn.rs | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index e081d1fde93bf..461aa7e5284f9 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -216,6 +216,7 @@ struct VnState<'body, 'tcx> {
     tcx: TyCtxt<'tcx>,
     ecx: InterpCx<'tcx, DummyMachine>,
     local_decls: &'body LocalDecls<'tcx>,
+    is_coroutine: bool,
     /// Value stored in each local.
     locals: IndexVec<Local, Option<VnIndex>>,
     /// Locals that are assigned that value.
@@ -253,6 +254,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             tcx,
             ecx: InterpCx::new(tcx, DUMMY_SP, typing_env, DummyMachine),
             local_decls,
+            is_coroutine: body.coroutine.is_some(),
             locals: IndexVec::from_elem(None, local_decls),
             rev_locals: IndexVec::with_capacity(num_values),
             values: FxIndexSet::with_capacity_and_hasher(num_values, Default::default()),
@@ -380,7 +382,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
     fn eval_to_const(&mut self, value: VnIndex) -> Option<OpTy<'tcx>> {
         use Value::*;
         let ty = self.ty(value);
-        let ty = self.ecx.layout_of(ty).ok()?;
+        // Avoid computing layouts inside a coroutine, as that can cause cycles.
+        let ty = if !self.is_coroutine || ty.is_scalar() {
+            self.ecx.layout_of(ty).ok()?
+        } else {
+            return None;
+        };
         let op = match *self.get(value) {
             _ if ty.is_zst() => ImmTy::uninit(ty).into(),

From 4a95b165274f9de5c6a0d3c13ae3103e2ac23eb9 Mon Sep 17 00:00:00 2001
From: Camille GILLOT
Date: Wed, 2 Jul 2025 10:42:07 +0000
Subject: [PATCH 6/6] Move crashes tests.

---
 .../mir/gvn-nonsensical-coroutine-layout.rs}  |  6 ++++-
 .../gvn-nonsensical-coroutine-layout.stderr   | 26 +++++++++++++++++++
 .../mir/gvn-nonsensical-sized-str.rs}         |  5 +++-
 tests/ui/mir/gvn-nonsensical-sized-str.stderr | 10 +++++++
 4 files changed, 45 insertions(+), 2 deletions(-)
 rename tests/{crashes/128094.rs => ui/mir/gvn-nonsensical-coroutine-layout.rs} (53%)
 create mode 100644 tests/ui/mir/gvn-nonsensical-coroutine-layout.stderr
 rename tests/{crashes/135128.rs => ui/mir/gvn-nonsensical-sized-str.rs} (51%)
 create mode 100644 tests/ui/mir/gvn-nonsensical-sized-str.stderr

diff --git a/tests/crashes/128094.rs b/tests/ui/mir/gvn-nonsensical-coroutine-layout.rs
similarity index 53%
rename from tests/crashes/128094.rs
rename to tests/ui/mir/gvn-nonsensical-coroutine-layout.rs
index 56d09d78bed91..f0d174cd01b0a 100644
--- a/tests/crashes/128094.rs
+++ b/tests/ui/mir/gvn-nonsensical-coroutine-layout.rs
@@ -1,15 +1,19 @@
-//@ known-bug: rust-lang/rust#128094
+//! Verify that we do not ICE when a coroutine body is malformed.
 //@ compile-flags: -Zmir-enable-passes=+GVN
 //@ edition: 2018
 
 pub enum Request {
     TestSome(T),
+    //~^ ERROR cannot find type `T` in this scope [E0412]
 }
 
 pub async fn handle_event(event: Request) {
     async move {
         static instance: Request = Request { bar: 17 };
+        //~^ ERROR expected struct, variant or union type, found enum `Request` [E0574]
         &instance
     }
     .await;
 }
+
+fn main() {}
diff --git a/tests/ui/mir/gvn-nonsensical-coroutine-layout.stderr b/tests/ui/mir/gvn-nonsensical-coroutine-layout.stderr
new file mode 100644
index 0000000000000..abc7b16ca7415
--- /dev/null
+++ b/tests/ui/mir/gvn-nonsensical-coroutine-layout.stderr
@@ -0,0 +1,26 @@
+error[E0412]: cannot find type `T` in this scope
+  --> $DIR/gvn-nonsensical-coroutine-layout.rs:6:14
+   |
+LL |     TestSome(T),
+   |              ^ not found in this scope
+   |
+help: you might be missing a type parameter
+   |
+LL | pub enum Request<T> {
+   |                 +++
+
+error[E0574]: expected struct, variant or union type, found enum `Request`
+  --> $DIR/gvn-nonsensical-coroutine-layout.rs:12:36
+   |
+LL |         static instance: Request = Request { bar: 17 };
+   |                                    ^^^^^^^ not a struct, variant or union type
+   |
+help: consider importing this struct instead
+   |
+LL + use std::error::Request;
+   |
+
+error: aborting due to 2 previous errors
+
+Some errors have detailed explanations: E0412, E0574.
+For more information about an error, try `rustc --explain E0412`.
diff --git a/tests/crashes/135128.rs b/tests/ui/mir/gvn-nonsensical-sized-str.rs
similarity index 51%
rename from tests/crashes/135128.rs
rename to tests/ui/mir/gvn-nonsensical-sized-str.rs
index c718b758dc69b..29a82f8ce2a16 100644
--- a/tests/crashes/135128.rs
+++ b/tests/ui/mir/gvn-nonsensical-sized-str.rs
@@ -1,13 +1,16 @@
-//@ known-bug: #135128
+//! Verify that we do not ICE when optimizing bodies with nonsensical bounds.
 //@ compile-flags: -Copt-level=1
 //@ edition: 2021
+//@ build-pass
 
 #![feature(trivial_bounds)]
 
 async fn return_str() -> str
 where
     str: Sized,
+    //~^ WARN trait bound str: Sized does not depend on any type or lifetime parameters
 {
     *"Sized".to_string().into_boxed_str()
 }
+
 fn main() {}
diff --git a/tests/ui/mir/gvn-nonsensical-sized-str.stderr b/tests/ui/mir/gvn-nonsensical-sized-str.stderr
new file mode 100644
index 0000000000000..08f0193e9f61c
--- /dev/null
+++ b/tests/ui/mir/gvn-nonsensical-sized-str.stderr
@@ -0,0 +1,10 @@
+warning: trait bound str: Sized does not depend on any type or lifetime parameters
+  --> $DIR/gvn-nonsensical-sized-str.rs:10:10
+   |
+LL |     str: Sized,
+   |          ^^^^^
+   |
+   = note: `#[warn(trivial_bounds)]` on by default
+
+warning: 1 warning emitted
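
Appendix: the guard that patch 5/6 adds to `eval_to_const` is easiest to see in
isolation. The sketch below is illustrative only -- `Ty`, `VnState`, and the
pseudo-constants it returns are stand-ins, not rustc APIs -- but it shows the
shape of the check: inside a coroutine body, only scalar types are allowed to
reach layout computation, and every other value stays symbolic instead of
being folded to a constant.

// Standalone sketch with assumed names; not rustc code.
#[derive(Clone, Copy)]
enum Ty {
    Bool,
    I32,
    Adt, // an aggregate whose layout may depend on the enclosing coroutine
}

impl Ty {
    // Scalars have a known layout without recursing into other types.
    fn is_scalar(self) -> bool {
        matches!(self, Ty::Bool | Ty::I32)
    }
}

struct VnState {
    is_coroutine: bool,
}

impl VnState {
    // Mirrors the patched `eval_to_const`: computing an aggregate layout
    // inside a coroutine body may cycle back into the coroutine's own
    // layout, so bail out early and keep the value symbolic.
    fn eval_to_const(&self, ty: Ty) -> Option<&'static str> {
        if self.is_coroutine && !ty.is_scalar() {
            return None;
        }
        // Stand-in for `self.ecx.layout_of(ty)` plus const evaluation.
        Some(match ty {
            Ty::Bool => "const bool",
            Ty::I32 => "const i32",
            Ty::Adt => "const adt",
        })
    }
}

fn main() {
    let inside = VnState { is_coroutine: true };
    let outside = VnState { is_coroutine: false };
    assert_eq!(inside.eval_to_const(Ty::I32), Some("const i32"));
    assert_eq!(inside.eval_to_const(Ty::Bool), Some("const bool"));
    assert_eq!(inside.eval_to_const(Ty::Adt), None); // guarded: stays symbolic
    assert_eq!(outside.eval_to_const(Ty::Adt), Some("const adt"));
}

Losing the fold here is harmless: GVN still value-numbers the expression, which
is presumably why the real patch can afford such a coarse guard and simply
allows `ty.is_scalar()` rather than enumerating every cycle-free layout.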