diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs
index 400395f99ff01..daa365bf6e1d7 100644
--- a/compiler/rustc_abi/src/callconv.rs
+++ b/compiler/rustc_abi/src/callconv.rs
@@ -1,73 +1,10 @@
-mod abi {
-    pub(crate) use crate::Primitive::*;
-    pub(crate) use crate::Variants;
-}
-
-#[cfg(feature = "nightly")]
-use rustc_macros::HashStable_Generic;
-
-use crate::{Align, HasDataLayout, Size};
 #[cfg(feature = "nightly")]
 use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout};
+use crate::{Primitive, Size, Variants};
 
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub enum RegKind {
-    Integer,
-    Float,
-    Vector,
-}
-
-#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub struct Reg {
-    pub kind: RegKind,
-    pub size: Size,
-}
-
-macro_rules! reg_ctor {
-    ($name:ident, $kind:ident, $bits:expr) => {
-        pub fn $name() -> Reg {
-            Reg { kind: RegKind::$kind, size: Size::from_bits($bits) }
-        }
-    };
-}
-
-impl Reg {
-    reg_ctor!(i8, Integer, 8);
-    reg_ctor!(i16, Integer, 16);
-    reg_ctor!(i32, Integer, 32);
-    reg_ctor!(i64, Integer, 64);
-    reg_ctor!(i128, Integer, 128);
-
-    reg_ctor!(f32, Float, 32);
-    reg_ctor!(f64, Float, 64);
-}
+mod reg;
 
-impl Reg {
-    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
-        let dl = cx.data_layout();
-        match self.kind {
-            RegKind::Integer => match self.size.bits() {
-                1 => dl.i1_align.abi,
-                2..=8 => dl.i8_align.abi,
-                9..=16 => dl.i16_align.abi,
-                17..=32 => dl.i32_align.abi,
-                33..=64 => dl.i64_align.abi,
-                65..=128 => dl.i128_align.abi,
-                _ => panic!("unsupported integer: {self:?}"),
-            },
-            RegKind::Float => match self.size.bits() {
-                16 => dl.f16_align.abi,
-                32 => dl.f32_align.abi,
-                64 => dl.f64_align.abi,
-                128 => dl.f128_align.abi,
-                _ => panic!("unsupported float: {self:?}"),
-            },
-            RegKind::Vector => dl.vector_align(self.size).abi,
-        }
-    }
-}
+pub use reg::{Reg, RegKind};
 
 /// Return value from the `homogeneous_aggregate` test function.
 #[derive(Copy, Clone, Debug)]
@@ -134,8 +71,8 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
             // The primitive for this algorithm.
             BackendRepr::Scalar(scalar) => {
                 let kind = match scalar.primitive() {
-                    abi::Int(..) | abi::Pointer(_) => RegKind::Integer,
-                    abi::Float(_) => RegKind::Float,
+                    Primitive::Int(..) | Primitive::Pointer(_) => RegKind::Integer,
+                    Primitive::Float(_) => RegKind::Float,
                 };
                 Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
             }
@@ -206,8 +143,8 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 let (mut result, mut total) = from_fields_at(*self, Size::ZERO)?;
 
                 match &self.variants {
-                    abi::Variants::Single { .. } | abi::Variants::Empty => {}
-                    abi::Variants::Multiple { variants, .. } => {
+                    Variants::Single { .. } | Variants::Empty => {}
+                    Variants::Multiple { variants, .. } => {
                         // Treat enum variants like union members.
                        // HACK(eddyb) pretend the `enum` field (discriminant)
                        // is at the start of every variant (otherwise the gap
diff --git a/compiler/rustc_abi/src/callconv/reg.rs b/compiler/rustc_abi/src/callconv/reg.rs
new file mode 100644
index 0000000000000..66f47c52c15d1
--- /dev/null
+++ b/compiler/rustc_abi/src/callconv/reg.rs
@@ -0,0 +1,63 @@
+#[cfg(feature = "nightly")]
+use rustc_macros::HashStable_Generic;
+
+use crate::{Align, HasDataLayout, Size};
+
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub enum RegKind {
+    Integer,
+    Float,
+    Vector,
+}
+
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct Reg {
+    pub kind: RegKind,
+    pub size: Size,
+}
+
+macro_rules! reg_ctor {
+    ($name:ident, $kind:ident, $bits:expr) => {
+        pub fn $name() -> Reg {
+            Reg { kind: RegKind::$kind, size: Size::from_bits($bits) }
+        }
+    };
+}
+
+impl Reg {
+    reg_ctor!(i8, Integer, 8);
+    reg_ctor!(i16, Integer, 16);
+    reg_ctor!(i32, Integer, 32);
+    reg_ctor!(i64, Integer, 64);
+    reg_ctor!(i128, Integer, 128);
+
+    reg_ctor!(f32, Float, 32);
+    reg_ctor!(f64, Float, 64);
+}
+
+impl Reg {
+    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+        let dl = cx.data_layout();
+        match self.kind {
+            RegKind::Integer => match self.size.bits() {
+                1 => dl.i1_align.abi,
+                2..=8 => dl.i8_align.abi,
+                9..=16 => dl.i16_align.abi,
+                17..=32 => dl.i32_align.abi,
+                33..=64 => dl.i64_align.abi,
+                65..=128 => dl.i128_align.abi,
+                _ => panic!("unsupported integer: {self:?}"),
+            },
+            RegKind::Float => match self.size.bits() {
+                16 => dl.f16_align.abi,
+                32 => dl.f32_align.abi,
+                64 => dl.f64_align.abi,
+                128 => dl.f128_align.abi,
+                _ => panic!("unsupported float: {self:?}"),
+            },
+            RegKind::Vector => dl.vector_align(self.size).abi,
+        }
+    }
+}
diff --git a/compiler/rustc_abi/src/extern_abi/mod.rs b/compiler/rustc_abi/src/extern_abi.rs
similarity index 100%
rename from compiler/rustc_abi/src/extern_abi/mod.rs
rename to compiler/rustc_abi/src/extern_abi.rs
diff --git a/compiler/rustc_abi/src/layout/ty.rs b/compiler/rustc_abi/src/layout/ty.rs
index d188750bfe100..221e990ae863b 100644
--- a/compiler/rustc_abi/src/layout/ty.rs
+++ b/compiler/rustc_abi/src/layout/ty.rs
@@ -1,13 +1,15 @@
 use std::fmt;
 use std::ops::Deref;
 
-use Float::*;
-use Primitive::*;
 use rustc_data_structures::intern::Interned;
 use rustc_macros::HashStable_Generic;
 
+use crate::{
+    AbiAndPrefAlign, Align, BackendRepr, FieldsShape, Float, HasDataLayout, LayoutData, Niche,
+    PointeeInfo, Primitive, Scalar, Size, TargetDataLayout, Variants,
+};
+
-// Explicitly import `Float` to avoid ambiguity with `Primitive::Float`.
-use crate::{Float, *};
 
 rustc_index::newtype_index! {
     /// The *source-order* index of a field in a variant.
@@ -197,7 +199,9 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         C: HasDataLayout,
     {
         match self.backend_repr {
-            BackendRepr::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
+            BackendRepr::Scalar(scalar) => {
+                matches!(scalar.primitive(), Primitive::Float(Float::F32 | Float::F64))
+            }
             BackendRepr::Memory { .. } => {
                 if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                     self.field(cx, 0).is_single_fp_element(cx)
diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs
index f17452b3ba039..0a4ffb152195e 100644
--- a/compiler/rustc_target/src/asm/mod.rs
+++ b/compiler/rustc_target/src/asm/mod.rs
@@ -1,11 +1,11 @@
 use std::fmt;
 use std::str::FromStr;
 
+use rustc_abi::Size;
 use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
 use rustc_macros::{Decodable, Encodable, HashStable_Generic};
 use rustc_span::Symbol;
 
-use crate::abi::Size;
 use crate::spec::{RelocModel, Target};
 
 pub struct ModifierInfo {
diff --git a/compiler/rustc_target/src/callconv/aarch64.rs b/compiler/rustc_target/src/callconv/aarch64.rs
index 67345f0d47b71..d712bec9b780e 100644
--- a/compiler/rustc_target/src/callconv/aarch64.rs
+++ b/compiler/rustc_target/src/callconv/aarch64.rs
@@ -1,9 +1,8 @@
 use std::iter;
 
-use rustc_abi::{BackendRepr, Primitive};
+use rustc_abi::{BackendRepr, HasDataLayout, Primitive, TyAbiInterface};
 
 use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
-use crate::abi::{HasDataLayout, TyAbiInterface};
 use crate::spec::{HasTargetSpec, Target};
 
 /// Indicates the variant of the AArch64 ABI we are compiling for.
diff --git a/compiler/rustc_target/src/callconv/amdgpu.rs b/compiler/rustc_target/src/callconv/amdgpu.rs
index 3007a729a8b8a..91ac00e025001 100644
--- a/compiler/rustc_target/src/callconv/amdgpu.rs
+++ b/compiler/rustc_target/src/callconv/amdgpu.rs
@@ -1,5 +1,6 @@
+use rustc_abi::{HasDataLayout, TyAbiInterface};
+
 use crate::abi::call::{ArgAbi, FnAbi};
-use crate::abi::{HasDataLayout, TyAbiInterface};
 
 fn classify_ret<'a, Ty, C>(_cx: &C, ret: &mut ArgAbi<'a, Ty>)
 where
diff --git a/compiler/rustc_target/src/callconv/arm.rs b/compiler/rustc_target/src/callconv/arm.rs
index bd6f781fb8120..75797daba695a 100644
--- a/compiler/rustc_target/src/callconv/arm.rs
+++ b/compiler/rustc_target/src/callconv/arm.rs
@@ -1,5 +1,6 @@
+use rustc_abi::{HasDataLayout, TyAbiInterface};
+
 use crate::abi::call::{ArgAbi, Conv, FnAbi, Reg, RegKind, Uniform};
-use crate::abi::{HasDataLayout, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index 8bf61cb133766..47566bde6b4a4 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -1,9 +1,10 @@
-use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{
-    self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout,
+use rustc_abi::{
+    BackendRepr, ExternAbi, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size,
+    TyAbiInterface, TyAndLayout, Variants,
 };
+
+use crate::callconv::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
 use crate::spec::HasTargetSpec;
-use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(Copy, Clone)]
 enum RegPassKind {
@@ -42,7 +43,7 @@ where
 {
     match arg_layout.backend_repr {
         BackendRepr::Scalar(scalar) => match scalar.primitive() {
-            abi::Int(..) | abi::Pointer(_) => {
+            Primitive::Int(..) | Primitive::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
                 }
@@ -62,7 +63,7 @@ where
                     _ => return Err(CannotUseFpConv),
                 }
             }
-            abi::Float(_) => {
+            Primitive::Float(_) => {
                 if arg_layout.size.bits() > flen {
                     return Err(CannotUseFpConv);
                 }
@@ -115,8 +116,8 @@ where
         }
         FieldsShape::Arbitrary { .. } => {
             match arg_layout.variants {
-                abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
-                abi::Variants::Single { .. } | abi::Variants::Empty => (),
+                Variants::Multiple { .. } => return Err(CannotUseFpConv),
+                Variants::Single { .. } | Variants::Empty => (),
             }
             for i in arg_layout.fields.index_by_increasing_offset() {
                 let field = arg_layout.field(cx, i);
@@ -314,7 +315,7 @@ fn classify_arg<'a, Ty, C>(
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
     if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
-        if let abi::Int(i, _) = scalar.primitive() {
+        if let Primitive::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -363,12 +364,12 @@ where
     }
 }
 
-pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
 {
-    if abi == SpecAbi::RustIntrinsic {
+    if abi == ExternAbi::RustIntrinsic {
         return;
     }
diff --git a/compiler/rustc_target/src/callconv/mips.rs b/compiler/rustc_target/src/callconv/mips.rs
index 37980a91c7601..f7d688221552b 100644
--- a/compiler/rustc_target/src/callconv/mips.rs
+++ b/compiler/rustc_target/src/callconv/mips.rs
@@ -1,5 +1,6 @@
+use rustc_abi::{HasDataLayout, Size};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{HasDataLayout, Size};
 
 fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
 where
diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs
index 5bdf4c2ad77f0..89f324bc31308 100644
--- a/compiler/rustc_target/src/callconv/mips64.rs
+++ b/compiler/rustc_target/src/callconv/mips64.rs
@@ -1,12 +1,15 @@
-use crate::abi::call::{
-    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Reg, Uniform,
+use rustc_abi::{
+    BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface,
+};
+
+use crate::callconv::{
+    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Uniform,
 };
-use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
 
 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
     // Always sign extend u32 values on 64-bit mips
-    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
-        if let abi::Int(i, signed) = scalar.primitive() {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
+        if let Primitive::Int(i, signed) = scalar.primitive() {
             if !signed && i.size().bits() == 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
                     attrs.ext(ArgExtension::Sext);
@@ -25,9 +28,9 @@ where
     C: HasDataLayout,
 {
     match ret.layout.field(cx, i).backend_repr {
-        abi::BackendRepr::Scalar(scalar) => match scalar.primitive() {
-            abi::Float(abi::F32) => Some(Reg::f32()),
-            abi::Float(abi::F64) => Some(Reg::f64()),
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
+            Primitive::Float(Float::F32) => Some(Reg::f32()),
+            Primitive::Float(Float::F64) => Some(Reg::f64()),
             _ => None,
         },
         _ => None,
@@ -51,7 +54,7 @@ where
     // use of float registers to structures (not unions) containing exactly one or two
    // float fields.
 
-    if let abi::FieldsShape::Arbitrary { .. } = ret.layout.fields {
+    if let FieldsShape::Arbitrary { .. } = ret.layout.fields {
         if ret.layout.fields.count() == 1 {
             if let Some(reg) = float_reg(cx, ret, 0) {
                 ret.cast_to(reg);
@@ -90,16 +93,16 @@ where
     let mut prefix_index = 0;
 
     match arg.layout.fields {
-        abi::FieldsShape::Primitive => unreachable!(),
-        abi::FieldsShape::Array { .. } => {
+        FieldsShape::Primitive => unreachable!(),
+        FieldsShape::Array { .. } => {
             // Arrays are passed indirectly
             arg.make_indirect();
             return;
         }
-        abi::FieldsShape::Union(_) => {
+        FieldsShape::Union(_) => {
             // Unions and are always treated as a series of 64-bit integer chunks
         }
-        abi::FieldsShape::Arbitrary { .. } => {
+        FieldsShape::Arbitrary { .. } => {
             // Structures are split up into a series of 64-bit integer chunks, but any aligned
             // doubles not part of another aggregate are passed as floats.
             let mut last_offset = Size::ZERO;
@@ -109,8 +112,8 @@ where
                 let offset = arg.layout.fields.offset(i);
 
                 // We only care about aligned doubles
-                if let abi::BackendRepr::Scalar(scalar) = field.backend_repr {
-                    if scalar.primitive() == abi::Float(abi::F64) {
+                if let BackendRepr::Scalar(scalar) = field.backend_repr {
+                    if scalar.primitive() == Primitive::Float(Float::F64) {
                         if offset.is_aligned(dl.f64_align.abi) {
                             // Insert enough integers to cover [last_offset, offset)
                             assert!(last_offset.is_aligned(dl.f64_align.abi));
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 41b78d9121d6d..9e651376cd7ce 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -1,14 +1,14 @@
 use std::str::FromStr;
 use std::{fmt, iter};
 
-pub use rustc_abi::{ExternAbi, Reg, RegKind};
+use rustc_abi::{
+    AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Scalar, Size, TyAbiInterface,
+    TyAndLayout,
+};
+pub use rustc_abi::{Primitive, Reg, RegKind};
 use rustc_macros::HashStable_Generic;
 use rustc_span::Symbol;
 
-use crate::abi::{
-    self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface,
-    TyAndLayout,
-};
 use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
 
 mod aarch64;
@@ -349,7 +349,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
     pub fn new(
         cx: &impl HasDataLayout,
         layout: TyAndLayout<'a, Ty>,
-        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
+        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, Scalar, Size) -> ArgAttributes,
     ) -> Self {
         let mode = match layout.backend_repr {
             BackendRepr::Uninhabited => PassMode::Ignore,
@@ -464,7 +464,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
         if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
-            if let abi::Int(i, signed) = scalar.primitive() {
+            if let Primitive::Int(i, signed) = scalar.primitive() {
                 if i.size().bits() < bits {
                     if let PassMode::Direct(ref mut attrs) = self.mode {
                         if signed {
@@ -756,7 +756,9 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 continue;
             }
 
-            if arg_idx.is_none() && arg.layout.size > Pointer(AddressSpace::DATA).size(cx) * 2 {
+            if arg_idx.is_none()
+                && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2
+            {
                // Return values larger than 2 registers using a return area
                // pointer. LLVM and Cranelift disagree about how to return
                // values that don't fit in the registers designated for return
@@ -837,7 +839,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
         assert!(is_indirect_not_on_stack);
 
         let size = arg.layout.size;
-        if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
+        if !arg.layout.is_unsized() && size <= Primitive::Pointer(AddressSpace::DATA).size(cx) {
             // We want to pass small aggregates as immediates, but using
             // an LLVM aggregate type for this leads to bad optimizations,
             // so we pick an appropriately sized integer type instead.
diff --git a/compiler/rustc_target/src/callconv/nvptx64.rs b/compiler/rustc_target/src/callconv/nvptx64.rs
index c64164372a11d..c5da185565887 100644
--- a/compiler/rustc_target/src/callconv/nvptx64.rs
+++ b/compiler/rustc_target/src/callconv/nvptx64.rs
@@ -1,6 +1,7 @@
+use rustc_abi::{HasDataLayout, Reg, Size, TyAbiInterface};
+
 use super::{ArgAttribute, ArgAttributes, ArgExtension, CastTarget};
-use crate::abi::call::{ArgAbi, FnAbi, Reg, Size, Uniform};
-use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::abi::call::{ArgAbi, FnAbi, Uniform};
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
     if ret.layout.is_aggregate() && ret.layout.is_sized() {
diff --git a/compiler/rustc_target/src/callconv/powerpc64.rs b/compiler/rustc_target/src/callconv/powerpc64.rs
index 92c1f6e7148f3..7a66ce8529af0 100644
--- a/compiler/rustc_target/src/callconv/powerpc64.rs
+++ b/compiler/rustc_target/src/callconv/powerpc64.rs
@@ -2,8 +2,9 @@
 // Alignment of 128 bit types is not currently handled, this will
 // need to be fixed when PowerPC vector support is added.
 
+use rustc_abi::{Endian, HasDataLayout, TyAbiInterface};
+
 use crate::abi::call::{Align, ArgAbi, FnAbi, Reg, RegKind, Uniform};
-use crate::abi::{Endian, HasDataLayout, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 #[derive(Debug, Clone, Copy, PartialEq)]
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index 4d858392c979a..24531b0ef6356 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -4,12 +4,13 @@
 // Reference: Clang RISC-V ELF psABI lowering code
 // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
 
-use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use rustc_abi::{
+    BackendRepr, ExternAbi, FieldsShape, HasDataLayout, Primitive, Reg, RegKind, Size,
+    TyAbiInterface, TyAndLayout, Variants,
+};
 
-use crate::abi;
-use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
+use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Uniform};
 use crate::spec::HasTargetSpec;
-use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(Copy, Clone)]
 enum RegPassKind {
@@ -48,7 +49,7 @@ where
 {
     match arg_layout.backend_repr {
         BackendRepr::Scalar(scalar) => match scalar.primitive() {
-            abi::Int(..) | abi::Pointer(_) => {
+            Primitive::Int(..) | Primitive::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
                 }
@@ -68,7 +69,7 @@ where
                     _ => return Err(CannotUseFpConv),
                 }
             }
-        abi::Float(_) => {
+        Primitive::Float(_) => {
             if arg_layout.size.bits() > flen {
                 return Err(CannotUseFpConv);
             }
@@ -121,8 +122,8 @@ where
         }
         FieldsShape::Arbitrary { .. } => {
             match arg_layout.variants {
-                abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
-                abi::Variants::Single { .. } | abi::Variants::Empty => (),
+                Variants::Multiple { .. } => return Err(CannotUseFpConv),
+                Variants::Single { .. } | Variants::Empty => (),
             }
             for i in arg_layout.fields.index_by_increasing_offset() {
                 let field = arg_layout.field(cx, i);
@@ -320,7 +321,7 @@ fn classify_arg<'a, Ty, C>(
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
     if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
-        if let abi::Int(i, _) = scalar.primitive() {
+        if let Primitive::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -369,12 +370,12 @@ where
     }
 }
 
-pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
 {
-    if abi == SpecAbi::RustIntrinsic {
+    if abi == ExternAbi::RustIntrinsic {
         return;
     }
diff --git a/compiler/rustc_target/src/callconv/s390x.rs b/compiler/rustc_target/src/callconv/s390x.rs
index a73c1a0f46c2b..edf57098d6d92 100644
--- a/compiler/rustc_target/src/callconv/s390x.rs
+++ b/compiler/rustc_target/src/callconv/s390x.rs
@@ -1,8 +1,9 @@
 // Reference: ELF Application Binary Interface s390x Supplement
 // https://github.com/IBM/s390x-abi
 
+use rustc_abi::{BackendRepr, HasDataLayout, TyAbiInterface};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind};
-use crate::abi::{BackendRepr, HasDataLayout, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
diff --git a/compiler/rustc_target/src/callconv/sparc.rs b/compiler/rustc_target/src/callconv/sparc.rs
index 37980a91c7601..f7d688221552b 100644
--- a/compiler/rustc_target/src/callconv/sparc.rs
+++ b/compiler/rustc_target/src/callconv/sparc.rs
@@ -1,5 +1,6 @@
+use rustc_abi::{HasDataLayout, Size};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{HasDataLayout, Size};
 
 fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
 where
diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs
index 313d8730399b2..392fb5156fce4 100644
--- a/compiler/rustc_target/src/callconv/sparc64.rs
+++ b/compiler/rustc_target/src/callconv/sparc64.rs
@@ -1,9 +1,13 @@
 // FIXME: This needs an audit for correctness and completeness.
 
+use rustc_abi::{
+    BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Scalar, Size, TyAbiInterface,
+    TyAndLayout,
+};
+
 use crate::abi::call::{
-    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, Reg, Uniform,
+    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, Uniform,
 };
-use crate::abi::{self, HasDataLayout, Scalar, Size, TyAbiInterface, TyAndLayout};
 use crate::spec::HasTargetSpec;
 
 #[derive(Clone, Debug)]
@@ -21,7 +25,7 @@ where
 {
     let dl = cx.data_layout();
 
-    if !matches!(scalar.primitive(), abi::Float(abi::F32 | abi::F64)) {
+    if !matches!(scalar.primitive(), Primitive::Float(Float::F32 | Float::F64)) {
         return data;
     }
@@ -57,7 +61,7 @@ where
         return data;
     }
 
-    if scalar.primitive() == abi::Float(abi::F32) {
+    if scalar.primitive() == Primitive::Float(Float::F32) {
         data.arg_attribute = ArgAttribute::InReg;
         data.prefix[data.prefix_index] = Some(Reg::f32());
         data.last_offset = offset + Reg::f32().size;
@@ -81,14 +85,16 @@ where
 {
     data = arg_scalar(cx, scalar1, offset, data);
     match (scalar1.primitive(), scalar2.primitive()) {
-        (abi::Float(abi::F32), _) => offset += Reg::f32().size,
-        (_, abi::Float(abi::F64)) => offset += Reg::f64().size,
-        (abi::Int(i, _signed), _) => offset += i.size(),
-        (abi::Pointer(_), _) => offset += Reg::i64().size,
+        (Primitive::Float(Float::F32), _) => offset += Reg::f32().size,
+        (_, Primitive::Float(Float::F64)) => offset += Reg::f64().size,
+        (Primitive::Int(i, _signed), _) => offset += i.size(),
+        (Primitive::Pointer(_), _) => offset += Reg::i64().size,
         _ => {}
     }
 
-    if (offset.bytes() % 4) != 0 && matches!(scalar2.primitive(), abi::Float(abi::F32 | abi::F64)) {
+    if (offset.bytes() % 4) != 0
+        && matches!(scalar2.primitive(), Primitive::Float(Float::F32 | Float::F64))
+    {
         offset += Size::from_bytes(4 - (offset.bytes() % 4));
     }
     data = arg_scalar(cx, scalar2, offset, data);
@@ -105,15 +111,15 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    if let abi::FieldsShape::Union(_) = layout.fields {
+    if let FieldsShape::Union(_) = layout.fields {
         return data;
     }
 
     match layout.backend_repr {
-        abi::BackendRepr::Scalar(scalar) => {
+        BackendRepr::Scalar(scalar) => {
             data = arg_scalar(cx, &scalar, offset, data);
         }
-        abi::BackendRepr::Memory { .. } => {
+        BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 if offset < layout.fields.offset(i) {
                     offset = layout.fields.offset(i);
@@ -122,7 +128,7 @@ where
             }
         }
         _ => {
-            if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
+            if let BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
                 data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
             }
         }
@@ -148,16 +154,16 @@ where
     }
 
     match arg.layout.fields {
-        abi::FieldsShape::Primitive => unreachable!(),
-        abi::FieldsShape::Array { .. } => {
+        FieldsShape::Primitive => unreachable!(),
+        FieldsShape::Array { .. } => {
             // Arrays are passed indirectly
             arg.make_indirect();
             return;
         }
-        abi::FieldsShape::Union(_) => {
+        FieldsShape::Union(_) => {
             // Unions and are always treated as a series of 64-bit integer chunks
         }
-        abi::FieldsShape::Arbitrary { .. } => {
+        FieldsShape::Arbitrary { .. } => {
             // Structures with floating point numbers need special care.
             let mut data = parse_structure(
diff --git a/compiler/rustc_target/src/callconv/wasm.rs b/compiler/rustc_target/src/callconv/wasm.rs
index d01b59cbb032b..56cd7a3f93de7 100644
--- a/compiler/rustc_target/src/callconv/wasm.rs
+++ b/compiler/rustc_target/src/callconv/wasm.rs
@@ -1,7 +1,6 @@
-use rustc_abi::{BackendRepr, Float, Integer, Primitive};
+use rustc_abi::{BackendRepr, Float, HasDataLayout, Integer, Primitive, TyAbiInterface};
 
 use crate::abi::call::{ArgAbi, FnAbi};
-use crate::abi::{HasDataLayout, TyAbiInterface};
 
 fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgAbi<'a, Ty>) -> bool
 where
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index cd8465c09ca98..7c88d9b55cfee 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -1,9 +1,10 @@
-use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
-use crate::abi::{
-    AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+use rustc_abi::{
+    AddressSpace, Align, BackendRepr, ExternAbi, HasDataLayout, Primitive, Reg, RegKind,
+    TyAbiInterface, TyAndLayout,
 };
+
+use crate::abi::call::{ArgAttribute, FnAbi, PassMode};
 use crate::spec::HasTargetSpec;
-use crate::spec::abi::Abi as SpecAbi;
 
 #[derive(PartialEq)]
 pub(crate) enum Flavor {
@@ -214,7 +215,7 @@ pub(crate) fn fill_inregs<'a, Ty, C>(
     }
 }
 
-pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: ExternAbi)
 where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout + HasTargetSpec,
@@ -223,18 +224,19 @@ where
     // registers will quiet signalling NaNs. Also avoid using SSE registers since they
     // are not always available (depending on target features).
     if !fn_abi.ret.is_ignore()
-        // Intrinsics themselves are not actual "real" functions, so theres no need to change their ABIs.
-        && abi != SpecAbi::RustIntrinsic
+        // Intrinsics themselves are not "real" functions, so there's no need to change their ABIs.
+        && abi != ExternAbi::RustIntrinsic
     {
         let has_float = match fn_abi.ret.layout.backend_repr {
-            BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)),
+            BackendRepr::Scalar(s) => matches!(s.primitive(), Primitive::Float(_)),
             BackendRepr::ScalarPair(s1, s2) => {
-                matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
+                matches!(s1.primitive(), Primitive::Float(_))
+                    || matches!(s2.primitive(), Primitive::Float(_))
             }
             _ => false, // anyway not passed via registers on x86
         };
         if has_float {
-            if fn_abi.ret.layout.size <= Pointer(AddressSpace::DATA).size(cx) {
+            if fn_abi.ret.layout.size <= Primitive::Pointer(AddressSpace::DATA).size(cx) {
                 // Same size or smaller than pointer, return in a register.
                fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size });
             } else {
diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs
index 37aecf323a182..6e9b4690a2cfd 100644
--- a/compiler/rustc_target/src/callconv/x86_64.rs
+++ b/compiler/rustc_target/src/callconv/x86_64.rs
@@ -1,10 +1,12 @@
 // The classification code for the x86_64 ABI is taken from the clay language
 // https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp
 
-use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use rustc_abi::{
+    BackendRepr, HasDataLayout, Primitive, Reg, RegKind, Size, TyAbiInterface, TyAndLayout,
+    Variants,
+};
 
-use crate::abi;
-use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
+use crate::abi::call::{ArgAbi, CastTarget, FnAbi};
 
 /// Classification of "eightbyte" components.
 // N.B., the order of the variants is from general to specific,
@@ -52,8 +54,8 @@ where
         BackendRepr::Uninhabited => return Ok(()),
 
         BackendRepr::Scalar(scalar) => match scalar.primitive() {
-            abi::Int(..) | abi::Pointer(_) => Class::Int,
-            abi::Float(_) => Class::Sse,
+            Primitive::Int(..) | Primitive::Pointer(_) => Class::Int,
+            Primitive::Float(_) => Class::Sse,
         },
 
         BackendRepr::Vector { .. } => Class::Sse,
@@ -65,8 +67,8 @@ where
     }
 
     match &layout.variants {
-        abi::Variants::Single { .. } | abi::Variants::Empty => {}
-        abi::Variants::Multiple { variants, .. } => {
+        Variants::Single { .. } | Variants::Empty => {}
+        Variants::Multiple { variants, .. } => {
             // Treat enum variants like union members.
             for variant_idx in variants.indices() {
                 classify(cx, layout.for_variant(cx, variant_idx), cls, off)?;
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs
index 9d313d1650032..2542713bc11b3 100644
--- a/compiler/rustc_target/src/callconv/xtensa.rs
+++ b/compiler/rustc_target/src/callconv/xtensa.rs
@@ -5,8 +5,9 @@
 //! Section 8.1.4 & 8.1.5 of the Xtensa ISA reference manual, as well as snippets from
 //! Section 2.3 from the Xtensa programmers guide.
 
+use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 const NUM_ARG_GPRS: u64 = 6;
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index 50679ab8cc81c..11fc09d26e4f9 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -31,10 +31,7 @@ pub mod target_features;
 mod tests;
 
 pub mod abi {
-    pub(crate) use Float::*;
-    pub(crate) use Primitive::*;
-    // Explicitly import `Float` to avoid ambiguity with `Primitive::Float`.
-    pub use rustc_abi::{Float, *};
+    pub use rustc_abi::*;
 
     pub use crate::callconv as call;
 }
diff --git a/compiler/rustc_target/src/spec/base/aix.rs b/compiler/rustc_target/src/spec/base/aix.rs
index fe37d313294d2..a92d104f9108e 100644
--- a/compiler/rustc_target/src/spec/base/aix.rs
+++ b/compiler/rustc_target/src/spec/base/aix.rs
@@ -1,4 +1,5 @@
-use crate::abi::Endian;
+use rustc_abi::Endian;
+
 use crate::spec::{Cc, CodeModel, LinkOutputKind, LinkerFlavor, TargetOptions, crt_objects, cvs};
 
 pub(crate) fn opts() -> TargetOptions {
diff --git a/compiler/rustc_target/src/spec/base/bpf.rs b/compiler/rustc_target/src/spec/base/bpf.rs
index 17d5e75ef6d01..7c0e2e165b649 100644
--- a/compiler/rustc_target/src/spec/base/bpf.rs
+++ b/compiler/rustc_target/src/spec/base/bpf.rs
@@ -1,4 +1,5 @@
-use crate::abi::Endian;
+use rustc_abi::Endian;
+
 use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, TargetOptions};
 
 pub(crate) fn opts(endian: Endian) -> TargetOptions {
diff --git a/compiler/rustc_target/src/spec/base/xtensa.rs b/compiler/rustc_target/src/spec/base/xtensa.rs
index 280dd16e26453..47a532dfdd48d 100644
--- a/compiler/rustc_target/src/spec/base/xtensa.rs
+++ b/compiler/rustc_target/src/spec/base/xtensa.rs
@@ -1,4 +1,5 @@
-use crate::abi::Endian;
+use rustc_abi::Endian;
+
 use crate::spec::{Cc, LinkerFlavor, Lld, PanicStrategy, RelocModel, TargetOptions};
 
 pub(crate) fn opts() -> TargetOptions {
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index 72600225e7a38..3affa1cf90cc3 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -42,6 +42,7 @@ use std::path::{Path, PathBuf};
 use std::str::FromStr;
 use std::{fmt, io};
 
+use rustc_abi::{Endian, ExternAbi, Integer, Size, TargetDataLayout, TargetDataLayoutErrors};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_fs_util::try_canonicalize;
 use rustc_macros::{Decodable, Encodable, HashStable_Generic};
@@ -51,9 +52,7 @@ use serde_json::Value;
 use tracing::debug;
 
 use crate::abi::call::Conv;
-use crate::abi::{Endian, Integer, Size, TargetDataLayout, TargetDataLayoutErrors};
 use crate::json::{Json, ToJson};
-use crate::spec::abi::Abi;
 use crate::spec::crt_objects::CrtObjects;
 
 pub mod crt_objects;
@@ -2845,44 +2844,40 @@ impl DerefMut for Target {
 impl Target {
     /// Given a function ABI, turn it into the correct ABI for this target.
-    pub fn adjust_abi(&self, abi: Abi, c_variadic: bool) -> Abi {
+    pub fn adjust_abi(&self, abi: ExternAbi, c_variadic: bool) -> ExternAbi {
+        use ExternAbi::*;
         match abi {
             // On Windows, `extern "system"` behaves like msvc's `__stdcall`.
             // `__stdcall` only applies on x86 and on non-variadic functions:
             // https://learn.microsoft.com/en-us/cpp/cpp/stdcall?view=msvc-170
-            Abi::System { unwind } if self.is_like_windows && self.arch == "x86" && !c_variadic => {
-                Abi::Stdcall { unwind }
+            System { unwind } if self.is_like_windows && self.arch == "x86" && !c_variadic => {
+                Stdcall { unwind }
             }
-            Abi::System { unwind } => Abi::C { unwind },
-            Abi::EfiApi if self.arch == "arm" => Abi::Aapcs { unwind: false },
-            Abi::EfiApi if self.arch == "x86_64" => Abi::Win64 { unwind: false },
-            Abi::EfiApi => Abi::C { unwind: false },
-
-            // See commentary in `is_abi_supported`: we map these ABIs to "C" when they do not make sense.
-            Abi::Stdcall { .. } | Abi::Thiscall { .. } | Abi::Fastcall { .. }
-                if self.arch == "x86" =>
-            {
-                abi
-            }
} if ["x86", "x86_64"].contains(&&self.arch[..]) => abi, - Abi::Stdcall { unwind } - | Abi::Thiscall { unwind } - | Abi::Fastcall { unwind } - | Abi::Vectorcall { unwind } => Abi::C { unwind }, + System { unwind } => C { unwind }, + EfiApi if self.arch == "arm" => Aapcs { unwind: false }, + EfiApi if self.arch == "x86_64" => Win64 { unwind: false }, + EfiApi => C { unwind: false }, + + // See commentary in `is_abi_supported`. + Stdcall { .. } | Thiscall { .. } if self.arch == "x86" => abi, + Stdcall { unwind } | Thiscall { unwind } => C { unwind }, + Fastcall { .. } if self.arch == "x86" => abi, + Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => abi, + Fastcall { unwind } | Vectorcall { unwind } => C { unwind }, // The Windows x64 calling convention we use for `extern "Rust"` // // expects the callee to save `xmm6` through `xmm15`, but `PreserveMost` // (that we use by default for `extern "rust-cold"`) doesn't save any of those. // So to avoid bloating callers, just use the Rust convention here. - Abi::RustCold if self.is_like_windows && self.arch == "x86_64" => Abi::Rust, + RustCold if self.is_like_windows && self.arch == "x86_64" => Rust, abi => abi, } } - pub fn is_abi_supported(&self, abi: Abi) -> bool { - use Abi::*; + pub fn is_abi_supported(&self, abi: ExternAbi) -> bool { + use ExternAbi::*; match abi { Rust | C { .. }