diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index 3e6cf0ece2966..3f54b98176cc5 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -1,6 +1,7 @@ use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; use super::FunctionCx; +use crate::common::IntPredicate; use crate::errors; use crate::errors::InvalidMonomorphization; use crate::meth; @@ -499,6 +500,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // The `_unsigned` version knows the relative ordering of the pointers, // so can use `sub nuw` and `udiv exact` instead of dealing in signed. let d = bx.unchecked_usub(a, b); + if let OptLevel::Default | OptLevel::Aggressive = bx.sess().opts.optimize { + // Despite dealing in `usize`, it's still not allowed for the + // pointers to be more than `isize::MAX` apart. + let inrange = bx.icmp(IntPredicate::IntSGE, d, bx.const_usize(0)); + bx.assume(inrange); + } bx.exactudiv(d, pointee_size) } } diff --git a/tests/codegen/intrinsics/offset_from.rs b/tests/codegen/intrinsics/offset_from.rs index ef1a77ef184c5..a17d6c7a58046 100644 --- a/tests/codegen/intrinsics/offset_from.rs +++ b/tests/codegen/intrinsics/offset_from.rs @@ -1,4 +1,6 @@ -//@ compile-flags: -C opt-level=1 +//@ revisions: OPT1 OPT2 +//@ [OPT1] compile-flags: -Copt-level=1 +//@ [OPT2] compile-flags: -Copt-level=2 //@ only-64bit (because we're using [ui]size) #![crate_type = "lib"] @@ -17,8 +19,9 @@ pub unsafe fn offset_from_odd_size(a: *const RGB, b: *const RGB) -> isize { // CHECK: start // CHECK-NEXT: ptrtoint // CHECK-NEXT: ptrtoint - // CHECK-NEXT: sub i64 - // CHECK-NEXT: sdiv exact i64 %{{[0-9]+}}, 3 + // CHECK-NEXT: %[[D:.+]] = sub i64 + // CHECK-NOT: assume + // CHECK-NEXT: sdiv exact i64 %[[D]], 3 // CHECK-NEXT: ret i64 ptr_offset_from(a, b) } @@ -29,8 +32,11 @@ pub unsafe fn offset_from_unsigned_odd_size(a: *const RGB, b: 
*const RGB) -> usize { // CHECK: start // CHECK-NEXT: ptrtoint // CHECK-NEXT: ptrtoint - // CHECK-NEXT: sub nuw i64 - // CHECK-NEXT: udiv exact i64 %{{[0-9]+}}, 3 + // CHECK-NEXT: %[[D:.+]] = sub nuw i64 + // OPT1-NOT: assume + // OPT2-NEXT: %[[POS:.+]] = icmp sgt i64 %[[D]], -1 + // OPT2-NEXT: tail call void @llvm.assume(i1 %[[POS]]) + // CHECK-NEXT: udiv exact i64 %[[D]], 3 // CHECK-NEXT: ret i64 ptr_offset_from_unsigned(a, b) } diff --git a/tests/codegen/slice-iter-len-eq-zero.rs b/tests/codegen/slice-iter-len-eq-zero.rs index b2a4b2495b6a2..b749b18919d9f 100644 --- a/tests/codegen/slice-iter-len-eq-zero.rs +++ b/tests/codegen/slice-iter-len-eq-zero.rs @@ -4,10 +4,21 @@ type Demo = [u8; 3]; // CHECK-LABEL: @slice_iter_len_eq_zero +// CHECK-SAME: ptr noundef nonnull %0 +// CHECK-SAME: ptr noundef %1 #[no_mangle] pub fn slice_iter_len_eq_zero(y: std::slice::Iter<'_, Demo>) -> bool { - // CHECK-NOT: sub - // CHECK: %[[RET:.+]] = icmp eq ptr {{%1|%0}}, {{%1|%0}} + // There's no `nonnull` in the signature, so we end up with an assume. 
+ // CHECK: %[[NNULL:.+]] = icmp ne ptr %1, null + // CHECK: tail call void @llvm.assume(i1 %[[NNULL]]) + + // CHECK-DAG: %[[E:.+]] = ptrtoint ptr %1 to i + // CHECK-DAG: %[[B:.+]] = ptrtoint ptr %0 to i + // CHECK: %[[D:.+]] = sub nuw {{.+}} %[[E]], %[[B]] + // CHECK: %[[NNEG:.+]] = icmp sgt {{.+}} %[[D]], -1 + // CHECK: tail call void @llvm.assume(i1 %[[NNEG]]) + + // CHECK: %[[RET:.+]] = icmp eq ptr {{%1, %0|%0, %1}} // CHECK: ret i1 %[[RET]] y.len() == 0 } diff --git a/tests/codegen/vec-in-place.rs b/tests/codegen/vec-in-place.rs index 7a175dc4f7e1c..e6c6748d2c42a 100644 --- a/tests/codegen/vec-in-place.rs +++ b/tests/codegen/vec-in-place.rs @@ -38,6 +38,13 @@ pub struct Baz { pub fn vec_iterator_cast_primitive(vec: Vec<i8>) -> Vec<u8> { // CHECK-NOT: loop // CHECK-NOT: call + + // CHECK: %[[LEN_IN_ISIZE:.+]] = icmp sgt i{{.+}}, -1 + // CHECK-NEXT: call void @llvm.assume(i1 %[[LEN_IN_ISIZE]]) + + // CHECK-NOT: loop + // CHECK-NOT: call + vec.into_iter().map(|e| e as u8).collect() } @@ -46,6 +53,13 @@ pub fn vec_iterator_cast_primitive(vec: Vec<i8>) -> Vec<u8> { pub fn vec_iterator_cast_wrapper(vec: Vec<u8>) -> Vec<Wrapper<u8>> { // CHECK-NOT: loop // CHECK-NOT: call + + // CHECK: %[[LEN_IN_ISIZE:.+]] = icmp sgt i{{.+}}, -1 + // CHECK-NEXT: call void @llvm.assume(i1 %[[LEN_IN_ISIZE]]) + + // CHECK-NOT: loop + // CHECK-NOT: call + vec.into_iter().map(|e| Wrapper(e)).collect() } @@ -54,6 +68,13 @@ pub fn vec_iterator_cast_wrapper(vec: Vec<u8>) -> Vec<Wrapper<u8>> { pub fn vec_iterator_cast_unwrap(vec: Vec<Wrapper<u8>>) -> Vec<u8> { // CHECK-NOT: loop // CHECK-NOT: call + + // CHECK: %[[LEN_IN_ISIZE:.+]] = icmp sgt i{{.+}}, -1 + // CHECK-NEXT: call void @llvm.assume(i1 %[[LEN_IN_ISIZE]]) + + // CHECK-NOT: loop + // CHECK-NOT: call + vec.into_iter().map(|e| e.0).collect() } @@ -90,3 +111,9 @@ pub fn vec_iterator_cast_deaggregate_fold(vec: Vec<Foo>) -> Vec<[u64; 4]> { // correct. 
vec.into_iter().map(|e| unsafe { std::mem::transmute(e) }).collect() } + +// Make sure that the `NOT`s above don't trigger on the `nocallback` attribute further down. + +// CHECK-LABEL: @last_definition +#[no_mangle] +pub fn last_definition() {}