@@ -154,10 +154,10 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
     }
 
     /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
-    /// from `get_bytes_with_undef_and_ptr` in that it does no relocation checks (even on the
+    /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
     /// edges) at all. It further ignores `AllocationExtra` callbacks.
     /// This must not be used for reads affecting the interpreter execution.
-    pub fn inspect_with_undef_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
+    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
         &self.bytes[range]
     }
 
@@ -194,7 +194,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
 
     /// The last argument controls whether we error out when there are uninitialized
     /// or pointer bytes. You should never call this, call `get_bytes` or
-    /// `get_bytes_with_undef_and_ptr` instead,
+    /// `get_bytes_with_uninit_and_ptr` instead,
     ///
     /// This function also guarantees that the resulting pointer will remain stable
     /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
@@ -244,7 +244,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     ///
     /// It is the caller's responsibility to check bounds and alignment beforehand.
     #[inline]
-    pub fn get_bytes_with_undef_and_ptr(
+    pub fn get_bytes_with_uninit_and_ptr(
         &self,
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
@@ -302,19 +302,19 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     }
 
     /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
-    /// relocation. If `allow_ptr_and_undef` is `false`, also enforces that the memory in the
+    /// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
     /// given range contains neither relocations nor uninitialized bytes.
     pub fn check_bytes(
         &self,
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
         size: Size,
-        allow_ptr_and_undef: bool,
+        allow_uninit_and_ptr: bool,
     ) -> InterpResult<'tcx> {
         // Check bounds and relocations on the edges.
-        self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
+        self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
         // Check uninit and ptr.
-        if !allow_ptr_and_undef {
+        if !allow_uninit_and_ptr {
             self.check_init(ptr, size)?;
             self.check_relocations(cx, ptr, size)?;
         }
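To make the two modes of `check_bytes` concrete, here is a minimal standalone sketch of the same idea (not the rustc types; `SimpleAlloc`, `init_mask`, and `relocations` are invented stand-ins): a relocation crossing either edge of the range is always rejected, while relocations fully inside the range and uninitialized bytes are rejected only when `allow_uninit_and_ptr` is `false`.

```rust
use std::ops::Range;

/// Invented stand-in for an allocation: an initialization mask plus the byte
/// ranges that are covered by relocations (pointer bytes).
struct SimpleAlloc {
    init_mask: Vec<bool>,           // one flag per byte: is this byte initialized?
    relocations: Vec<Range<usize>>, // byte ranges occupied by pointers
}

impl SimpleAlloc {
    /// Mirrors the spirit of `check_bytes`: a relocation straddling either edge
    /// of `range` is always an error; relocations fully inside the range and
    /// uninitialized bytes are rejected only when `allow_uninit_and_ptr` is false.
    fn check_bytes(&self, range: Range<usize>, allow_uninit_and_ptr: bool) -> Result<(), String> {
        // Edge check: a relocation crossing a boundary of `range` is always bad.
        for r in &self.relocations {
            let straddles_start = r.start < range.start && r.end > range.start;
            let straddles_end = r.start < range.end && r.end > range.end;
            if straddles_start || straddles_end {
                return Err("relocation crosses the edge of the range".into());
            }
        }
        if !allow_uninit_and_ptr {
            // Reject uninitialized bytes inside the range...
            if self.init_mask[range.clone()].iter().any(|&init| !init) {
                return Err("uninitialized bytes in range".into());
            }
            // ...and any relocation overlapping the range.
            if self.relocations.iter().any(|r| r.start < range.end && r.end > range.start) {
                return Err("pointer bytes in range".into());
            }
        }
        Ok(())
    }
}

fn main() {
    let alloc = SimpleAlloc {
        init_mask: vec![true; 16],
        relocations: vec![4..12], // an 8-byte pointer stored at offset 4
    };
    // Raw access that tolerates pointer bytes succeeds...
    assert!(alloc.check_bytes(4..12, true).is_ok());
    // ...but the strict check over the same range reports the pointer bytes.
    assert!(alloc.check_bytes(4..12, false).is_err());
}
```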
@@ -361,7 +361,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         size: Size,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         // `get_bytes_unchecked` tests relocation edges.
-        let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
+        let bytes = self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
         // Uninit check happens *after* we established that the alignment is correct.
         // We must not return `Ok()` for unaligned pointers!
         if self.is_init(ptr, size).is_err() {
@@ -594,7 +594,7 @@ impl InitMaskCompressed {
 /// Transferring the initialization mask to other allocations.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     /// Creates a run-length encoding of the initialization mask.
-    pub fn compress_undef_range(&self, src: Pointer<Tag>, size: Size) -> InitMaskCompressed {
+    pub fn compress_uninit_range(&self, src: Pointer<Tag>, size: Size) -> InitMaskCompressed {
         // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
         // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
         // the source and write it to the destination. Even if we optimized the memory accesses,
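The run-length encoding that `compress_uninit_range` builds can be illustrated on a plain boolean mask. The sketch below is a simplified standalone model (a `Vec<bool>` instead of the real `InitMask`, with invented names `Compressed` and `compress`), showing how alternating runs of initialized and uninitialized bytes collapse into a starting value plus a list of run lengths.

```rust
/// Run-length encoding of an initialization mask: `initial` is the value of
/// the first run, and each entry in `ranges` is the length of the next run
/// (the value alternates starting from `initial`).
struct Compressed {
    initial: bool,
    ranges: Vec<u64>,
}

/// Compress a boolean mask into alternating run lengths.
fn compress(mask: &[bool]) -> Compressed {
    let initial = mask.first().copied().unwrap_or(false);
    let mut ranges = Vec::new();
    let mut current = initial;
    let mut len = 0u64;
    for &bit in mask {
        if bit == current {
            len += 1;
        } else {
            // Close the current run and start a new one with the flipped value.
            ranges.push(len);
            current = bit;
            len = 1;
        }
    }
    ranges.push(len);
    Compressed { initial, ranges }
}

fn main() {
    // 3 initialized bytes, 2 uninitialized bytes, 3 initialized bytes.
    let mask = [true, true, true, false, false, true, true, true];
    let c = compress(&mask);
    assert!(c.initial);
    assert_eq!(c.ranges, vec![3, 2, 3]);
}
```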
@@ -636,8 +636,8 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         size: Size,
         repeat: u64,
     ) {
-        // An optimization where we can just overwrite an entire range of definedness bits if
-        // they are going to be uniformly `1` or `0`.
+        // An optimization where we can just overwrite an entire range of initialization
+        // bits if they are going to be uniformly `1` or `0`.
         if defined.ranges.len() <= 1 {
             self.init_mask.set_range_inbounds(
                 dest.offset,
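The fast path the rewritten comment describes, overwriting the whole destination range in one go when the compressed mask has at most one run, can likewise be sketched independently of the rustc types. `DestMask`, `set_range`, and `apply_repeatedly` below are illustrative stand-ins rather than the real `InitMask` API.

```rust
/// Illustrative destination mask with a bulk range-set operation, standing in
/// for something like `InitMask::set_range_inbounds`.
struct DestMask {
    bits: Vec<bool>,
}

impl DestMask {
    fn set_range(&mut self, start: usize, end: usize, value: bool) {
        for bit in &mut self.bits[start..end] {
            *bit = value;
        }
    }
}

/// Copy a compressed source mask (`initial` plus alternating run lengths) into
/// `dest`, `repeat` times, starting at `offset`. With at most one run the whole
/// region is uniformly `initial`, so a single range write suffices.
fn apply_repeatedly(
    dest: &mut DestMask,
    initial: bool,
    runs: &[u64],
    offset: usize,
    size: usize,
    repeat: usize,
) {
    if runs.len() <= 1 {
        // Fast path: the source mask is uniform, overwrite the whole range at once.
        dest.set_range(offset, offset + size * repeat, initial);
        return;
    }
    // Slow path: replay the alternating runs for each repetition.
    for rep in 0..repeat {
        let mut pos = offset + rep * size;
        let mut value = initial;
        for &len in runs {
            dest.set_range(pos, pos + len as usize, value);
            pos += len as usize;
            value = !value;
        }
    }
}

fn main() {
    let mut dest = DestMask { bits: vec![false; 12] };
    // A fully initialized 4-byte source (a single run) copied 3 times:
    apply_repeatedly(&mut dest, true, &[4], 0, 4, 3);
    assert!(dest.bits.iter().all(|&b| b));
}
```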