diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index 83a4709e3e..6038e4a1cb 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -86,7 +86,7 @@ impl Space for LockFreeImmortalSpace { ); self.limit = AVAILABLE_START + total_bytes; // Eagerly memory map the entire heap (also zero all the memory) - crate::util::memory::dzmmap(AVAILABLE_START, total_bytes).unwrap(); + crate::util::memory::dzmmap_noreplace(AVAILABLE_START, total_bytes).unwrap(); if try_map_metadata_space( AVAILABLE_START, total_bytes, diff --git a/src/util/address.rs b/src/util/address.rs index 91e545e89d..e641ada50f 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -204,6 +204,15 @@ impl Address { Address(self.0 + size) } + // We implemented the Sub trait but we still keep this sub function. + // The sub() function is const fn, and we can use it to declare Address constants. + // The Sub trait function cannot be const. + #[allow(clippy::should_implement_trait)] + #[inline(always)] + pub const fn sub(self, size: usize) -> Address { + Address(self.0 - size) + } + /// loads a value of type T from the address /// # Safety /// This could throw a segment fault if the address is invalid diff --git a/src/util/heap/layout/byte_map_mmapper.rs b/src/util/heap/layout/byte_map_mmapper.rs index 37cdeb4be8..dfb4974b60 100644 --- a/src/util/heap/layout/byte_map_mmapper.rs +++ b/src/util/heap/layout/byte_map_mmapper.rs @@ -10,7 +10,7 @@ use std::sync::atomic::AtomicU8; use std::sync::atomic::Ordering; use std::sync::Mutex; -use crate::util::memory::{dzmmap, mprotect, munprotect}; +use crate::util::memory::{dzmmap_noreplace, mprotect, munprotect}; use std::mem::transmute; const UNMAPPED: u8 = 0; @@ -74,7 +74,7 @@ impl Mmapper for ByteMapMmapper { let guard = self.lock.lock().unwrap(); // might have become MAPPED here if self.mapped[chunk].load(Ordering::Relaxed) == UNMAPPED { - match dzmmap(mmap_start, MMAP_CHUNK_BYTES) { + match dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES) { Ok(_) => { self.map_metadata( mmap_start, @@ -211,21 +211,20 @@ impl Default for ByteMapMmapper { #[cfg(test)] mod tests { use crate::util::heap::layout::{ByteMapMmapper, Mmapper}; - use crate::util::{conversions, Address}; + use crate::util::Address; use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::conversions::pages_to_bytes; use crate::util::heap::layout::byte_map_mmapper::{MAPPED, PROTECTED}; use crate::util::heap::layout::vm_layout_constants::MMAP_CHUNK_BYTES; + use crate::util::memory; + use crate::util::test_util::BYTE_MAP_MMAPPER_TEST_REGION; + use crate::util::test_util::{serial_test, with_cleanup}; use std::sync::atomic::Ordering; const CHUNK_SIZE: usize = 1 << 22; - #[cfg(target_os = "linux")] - const FIXED_ADDRESS: Address = - unsafe { conversions::chunk_align_down(Address::from_usize(0x6000_0000)) }; - #[cfg(target_os = "macos")] - const FIXED_ADDRESS: Address = - unsafe { conversions::chunk_align_down(Address::from_usize(0x0001_3500_0000)) }; + const FIXED_ADDRESS: Address = BYTE_MAP_MMAPPER_TEST_REGION.start; + const MAX_SIZE: usize = BYTE_MAP_MMAPPER_TEST_REGION.size; #[test] fn address_to_mmap_chunks() { @@ -266,84 +265,148 @@ mod tests { #[test] fn ensure_mapped_1page() { - let mmapper = ByteMapMmapper::new(); - let pages = 1; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); - - let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); - let end_chunk = - 
ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages)); - for chunk in start_chunk..end_chunk { - assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED); - } + serial_test(|| { + with_cleanup( + || { + let mmapper = ByteMapMmapper::new(); + let pages = 1; + let empty_vec = vec![]; + mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); + + let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); + let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up( + FIXED_ADDRESS + pages_to_bytes(pages), + ); + for chunk in start_chunk..end_chunk { + assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED); + } + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap(); + }, + ) + }) } #[test] fn ensure_mapped_1chunk() { - let mmapper = ByteMapMmapper::new(); - let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); - - let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); - let end_chunk = - ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages)); - for chunk in start_chunk..end_chunk { - assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED); - } + serial_test(|| { + with_cleanup( + || { + let mmapper = ByteMapMmapper::new(); + let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; + let empty_vec = vec![]; + mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); + + let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); + let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up( + FIXED_ADDRESS + pages_to_bytes(pages), + ); + for chunk in start_chunk..end_chunk { + assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED); + } + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap(); + }, + ) + }) } #[test] fn ensure_mapped_more_than_1chunk() { - let mmapper = ByteMapMmapper::new(); - let pages = (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); - - let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); - let end_chunk = - ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages)); - assert_eq!(end_chunk - start_chunk, 2); - for chunk in start_chunk..end_chunk { - assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED); - } + serial_test(|| { + with_cleanup( + || { + let mmapper = ByteMapMmapper::new(); + let pages = + (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize; + let empty_vec = vec![]; + mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); + + let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); + let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up( + FIXED_ADDRESS + pages_to_bytes(pages), + ); + assert_eq!(end_chunk - start_chunk, 2); + for chunk in start_chunk..end_chunk { + assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED); + } + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap(); + }, + ) + }) } #[test] fn protect() { - // map 2 chunks - let mmapper = ByteMapMmapper::new(); - let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec); - - // protect 1 chunk - mmapper.protect(FIXED_ADDRESS, 
pages_per_chunk); - - let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); - assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED); - assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED); + serial_test(|| { + with_cleanup( + || { + // map 2 chunks + let mmapper = ByteMapMmapper::new(); + let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; + let empty_vec = vec![]; + mmapper.ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + &empty_vec, + &empty_vec, + ); + + // protect 1 chunk + mmapper.protect(FIXED_ADDRESS, pages_per_chunk); + + let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); + assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED); + assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED); + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap(); + }, + ) + }) } #[test] fn ensure_mapped_on_protected_chunks() { - // map 2 chunks - let mmapper = ByteMapMmapper::new(); - let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec); - - // protect 1 chunk - mmapper.protect(FIXED_ADDRESS, pages_per_chunk); - - let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); - assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED); - assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED); - - // ensure mapped - this will unprotect the previously protected chunk - mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec); - assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED); - assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED); + serial_test(|| { + with_cleanup( + || { + // map 2 chunks + let mmapper = ByteMapMmapper::new(); + let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; + let empty_vec = vec![]; + mmapper.ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + &empty_vec, + &empty_vec, + ); + + // protect 1 chunk + mmapper.protect(FIXED_ADDRESS, pages_per_chunk); + + let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); + assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED); + assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED); + + // ensure mapped - this will unprotect the previously protected chunk + mmapper.ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + &empty_vec, + &empty_vec, + ); + assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED); + assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED); + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap(); + }, + ) + }) } } diff --git a/src/util/heap/layout/fragmented_mapper.rs b/src/util/heap/layout/fragmented_mapper.rs index 0c6b9be904..47a425e6a9 100644 --- a/src/util/heap/layout/fragmented_mapper.rs +++ b/src/util/heap/layout/fragmented_mapper.rs @@ -115,7 +115,8 @@ impl Mmapper for FragmentedMapper { match entry.load(Ordering::Relaxed) { MapState::Unmapped => { let mmap_start = Self::chunk_index_to_address(base, chunk); - crate::util::memory::dzmmap(mmap_start, MMAP_CHUNK_BYTES).unwrap(); + crate::util::memory::dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES) + .unwrap(); self.map_metadata( mmap_start, global_metadata_spec_vec, @@ -332,10 +333,13 @@ impl Default for FragmentedMapper { mod tests { use super::*; use crate::util::constants::LOG_BYTES_IN_PAGE; - use 
crate::util::heap::layout::vm_layout_constants::{AVAILABLE_START, MMAP_CHUNK_BYTES}; + use crate::util::memory; + use crate::util::test_util::FRAGMENTED_MMAPPER_TEST_REGION; + use crate::util::test_util::{serial_test, with_cleanup}; use crate::util::{conversions, Address}; - const FIXED_ADDRESS: Address = AVAILABLE_START; + const FIXED_ADDRESS: Address = FRAGMENTED_MMAPPER_TEST_REGION.start; + const MAX_BYTES: usize = FRAGMENTED_MMAPPER_TEST_REGION.size; fn pages_to_chunks_up(pages: usize) -> usize { conversions::raw_align_up(pages, MMAP_CHUNK_BYTES) / MMAP_CHUNK_BYTES @@ -380,101 +384,170 @@ mod tests { #[test] fn ensure_mapped_1page() { - let mmapper = FragmentedMapper::new(); - let pages = 1; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); - - let chunks = pages_to_chunks_up(pages); - for i in 0..chunks { - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)), - Some(MapState::Mapped) - ); - } + serial_test(|| { + let pages = 1; + with_cleanup( + || { + let mmapper = FragmentedMapper::new(); + let empty_vec = vec![]; + mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); + + let chunks = pages_to_chunks_up(pages); + for i in 0..chunks { + assert_eq!( + get_chunk_map_state( + &mmapper, + FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK) + ), + Some(MapState::Mapped) + ); + } + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap(); + }, + ) + }) } #[test] fn ensure_mapped_1chunk() { - let mmapper = FragmentedMapper::new(); - let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); - - let chunks = pages_to_chunks_up(pages); - for i in 0..chunks { - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)), - Some(MapState::Mapped) - ); - } + serial_test(|| { + let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; + with_cleanup( + || { + let mmapper = FragmentedMapper::new(); + let empty_vec = vec![]; + mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); + + let chunks = pages_to_chunks_up(pages); + for i in 0..chunks { + assert_eq!( + get_chunk_map_state( + &mmapper, + FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK) + ), + Some(MapState::Mapped) + ); + } + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap(); + }, + ) + }) } #[test] fn ensure_mapped_more_than_1chunk() { - let mmapper = FragmentedMapper::new(); - let pages = (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); - - let chunks = pages_to_chunks_up(pages); - for i in 0..chunks { - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)), - Some(MapState::Mapped) - ); - } + serial_test(|| { + let pages = (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize; + with_cleanup( + || { + let mmapper = FragmentedMapper::new(); + let empty_vec = vec![]; + mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec); + + let chunks = pages_to_chunks_up(pages); + for i in 0..chunks { + assert_eq!( + get_chunk_map_state( + &mmapper, + FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK) + ), + Some(MapState::Mapped) + ); + } + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap(); + }, + ) + }) } #[test] fn protect() { - // map 2 chunks - let mmapper = FragmentedMapper::new(); - let pages_per_chunk = MMAP_CHUNK_BYTES >> 
LOG_BYTES_IN_PAGE as usize; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec); - - // protect 1 chunk - mmapper.protect(FIXED_ADDRESS, pages_per_chunk); - - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS), - Some(MapState::Protected) - ); - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES), - Some(MapState::Mapped) - ); + serial_test(|| { + with_cleanup( + || { + // map 2 chunks + let mmapper = FragmentedMapper::new(); + let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; + let empty_vec = vec![]; + mmapper.ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + &empty_vec, + &empty_vec, + ); + + // protect 1 chunk + mmapper.protect(FIXED_ADDRESS, pages_per_chunk); + + assert_eq!( + get_chunk_map_state(&mmapper, FIXED_ADDRESS), + Some(MapState::Protected) + ); + assert_eq!( + get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES), + Some(MapState::Mapped) + ); + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap(); + }, + ) + }) } #[test] fn ensure_mapped_on_protected_chunks() { - // map 2 chunks - let mmapper = FragmentedMapper::new(); - let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; - let empty_vec = vec![]; - mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec); - - // protect 1 chunk - mmapper.protect(FIXED_ADDRESS, pages_per_chunk); - - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS), - Some(MapState::Protected) - ); - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES), - Some(MapState::Mapped) - ); - - // ensure mapped - this will unprotect the previously protected chunk - mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec); - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS), - Some(MapState::Mapped) - ); - assert_eq!( - get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES), - Some(MapState::Mapped) - ); + serial_test(|| { + with_cleanup( + || { + // map 2 chunks + let mmapper = FragmentedMapper::new(); + let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; + let empty_vec = vec![]; + mmapper.ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + &empty_vec, + &empty_vec, + ); + + // protect 1 chunk + mmapper.protect(FIXED_ADDRESS, pages_per_chunk); + + assert_eq!( + get_chunk_map_state(&mmapper, FIXED_ADDRESS), + Some(MapState::Protected) + ); + assert_eq!( + get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES), + Some(MapState::Mapped) + ); + + // ensure mapped - this will unprotect the previously protected chunk + mmapper.ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + &empty_vec, + &empty_vec, + ); + assert_eq!( + get_chunk_map_state(&mmapper, FIXED_ADDRESS), + Some(MapState::Mapped) + ); + assert_eq!( + get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES), + Some(MapState::Mapped) + ); + }, + || { + memory::munmap(FIXED_ADDRESS, MAX_BYTES).unwrap(); + }, + ) + }) } } diff --git a/src/util/memory.rs b/src/util/memory.rs index ac30e184cf..079a24084a 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -1,6 +1,6 @@ use crate::util::Address; -use libc::{c_void, PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE}; -use std::io::{Error, ErrorKind, Result}; +use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE}; +use std::io::Result; pub fn result_is_mapped(result: Result<()>) -> bool { match result { @@ -10,131 +10,269 @@ pub fn result_is_mapped(result: 
Result<()>) -> bool { } pub fn zero(start: Address, len: usize) { - unsafe { - libc::memset(start.to_mut_ptr() as *mut libc::c_void, 0, len); - } + let ptr = start.to_mut_ptr(); + wrap_libc_call(&|| unsafe { libc::memset(ptr, 0, len) }, ptr).unwrap() } /// Demand-zero mmap: -/// This function guarantees to zero all mapped memory. -pub fn dzmmap(start: Address, size: usize) -> Result<()> { - let prot = libc::PROT_READ | libc::PROT_WRITE | libc::PROT_EXEC; +/// This function mmaps the memory and guarantees to zero all mapped memory. +/// This function WILL overwrite existing memory mapping. The user of this function +/// needs to be aware of this, and use it cautiously. +/// +/// # Safety +/// This function WILL overwrite existing memory mapping if there is any. So only use this function if you know +/// the memory has been reserved by mmtk (e.g. after the use of mmap_noreserve()). Otherwise using this function +/// may corrupt others' data. +#[allow(clippy::let_and_return)] // Zeroing is not neceesary for some OS/s +pub unsafe fn dzmmap(start: Address, size: usize) -> Result<()> { + let prot = PROT_READ | PROT_WRITE | PROT_EXEC; let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED; - let result: *mut c_void = unsafe { libc::mmap(start.to_mut_ptr(), size, prot, flags, -1, 0) }; - let addr = Address::from_mut_ptr(result); - if addr == start { - // On linux, we don't need to zero the memory. This is achieved by using the `MAP_ANON` mmap flag. - #[cfg(not(target_os = "linux"))] - { - zero(addr, size); - } - Ok(()) - } else { - // assert!(result as usize <= 127, - // "mmap with MAP_FIXED has unexpected behavior: demand zero mmap with MAP_FIXED on {:?} returned some other address {:?}", - // start, result - // ); - Err(Error::from_raw_os_error( - unsafe { *libc::__errno_location() } as _, - )) + let ret = mmap_fixed(start, size, prot, flags); + // We do not need to explicitly zero for Linux (memory is guaranteed to be zeroed) + #[cfg(not(target_os = "linux"))] + if ret.is_ok() { + zero(start, size) } + ret } -/// Demand-zero mmap: -/// This function guarantees to zero all mapped memory. -/// FIXME - this function should replace dzmmap. -/// Currently, the replacement causes some of the concurrent tests to fail +/// Demand-zero mmap (no replace): +/// This function mmaps the memory and guarantees to zero all mapped memory. +/// This function will not overwrite existing memory mapping, and it will result Err if there is an existing mapping. +#[allow(clippy::let_and_return)] // Zeroing is not neceesary for some OS/s pub fn dzmmap_noreplace(start: Address, size: usize) -> Result<()> { - let prot = libc::PROT_READ | libc::PROT_WRITE | libc::PROT_EXEC; + let prot = PROT_READ | PROT_WRITE | PROT_EXEC; let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE; - let result: *mut c_void = unsafe { libc::mmap(start.to_mut_ptr(), size, prot, flags, -1, 0) }; - let addr = Address::from_mut_ptr(result); - if addr == start { - // On linux, we don't need to zero the memory. This is achieved by using the `MAP_ANON` mmap flag. 
- #[cfg(not(target_os = "linux"))] - { - zero(addr, size); + let ret = mmap_fixed(start, size, prot, flags); + // We do not need to explicitly zero for Linux (memory is guaranteed to be zeroed) + #[cfg(not(target_os = "linux"))] + if ret.is_ok() { + zero(start, size) + } + ret +} + +/// mmap with no swap space reserve: +/// This function does not reserve swap space for this mapping, which means there is no guarantee that writes to the +/// mapping can always be successful. In case of out of physical memory, one may get a segfault for writing to the mapping. +/// We can use this to reserve the address range, and then later overwrites the mapping with dzmmap(). +pub fn mmap_noreserve(start: Address, size: usize) -> Result<()> { + let prot = PROT_NONE; + let flags = + libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE | libc::MAP_NORESERVE; + mmap_fixed(start, size, prot, flags) +} + +pub fn mmap_fixed( + start: Address, + size: usize, + prot: libc::c_int, + flags: libc::c_int, +) -> Result<()> { + let ptr = start.to_mut_ptr(); + wrap_libc_call( + &|| unsafe { libc::mmap(start.to_mut_ptr(), size, prot, flags, -1, 0) }, + ptr, + ) +} + +pub fn munmap(start: Address, size: usize) -> Result<()> { + wrap_libc_call(&|| unsafe { libc::munmap(start.to_mut_ptr(), size) }, 0) +} + +/// Checks if the memory has already been mapped. If not, we panic. +// Note that the checking has a side effect that it will map the memory if it was unmapped. So we panic if it was unmapped. +// Be very careful about using this function. +pub fn panic_if_unmapped(start: Address, size: usize) { + let prot = PROT_READ | PROT_WRITE; + // MAP_FIXED_NOREPLACE returns EEXIST if already mapped + let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE; + match mmap_fixed(start, size, prot, flags) { + Ok(_) => panic!("{} of size {} is not mapped", start, size), + Err(e) => { + assert!( + e.kind() == std::io::ErrorKind::AlreadyExists, + "Failed to check mapped: {:?}", + e + ); } - Ok(()) - } else { - // assert!(result as usize <= 127, - // "mmap with MAP_FIXED has unexpected behavior: demand zero mmap with MAP_FIXED on {:?} returned some other address {:?}", - // start, result - // ); - Err(Error::from_raw_os_error( - unsafe { *libc::__errno_location() } as _, - )) } } pub fn munprotect(start: Address, size: usize) -> Result<()> { - let result = - unsafe { libc::mprotect(start.to_mut_ptr(), size, PROT_READ | PROT_WRITE | PROT_EXEC) }; - if result == 0 { - Ok(()) - } else { - Err(Error::from_raw_os_error(result)) - } + wrap_libc_call( + &|| unsafe { libc::mprotect(start.to_mut_ptr(), size, PROT_READ | PROT_WRITE | PROT_EXEC) }, + 0, + ) } pub fn mprotect(start: Address, size: usize) -> Result<()> { - let result = unsafe { libc::mprotect(start.to_mut_ptr(), size, PROT_NONE) }; - if result == 0 { + wrap_libc_call( + &|| unsafe { libc::mprotect(start.to_mut_ptr(), size, PROT_NONE) }, + 0, + ) +} + +fn wrap_libc_call(f: &dyn Fn() -> T, expect: T) -> Result<()> { + let ret = f(); + if ret == expect { Ok(()) } else { - Err(Error::from_raw_os_error(result)) + Err(std::io::Error::last_os_error()) } } -/// mmap with no swap space reserve: -/// This function only maps the address range, but doesn't occupy any physical memory. -/// -/// Before using any part of the address range, dzmmap must be called. 
-/// -pub fn mmap_noreserve(start: Address, size: usize) -> Result<()> { - let prot = libc::PROT_READ | libc::PROT_WRITE; - // MAP_FIXED_NOREPLACE returns EEXIST if already mapped - let flags = - libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE | libc::MAP_NORESERVE; +#[cfg(test)] +mod tests { + use super::*; + use crate::util::constants::BYTES_IN_PAGE; + use crate::util::test_util::MEMORY_TEST_REGION; + use crate::util::test_util::{serial_test, with_cleanup}; - let result: *mut libc::c_void = - unsafe { libc::mmap(start.to_mut_ptr(), size, prot, flags, -1, 0) }; + // In the tests, we will mmap this address. This address should not be in our heap (in case we mess up with other tests) + const START: Address = MEMORY_TEST_REGION.start; - if result == libc::MAP_FAILED { - let err = unsafe { *libc::__errno_location() }; - Err(Error::from_raw_os_error(err as _)) - } else { - Ok(()) + #[test] + fn test_mmap() { + serial_test(|| { + with_cleanup( + || { + let res = unsafe { dzmmap(START, BYTES_IN_PAGE) }; + assert!(res.is_ok()); + // We can overwrite with dzmmap + let res = unsafe { dzmmap(START, BYTES_IN_PAGE) }; + assert!(res.is_ok()); + }, + || { + assert!(munmap(START, BYTES_IN_PAGE).is_ok()); + }, + ); + }); } -} -pub fn try_munmap(start: Address, size: usize) -> Result<()> { - let result = unsafe { libc::munmap(start.to_mut_ptr(), size) }; - if result == -1 { - let err = unsafe { *libc::__errno_location() }; - Err(Error::from_raw_os_error(err as _)) - } else { - Ok(()) + #[test] + fn test_munmap() { + serial_test(|| { + with_cleanup( + || { + let res = dzmmap_noreplace(START, BYTES_IN_PAGE); + assert!(res.is_ok()); + let res = munmap(START, BYTES_IN_PAGE); + assert!(res.is_ok()); + }, + || { + assert!(munmap(START, BYTES_IN_PAGE).is_ok()); + }, + ) + }) } -} -// -pub fn check_is_mmapped(start: Address, size: usize) -> Result<()> { - let prot = libc::PROT_READ | libc::PROT_WRITE; - // MAP_FIXED_NOREPLACE returns EEXIST if already mapped - let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE; + #[test] + fn test_mmap_noreplace() { + serial_test(|| { + with_cleanup( + || { + // Make sure we mmapped the memory + let res = unsafe { dzmmap(START, BYTES_IN_PAGE) }; + assert!(res.is_ok()); + // Use dzmmap_noreplace will fail + let res = dzmmap_noreplace(START, BYTES_IN_PAGE); + assert!(res.is_err()); + }, + || { + assert!(munmap(START, BYTES_IN_PAGE).is_ok()); + }, + ) + }); + } - let result: *mut libc::c_void = - unsafe { libc::mmap(start.to_mut_ptr(), size, prot, flags, -1, 0) }; + #[test] + fn test_mmap_noreserve() { + serial_test(|| { + with_cleanup( + || { + let res = mmap_noreserve(START, BYTES_IN_PAGE); + assert!(res.is_ok()); + // Try reserve it + let res = unsafe { dzmmap(START, BYTES_IN_PAGE) }; + assert!(res.is_ok()); + }, + || { + assert!(munmap(START, BYTES_IN_PAGE).is_ok()); + }, + ) + }) + } - if result != libc::MAP_FAILED { - return Err(Error::new(ErrorKind::InvalidInput, "NotMMapped")); + #[test] + #[should_panic] + fn test_check_is_mmapped_for_unmapped() { + serial_test(|| { + with_cleanup( + || { + // We expect this call to panic + panic_if_unmapped(START, BYTES_IN_PAGE); + }, + || { + assert!(munmap(START, BYTES_IN_PAGE).is_ok()); + }, + ) + }) } - let err = unsafe { *libc::__errno_location() }; - if err == libc::EEXIST { - Ok(()) - } else { - Err(Error::from_raw_os_error(err as _)) + #[test] + fn test_check_is_mmapped_for_mapped() { + serial_test(|| { + with_cleanup( + || { + assert!(dzmmap_noreplace(START, BYTES_IN_PAGE).is_ok()); + 
panic_if_unmapped(START, BYTES_IN_PAGE); + }, + || { + assert!(munmap(START, BYTES_IN_PAGE).is_ok()); + }, + ) + }) + } + + #[test] + #[should_panic] + fn test_check_is_mmapped_for_unmapped_next_to_mapped() { + serial_test(|| { + with_cleanup( + || { + // map 1 page from START + assert!(dzmmap_noreplace(START, BYTES_IN_PAGE).is_ok()); + + // check if the next page is mapped - which should panic + panic_if_unmapped(START + BYTES_IN_PAGE, BYTES_IN_PAGE); + }, + || { + assert!(munmap(START, BYTES_IN_PAGE * 2).is_ok()); + }, + ) + }) + } + + #[test] + #[should_panic] + // This is a bug we need to fix. We need to figure out a way to properly check if a piece of memory is mapped or not. + // Alternatively, we should remove the code that calls the function. + #[ignore] + fn test_check_is_mmapped_for_partial_mapped() { + serial_test(|| { + with_cleanup( + || { + // map 1 page from START + assert!(dzmmap_noreplace(START, BYTES_IN_PAGE).is_ok()); + + // check if the 2 pages from START are mapped. The second page is unmapped, so it should panic. + panic_if_unmapped(START, BYTES_IN_PAGE * 2); + }, + || { + assert!(munmap(START, BYTES_IN_PAGE * 2).is_ok()); + }, + ) + }) } } diff --git a/src/util/raw_memory_freelist.rs b/src/util/raw_memory_freelist.rs index c10465e6ba..b15292ec26 100644 --- a/src/util/raw_memory_freelist.rs +++ b/src/util/raw_memory_freelist.rs @@ -194,7 +194,7 @@ impl RawMemoryFreeList { } fn mmap(&self, start: Address, bytes: usize) { - if super::memory::dzmmap(start, bytes).is_err() { + if super::memory::dzmmap_noreplace(start, bytes).is_err() { panic!("Can't get more space with mmap()"); } } diff --git a/src/util/side_metadata/global.rs b/src/util/side_metadata/global.rs index e77af6645b..25e49b4371 100644 --- a/src/util/side_metadata/global.rs +++ b/src/util/side_metadata/global.rs @@ -206,7 +206,7 @@ pub fn ensure_metadata_is_mapped(metadata_spec: SideMetadataSpec, data_addr: Add data_addr, meta_start ); - assert!(memory::check_is_mmapped(meta_start, BYTES_IN_PAGE).is_ok()) + memory::panic_if_unmapped(meta_start, BYTES_IN_PAGE); } #[inline(always)] diff --git a/src/util/side_metadata/helpers.rs b/src/util/side_metadata/helpers.rs index c8392f1d48..32f2f25b07 100644 --- a/src/util/side_metadata/helpers.rs +++ b/src/util/side_metadata/helpers.rs @@ -33,7 +33,7 @@ pub(crate) fn address_to_contiguous_meta_address( pub(super) fn ensure_munmap_metadata(start: Address, size: usize) { trace!("ensure_munmap_metadata({}, 0x{:x})", start, size); - assert!(memory::try_munmap(start, size).is_ok()) + assert!(memory::munmap(start, size).is_ok()) } /// Unmaps a metadata space (`spec`) for the specified data address range (`start` and `size`) @@ -95,10 +95,12 @@ pub(super) fn try_mmap_metadata_address_range(start: Address, size: usize) -> Re } /// Tries to map the specified metadata space (`start` and `size`), including reservation of swap-space/physical memory. +/// This function should only be called if we have called try_mmap_metadata_address_range() first. pub(super) fn try_mmap_metadata(start: Address, size: usize) -> Result<()> { debug_assert!(size > 0 && size % BYTES_IN_PAGE == 0); - let res = memory::dzmmap(start, size); + // It is safe to call dzmmap here as we have reserved the address range. 
+ let res = unsafe { memory::dzmmap(start, size) }; trace!("try_mmap_metadata({}, 0x{:x}) -> {:#?}", start, size, res); res } diff --git a/src/util/side_metadata/helpers_32.rs b/src/util/side_metadata/helpers_32.rs index 7c2cb675fb..64fc304580 100644 --- a/src/util/side_metadata/helpers_32.rs +++ b/src/util/side_metadata/helpers_32.rs @@ -91,7 +91,7 @@ pub(crate) const fn meta_bytes_per_chunk(log_min_obj_size: usize, log_num_of_bit pub fn ensure_munmap_metadata_chunk(start: Address, local_per_chunk: usize) { if local_per_chunk != 0 { let policy_meta_start = address_to_meta_chunk_addr(start); - assert!(memory::try_munmap(policy_meta_start, local_per_chunk).is_ok()) + assert!(memory::munmap(policy_meta_start, local_per_chunk).is_ok()) } } @@ -157,7 +157,8 @@ pub fn try_mmap_metadata_chunk( let policy_meta_start = address_to_meta_chunk_addr(start); if !no_reserve { - memory::dzmmap_noreplace(policy_meta_start, local_per_chunk) + // We have reserved the memory + unsafe { memory::dzmmap(policy_meta_start, local_per_chunk) } } else { memory::mmap_noreserve(policy_meta_start, local_per_chunk) } diff --git a/src/util/test_util.rs b/src/util/test_util.rs index 9bca19b3d8..b8022e8b3d 100644 --- a/src/util/test_util.rs +++ b/src/util/test_util.rs @@ -1,8 +1,40 @@ +use crate::util::address::{Address, ByteSize}; +use crate::util::heap::layout::vm_layout_constants::*; +use std::panic; use std::sync::mpsc; use std::sync::Mutex; use std::thread; use std::time::Duration; +// Sometimes we need to mmap for tests. We want to ensure that the mmapped addresses do not overlap +// for different tests, so we organize them here. + +pub(crate) struct MmapTestRegion { + pub start: Address, + pub size: ByteSize, +} +impl MmapTestRegion { + pub const fn reserve_before(prev: MmapTestRegion, size: ByteSize) -> MmapTestRegion { + Self::reserve_before_address(prev.start, size) + } + pub const fn reserve_before_address(addr: Address, size: ByteSize) -> MmapTestRegion { + MmapTestRegion { + start: addr.sub(size), + size, + } + } +} + +// util::heap::layout::fragmented_mmapper +pub(crate) const FRAGMENTED_MMAPPER_TEST_REGION: MmapTestRegion = + MmapTestRegion::reserve_before_address(HEAP_START, MMAP_CHUNK_BYTES * 2); +// util::heap::layout::byte_map_mmaper +pub(crate) const BYTE_MAP_MMAPPER_TEST_REGION: MmapTestRegion = + MmapTestRegion::reserve_before(FRAGMENTED_MMAPPER_TEST_REGION, MMAP_CHUNK_BYTES * 2); +// util::memory +pub(crate) const MEMORY_TEST_REGION: MmapTestRegion = + MmapTestRegion::reserve_before(BYTE_MAP_MMAPPER_TEST_REGION, MMAP_CHUNK_BYTES); + // https://github.com/rust-lang/rfcs/issues/2798#issuecomment-552949300 pub fn panic_after(millis: u64, f: F) -> T where @@ -24,6 +56,8 @@ where } lazy_static! { + // A global lock to make tests serial. + // If we do want more parallelism, we can allow each set of tests to have their own locks. But it seems unnecessary for now. static ref SERIAL_TEST_LOCK: Mutex<()> = Mutex::default(); } @@ -35,3 +69,16 @@ where let _lock = SERIAL_TEST_LOCK.lock(); f(); } + +// Always execute a cleanup closure no matter the test panics or not. +pub fn with_cleanup(test: T, cleanup: C) +where + T: FnOnce() + panic::UnwindSafe, + C: FnOnce(), +{ + let res = panic::catch_unwind(test); + cleanup(); + if let Err(e) = res { + panic::resume_unwind(e); + } +}
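
Usage sketch (illustrative only, not part of the diff above): the doc comments on mmap_noreserve() and dzmmap() describe the intended protocol of first reserving an address range without swap space and later committing it, which is why dzmmap() is now unsafe while dzmmap_noreplace() refuses to touch an existing mapping. A minimal in-crate sketch of that protocol follows; the function name reserve_then_commit, the address 0x4000_0000, and the 4096-byte size are assumptions for illustration and must not overlap the heap or any real mapping.

use crate::util::memory;
use crate::util::Address;

fn reserve_then_commit() -> std::io::Result<()> {
    // Illustrative values only: the range must not overlap any existing mapping.
    let start = unsafe { Address::from_usize(0x4000_0000) };
    let bytes: usize = 4096;

    // Reserve the address range without swap space (PROT_NONE + MAP_NORESERVE).
    memory::mmap_noreserve(start, bytes)?;

    // Commit the range. dzmmap() is `unsafe` because it overwrites existing mappings;
    // it is sound here only because the range was reserved by the call above.
    unsafe { memory::dzmmap(start, bytes) }?;

    // Release the mapping when finished.
    memory::munmap(start, bytes)
}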