diff --git a/Cargo.toml b/Cargo.toml
index 87e0823..cde88f9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -22,3 +22,6 @@ memory_addr = "0.3"
 memory_set = "0.3"
 page_table_entry = "0.5"
 page_table_multiarch = "0.5"
+
+npt_multiarch = { git = "https://github.com/arceos-org/npt_multiarch.git" }
+aspace_generic = { git = "https://github.com/arceos-org/aspace_generic.git" }
diff --git a/src/addr.rs b/src/addr.rs
deleted file mode 100644
index 512821a..0000000
--- a/src/addr.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-use memory_addr::{AddrRange, PhysAddr, VirtAddr, def_usize_addr, def_usize_addr_formatter};
-
-/// Host virtual address.
-pub type HostVirtAddr = VirtAddr;
-/// Host physical address.
-pub type HostPhysAddr = PhysAddr;
-
-def_usize_addr! {
-    /// Guest virtual address.
-    pub type GuestVirtAddr;
-    /// Guest physical address.
-    pub type GuestPhysAddr;
-}
-
-def_usize_addr_formatter! {
-    GuestVirtAddr = "GVA:{}";
-    GuestPhysAddr = "GPA:{}";
-}
-
-/// Guest virtual address range.
-pub type GuestVirtAddrRange = AddrRange<GuestVirtAddr>;
-/// Guest physical address range.
-pub type GuestPhysAddrRange = AddrRange<GuestPhysAddr>;
-
-#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
-impl page_table_multiarch::riscv::SvVirtAddr for GuestPhysAddr {
-    fn flush_tlb(_vaddr: Option<Self>) {
-        todo!()
-    }
-}
diff --git a/src/address_space/backend/alloc.rs b/src/address_space/backend/alloc.rs
deleted file mode 100644
index eb01e11..0000000
--- a/src/address_space/backend/alloc.rs
+++ /dev/null
@@ -1,97 +0,0 @@
-use memory_addr::{PageIter4K, PhysAddr};
-use page_table_multiarch::{MappingFlags, PageSize, PagingHandler};
-
-use super::Backend;
-use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};
-
-impl<H: PagingHandler> Backend<H> {
-    /// Creates a new allocation mapping backend.
-    pub const fn new_alloc(populate: bool) -> Self {
-        Self::Alloc {
-            populate,
-            _phantom: core::marker::PhantomData,
-        }
-    }
-
-    pub(crate) fn map_alloc(
-        &self,
-        start: GuestPhysAddr,
-        size: usize,
-        flags: MappingFlags,
-        pt: &mut PageTable<H>,
-        populate: bool,
-    ) -> bool {
-        debug!(
-            "map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
-            start,
-            start + size,
-            flags,
-            populate
-        );
-        if populate {
-            // allocate all possible physical frames for populated mapping.
-            for addr in PageIter4K::new(start, start + size).unwrap() {
-                if H::alloc_frame()
-                    .and_then(|frame| pt.map(addr, frame, PageSize::Size4K, flags).ok())
-                    .is_none()
-                {
-                    return false;
-                }
-            }
-            true
-        } else {
-            // Map to a empty entry for on-demand mapping.
-            pt.map_region(
-                start,
-                |_va| PhysAddr::from(0),
-                size,
-                MappingFlags::empty(),
-                false,
-                false,
-            )
-            .is_ok()
-        }
-    }
-
-    pub(crate) fn unmap_alloc(
-        &self,
-        start: GuestPhysAddr,
-        size: usize,
-        pt: &mut PageTable<H>,
-        _populate: bool,
-    ) -> bool {
-        debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
-        for addr in PageIter4K::new(start, start + size).unwrap() {
-            if let Ok((frame, page_size, _)) = pt.unmap(addr) {
-                // Deallocate the physical frame if there is a mapping in the
-                // page table.
-                if page_size.is_huge() {
-                    return false;
-                }
-                H::dealloc_frame(frame);
-            } else {
-                // It's fine if the page is not mapped.
-            }
-        }
-        true
-    }
-
-    pub(crate) fn handle_page_fault_alloc(
-        &self,
-        vaddr: GuestPhysAddr,
-        orig_flags: MappingFlags,
-        pt: &mut PageTable<H>,
-        populate: bool,
-    ) -> bool {
-        if populate {
-            false // Populated mappings should not trigger page faults.
-        } else {
-            // Allocate a physical frame lazily and map it to the fault address.
-            // `vaddr` does not need to be aligned. It will be automatically
-            // aligned during `pt.remap` regardless of the page size.
-            H::alloc_frame()
-                .and_then(|frame| pt.remap(vaddr, frame, orig_flags).ok())
-                .is_some()
-        }
-    }
-}
diff --git a/src/address_space/backend/linear.rs b/src/address_space/backend/linear.rs
deleted file mode 100644
index 8a3e6af..0000000
--- a/src/address_space/backend/linear.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-use memory_addr::PhysAddr;
-use page_table_multiarch::{MappingFlags, PagingHandler};
-
-use super::Backend;
-use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};
-
-impl<H: PagingHandler> Backend<H> {
-    /// Creates a new linear mapping backend.
-    pub const fn new_linear(pa_va_offset: usize) -> Self {
-        Self::Linear { pa_va_offset }
-    }
-
-    pub(crate) fn map_linear(
-        &self,
-        start: GuestPhysAddr,
-        size: usize,
-        flags: MappingFlags,
-        pt: &mut PageTable<H>,
-        pa_va_offset: usize,
-    ) -> bool {
-        let pa_start = PhysAddr::from(start.as_usize() - pa_va_offset);
-        debug!(
-            "map_linear: [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}",
-            start,
-            start + size,
-            pa_start,
-            pa_start + size,
-            flags
-        );
-        pt.map_region(
-            start,
-            |va| PhysAddr::from(va.as_usize() - pa_va_offset),
-            size,
-            flags,
-            false,
-            false,
-        )
-        .is_ok()
-    }
-
-    pub(crate) fn unmap_linear(
-        &self,
-        start: GuestPhysAddr,
-        size: usize,
-        pt: &mut PageTable<H>,
-        _pa_va_offset: usize,
-    ) -> bool {
-        debug!("unmap_linear: [{:#x}, {:#x})", start, start + size);
-        pt.unmap_region(start, size, true).is_ok()
-    }
-}
diff --git a/src/address_space/backend/mod.rs b/src/address_space/backend/mod.rs
deleted file mode 100644
index 8955d91..0000000
--- a/src/address_space/backend/mod.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-//! Memory mapping backends.
-
-use memory_set::MappingBackend;
-use page_table_multiarch::{MappingFlags, PagingHandler};
-
-use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};
-
-mod alloc;
-mod linear;
-
-/// A unified enum type for different memory mapping backends.
-///
-/// Currently, two backends are implemented:
-///
-/// - **Linear**: used for linear mappings. The target physical frames are
-///   contiguous and their addresses should be known when creating the mapping.
-/// - **Allocation**: used in general, or for lazy mappings. The target physical
-///   frames are obtained from the global allocator.
-pub enum Backend<H: PagingHandler> {
-    /// Linear mapping backend.
-    ///
-    /// The offset between the virtual address and the physical address is
-    /// constant, which is specified by `pa_va_offset`. For example, the virtual
-    /// address `vaddr` is mapped to the physical address `vaddr - pa_va_offset`.
-    Linear {
-        /// `vaddr - paddr`.
-        pa_va_offset: usize,
-    },
-    /// Allocation mapping backend.
-    ///
-    /// If `populate` is `true`, all physical frames are allocated when the
-    /// mapping is created, and no page faults are triggered during the memory
-    /// access. Otherwise, the physical frames are allocated on demand (by
-    /// handling page faults).
-    Alloc {
-        /// Whether to populate the physical frames when creating the mapping.
-        populate: bool,
-        /// A phantom data for the paging handler.
-        _phantom: core::marker::PhantomData<H>,
-    },
-}
-
-impl<H: PagingHandler> Clone for Backend<H> {
-    fn clone(&self) -> Self {
-        match *self {
-            Self::Linear { pa_va_offset } => Self::Linear { pa_va_offset },
-            Self::Alloc { populate, .. } => Self::Alloc {
-                populate,
-                _phantom: core::marker::PhantomData,
-            },
-        }
-    }
-}
-
-impl<H: PagingHandler> MappingBackend for Backend<H> {
-    type Addr = GuestPhysAddr;
-    type Flags = MappingFlags;
-    type PageTable = PageTable<H>;
-
-    fn map(
-        &self,
-        start: GuestPhysAddr,
-        size: usize,
-        flags: MappingFlags,
-        pt: &mut PageTable<H>,
-    ) -> bool {
-        match *self {
-            Self::Linear { pa_va_offset } => self.map_linear(start, size, flags, pt, pa_va_offset),
-            Self::Alloc { populate, .. } => self.map_alloc(start, size, flags, pt, populate),
-        }
-    }
-
-    fn unmap(&self, start: GuestPhysAddr, size: usize, pt: &mut PageTable<H>) -> bool {
-        match *self {
-            Self::Linear { pa_va_offset } => self.unmap_linear(start, size, pt, pa_va_offset),
-            Self::Alloc { populate, .. } => self.unmap_alloc(start, size, pt, populate),
-        }
-    }
-
-    fn protect(
-        &self,
-        _start: GuestPhysAddr,
-        _size: usize,
-        _new_flags: MappingFlags,
-        _page_table: &mut PageTable<H>,
-    ) -> bool {
-        // a stub here
-        true
-    }
-}
-
-impl<H: PagingHandler> Backend<H> {
-    pub(crate) fn handle_page_fault(
-        &self,
-        vaddr: GuestPhysAddr,
-        orig_flags: MappingFlags,
-        page_table: &mut PageTable<H>,
-    ) -> bool {
-        match *self {
-            Self::Linear { .. } => false, // Linear mappings should not trigger page faults.
-            Self::Alloc { populate, .. } => {
-                self.handle_page_fault_alloc(vaddr, orig_flags, page_table, populate)
-            }
-        }
-    }
-}
diff --git a/src/address_space/mod.rs b/src/address_space/mod.rs
deleted file mode 100644
index 993de01..0000000
--- a/src/address_space/mod.rs
+++ /dev/null
@@ -1,263 +0,0 @@
-use alloc::vec::Vec;
-use core::fmt;
-
-use axerrno::{AxError, AxResult, ax_err};
-use memory_addr::{MemoryAddr, PhysAddr, is_aligned_4k};
-use memory_set::{MemoryArea, MemorySet};
-use page_table_multiarch::PagingHandler;
-
-use crate::npt::NestedPageTable as PageTable;
-use crate::{GuestPhysAddr, GuestPhysAddrRange, mapping_err_to_ax_err};
-
-mod backend;
-
-pub use backend::Backend;
-pub use page_table_entry::MappingFlags;
-
-/// The virtual memory address space.
-pub struct AddrSpace<H: PagingHandler> {
-    va_range: GuestPhysAddrRange,
-    areas: MemorySet<Backend<H>>,
-    pt: PageTable<H>,
-}
-
-impl<H: PagingHandler> AddrSpace<H> {
-    /// Returns the address space base.
-    pub const fn base(&self) -> GuestPhysAddr {
-        self.va_range.start
-    }
-
-    /// Returns the address space end.
-    pub const fn end(&self) -> GuestPhysAddr {
-        self.va_range.end
-    }
-
-    /// Returns the address space size.
-    pub fn size(&self) -> usize {
-        self.va_range.size()
-    }
-
-    /// Returns the reference to the inner page table.
-    pub const fn page_table(&self) -> &PageTable<H> {
-        &self.pt
-    }
-
-    /// Returns the root physical address of the inner page table.
-    pub const fn page_table_root(&self) -> PhysAddr {
-        self.pt.root_paddr()
-    }
-
-    /// Checks if the address space contains the given address range.
-    pub fn contains_range(&self, start: GuestPhysAddr, size: usize) -> bool {
-        self.va_range
-            .contains_range(GuestPhysAddrRange::from_start_size(start, size))
-    }
-
-    /// Creates a new empty address space.
-    pub fn new_empty(base: GuestPhysAddr, size: usize) -> AxResult<Self> {
-        Ok(Self {
-            va_range: GuestPhysAddrRange::from_start_size(base, size),
-            areas: MemorySet::new(),
-            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
-        })
-    }
-
-    /// Add a new linear mapping.
-    ///
-    /// See [`Backend`] for more details about the mapping backends.
-    ///
-    /// The `flags` parameter indicates the mapping permissions and attributes.
-    pub fn map_linear(
-        &mut self,
-        start_vaddr: GuestPhysAddr,
-        start_paddr: PhysAddr,
-        size: usize,
-        flags: MappingFlags,
-    ) -> AxResult {
-        if !self.contains_range(start_vaddr, size) {
-            return ax_err!(InvalidInput, "address out of range");
-        }
-        if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
-            return ax_err!(InvalidInput, "address not aligned");
-        }
-
-        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
-        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
-        self.areas
-            .map(area, &mut self.pt, false)
-            .map_err(mapping_err_to_ax_err)?;
-        Ok(())
-    }
-
-    /// Add a new allocation mapping.
-    ///
-    /// See [`Backend`] for more details about the mapping backends.
-    ///
-    /// The `flags` parameter indicates the mapping permissions and attributes.
-    pub fn map_alloc(
-        &mut self,
-        start: GuestPhysAddr,
-        size: usize,
-        flags: MappingFlags,
-        populate: bool,
-    ) -> AxResult {
-        if !self.contains_range(start, size) {
-            return ax_err!(
-                InvalidInput,
-                alloc::format!("address [{:?}~{:?}] out of range", start, start + size).as_str()
-            );
-        }
-        if !start.is_aligned_4k() || !is_aligned_4k(size) {
-            return ax_err!(InvalidInput, "address not aligned");
-        }
-
-        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
-        self.areas
-            .map(area, &mut self.pt, false)
-            .map_err(mapping_err_to_ax_err)?;
-        Ok(())
-    }
-
-    /// Removes mappings within the specified virtual address range.
-    pub fn unmap(&mut self, start: GuestPhysAddr, size: usize) -> AxResult {
-        if !self.contains_range(start, size) {
-            return ax_err!(InvalidInput, "address out of range");
-        }
-        if !start.is_aligned_4k() || !is_aligned_4k(size) {
-            return ax_err!(InvalidInput, "address not aligned");
-        }
-
-        self.areas
-            .unmap(start, size, &mut self.pt)
-            .map_err(mapping_err_to_ax_err)?;
-        Ok(())
-    }
-
-    /// Removes all mappings in the address space.
-    pub fn clear(&mut self) {
-        self.areas.clear(&mut self.pt).unwrap();
-    }
-
-    /// Handles a page fault at the given address.
-    ///
-    /// `access_flags` indicates the access type that caused the page fault.
-    ///
-    /// Returns `true` if the page fault is handled successfully (not a real
-    /// fault).
-    pub fn handle_page_fault(&mut self, vaddr: GuestPhysAddr, access_flags: MappingFlags) -> bool {
-        if !self.va_range.contains(vaddr) {
-            return false;
-        }
-        if let Some(area) = self.areas.find(vaddr) {
-            let orig_flags = area.flags();
-            if !orig_flags.contains(access_flags) {
-                return false;
-            }
-            area.backend()
-                .handle_page_fault(vaddr, orig_flags, &mut self.pt)
-        } else {
-            false
-        }
-    }
-
-    /// Translates the given `VirtAddr` into `PhysAddr`.
-    ///
-    /// Returns `None` if the virtual address is out of range or not mapped.
-    pub fn translate(&self, vaddr: GuestPhysAddr) -> Option<PhysAddr> {
-        if !self.va_range.contains(vaddr) {
-            return None;
-        }
-        self.pt
-            .query(vaddr)
-            .map(|(phys_addr, _, _)| {
-                debug!("vaddr {:?} translate to {:?}", vaddr, phys_addr);
-                phys_addr
-            })
-            .ok()
-    }
-
-    /// Translate&Copy the given `VirtAddr` with LENGTH len to a mutable u8 Vec through page table.
-    ///
-    /// Returns `None` if the virtual address is out of range or not mapped.
-    pub fn translated_byte_buffer(
-        &self,
-        vaddr: GuestPhysAddr,
-        len: usize,
-    ) -> Option<Vec<&'static mut [u8]>> {
-        if !self.va_range.contains(vaddr) {
-            return None;
-        }
-        if let Some(area) = self.areas.find(vaddr) {
-            if len > area.size() {
-                warn!(
-                    "AddrSpace translated_byte_buffer len {:#x} exceeds area length {:#x}",
-                    len,
-                    area.size()
-                );
-                return None;
-            }
-
-            let mut start = vaddr;
-            let end = start + len;
-
-            debug!(
-                "start {:?} end {:?} area size {:#x}",
-                start,
-                end,
-                area.size()
-            );
-
-            let mut v = Vec::new();
-            while start < end {
-                let (start_paddr, _, page_size) = self.page_table().query(start).unwrap();
-                let mut end_va = start.align_down(page_size) + page_size.into();
-                end_va = end_va.min(end);
-
-                v.push(unsafe {
-                    core::slice::from_raw_parts_mut(
-                        H::phys_to_virt(start_paddr).as_mut_ptr(),
-                        (end_va - start.as_usize()).into(),
-                    )
-                });
-                start = end_va;
-            }
-            Some(v)
-        } else {
-            None
-        }
-    }
-
-    /// Translates the given `VirtAddr` into `PhysAddr`,
-    /// and returns the size of the `MemoryArea` corresponding to the target vaddr.
-    ///
-    /// Returns `None` if the virtual address is out of range or not mapped.
-    pub fn translate_and_get_limit(&self, vaddr: GuestPhysAddr) -> Option<(PhysAddr, usize)> {
-        if !self.va_range.contains(vaddr) {
-            return None;
-        }
-        if let Some(area) = self.areas.find(vaddr) {
-            self.pt
-                .query(vaddr)
-                .map(|(phys_addr, _, _)| (phys_addr, area.size()))
-                .ok()
-        } else {
-            None
-        }
-    }
-}
-
-impl<H: PagingHandler> fmt::Debug for AddrSpace<H> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("AddrSpace")
-            .field("va_range", &self.va_range)
-            .field("page_table_root", &self.pt.root_paddr())
-            .field("areas", &self.areas)
-            .finish()
-    }
-}
-
-impl<H: PagingHandler> Drop for AddrSpace<H> {
-    fn drop(&mut self) {
-        self.clear();
-    }
-}
diff --git a/src/lib.rs b/src/lib.rs
index 760ce5a..8c778d0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -7,21 +7,17 @@
 extern crate log;
 
 extern crate alloc;
 
-mod addr;
-mod address_space;
 pub mod device;
 mod frame;
 mod hal;
-mod npt;
 
-pub use addr::*;
-pub use address_space::*;
+pub use npt_multiarch::*;
+pub use aspace_generic::*;
 pub use frame::PhysFrame;
 pub use hal::AxMmHal;
 
-use axerrno::AxError;
-use memory_set::MappingError;
+pub type AddrSpace = aspace_generic::AddrSpace;
 
 /// Information about nested page faults.
 #[derive(Debug)]
@@ -31,12 +27,3 @@ pub struct NestedPageFaultInfo {
     /// Access type that caused the nested page fault.
     pub access_flags: MappingFlags,
     /// Guest physical address that caused the nested page fault.
     pub fault_guest_paddr: GuestPhysAddr,
 }
-
-fn mapping_err_to_ax_err(err: MappingError) -> AxError {
-    warn!("Mapping error: {:?}", err);
-    match err {
-        MappingError::InvalidParam => AxError::InvalidInput,
-        MappingError::AlreadyExists => AxError::AlreadyExists,
-        MappingError::BadState => AxError::BadState,
-    }
-}
diff --git a/src/npt/arch/aarch64.rs b/src/npt/arch/aarch64.rs
deleted file mode 100644
index 9b8b521..0000000
--- a/src/npt/arch/aarch64.rs
+++ /dev/null
@@ -1,252 +0,0 @@
-use core::arch::asm;
-use core::fmt;
-use page_table_entry::{GenericPTE, MappingFlags};
-use page_table_multiarch::{PageTable64, PagingMetaData};
-// use memory_addr::HostPhysAddr;
-use crate::{GuestPhysAddr, HostPhysAddr};
-
-bitflags::bitflags! {
-    /// Memory attribute fields in the VMSAv8-64 translation table format descriptors.
-    #[derive(Debug)]
-    pub struct DescriptorAttr: u64 {
-        // Attribute fields in stage 1 VMSAv8-64 Block and Page descriptors:
-
-        /// Whether the descriptor is valid.
-        const VALID = 1 << 0;
-        /// The descriptor gives the address of the next level of translation table or 4KB page.
-        /// (not a 2M, 1G block)
-        const NON_BLOCK = 1 << 1;
-        /// Memory attributes index field.
-        const ATTR = 0b1111 << 2;
-        /// Access permission: read-only.
-        const S2AP_RO = 1 << 6;
-        /// Access permission: write-only.
-        const S2AP_WO = 1 << 7;
-        /// Shareability: Inner Shareable (otherwise Outer Shareable).
-        const INNER = 1 << 8;
-        /// Shareability: Inner or Outer Shareable (otherwise Non-shareable).
-        const SHAREABLE = 1 << 9;
-        /// The Access flag.
-        const AF = 1 << 10;
-        /// The not global bit.
-        const NG = 1 << 11;
-        /// Indicates that 16 adjacent translation table entries point to contiguous memory regions.
-        const CONTIGUOUS = 1 << 52;
-        /// The Privileged execute-never field.
-        // const PXN = 1 << 53;
-        /// The Execute-never or Unprivileged execute-never field.
-        const XN = 1 << 54;
-        /// Non-secure bit. For memory accesses from Secure state, specifies whether the output
-        /// address is in Secure or Non-secure memory.
-        const NS = 1 << 55;
-
-        // Next-level attributes in stage 1 VMSAv8-64 Table descriptors:
-
-        /// PXN limit for subsequent levels of lookup.
-        const PXN_TABLE = 1 << 59;
-        /// XN limit for subsequent levels of lookup.
-        const XN_TABLE = 1 << 60;
-        /// Access permissions limit for subsequent levels of lookup: access at EL0 not permitted.
-        const AP_NO_EL0_TABLE = 1 << 61;
-        /// Access permissions limit for subsequent levels of lookup: write access not permitted.
-        const AP_NO_WRITE_TABLE = 1 << 62;
-        /// For memory accesses from Secure state, specifies the Security state for subsequent
-        /// levels of lookup.
-        const NS_TABLE = 1 << 63;
-    }
-}
-
-#[repr(u64)]
-#[derive(Debug, Clone, Copy, Eq, PartialEq)]
-enum MemType {
-    Device = 0,
-    Normal = 1,
-    NormalNonCache = 2,
-}
-
-impl DescriptorAttr {
-    #[allow(clippy::unusual_byte_groupings)]
-    const ATTR_INDEX_MASK: u64 = 0b1111_00;
-    const PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE: u64 = 0b11 << 2;
-    const PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_CACHEABLE: u64 = 0b11 << 4;
-    const PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE: u64 = 0b1 << 4;
-    const NORMAL_BIT: u64 = Self::PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE
-        | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_CACHEABLE;
-
-    const fn from_mem_type(mem_type: MemType) -> Self {
-        let bits = match mem_type {
-            MemType::Normal => Self::NORMAL_BIT | Self::SHAREABLE.bits(),
-            MemType::NormalNonCache => {
-                Self::PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE
-                    | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE
-                    | Self::SHAREABLE.bits()
-            }
-            MemType::Device => Self::SHAREABLE.bits(),
-        };
-        Self::from_bits_retain(bits)
-    }
-
-    fn mem_type(&self) -> MemType {
-        let idx = self.bits() & Self::ATTR_INDEX_MASK;
-        match idx {
-            Self::NORMAL_BIT => MemType::Normal,
-            Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE => MemType::NormalNonCache,
-            0 => MemType::Device,
-            _ => panic!("Invalid memory attribute index"),
-        }
-    }
-}
-
-impl From<DescriptorAttr> for MappingFlags {
-    fn from(attr: DescriptorAttr) -> Self {
-        let mut flags = Self::empty();
-        if attr.contains(DescriptorAttr::VALID) {
-            flags |= Self::READ;
-        }
-        if !attr.contains(DescriptorAttr::S2AP_WO) {
-            flags |= Self::WRITE;
-        }
-        if !attr.contains(DescriptorAttr::XN) {
-            flags |= Self::EXECUTE;
-        }
-        if attr.mem_type() == MemType::Device {
-            flags |= Self::DEVICE;
-        }
-        flags
-    }
-}
-
-impl From<MappingFlags> for DescriptorAttr {
-    fn from(flags: MappingFlags) -> Self {
-        let mut attr = if flags.contains(MappingFlags::DEVICE) {
-            if flags.contains(MappingFlags::UNCACHED) {
-                Self::from_mem_type(MemType::NormalNonCache)
-            } else {
-                Self::from_mem_type(MemType::Device)
-            }
-        } else {
-            Self::from_mem_type(MemType::Normal)
-        };
-        if flags.contains(MappingFlags::READ) {
-            attr |= Self::VALID | Self::S2AP_RO;
-        }
-        if flags.contains(MappingFlags::WRITE) {
-            attr |= Self::S2AP_WO;
-        }
-        attr
-    }
-}
-
-/// A VMSAv8-64 translation table descriptor.
-///
-/// Note that the **AttrIndx\[2:0\]** (bit\[4:2\]) field is set to `0` for device
-/// memory, and `1` for normal memory. The system must configure the MAIR_ELx
-/// system register accordingly.
-#[derive(Clone, Copy)]
-#[repr(transparent)]
-pub struct A64PTEHV(u64);
-
-impl A64PTEHV {
-    const PHYS_ADDR_MASK: u64 = 0x0000_ffff_ffff_f000; // bits 12..48
-
-    /// Creates an empty descriptor with all bits set to zero.
-    pub const fn empty() -> Self {
-        Self(0)
-    }
-}
-
-impl GenericPTE for A64PTEHV {
-    fn bits(self) -> usize {
-        self.0 as usize
-    }
-    fn new_page(paddr: HostPhysAddr, flags: MappingFlags, is_huge: bool) -> Self {
-        let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
-        if !is_huge {
-            attr |= DescriptorAttr::NON_BLOCK;
-        }
-        Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
-    }
-    fn new_table(paddr: HostPhysAddr) -> Self {
-        let attr = DescriptorAttr::NON_BLOCK | DescriptorAttr::VALID;
-        Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
-    }
-    fn paddr(&self) -> HostPhysAddr {
-        HostPhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize)
-    }
-    fn flags(&self) -> MappingFlags {
-        DescriptorAttr::from_bits_truncate(self.0).into()
-    }
-    fn set_paddr(&mut self, paddr: HostPhysAddr) {
-        self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)
-    }
-    fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) {
-        let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
-        if !is_huge {
-            attr |= DescriptorAttr::NON_BLOCK;
-        }
-        self.0 = (self.0 & Self::PHYS_ADDR_MASK) | attr.bits();
-    }
-    fn is_unused(&self) -> bool {
-        self.0 == 0
-    }
-    fn is_present(&self) -> bool {
-        DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::VALID)
-    }
-    fn is_huge(&self) -> bool {
-        !DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::NON_BLOCK)
-    }
-    fn clear(&mut self) {
-        self.0 = 0
-    }
-}
-
-impl fmt::Debug for A64PTEHV {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let mut f = f.debug_struct("A64PTE");
-        f.field("raw", &self.0)
-            .field("paddr", &self.paddr())
-            .field("attr", &DescriptorAttr::from_bits_truncate(self.0))
-            .field("flags", &self.flags())
-            .finish()
-    }
-}
-
-/// Metadata of AArch64 hypervisor page tables (ipa to hpa).
-#[derive(Copy, Clone)]
-pub struct A64HVPagingMetaData;
-
-impl PagingMetaData for A64HVPagingMetaData {
-    const LEVELS: usize = 3;
-    // In Armv8.0-A, the maximum size for a physical address is 48 bits.
-    const PA_MAX_BITS: usize = 48;
-    // The size of the IPA space can be configured in the same way as the
-    const VA_MAX_BITS: usize = 40; // virtual address space. VTCR_EL2.T0SZ controls the size.
-
-    type VirtAddr = GuestPhysAddr;
-
-    fn flush_tlb(vaddr: Option<Self::VirtAddr>) {
-        unsafe {
-            if let Some(vaddr) = vaddr {
-                #[cfg(not(feature = "arm-el2"))]
-                {
-                    asm!("tlbi vaae1is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
-                }
-                #[cfg(feature = "arm-el2")]
-                {
-                    asm!("tlbi vae2is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
-                }
-            } else {
-                // flush the entire TLB
-                #[cfg(not(feature = "arm-el2"))]
-                {
-                    asm!("tlbi vmalle1; dsb sy; isb")
-                }
-                #[cfg(feature = "arm-el2")]
-                {
-                    asm!("tlbi alle2is; dsb sy; isb")
-                }
-            }
-        }
-    }
-}
-/// According to rust shyper, AArch64 translation table.
-pub type NestedPageTable<H> = PageTable64<A64HVPagingMetaData, A64PTEHV, H>;
diff --git a/src/npt/arch/mod.rs b/src/npt/arch/mod.rs
deleted file mode 100644
index c80a457..0000000
--- a/src/npt/arch/mod.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-//! Architecture dependent structures.
-
-cfg_if::cfg_if! {
-    if #[cfg(target_arch = "x86_64")] {
-        mod x86_64;
-        pub use self::x86_64::*;
-    } else if #[cfg(target_arch = "aarch64")] {
-        mod aarch64;
-        pub use self::aarch64::*;
-    } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] {
-        mod riscv;
-        pub use self::riscv::*;
-    }
-}
diff --git a/src/npt/arch/riscv.rs b/src/npt/arch/riscv.rs
deleted file mode 100644
index aa190e2..0000000
--- a/src/npt/arch/riscv.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-use page_table_entry::riscv::Rv64PTE;
-use page_table_multiarch::{PageTable64, riscv::Sv39MetaData};
-
-use crate::GuestPhysAddr;
-
-pub type NestedPageTable<H> = PageTable64<Sv39MetaData<GuestPhysAddr>, Rv64PTE, H>;
diff --git a/src/npt/arch/x86_64.rs b/src/npt/arch/x86_64.rs
deleted file mode 100644
index ae57a11..0000000
--- a/src/npt/arch/x86_64.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-use core::{convert::TryFrom, fmt};
-
-use bit_field::BitField;
-use page_table_entry::{GenericPTE, MappingFlags};
-use page_table_multiarch::{PageTable64, PagingMetaData};
-
-use crate::{GuestPhysAddr, HostPhysAddr};
-
-bitflags::bitflags! {
-    /// EPT entry flags. (SDM Vol. 3C, Section 28.3.2)
-    struct EPTFlags: u64 {
-        /// Read access.
-        const READ = 1 << 0;
-        /// Write access.
-        const WRITE = 1 << 1;
-        /// Execute access.
-        const EXECUTE = 1 << 2;
-        /// EPT memory type. Only for terminate pages.
-        const MEM_TYPE_MASK = 0b111 << 3;
-        /// Ignore PAT memory type. Only for terminate pages.
-        const IGNORE_PAT = 1 << 6;
-        /// Specifies that the entry maps a huge frame instead of a page table.
-        /// Only allowed in P2 or P3 tables.
-        const HUGE_PAGE = 1 << 7;
-        /// If bit 6 of EPTP is 1, accessed flag for EPT.
-        const ACCESSED = 1 << 8;
-        /// If bit 6 of EPTP is 1, dirty flag for EPT.
-        const DIRTY = 1 << 9;
-        /// Execute access for user-mode linear addresses.
-        const EXECUTE_FOR_USER = 1 << 10;
-    }
-}
-
-numeric_enum_macro::numeric_enum! {
-    #[repr(u8)]
-    #[derive(Debug, PartialEq, Clone, Copy)]
-    /// EPT memory typing. (SDM Vol. 3C, Section 28.3.7)
-    enum EPTMemType {
-        Uncached = 0,
-        WriteCombining = 1,
-        WriteThrough = 4,
-        WriteProtected = 5,
-        WriteBack = 6,
-    }
-}
-
-impl EPTFlags {
-    fn set_mem_type(&mut self, mem_type: EPTMemType) {
-        let mut bits = self.bits();
-        bits.set_bits(3..6, mem_type as u64);
-        *self = Self::from_bits_truncate(bits)
-    }
-    fn mem_type(&self) -> Result<EPTMemType, u8> {
-        EPTMemType::try_from(self.bits().get_bits(3..6) as u8)
-    }
-}
-
-impl From<MappingFlags> for EPTFlags {
-    fn from(f: MappingFlags) -> Self {
-        if f.is_empty() {
-            return Self::empty();
-        }
-        let mut ret = Self::empty();
-        if f.contains(MappingFlags::READ) {
-            ret |= Self::READ;
-        }
-        if f.contains(MappingFlags::WRITE) {
-            ret |= Self::WRITE;
-        }
-        if f.contains(MappingFlags::EXECUTE) {
-            ret |= Self::EXECUTE;
-        }
-        if !f.contains(MappingFlags::DEVICE) {
-            ret.set_mem_type(EPTMemType::WriteBack);
-        }
-        ret
-    }
-}
-
-impl From<EPTFlags> for MappingFlags {
-    fn from(f: EPTFlags) -> Self {
-        let mut ret = MappingFlags::empty();
-        if f.contains(EPTFlags::READ) {
-            ret |= Self::READ;
-        }
-        if f.contains(EPTFlags::WRITE) {
-            ret |= Self::WRITE;
-        }
-        if f.contains(EPTFlags::EXECUTE) {
-            ret |= Self::EXECUTE;
-        }
-        if let Ok(EPTMemType::Uncached) = f.mem_type() {
-            ret |= Self::DEVICE;
-        }
-        ret
-    }
-}
-
-/// An x86_64 VMX extented page table entry.
-/// Note: The [EPTEntry] can be moved to the independent crate `page_table_entry`.
-#[derive(Clone, Copy)]
-#[repr(transparent)]
-pub struct EPTEntry(u64);
-
-impl EPTEntry {
-    const PHYS_ADDR_MASK: u64 = 0x000f_ffff_ffff_f000; // bits 12..52
-}
-
-impl GenericPTE for EPTEntry {
-    fn new_page(paddr: HostPhysAddr, flags: MappingFlags, is_huge: bool) -> Self {
-        let mut flags = EPTFlags::from(flags);
-        if is_huge {
-            flags |= EPTFlags::HUGE_PAGE;
-        }
-        Self(flags.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
-    }
-    fn new_table(paddr: HostPhysAddr) -> Self {
-        let flags = EPTFlags::READ | EPTFlags::WRITE | EPTFlags::EXECUTE;
-        Self(flags.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
-    }
-    fn paddr(&self) -> HostPhysAddr {
-        HostPhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize)
-    }
-    fn flags(&self) -> MappingFlags {
-        EPTFlags::from_bits_truncate(self.0).into()
-    }
-    fn set_paddr(&mut self, paddr: HostPhysAddr) {
-        self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)
-    }
-
-    fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) {
-        let mut flags = EPTFlags::from(flags);
-        if is_huge {
-            flags |= EPTFlags::HUGE_PAGE;
-        }
-        self.0 = (self.0 & Self::PHYS_ADDR_MASK) | flags.bits()
-    }
-    fn is_unused(&self) -> bool {
-        self.0 == 0
-    }
-    fn is_present(&self) -> bool {
-        self.0 & 0x7 != 0 // RWX != 0
-    }
-    fn is_huge(&self) -> bool {
-        EPTFlags::from_bits_truncate(self.0).contains(EPTFlags::HUGE_PAGE)
-    }
-    fn clear(&mut self) {
-        self.0 = 0
-    }
-
-    fn bits(self) -> usize {
-        self.0 as usize
-    }
-}
-
-impl fmt::Debug for EPTEntry {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("EPTEntry")
-            .field("raw", &self.0)
-            .field("hpaddr", &self.paddr())
-            .field("flags", &self.flags())
-            .field("mem_type", &EPTFlags::from_bits_truncate(self.0).mem_type())
-            .finish()
-    }
-}
-
-/// Metadata of VMX extended page tables.
-pub struct ExtendedPageTableMetadata;
-
-impl PagingMetaData for ExtendedPageTableMetadata {
-    const LEVELS: usize = 4;
-    const PA_MAX_BITS: usize = 52;
-    const VA_MAX_BITS: usize = 48;
-
-    type VirtAddr = GuestPhysAddr;
-
-    fn flush_tlb(_vaddr: Option<Self::VirtAddr>) {
-        todo!()
-    }
-}
-
-/// The VMX extended page table. (SDM Vol. 3C, Section 29.3)
-pub type ExtendedPageTable<H> = PageTable64<ExtendedPageTableMetadata, EPTEntry, H>;
diff --git a/src/npt/mod.rs b/src/npt/mod.rs
deleted file mode 100644
index 14d7d2d..0000000
--- a/src/npt/mod.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-cfg_if::cfg_if! {
-    if #[cfg(target_arch = "x86_64")] {
-        /// The architecture-specific nested page table for two-stage address translation.
-        pub type NestedPageTable<H> = arch::ExtendedPageTable<H>;
-    } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] {
-        /// The architecture-specific page table.
-        pub type NestedPageTable<H> = arch::NestedPageTable<H>;
-    } else if #[cfg(target_arch = "aarch64")] {
-        /// The architecture-specific nested page table for two-stage address translation.
-        pub type NestedPageTable<H> = arch::NestedPageTable<H>;
-    }
-}
-
-mod arch;
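A hypothetical usage sketch (not part of the patch): how a hypervisor might drive the guest address space after this change, assuming the re-exported `aspace_generic::AddrSpace` keeps the same surface as the removed in-tree implementation (`new_empty`, `map_alloc`, `handle_page_fault`). The crate name `axaddrspace`, the `IdentityHal` type, and all addresses below are illustrative only.

```rust
use axaddrspace::{AddrSpace, GuestPhysAddr, MappingFlags};
use memory_addr::{PhysAddr, VirtAddr};
use page_table_multiarch::PagingHandler;

/// Illustrative paging handler; a real hypervisor would back this with its
/// host frame allocator instead of panicking.
struct IdentityHal;

impl PagingHandler for IdentityHal {
    fn alloc_frame() -> Option<PhysAddr> {
        unimplemented!("hand out 4 KiB frames from the host frame allocator")
    }
    fn dealloc_frame(_paddr: PhysAddr) {}
    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
        // Assumes host physical memory is identity-mapped in the host.
        VirtAddr::from(paddr.as_usize())
    }
}

fn setup_guest_memory() -> axerrno::AxResult<AddrSpace<IdentityHal>> {
    // 1 GiB of guest-physical address space starting at GPA 0.
    let mut aspace = AddrSpace::new_empty(GuestPhysAddr::from(0usize), 0x4000_0000)?;

    // 2 MiB of lazily populated guest RAM: frames are allocated when the guest
    // first touches a page and the nested page fault is reported to us.
    aspace.map_alloc(
        GuestPhysAddr::from(0x20_0000usize),
        0x20_0000,
        MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXECUTE,
        false, // populate = false: allocate frames on demand
    )?;

    Ok(aspace)
}

/// Called from the VM-exit path when the guest faults on a lazily mapped page.
fn on_nested_page_fault(aspace: &mut AddrSpace<IdentityHal>, gpa: GuestPhysAddr) -> bool {
    aspace.handle_page_fault(gpa, MappingFlags::WRITE)
}
```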