From 943238849898384d9d02db6ffca8200846ccdf42 Mon Sep 17 00:00:00 2001 From: Shi Lei Date: Sun, 13 Apr 2025 10:29:27 +0000 Subject: [PATCH 1/3] Remove 'npt' and use arceos-org/npt_multiarch instead. --- Cargo.toml | 2 + src/addr.rs | 30 ---- src/address_space/backend/alloc.rs | 2 +- src/address_space/backend/linear.rs | 2 +- src/address_space/backend/mod.rs | 2 +- src/address_space/mod.rs | 2 +- src/lib.rs | 4 +- src/npt/arch/aarch64.rs | 252 ---------------------------- src/npt/arch/mod.rs | 14 -- src/npt/arch/riscv.rs | 6 - src/npt/arch/x86_64.rs | 183 -------------------- src/npt/mod.rs | 14 -- 12 files changed, 7 insertions(+), 506 deletions(-) delete mode 100644 src/addr.rs delete mode 100644 src/npt/arch/aarch64.rs delete mode 100644 src/npt/arch/mod.rs delete mode 100644 src/npt/arch/riscv.rs delete mode 100644 src/npt/arch/x86_64.rs delete mode 100644 src/npt/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 87e0823..9e7da22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,3 +22,5 @@ memory_addr = "0.3" memory_set = "0.3" page_table_entry = "0.5" page_table_multiarch = "0.5" + +npt_multiarch = { git = "https://github.com/arceos-org/npt_multiarch.git" } diff --git a/src/addr.rs b/src/addr.rs deleted file mode 100644 index 512821a..0000000 --- a/src/addr.rs +++ /dev/null @@ -1,30 +0,0 @@ -use memory_addr::{AddrRange, PhysAddr, VirtAddr, def_usize_addr, def_usize_addr_formatter}; - -/// Host virtual address. -pub type HostVirtAddr = VirtAddr; -/// Host physical address. -pub type HostPhysAddr = PhysAddr; - -def_usize_addr! { - /// Guest virtual address. - pub type GuestVirtAddr; - /// Guest physical address. - pub type GuestPhysAddr; -} - -def_usize_addr_formatter! { - GuestVirtAddr = "GVA:{}"; - GuestPhysAddr = "GPA:{}"; -} - -/// Guest virtual address range. -pub type GuestVirtAddrRange = AddrRange; -/// Guest physical address range. -pub type GuestPhysAddrRange = AddrRange; - -#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] -impl page_table_multiarch::riscv::SvVirtAddr for GuestPhysAddr { - fn flush_tlb(_vaddr: Option) { - todo!() - } -} diff --git a/src/address_space/backend/alloc.rs b/src/address_space/backend/alloc.rs index eb01e11..f6d3f3a 100644 --- a/src/address_space/backend/alloc.rs +++ b/src/address_space/backend/alloc.rs @@ -2,7 +2,7 @@ use memory_addr::{PageIter4K, PhysAddr}; use page_table_multiarch::{MappingFlags, PageSize, PagingHandler}; use super::Backend; -use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable}; +use npt_multiarch::{GuestPhysAddr, NestedPageTable as PageTable}; impl Backend { /// Creates a new allocation mapping backend. diff --git a/src/address_space/backend/linear.rs b/src/address_space/backend/linear.rs index 8a3e6af..890824b 100644 --- a/src/address_space/backend/linear.rs +++ b/src/address_space/backend/linear.rs @@ -2,7 +2,7 @@ use memory_addr::PhysAddr; use page_table_multiarch::{MappingFlags, PagingHandler}; use super::Backend; -use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable}; +use npt_multiarch::{GuestPhysAddr, NestedPageTable as PageTable}; impl Backend { /// Creates a new linear mapping backend. 
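The deleted src/addr.rs above defines the guest address types that npt_multiarch now provides under the same names, so call sites only change their import path. A minimal sketch of the unchanged call-side usage (the concrete addresses are illustrative, not taken from this series):

    use npt_multiarch::{GuestPhysAddr, GuestPhysAddrRange};

    // GuestPhysAddr is a usize-backed address type generated by
    // `def_usize_addr!`; its debug output formats as "GPA:0x…".
    let gpa = GuestPhysAddr::from(0x8000_0000usize);
    let range = GuestPhysAddrRange::from_start_size(gpa, 0x1000);
    assert!(range.contains(gpa));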
diff --git a/src/address_space/backend/mod.rs b/src/address_space/backend/mod.rs index 8955d91..70b076c 100644 --- a/src/address_space/backend/mod.rs +++ b/src/address_space/backend/mod.rs @@ -3,7 +3,7 @@ use memory_set::MappingBackend; use page_table_multiarch::{MappingFlags, PagingHandler}; -use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable}; +use npt_multiarch::{GuestPhysAddr, NestedPageTable as PageTable}; mod alloc; mod linear; diff --git a/src/address_space/mod.rs b/src/address_space/mod.rs index 993de01..890053c 100644 --- a/src/address_space/mod.rs +++ b/src/address_space/mod.rs @@ -6,7 +6,7 @@ use memory_addr::{MemoryAddr, PhysAddr, is_aligned_4k}; use memory_set::{MemoryArea, MemorySet}; use page_table_multiarch::PagingHandler; -use crate::npt::NestedPageTable as PageTable; +use npt_multiarch::NestedPageTable as PageTable; use crate::{GuestPhysAddr, GuestPhysAddrRange, mapping_err_to_ax_err}; mod backend; diff --git a/src/lib.rs b/src/lib.rs index 760ce5a..a5a7f10 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,14 +7,12 @@ extern crate log; extern crate alloc; -mod addr; mod address_space; pub mod device; mod frame; mod hal; -mod npt; -pub use addr::*; +pub use npt_multiarch::*; pub use address_space::*; pub use frame::PhysFrame; diff --git a/src/npt/arch/aarch64.rs b/src/npt/arch/aarch64.rs deleted file mode 100644 index 9b8b521..0000000 --- a/src/npt/arch/aarch64.rs +++ /dev/null @@ -1,252 +0,0 @@ -use core::arch::asm; -use core::fmt; -use page_table_entry::{GenericPTE, MappingFlags}; -use page_table_multiarch::{PageTable64, PagingMetaData}; -// use memory_addr::HostPhysAddr; -use crate::{GuestPhysAddr, HostPhysAddr}; - -bitflags::bitflags! { - /// Memory attribute fields in the VMSAv8-64 translation table format descriptors. - #[derive(Debug)] - pub struct DescriptorAttr: u64 { - // Attribute fields in stage 1 VMSAv8-64 Block and Page descriptors: - - /// Whether the descriptor is valid. - const VALID = 1 << 0; - /// The descriptor gives the address of the next level of translation table or 4KB page. - /// (not a 2M, 1G block) - const NON_BLOCK = 1 << 1; - /// Memory attributes index field. - const ATTR = 0b1111 << 2; - /// Access permission: read-only. - const S2AP_RO = 1 << 6; - /// Access permission: write-only. - const S2AP_WO = 1 << 7; - /// Shareability: Inner Shareable (otherwise Outer Shareable). - const INNER = 1 << 8; - /// Shareability: Inner or Outer Shareable (otherwise Non-shareable). - const SHAREABLE = 1 << 9; - /// The Access flag. - const AF = 1 << 10; - /// The not global bit. - const NG = 1 << 11; - /// Indicates that 16 adjacent translation table entries point to contiguous memory regions. - const CONTIGUOUS = 1 << 52; - /// The Privileged execute-never field. - // const PXN = 1 << 53; - /// The Execute-never or Unprivileged execute-never field. - const XN = 1 << 54; - /// Non-secure bit. For memory accesses from Secure state, specifies whether the output - /// address is in Secure or Non-secure memory. - const NS = 1 << 55; - // Next-level attributes in stage 1 VMSAv8-64 Table descriptors: - - /// PXN limit for subsequent levels of lookup. - const PXN_TABLE = 1 << 59; - /// XN limit for subsequent levels of lookup. - const XN_TABLE = 1 << 60; - /// Access permissions limit for subsequent levels of lookup: access at EL0 not permitted. - const AP_NO_EL0_TABLE = 1 << 61; - /// Access permissions limit for subsequent levels of lookup: write access not permitted. 
- const AP_NO_WRITE_TABLE = 1 << 62; - /// For memory accesses from Secure state, specifies the Security state for subsequent - /// levels of lookup. - const NS_TABLE = 1 << 63; - } -} - -#[repr(u64)] -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -enum MemType { - Device = 0, - Normal = 1, - NormalNonCache = 2, -} - -impl DescriptorAttr { - #[allow(clippy::unusual_byte_groupings)] - const ATTR_INDEX_MASK: u64 = 0b1111_00; - const PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE: u64 = 0b11 << 2; - const PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_CACHEABLE: u64 = 0b11 << 4; - const PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE: u64 = 0b1 << 4; - const NORMAL_BIT: u64 = Self::PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE - | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_CACHEABLE; - - const fn from_mem_type(mem_type: MemType) -> Self { - let bits = match mem_type { - MemType::Normal => Self::NORMAL_BIT | Self::SHAREABLE.bits(), - MemType::NormalNonCache => { - Self::PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE - | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE - | Self::SHAREABLE.bits() - } - MemType::Device => Self::SHAREABLE.bits(), - }; - Self::from_bits_retain(bits) - } - - fn mem_type(&self) -> MemType { - let idx = self.bits() & Self::ATTR_INDEX_MASK; - match idx { - Self::NORMAL_BIT => MemType::Normal, - Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE => MemType::NormalNonCache, - 0 => MemType::Device, - _ => panic!("Invalid memory attribute index"), - } - } -} - -impl From for MappingFlags { - fn from(attr: DescriptorAttr) -> Self { - let mut flags = Self::empty(); - if attr.contains(DescriptorAttr::VALID) { - flags |= Self::READ; - } - if !attr.contains(DescriptorAttr::S2AP_WO) { - flags |= Self::WRITE; - } - if !attr.contains(DescriptorAttr::XN) { - flags |= Self::EXECUTE; - } - if attr.mem_type() == MemType::Device { - flags |= Self::DEVICE; - } - flags - } -} - -impl From for DescriptorAttr { - fn from(flags: MappingFlags) -> Self { - let mut attr = if flags.contains(MappingFlags::DEVICE) { - if flags.contains(MappingFlags::UNCACHED) { - Self::from_mem_type(MemType::NormalNonCache) - } else { - Self::from_mem_type(MemType::Device) - } - } else { - Self::from_mem_type(MemType::Normal) - }; - if flags.contains(MappingFlags::READ) { - attr |= Self::VALID | Self::S2AP_RO; - } - if flags.contains(MappingFlags::WRITE) { - attr |= Self::S2AP_WO; - } - attr - } -} - -/// A VMSAv8-64 translation table descriptor. -/// -/// Note that the **AttrIndx\[2:0\]** (bit\[4:2\]) field is set to `0` for device -/// memory, and `1` for normal memory. The system must configure the MAIR_ELx -/// system register accordingly. -#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct A64PTEHV(u64); - -impl A64PTEHV { - const PHYS_ADDR_MASK: u64 = 0x0000_ffff_ffff_f000; // bits 12..48 - - /// Creates an empty descriptor with all bits set to zero. 
- pub const fn empty() -> Self { - Self(0) - } -} - -impl GenericPTE for A64PTEHV { - fn bits(self) -> usize { - self.0 as usize - } - fn new_page(paddr: HostPhysAddr, flags: MappingFlags, is_huge: bool) -> Self { - let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF; - if !is_huge { - attr |= DescriptorAttr::NON_BLOCK; - } - Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)) - } - fn new_table(paddr: HostPhysAddr) -> Self { - let attr = DescriptorAttr::NON_BLOCK | DescriptorAttr::VALID; - Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)) - } - fn paddr(&self) -> HostPhysAddr { - HostPhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize) - } - fn flags(&self) -> MappingFlags { - DescriptorAttr::from_bits_truncate(self.0).into() - } - fn set_paddr(&mut self, paddr: HostPhysAddr) { - self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK) - } - fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) { - let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF; - if !is_huge { - attr |= DescriptorAttr::NON_BLOCK; - } - self.0 = (self.0 & Self::PHYS_ADDR_MASK) | attr.bits(); - } - fn is_unused(&self) -> bool { - self.0 == 0 - } - fn is_present(&self) -> bool { - DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::VALID) - } - fn is_huge(&self) -> bool { - !DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::NON_BLOCK) - } - fn clear(&mut self) { - self.0 = 0 - } -} - -impl fmt::Debug for A64PTEHV { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut f = f.debug_struct("A64PTE"); - f.field("raw", &self.0) - .field("paddr", &self.paddr()) - .field("attr", &DescriptorAttr::from_bits_truncate(self.0)) - .field("flags", &self.flags()) - .finish() - } -} - -/// Metadata of AArch64 hypervisor page tables (ipa to hpa). -#[derive(Copy, Clone)] -pub struct A64HVPagingMetaData; - -impl PagingMetaData for A64HVPagingMetaData { - const LEVELS: usize = 3; - // In Armv8.0-A, the maximum size for a physical address is 48 bits. - const PA_MAX_BITS: usize = 48; - // The size of the IPA space can be configured in the same way as the - const VA_MAX_BITS: usize = 40; // virtual address space. VTCR_EL2.T0SZ controls the size. - - type VirtAddr = GuestPhysAddr; - - fn flush_tlb(vaddr: Option) { - unsafe { - if let Some(vaddr) = vaddr { - #[cfg(not(feature = "arm-el2"))] - { - asm!("tlbi vaae1is, {}; dsb sy; isb", in(reg) vaddr.as_usize()) - } - #[cfg(feature = "arm-el2")] - { - asm!("tlbi vae2is, {}; dsb sy; isb", in(reg) vaddr.as_usize()) - } - } else { - // flush the entire TLB - #[cfg(not(feature = "arm-el2"))] - { - asm!("tlbi vmalle1; dsb sy; isb") - } - #[cfg(feature = "arm-el2")] - { - asm!("tlbi alle2is; dsb sy; isb") - } - } - } - } -} -/// According to rust shyper, AArch64 translation table. -pub type NestedPageTable = PageTable64; diff --git a/src/npt/arch/mod.rs b/src/npt/arch/mod.rs deleted file mode 100644 index c80a457..0000000 --- a/src/npt/arch/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -//! Architecture dependent structures. - -cfg_if::cfg_if! 
{ - if #[cfg(target_arch = "x86_64")] { - mod x86_64; - pub use self::x86_64::*; - } else if #[cfg(target_arch = "aarch64")] { - mod aarch64; - pub use self::aarch64::*; - } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] { - mod riscv; - pub use self::riscv::*; - } -} diff --git a/src/npt/arch/riscv.rs b/src/npt/arch/riscv.rs deleted file mode 100644 index aa190e2..0000000 --- a/src/npt/arch/riscv.rs +++ /dev/null @@ -1,6 +0,0 @@ -use page_table_entry::riscv::Rv64PTE; -use page_table_multiarch::{PageTable64, riscv::Sv39MetaData}; - -use crate::GuestPhysAddr; - -pub type NestedPageTable = PageTable64, Rv64PTE, H>; diff --git a/src/npt/arch/x86_64.rs b/src/npt/arch/x86_64.rs deleted file mode 100644 index ae57a11..0000000 --- a/src/npt/arch/x86_64.rs +++ /dev/null @@ -1,183 +0,0 @@ -use core::{convert::TryFrom, fmt}; - -use bit_field::BitField; -use page_table_entry::{GenericPTE, MappingFlags}; -use page_table_multiarch::{PageTable64, PagingMetaData}; - -use crate::{GuestPhysAddr, HostPhysAddr}; - -bitflags::bitflags! { - /// EPT entry flags. (SDM Vol. 3C, Section 28.3.2) - struct EPTFlags: u64 { - /// Read access. - const READ = 1 << 0; - /// Write access. - const WRITE = 1 << 1; - /// Execute access. - const EXECUTE = 1 << 2; - /// EPT memory type. Only for terminate pages. - const MEM_TYPE_MASK = 0b111 << 3; - /// Ignore PAT memory type. Only for terminate pages. - const IGNORE_PAT = 1 << 6; - /// Specifies that the entry maps a huge frame instead of a page table. - /// Only allowed in P2 or P3 tables. - const HUGE_PAGE = 1 << 7; - /// If bit 6 of EPTP is 1, accessed flag for EPT. - const ACCESSED = 1 << 8; - /// If bit 6 of EPTP is 1, dirty flag for EPT. - const DIRTY = 1 << 9; - /// Execute access for user-mode linear addresses. - const EXECUTE_FOR_USER = 1 << 10; - } -} - -numeric_enum_macro::numeric_enum! { - #[repr(u8)] - #[derive(Debug, PartialEq, Clone, Copy)] - /// EPT memory typing. (SDM Vol. 3C, Section 28.3.7) - enum EPTMemType { - Uncached = 0, - WriteCombining = 1, - WriteThrough = 4, - WriteProtected = 5, - WriteBack = 6, - } -} - -impl EPTFlags { - fn set_mem_type(&mut self, mem_type: EPTMemType) { - let mut bits = self.bits(); - bits.set_bits(3..6, mem_type as u64); - *self = Self::from_bits_truncate(bits) - } - fn mem_type(&self) -> Result { - EPTMemType::try_from(self.bits().get_bits(3..6) as u8) - } -} - -impl From for EPTFlags { - fn from(f: MappingFlags) -> Self { - if f.is_empty() { - return Self::empty(); - } - let mut ret = Self::empty(); - if f.contains(MappingFlags::READ) { - ret |= Self::READ; - } - if f.contains(MappingFlags::WRITE) { - ret |= Self::WRITE; - } - if f.contains(MappingFlags::EXECUTE) { - ret |= Self::EXECUTE; - } - if !f.contains(MappingFlags::DEVICE) { - ret.set_mem_type(EPTMemType::WriteBack); - } - ret - } -} - -impl From for MappingFlags { - fn from(f: EPTFlags) -> Self { - let mut ret = MappingFlags::empty(); - if f.contains(EPTFlags::READ) { - ret |= Self::READ; - } - if f.contains(EPTFlags::WRITE) { - ret |= Self::WRITE; - } - if f.contains(EPTFlags::EXECUTE) { - ret |= Self::EXECUTE; - } - if let Ok(EPTMemType::Uncached) = f.mem_type() { - ret |= Self::DEVICE; - } - ret - } -} - -/// An x86_64 VMX extented page table entry. -/// Note: The [EPTEntry] can be moved to the independent crate `page_table_entry`. 
-#[derive(Clone, Copy)] -#[repr(transparent)] -pub struct EPTEntry(u64); - -impl EPTEntry { - const PHYS_ADDR_MASK: u64 = 0x000f_ffff_ffff_f000; // bits 12..52 -} - -impl GenericPTE for EPTEntry { - fn new_page(paddr: HostPhysAddr, flags: MappingFlags, is_huge: bool) -> Self { - let mut flags = EPTFlags::from(flags); - if is_huge { - flags |= EPTFlags::HUGE_PAGE; - } - Self(flags.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)) - } - fn new_table(paddr: HostPhysAddr) -> Self { - let flags = EPTFlags::READ | EPTFlags::WRITE | EPTFlags::EXECUTE; - Self(flags.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)) - } - fn paddr(&self) -> HostPhysAddr { - HostPhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize) - } - fn flags(&self) -> MappingFlags { - EPTFlags::from_bits_truncate(self.0).into() - } - fn set_paddr(&mut self, paddr: HostPhysAddr) { - self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK) - } - - fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) { - let mut flags = EPTFlags::from(flags); - if is_huge { - flags |= EPTFlags::HUGE_PAGE; - } - self.0 = (self.0 & Self::PHYS_ADDR_MASK) | flags.bits() - } - fn is_unused(&self) -> bool { - self.0 == 0 - } - fn is_present(&self) -> bool { - self.0 & 0x7 != 0 // RWX != 0 - } - fn is_huge(&self) -> bool { - EPTFlags::from_bits_truncate(self.0).contains(EPTFlags::HUGE_PAGE) - } - fn clear(&mut self) { - self.0 = 0 - } - - fn bits(self) -> usize { - self.0 as usize - } -} - -impl fmt::Debug for EPTEntry { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("EPTEntry") - .field("raw", &self.0) - .field("hpaddr", &self.paddr()) - .field("flags", &self.flags()) - .field("mem_type", &EPTFlags::from_bits_truncate(self.0).mem_type()) - .finish() - } -} - -/// Metadata of VMX extended page tables. -pub struct ExtendedPageTableMetadata; - -impl PagingMetaData for ExtendedPageTableMetadata { - const LEVELS: usize = 4; - const PA_MAX_BITS: usize = 52; - const VA_MAX_BITS: usize = 48; - - type VirtAddr = GuestPhysAddr; - - fn flush_tlb(_vaddr: Option) { - todo!() - } -} - -/// The VMX extended page table. (SDM Vol. 3C, Section 29.3) -pub type ExtendedPageTable = PageTable64; diff --git a/src/npt/mod.rs b/src/npt/mod.rs deleted file mode 100644 index 14d7d2d..0000000 --- a/src/npt/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -cfg_if::cfg_if! { - if #[cfg(target_arch = "x86_64")] { - /// The architecture-specific nested page table for two-stage address translation. - pub type NestedPageTable = arch::ExtendedPageTable; - } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] { - /// The architecture-specific page table. - pub type NestedPageTable = arch::NestedPageTable; - } else if #[cfg(target_arch = "aarch64")]{ - /// The architecture-specific nested page table for two-stage address translation. 
- pub type NestedPageTable = arch::NestedPageTable; - } -} - -mod arch; From 1a009bcd4ddaf4cb5271cd3796fd62f69809f102 Mon Sep 17 00:00:00 2001 From: Shi Lei Date: Sun, 13 Apr 2025 15:35:36 +0000 Subject: [PATCH 2/3] Refactor AddrSpace generic type as 'AddrSpace' --- src/address_space/backend/alloc.rs | 29 ++++++------- src/address_space/backend/linear.rs | 21 +++++----- src/address_space/backend/mod.rs | 38 ++++++++--------- src/address_space/mod.rs | 63 ++++++++++++++--------------- src/lib.rs | 2 + 5 files changed, 78 insertions(+), 75 deletions(-) diff --git a/src/address_space/backend/alloc.rs b/src/address_space/backend/alloc.rs index f6d3f3a..6730c0a 100644 --- a/src/address_space/backend/alloc.rs +++ b/src/address_space/backend/alloc.rs @@ -1,36 +1,37 @@ use memory_addr::{PageIter4K, PhysAddr}; -use page_table_multiarch::{MappingFlags, PageSize, PagingHandler}; +use page_table_multiarch::{PageTable64, MappingFlags, PageSize, PagingHandler, PagingMetaData, GenericPTE}; use super::Backend; -use npt_multiarch::{GuestPhysAddr, NestedPageTable as PageTable}; -impl Backend { +impl Backend { /// Creates a new allocation mapping backend. pub const fn new_alloc(populate: bool) -> Self { Self::Alloc { populate, - _phantom: core::marker::PhantomData, + _phantom0: core::marker::PhantomData, + _phantom1: core::marker::PhantomData, + _phantom2: core::marker::PhantomData, } } pub(crate) fn map_alloc( &self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, flags: MappingFlags, - pt: &mut PageTable, + pt: &mut PageTable64, populate: bool, ) -> bool { debug!( "map_alloc: [{:#x}, {:#x}) {:?} (populate={})", start, - start + size, + start.into() + size, flags, populate ); if populate { // allocate all possible physical frames for populated mapping. - for addr in PageIter4K::new(start, start + size).unwrap() { + for addr in PageIter4K::new(start, (start.into() + size).into()).unwrap() { if H::alloc_frame() .and_then(|frame| pt.map(addr, frame, PageSize::Size4K, flags).ok()) .is_none() @@ -55,13 +56,13 @@ impl Backend { pub(crate) fn unmap_alloc( &self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, - pt: &mut PageTable, + pt: &mut PageTable64, _populate: bool, ) -> bool { - debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size); - for addr in PageIter4K::new(start, start + size).unwrap() { + debug!("unmap_alloc: [{:#x}, {:#x})", start, start.into() + size); + for addr in PageIter4K::new(start, (start.into() + size).into()).unwrap() { if let Ok((frame, page_size, _)) = pt.unmap(addr) { // Deallocate the physical frame if there is a mapping in the // page table. @@ -78,9 +79,9 @@ impl Backend { pub(crate) fn handle_page_fault_alloc( &self, - vaddr: GuestPhysAddr, + vaddr: M::VirtAddr, orig_flags: MappingFlags, - pt: &mut PageTable, + pt: &mut PageTable64, populate: bool, ) -> bool { if populate { diff --git a/src/address_space/backend/linear.rs b/src/address_space/backend/linear.rs index 890824b..21d8b41 100644 --- a/src/address_space/backend/linear.rs +++ b/src/address_space/backend/linear.rs @@ -1,10 +1,9 @@ use memory_addr::PhysAddr; -use page_table_multiarch::{MappingFlags, PagingHandler}; +use page_table_multiarch::{PageTable64, MappingFlags, PagingHandler, PagingMetaData, GenericPTE}; use super::Backend; -use npt_multiarch::{GuestPhysAddr, NestedPageTable as PageTable}; -impl Backend { +impl Backend { /// Creates a new linear mapping backend. 
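    /// A hedged call-site sketch (the addresses are illustrative, not from
    /// this patch). `pa_va_offset` is `vaddr - paddr`, as documented on
    /// [`Backend::Linear`]:
    ///
    /// ```ignore
    /// // Guest-physical 0x8000_0000 maps linearly onto host-physical
    /// // 0x4000_0000 => offset = 0x8000_0000 - 0x4000_0000.
    /// let backend = Backend::new_linear(0x4000_0000);
    /// ```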
pub const fn new_linear(pa_va_offset: usize) -> Self { Self::Linear { pa_va_offset } @@ -12,24 +11,24 @@ impl Backend { pub(crate) fn map_linear( &self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, flags: MappingFlags, - pt: &mut PageTable, + pt: &mut PageTable64, pa_va_offset: usize, ) -> bool { - let pa_start = PhysAddr::from(start.as_usize() - pa_va_offset); + let pa_start = PhysAddr::from(start.into() - pa_va_offset); debug!( "map_linear: [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}", start, - start + size, + start.into() + size, pa_start, pa_start + size, flags ); pt.map_region( start, - |va| PhysAddr::from(va.as_usize() - pa_va_offset), + |va| PhysAddr::from(va.into() - pa_va_offset), size, flags, false, @@ -40,12 +39,12 @@ impl Backend { pub(crate) fn unmap_linear( &self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, - pt: &mut PageTable, + pt: &mut PageTable64, _pa_va_offset: usize, ) -> bool { - debug!("unmap_linear: [{:#x}, {:#x})", start, start + size); + debug!("unmap_linear: [{:#x}, {:#x})", start, start.into() + size); pt.unmap_region(start, size, true).is_ok() } } diff --git a/src/address_space/backend/mod.rs b/src/address_space/backend/mod.rs index 70b076c..f840993 100644 --- a/src/address_space/backend/mod.rs +++ b/src/address_space/backend/mod.rs @@ -1,9 +1,7 @@ //! Memory mapping backends. use memory_set::MappingBackend; -use page_table_multiarch::{MappingFlags, PagingHandler}; - -use npt_multiarch::{GuestPhysAddr, NestedPageTable as PageTable}; +use page_table_multiarch::{PageTable64, MappingFlags, PagingHandler, PagingMetaData, GenericPTE}; mod alloc; mod linear; @@ -16,7 +14,7 @@ mod linear; /// contiguous and their addresses should be known when creating the mapping. /// - **Allocation**: used in general, or for lazy mappings. The target physical /// frames are obtained from the global allocator. -pub enum Backend { +pub enum Backend { /// Linear mapping backend. /// /// The offset between the virtual address and the physical address is @@ -36,33 +34,37 @@ pub enum Backend { /// Whether to populate the physical frames when creating the mapping. populate: bool, /// A phantom data for the paging handler. - _phantom: core::marker::PhantomData, + _phantom0: core::marker::PhantomData, + _phantom1: core::marker::PhantomData, + _phantom2: core::marker::PhantomData, }, } -impl Clone for Backend { +impl Clone for Backend { fn clone(&self) -> Self { match *self { Self::Linear { pa_va_offset } => Self::Linear { pa_va_offset }, Self::Alloc { populate, .. } => Self::Alloc { populate, - _phantom: core::marker::PhantomData, + _phantom0: core::marker::PhantomData, + _phantom1: core::marker::PhantomData, + _phantom2: core::marker::PhantomData, }, } } } -impl MappingBackend for Backend { - type Addr = GuestPhysAddr; +impl MappingBackend for Backend { + type Addr = M::VirtAddr; type Flags = MappingFlags; - type PageTable = PageTable; + type PageTable = PageTable64; fn map( &self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, flags: MappingFlags, - pt: &mut PageTable, + pt: &mut Self::PageTable, ) -> bool { match *self { Self::Linear { pa_va_offset } => self.map_linear(start, size, flags, pt, pa_va_offset), @@ -70,7 +72,7 @@ impl MappingBackend for Backend { } } - fn unmap(&self, start: GuestPhysAddr, size: usize, pt: &mut PageTable) -> bool { + fn unmap(&self, start: Self::Addr, size: usize, pt: &mut Self::PageTable) -> bool { match *self { Self::Linear { pa_va_offset } => self.unmap_linear(start, size, pt, pa_va_offset), Self::Alloc { populate, .. 
} => self.unmap_alloc(start, size, pt, populate), @@ -79,22 +81,22 @@ impl MappingBackend for Backend { fn protect( &self, - _start: GuestPhysAddr, + _start: Self::Addr, _size: usize, _new_flags: MappingFlags, - _page_table: &mut PageTable, + _page_table: &mut Self::PageTable, ) -> bool { // a stub here true } } -impl Backend { +impl Backend { pub(crate) fn handle_page_fault( &self, - vaddr: GuestPhysAddr, + vaddr: M::VirtAddr, orig_flags: MappingFlags, - page_table: &mut PageTable, + page_table: &mut PageTable64, ) -> bool { match *self { Self::Linear { .. } => false, // Linear mappings should not trigger page faults. diff --git a/src/address_space/mod.rs b/src/address_space/mod.rs index 890053c..174e1c3 100644 --- a/src/address_space/mod.rs +++ b/src/address_space/mod.rs @@ -2,12 +2,11 @@ use alloc::vec::Vec; use core::fmt; use axerrno::{AxError, AxResult, ax_err}; -use memory_addr::{MemoryAddr, PhysAddr, is_aligned_4k}; +use memory_addr::{MemoryAddr, PhysAddr, AddrRange, is_aligned_4k}; use memory_set::{MemoryArea, MemorySet}; -use page_table_multiarch::PagingHandler; +use page_table_multiarch::{PagingHandler, PageTable64, PagingMetaData, GenericPTE}; -use npt_multiarch::NestedPageTable as PageTable; -use crate::{GuestPhysAddr, GuestPhysAddrRange, mapping_err_to_ax_err}; +use crate::mapping_err_to_ax_err; mod backend; @@ -15,20 +14,20 @@ pub use backend::Backend; pub use page_table_entry::MappingFlags; /// The virtual memory address space. -pub struct AddrSpace { - va_range: GuestPhysAddrRange, - areas: MemorySet>, - pt: PageTable, +pub struct AddrSpace { + va_range: AddrRange, + areas: MemorySet>, + pt: PageTable64, } -impl AddrSpace { +impl AddrSpace { /// Returns the address space base. - pub const fn base(&self) -> GuestPhysAddr { + pub const fn base(&self) -> M::VirtAddr { self.va_range.start } /// Returns the address space end. - pub const fn end(&self) -> GuestPhysAddr { + pub const fn end(&self) -> M::VirtAddr { self.va_range.end } @@ -38,7 +37,7 @@ impl AddrSpace { } /// Returns the reference to the inner page table. - pub const fn page_table(&self) -> &PageTable { + pub const fn page_table(&self) -> &PageTable64 { &self.pt } @@ -48,17 +47,17 @@ impl AddrSpace { } /// Checks if the address space contains the given address range. - pub fn contains_range(&self, start: GuestPhysAddr, size: usize) -> bool { + pub fn contains_range(&self, start: M::VirtAddr, size: usize) -> bool { self.va_range - .contains_range(GuestPhysAddrRange::from_start_size(start, size)) + .contains_range(AddrRange::from_start_size(start, size)) } /// Creates a new empty address space. - pub fn new_empty(base: GuestPhysAddr, size: usize) -> AxResult { + pub fn new_empty(base: M::VirtAddr, size: usize) -> AxResult { Ok(Self { - va_range: GuestPhysAddrRange::from_start_size(base, size), + va_range: AddrRange::from_start_size(base, size), areas: MemorySet::new(), - pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?, + pt: PageTable64::try_new().map_err(|_| AxError::NoMemory)?, }) } @@ -69,7 +68,7 @@ impl AddrSpace { /// The `flags` parameter indicates the mapping permissions and attributes. 
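    /// A sketch of a typical call after this refactor (the variable names
    /// and addresses are illustrative):
    ///
    /// ```ignore
    /// aspace.map_linear(
    ///     gpa,                          // M::VirtAddr, must be 4K-aligned
    ///     PhysAddr::from(0x4000_0000),  // host-physical base, 4K-aligned
    ///     0x20_0000,                    // size, 4K-aligned
    ///     MappingFlags::READ | MappingFlags::WRITE,
    /// )?;
    /// ```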
pub fn map_linear( &mut self, - start_vaddr: GuestPhysAddr, + start_vaddr: M::VirtAddr, start_paddr: PhysAddr, size: usize, flags: MappingFlags, @@ -81,7 +80,7 @@ impl AddrSpace { return ax_err!(InvalidInput, "address not aligned"); } - let offset = start_vaddr.as_usize() - start_paddr.as_usize(); + let offset = start_vaddr.into() - start_paddr.as_usize(); let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset)); self.areas .map(area, &mut self.pt, false) @@ -96,7 +95,7 @@ impl AddrSpace { /// The `flags` parameter indicates the mapping permissions and attributes. pub fn map_alloc( &mut self, - start: GuestPhysAddr, + start: M::VirtAddr, size: usize, flags: MappingFlags, populate: bool, @@ -104,7 +103,7 @@ impl AddrSpace { if !self.contains_range(start, size) { return ax_err!( InvalidInput, - alloc::format!("address [{:?}~{:?}] out of range", start, start + size).as_str() + alloc::format!("address [{:?}~{:?}] out of range", start, start.into() + size).as_str() ); } if !start.is_aligned_4k() || !is_aligned_4k(size) { @@ -119,7 +118,7 @@ impl AddrSpace { } /// Removes mappings within the specified virtual address range. - pub fn unmap(&mut self, start: GuestPhysAddr, size: usize) -> AxResult { + pub fn unmap(&mut self, start: M::VirtAddr, size: usize) -> AxResult { if !self.contains_range(start, size) { return ax_err!(InvalidInput, "address out of range"); } @@ -144,7 +143,7 @@ impl AddrSpace { /// /// Returns `true` if the page fault is handled successfully (not a real /// fault). - pub fn handle_page_fault(&mut self, vaddr: GuestPhysAddr, access_flags: MappingFlags) -> bool { + pub fn handle_page_fault(&mut self, vaddr: M::VirtAddr, access_flags: MappingFlags) -> bool { if !self.va_range.contains(vaddr) { return false; } @@ -163,7 +162,7 @@ impl AddrSpace { /// Translates the given `VirtAddr` into `PhysAddr`. /// /// Returns `None` if the virtual address is out of range or not mapped. - pub fn translate(&self, vaddr: GuestPhysAddr) -> Option { + pub fn translate(&self, vaddr: M::VirtAddr) -> Option { if !self.va_range.contains(vaddr) { return None; } @@ -181,7 +180,7 @@ impl AddrSpace { /// Returns `None` if the virtual address is out of range or not mapped. pub fn translated_byte_buffer( &self, - vaddr: GuestPhysAddr, + vaddr: M::VirtAddr, len: usize, ) -> Option> { if !self.va_range.contains(vaddr) { @@ -197,8 +196,8 @@ impl AddrSpace { return None; } - let mut start = vaddr; - let end = start + len; + let mut start: M::VirtAddr = vaddr; + let end: M::VirtAddr = (start.into() + len).into(); debug!( "start {:?} end {:?} area size {:#x}", @@ -210,13 +209,13 @@ impl AddrSpace { let mut v = Vec::new(); while start < end { let (start_paddr, _, page_size) = self.page_table().query(start).unwrap(); - let mut end_va = start.align_down(page_size) + page_size.into(); + let mut end_va: M::VirtAddr = (start.align_down(page_size).into() + page_size as usize).into(); end_va = end_va.min(end); v.push(unsafe { core::slice::from_raw_parts_mut( H::phys_to_virt(start_paddr).as_mut_ptr(), - (end_va - start.as_usize()).into(), + (end_va.into() - start.into()).into(), ) }); start = end_va; @@ -231,7 +230,7 @@ impl AddrSpace { /// and returns the size of the `MemoryArea` corresponding to the target vaddr. /// /// Returns `None` if the virtual address is out of range or not mapped. 
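    /// Sketch (illustrative names): bounds-check a guest buffer before
    /// accessing it through the returned host-physical base:
    ///
    /// ```ignore
    /// let (hpa, limit) = aspace.translate_and_get_limit(gpa)?;
    /// assert!(buf_len <= limit);
    /// ```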
- pub fn translate_and_get_limit(&self, vaddr: GuestPhysAddr) -> Option<(PhysAddr, usize)> { + pub fn translate_and_get_limit(&self, vaddr: M::VirtAddr) -> Option<(PhysAddr, usize)> { if !self.va_range.contains(vaddr) { return None; } @@ -246,7 +245,7 @@ impl AddrSpace { } } -impl fmt::Debug for AddrSpace { +impl fmt::Debug for AddrSpace { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("AddrSpace") .field("va_range", &self.va_range) @@ -256,7 +255,7 @@ impl fmt::Debug for AddrSpace { } } -impl Drop for AddrSpace { +impl Drop for AddrSpace { fn drop(&mut self) { self.clear(); } diff --git a/src/lib.rs b/src/lib.rs index a5a7f10..8c67a39 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,6 +21,8 @@ pub use hal::AxMmHal; use axerrno::AxError; use memory_set::MappingError; +pub type AddrSpace = address_space::AddrSpace; + /// Information about nested page faults. #[derive(Debug)] pub struct NestedPageFaultInfo { From a443fef72d2fcbe36519bf892ce6e4047676af76 Mon Sep 17 00:00:00 2001 From: Shi Lei Date: Sun, 13 Apr 2025 15:51:11 +0000 Subject: [PATCH 3/3] Remove 'address_space' and use arceos-org/aspace_generic instead --- Cargo.toml | 1 + src/address_space/backend/alloc.rs | 98 ----------- src/address_space/backend/linear.rs | 50 ------ src/address_space/backend/mod.rs | 108 ------------ src/address_space/mod.rs | 262 ---------------------------- src/lib.rs | 17 +- 6 files changed, 3 insertions(+), 533 deletions(-) delete mode 100644 src/address_space/backend/alloc.rs delete mode 100644 src/address_space/backend/linear.rs delete mode 100644 src/address_space/backend/mod.rs delete mode 100644 src/address_space/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 9e7da22..cde88f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,3 +24,4 @@ page_table_entry = "0.5" page_table_multiarch = "0.5" npt_multiarch = { git = "https://github.com/arceos-org/npt_multiarch.git" } +aspace_generic = { git = "https://github.com/arceos-org/aspace_generic.git" } diff --git a/src/address_space/backend/alloc.rs b/src/address_space/backend/alloc.rs deleted file mode 100644 index 6730c0a..0000000 --- a/src/address_space/backend/alloc.rs +++ /dev/null @@ -1,98 +0,0 @@ -use memory_addr::{PageIter4K, PhysAddr}; -use page_table_multiarch::{PageTable64, MappingFlags, PageSize, PagingHandler, PagingMetaData, GenericPTE}; - -use super::Backend; - -impl Backend { - /// Creates a new allocation mapping backend. - pub const fn new_alloc(populate: bool) -> Self { - Self::Alloc { - populate, - _phantom0: core::marker::PhantomData, - _phantom1: core::marker::PhantomData, - _phantom2: core::marker::PhantomData, - } - } - - pub(crate) fn map_alloc( - &self, - start: M::VirtAddr, - size: usize, - flags: MappingFlags, - pt: &mut PageTable64, - populate: bool, - ) -> bool { - debug!( - "map_alloc: [{:#x}, {:#x}) {:?} (populate={})", - start, - start.into() + size, - flags, - populate - ); - if populate { - // allocate all possible physical frames for populated mapping. - for addr in PageIter4K::new(start, (start.into() + size).into()).unwrap() { - if H::alloc_frame() - .and_then(|frame| pt.map(addr, frame, PageSize::Size4K, flags).ok()) - .is_none() - { - return false; - } - } - true - } else { - // Map to a empty entry for on-demand mapping. 
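    // (Each entry in the region is created with empty flags and a null
    // physical address; the first guest access then faults, and
    // handle_page_fault_alloc below remaps the entry to a freshly
    // allocated frame with the original flags.)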
- pt.map_region( - start, - |_va| PhysAddr::from(0), - size, - MappingFlags::empty(), - false, - false, - ) - .is_ok() - } - } - - pub(crate) fn unmap_alloc( - &self, - start: M::VirtAddr, - size: usize, - pt: &mut PageTable64, - _populate: bool, - ) -> bool { - debug!("unmap_alloc: [{:#x}, {:#x})", start, start.into() + size); - for addr in PageIter4K::new(start, (start.into() + size).into()).unwrap() { - if let Ok((frame, page_size, _)) = pt.unmap(addr) { - // Deallocate the physical frame if there is a mapping in the - // page table. - if page_size.is_huge() { - return false; - } - H::dealloc_frame(frame); - } else { - // It's fine if the page is not mapped. - } - } - true - } - - pub(crate) fn handle_page_fault_alloc( - &self, - vaddr: M::VirtAddr, - orig_flags: MappingFlags, - pt: &mut PageTable64, - populate: bool, - ) -> bool { - if populate { - false // Populated mappings should not trigger page faults. - } else { - // Allocate a physical frame lazily and map it to the fault address. - // `vaddr` does not need to be aligned. It will be automatically - // aligned during `pt.remap` regardless of the page size. - H::alloc_frame() - .and_then(|frame| pt.remap(vaddr, frame, orig_flags).ok()) - .is_some() - } - } -} diff --git a/src/address_space/backend/linear.rs b/src/address_space/backend/linear.rs deleted file mode 100644 index 21d8b41..0000000 --- a/src/address_space/backend/linear.rs +++ /dev/null @@ -1,50 +0,0 @@ -use memory_addr::PhysAddr; -use page_table_multiarch::{PageTable64, MappingFlags, PagingHandler, PagingMetaData, GenericPTE}; - -use super::Backend; - -impl Backend { - /// Creates a new linear mapping backend. - pub const fn new_linear(pa_va_offset: usize) -> Self { - Self::Linear { pa_va_offset } - } - - pub(crate) fn map_linear( - &self, - start: M::VirtAddr, - size: usize, - flags: MappingFlags, - pt: &mut PageTable64, - pa_va_offset: usize, - ) -> bool { - let pa_start = PhysAddr::from(start.into() - pa_va_offset); - debug!( - "map_linear: [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}", - start, - start.into() + size, - pa_start, - pa_start + size, - flags - ); - pt.map_region( - start, - |va| PhysAddr::from(va.into() - pa_va_offset), - size, - flags, - false, - false, - ) - .is_ok() - } - - pub(crate) fn unmap_linear( - &self, - start: M::VirtAddr, - size: usize, - pt: &mut PageTable64, - _pa_va_offset: usize, - ) -> bool { - debug!("unmap_linear: [{:#x}, {:#x})", start, start.into() + size); - pt.unmap_region(start, size, true).is_ok() - } -} diff --git a/src/address_space/backend/mod.rs b/src/address_space/backend/mod.rs deleted file mode 100644 index f840993..0000000 --- a/src/address_space/backend/mod.rs +++ /dev/null @@ -1,108 +0,0 @@ -//! Memory mapping backends. - -use memory_set::MappingBackend; -use page_table_multiarch::{PageTable64, MappingFlags, PagingHandler, PagingMetaData, GenericPTE}; - -mod alloc; -mod linear; - -/// A unified enum type for different memory mapping backends. -/// -/// Currently, two backends are implemented: -/// -/// - **Linear**: used for linear mappings. The target physical frames are -/// contiguous and their addresses should be known when creating the mapping. -/// - **Allocation**: used in general, or for lazy mappings. The target physical -/// frames are obtained from the global allocator. -pub enum Backend { - /// Linear mapping backend. - /// - /// The offset between the virtual address and the physical address is - /// constant, which is specified by `pa_va_offset`. 
For example, the virtual - /// address `vaddr` is mapped to the physical address `vaddr - pa_va_offset`. - Linear { - /// `vaddr - paddr`. - pa_va_offset: usize, - }, - /// Allocation mapping backend. - /// - /// If `populate` is `true`, all physical frames are allocated when the - /// mapping is created, and no page faults are triggered during the memory - /// access. Otherwise, the physical frames are allocated on demand (by - /// handling page faults). - Alloc { - /// Whether to populate the physical frames when creating the mapping. - populate: bool, - /// A phantom data for the paging handler. - _phantom0: core::marker::PhantomData, - _phantom1: core::marker::PhantomData, - _phantom2: core::marker::PhantomData, - }, -} - -impl Clone for Backend { - fn clone(&self) -> Self { - match *self { - Self::Linear { pa_va_offset } => Self::Linear { pa_va_offset }, - Self::Alloc { populate, .. } => Self::Alloc { - populate, - _phantom0: core::marker::PhantomData, - _phantom1: core::marker::PhantomData, - _phantom2: core::marker::PhantomData, - }, - } - } -} - -impl MappingBackend for Backend { - type Addr = M::VirtAddr; - type Flags = MappingFlags; - type PageTable = PageTable64; - - fn map( - &self, - start: M::VirtAddr, - size: usize, - flags: MappingFlags, - pt: &mut Self::PageTable, - ) -> bool { - match *self { - Self::Linear { pa_va_offset } => self.map_linear(start, size, flags, pt, pa_va_offset), - Self::Alloc { populate, .. } => self.map_alloc(start, size, flags, pt, populate), - } - } - - fn unmap(&self, start: Self::Addr, size: usize, pt: &mut Self::PageTable) -> bool { - match *self { - Self::Linear { pa_va_offset } => self.unmap_linear(start, size, pt, pa_va_offset), - Self::Alloc { populate, .. } => self.unmap_alloc(start, size, pt, populate), - } - } - - fn protect( - &self, - _start: Self::Addr, - _size: usize, - _new_flags: MappingFlags, - _page_table: &mut Self::PageTable, - ) -> bool { - // a stub here - true - } -} - -impl Backend { - pub(crate) fn handle_page_fault( - &self, - vaddr: M::VirtAddr, - orig_flags: MappingFlags, - page_table: &mut PageTable64, - ) -> bool { - match *self { - Self::Linear { .. } => false, // Linear mappings should not trigger page faults. - Self::Alloc { populate, .. } => { - self.handle_page_fault_alloc(vaddr, orig_flags, page_table, populate) - } - } - } -} diff --git a/src/address_space/mod.rs b/src/address_space/mod.rs deleted file mode 100644 index 174e1c3..0000000 --- a/src/address_space/mod.rs +++ /dev/null @@ -1,262 +0,0 @@ -use alloc::vec::Vec; -use core::fmt; - -use axerrno::{AxError, AxResult, ax_err}; -use memory_addr::{MemoryAddr, PhysAddr, AddrRange, is_aligned_4k}; -use memory_set::{MemoryArea, MemorySet}; -use page_table_multiarch::{PagingHandler, PageTable64, PagingMetaData, GenericPTE}; - -use crate::mapping_err_to_ax_err; - -mod backend; - -pub use backend::Backend; -pub use page_table_entry::MappingFlags; - -/// The virtual memory address space. -pub struct AddrSpace { - va_range: AddrRange, - areas: MemorySet>, - pt: PageTable64, -} - -impl AddrSpace { - /// Returns the address space base. - pub const fn base(&self) -> M::VirtAddr { - self.va_range.start - } - - /// Returns the address space end. - pub const fn end(&self) -> M::VirtAddr { - self.va_range.end - } - - /// Returns the address space size. - pub fn size(&self) -> usize { - self.va_range.size() - } - - /// Returns the reference to the inner page table. 
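    /// Sketch (illustrative): callers can query a mapping directly via
    /// `aspace.page_table().query(gpa)`, and use `page_table_root()` below
    /// to program the hardware stage-2 root (EPTP on VMX, VTTBR_EL2 on
    /// AArch64).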
- pub const fn page_table(&self) -> &PageTable64 { - &self.pt - } - - /// Returns the root physical address of the inner page table. - pub const fn page_table_root(&self) -> PhysAddr { - self.pt.root_paddr() - } - - /// Checks if the address space contains the given address range. - pub fn contains_range(&self, start: M::VirtAddr, size: usize) -> bool { - self.va_range - .contains_range(AddrRange::from_start_size(start, size)) - } - - /// Creates a new empty address space. - pub fn new_empty(base: M::VirtAddr, size: usize) -> AxResult { - Ok(Self { - va_range: AddrRange::from_start_size(base, size), - areas: MemorySet::new(), - pt: PageTable64::try_new().map_err(|_| AxError::NoMemory)?, - }) - } - - /// Add a new linear mapping. - /// - /// See [`Backend`] for more details about the mapping backends. - /// - /// The `flags` parameter indicates the mapping permissions and attributes. - pub fn map_linear( - &mut self, - start_vaddr: M::VirtAddr, - start_paddr: PhysAddr, - size: usize, - flags: MappingFlags, - ) -> AxResult { - if !self.contains_range(start_vaddr, size) { - return ax_err!(InvalidInput, "address out of range"); - } - if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) { - return ax_err!(InvalidInput, "address not aligned"); - } - - let offset = start_vaddr.into() - start_paddr.as_usize(); - let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset)); - self.areas - .map(area, &mut self.pt, false) - .map_err(mapping_err_to_ax_err)?; - Ok(()) - } - - /// Add a new allocation mapping. - /// - /// See [`Backend`] for more details about the mapping backends. - /// - /// The `flags` parameter indicates the mapping permissions and attributes. - pub fn map_alloc( - &mut self, - start: M::VirtAddr, - size: usize, - flags: MappingFlags, - populate: bool, - ) -> AxResult { - if !self.contains_range(start, size) { - return ax_err!( - InvalidInput, - alloc::format!("address [{:?}~{:?}] out of range", start, start.into() + size).as_str() - ); - } - if !start.is_aligned_4k() || !is_aligned_4k(size) { - return ax_err!(InvalidInput, "address not aligned"); - } - - let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate)); - self.areas - .map(area, &mut self.pt, false) - .map_err(mapping_err_to_ax_err)?; - Ok(()) - } - - /// Removes mappings within the specified virtual address range. - pub fn unmap(&mut self, start: M::VirtAddr, size: usize) -> AxResult { - if !self.contains_range(start, size) { - return ax_err!(InvalidInput, "address out of range"); - } - if !start.is_aligned_4k() || !is_aligned_4k(size) { - return ax_err!(InvalidInput, "address not aligned"); - } - - self.areas - .unmap(start, size, &mut self.pt) - .map_err(mapping_err_to_ax_err)?; - Ok(()) - } - - /// Removes all mappings in the address space. - pub fn clear(&mut self) { - self.areas.clear(&mut self.pt).unwrap(); - } - - /// Handles a page fault at the given address. - /// - /// `access_flags` indicates the access type that caused the page fault. - /// - /// Returns `true` if the page fault is handled successfully (not a real - /// fault). 
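    /// A sketch of the intended call site in a hypervisor's VM-exit path
    /// (`info` and its field names are illustrative; compare
    /// `NestedPageFaultInfo` in src/lib.rs):
    ///
    /// ```ignore
    /// if aspace.handle_page_fault(info.fault_guest_paddr, info.access_flags) {
    ///     // lazy mapping populated; resume the guest
    /// }
    /// ```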
- pub fn handle_page_fault(&mut self, vaddr: M::VirtAddr, access_flags: MappingFlags) -> bool { - if !self.va_range.contains(vaddr) { - return false; - } - if let Some(area) = self.areas.find(vaddr) { - let orig_flags = area.flags(); - if !orig_flags.contains(access_flags) { - return false; - } - area.backend() - .handle_page_fault(vaddr, orig_flags, &mut self.pt) - } else { - false - } - } - - /// Translates the given `VirtAddr` into `PhysAddr`. - /// - /// Returns `None` if the virtual address is out of range or not mapped. - pub fn translate(&self, vaddr: M::VirtAddr) -> Option { - if !self.va_range.contains(vaddr) { - return None; - } - self.pt - .query(vaddr) - .map(|(phys_addr, _, _)| { - debug!("vaddr {:?} translate to {:?}", vaddr, phys_addr); - phys_addr - }) - .ok() - } - - /// Translate&Copy the given `VirtAddr` with LENGTH len to a mutable u8 Vec through page table. - /// - /// Returns `None` if the virtual address is out of range or not mapped. - pub fn translated_byte_buffer( - &self, - vaddr: M::VirtAddr, - len: usize, - ) -> Option> { - if !self.va_range.contains(vaddr) { - return None; - } - if let Some(area) = self.areas.find(vaddr) { - if len > area.size() { - warn!( - "AddrSpace translated_byte_buffer len {:#x} exceeds area length {:#x}", - len, - area.size() - ); - return None; - } - - let mut start: M::VirtAddr = vaddr; - let end: M::VirtAddr = (start.into() + len).into(); - - debug!( - "start {:?} end {:?} area size {:#x}", - start, - end, - area.size() - ); - - let mut v = Vec::new(); - while start < end { - let (start_paddr, _, page_size) = self.page_table().query(start).unwrap(); - let mut end_va: M::VirtAddr = (start.align_down(page_size).into() + page_size as usize).into(); - end_va = end_va.min(end); - - v.push(unsafe { - core::slice::from_raw_parts_mut( - H::phys_to_virt(start_paddr).as_mut_ptr(), - (end_va.into() - start.into()).into(), - ) - }); - start = end_va; - } - Some(v) - } else { - None - } - } - - /// Translates the given `VirtAddr` into `PhysAddr`, - /// and returns the size of the `MemoryArea` corresponding to the target vaddr. - /// - /// Returns `None` if the virtual address is out of range or not mapped. - pub fn translate_and_get_limit(&self, vaddr: M::VirtAddr) -> Option<(PhysAddr, usize)> { - if !self.va_range.contains(vaddr) { - return None; - } - if let Some(area) = self.areas.find(vaddr) { - self.pt - .query(vaddr) - .map(|(phys_addr, _, _)| (phys_addr, area.size())) - .ok() - } else { - None - } - } -} - -impl fmt::Debug for AddrSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("AddrSpace") - .field("va_range", &self.va_range) - .field("page_table_root", &self.pt.root_paddr()) - .field("areas", &self.areas) - .finish() - } -} - -impl Drop for AddrSpace { - fn drop(&mut self) { - self.clear(); - } -} diff --git a/src/lib.rs b/src/lib.rs index 8c67a39..8c778d0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,21 +7,17 @@ extern crate log; extern crate alloc; -mod address_space; pub mod device; mod frame; mod hal; pub use npt_multiarch::*; -pub use address_space::*; +pub use aspace_generic::*; pub use frame::PhysFrame; pub use hal::AxMmHal; -use axerrno::AxError; -use memory_set::MappingError; - -pub type AddrSpace = address_space::AddrSpace; +pub type AddrSpace = aspace_generic::AddrSpace; /// Information about nested page faults. #[derive(Debug)] @@ -31,12 +27,3 @@ pub struct NestedPageFaultInfo { /// Guest physical address that caused the nested page fault. 
pub fault_guest_paddr: GuestPhysAddr, } - -fn mapping_err_to_ax_err(err: MappingError) -> AxError { - warn!("Mapping error: {:?}", err); - match err { - MappingError::InvalidParam => AxError::InvalidInput, - MappingError::AlreadyExists => AxError::AlreadyExists, - MappingError::BadState => AxError::BadState, - } -}
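After this series the crate is mostly glue: the guest address types and nested page table come from npt_multiarch, the generic address space from aspace_generic, and src/lib.rs only re-exports them alongside the local device/frame/hal modules. Below is a minimal end-to-end sketch of the resulting API. `MyHal`, the single-parameter `AddrSpace<MyHal>` reading of the alias, and all addresses are assumptions for illustration; the alias's real generic arguments are whatever aspace_generic defines.

    use axerrno::AxResult;
    use memory_addr::{PhysAddr, VirtAddr};
    use page_table_multiarch::{MappingFlags, PagingHandler};
    // Re-exported from this crate's src/lib.rs (the crate name is not shown
    // in the series, so the path below is a placeholder):
    // use this_crate::{AddrSpace, GuestPhysAddr};

    /// A hypothetical paging handler backed by the host frame allocator.
    struct MyHal;

    impl PagingHandler for MyHal {
        fn alloc_frame() -> Option<PhysAddr> {
            // e.g. pop a free 4 KiB frame from the host allocator
            unimplemented!()
        }
        fn dealloc_frame(_paddr: PhysAddr) {
            // return the frame to the host allocator
        }
        fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
            // identity-mapped host kernel assumed for this sketch
            VirtAddr::from(paddr.as_usize())
        }
    }

    fn build_guest_aspace() -> AxResult<AddrSpace<MyHal>> {
        // 1 GiB of guest-physical space starting at GPA 0.
        let mut aspace = AddrSpace::new_empty(GuestPhysAddr::from(0usize), 0x4000_0000)?;

        // Guest RAM, populated lazily: frames are allocated in
        // handle_page_fault on first access (populate = false).
        aspace.map_alloc(
            GuestPhysAddr::from(0usize),
            0x2000_0000,
            MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXECUTE,
            false,
        )?;

        // Pass-through MMIO: GPA 0x3000_0000 maps linearly to the same HPA.
        aspace.map_linear(
            GuestPhysAddr::from(0x3000_0000usize),
            PhysAddr::from(0x3000_0000),
            0x1000,
            MappingFlags::READ | MappingFlags::WRITE | MappingFlags::DEVICE,
        )?;

        Ok(aspace)
    }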