diff --git a/mmtk/src/api.rs b/mmtk/src/api.rs
index c0e8317d..58949a56 100644
--- a/mmtk/src/api.rs
+++ b/mmtk/src/api.rs
@@ -343,7 +343,7 @@ pub extern "C" fn mmtk_object_reference_write_pre(
) {
mutator
.barrier()
- .object_reference_write_pre(src, slot, target);
+ .object_reference_write_pre(src, slot.into(), target);
}
/// Full post barrier
@@ -356,7 +356,7 @@ pub extern "C" fn mmtk_object_reference_write_post(
) {
mutator
.barrier()
- .object_reference_write_post(src, slot, target);
+ .object_reference_write_post(src, slot.into(), target);
}
/// Barrier slow-path call
@@ -369,7 +369,7 @@ pub extern "C" fn mmtk_object_reference_write_slow(
) {
mutator
.barrier()
- .object_reference_write_slow(src, slot, target);
+ .object_reference_write_slow(src, slot.into(), target);
}
/// Array-copy pre-barrier
@@ -383,7 +383,7 @@ pub extern "C" fn mmtk_array_copy_pre(
let bytes = count << LOG_BYTES_IN_ADDRESS;
mutator
.barrier()
- .memory_region_copy_pre(src..src + bytes, dst..dst + bytes);
+ .memory_region_copy_pre((src..src + bytes).into(), (dst..dst + bytes).into());
}
/// Array-copy post-barrier
@@ -397,7 +397,7 @@ pub extern "C" fn mmtk_array_copy_post(
let bytes = count << LOG_BYTES_IN_ADDRESS;
mutator
.barrier()
- .memory_region_copy_post(src..src + bytes, dst..dst + bytes);
+ .memory_region_copy_post((src..src + bytes).into(), (dst..dst + bytes).into());
}
/// C2 Slowpath allocation barrier
diff --git a/mmtk/src/edges.rs b/mmtk/src/edges.rs
new file mode 100644
index 00000000..44f666d1
--- /dev/null
+++ b/mmtk/src/edges.rs
@@ -0,0 +1,103 @@
+use std::ops::Range;
+
+use mmtk::{
+ util::{Address, ObjectReference},
+ vm::edge_shape::{AddressRangeIterator, Edge, MemorySlice},
+};
+
+/// The type of edges in OpenJDK.
+/// Currently it has the same layout as `Address`, but we override its load and store methods.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[repr(transparent)]
+pub struct OpenJDKEdge {
+ pub addr: Address,
+}
+
+impl From<Address> for OpenJDKEdge {
+ fn from(value: Address) -> Self {
+ Self { addr: value }
+ }
+}
+
+impl Edge for OpenJDKEdge {
+ fn load(&self) -> ObjectReference {
+ if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+ // Workaround: On x86 (including x86_64), machine instructions may contain pointers as
+ // immediates, and they may be unaligned. It is an undefined behavior in Rust to
+ // dereference unaligned pointers. We have to explicitly use unaligned memory access
+ // methods. On x86, ordinary MOV instructions can load and store memory at unaligned
+ // addresses, so we expect `ptr.read_unaligned()` to have no performance penalty over
+ // `ptr.read()` if `ptr` is actually aligned.
+ unsafe {
+            let ptr = self.addr.to_ptr::<ObjectReference>();
+ ptr.read_unaligned()
+ }
+ } else {
+ unsafe { self.addr.load() }
+ }
+ }
+
+ fn store(&self, object: ObjectReference) {
+ if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+ unsafe {
+            let ptr = self.addr.to_mut_ptr::<ObjectReference>();
+ ptr.write_unaligned(object)
+ }
+ } else {
+ unsafe { self.addr.store(object) }
+ }
+ }
+}
+
+/// A range of OpenJDKEdge, usually used for arrays.
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct OpenJDKEdgeRange {
+    range: Range<Address>,
+}
+
+impl From<Range<Address>> for OpenJDKEdgeRange {
+    fn from(value: Range<Address>) -> Self {
+ Self { range: value }
+ }
+}
+
+pub struct OpenJDKEdgeRangeIterator {
+ inner: AddressRangeIterator,
+}
+
+impl Iterator for OpenJDKEdgeRangeIterator {
+ type Item = OpenJDKEdge;
+
+    fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next().map(|a| a.into())
+ }
+}
+
+// Note that we cannot implement MemorySlice for `Range<OpenJDKEdge>` because neither
+// `MemorySlice` nor `Range` are defined in the `mmtk-openjdk` crate. ("orphan rule")
+impl MemorySlice for OpenJDKEdgeRange {
+ type Edge = OpenJDKEdge;
+ type EdgeIterator = OpenJDKEdgeRangeIterator;
+
+ fn iter_edges(&self) -> Self::EdgeIterator {
+ OpenJDKEdgeRangeIterator {
+ inner: self.range.iter_edges(),
+ }
+ }
+
+    fn object(&self) -> Option<ObjectReference> {
+ self.range.object()
+ }
+
+ fn start(&self) -> Address {
+ self.range.start()
+ }
+
+ fn bytes(&self) -> usize {
+ self.range.bytes()
+ }
+
+ fn copy(src: &Self, tgt: &Self) {
+ MemorySlice::copy(&src.range, &tgt.range)
+ }
+}
diff --git a/mmtk/src/gc_work.rs b/mmtk/src/gc_work.rs
index 4273f9f7..b9496410 100644
--- a/mmtk/src/gc_work.rs
+++ b/mmtk/src/gc_work.rs
@@ -59,7 +59,7 @@ impl<F: RootsWorkFactory<OpenJDKEdge>> GCWork<OpenJDK> for ScanCodeCacheRoots<F>
let mut edges = Vec::with_capacity(crate::CODE_CACHE_ROOTS_SIZE.load(Ordering::Relaxed));
for roots in (*crate::CODE_CACHE_ROOTS.lock().unwrap()).values() {
for r in roots {
- edges.push(*r)
+ edges.push((*r).into())
}
}
// Create work packet
diff --git a/mmtk/src/lib.rs b/mmtk/src/lib.rs
index 8c7bd9d9..7a4d9e2c 100644
--- a/mmtk/src/lib.rs
+++ b/mmtk/src/lib.rs
@@ -2,11 +2,11 @@
extern crate lazy_static;
use std::collections::HashMap;
-use std::ops::Range;
use std::ptr::null_mut;
use std::sync::atomic::AtomicUsize;
use std::sync::Mutex;
+use edges::{OpenJDKEdge, OpenJDKEdgeRange};
use libc::{c_char, c_void, uintptr_t};
use mmtk::util::alloc::AllocationError;
use mmtk::util::opaque_pointer::*;
@@ -19,6 +19,7 @@ pub mod active_plan;
pub mod api;
mod build_info;
pub mod collection;
+mod edges;
mod gc_work;
pub mod object_model;
mod object_scanning;
@@ -135,13 +136,6 @@ pub static FREE_LIST_ALLOCATOR_SIZE: uintptr_t =
#[derive(Default)]
pub struct OpenJDK;
-/// The type of edges in OpenJDK.
-///
-/// TODO: We currently make it an alias to Address to make the change minimal.
-/// If we support CompressedOOPs, we should define an enum type to support both
-/// compressed and uncompressed OOPs.
-pub type OpenJDKEdge = Address;
-
impl VMBinding for OpenJDK {
type VMObjectModel = object_model::VMObjectModel;
type VMScanning = scanning::VMScanning;
@@ -150,7 +144,7 @@ impl VMBinding for OpenJDK {
type VMReferenceGlue = reference_glue::VMReferenceGlue;
type VMEdge = OpenJDKEdge;
-    type VMMemorySlice = Range<Address>;
+ type VMMemorySlice = OpenJDKEdgeRange;
const MIN_ALIGNMENT: usize = 8;
const MAX_ALIGNMENT: usize = 8;
diff --git a/mmtk/src/object_scanning.rs b/mmtk/src/object_scanning.rs
index 6ca17e7f..a738a934 100644
--- a/mmtk/src/object_scanning.rs
+++ b/mmtk/src/object_scanning.rs
@@ -16,7 +16,7 @@ impl OopIterate for OopMapBlock {
    fn oop_iterate(&self, oop: Oop, closure: &mut impl EdgeVisitor<OpenJDKEdge>) {
let start = oop.get_field_address(self.offset);
for i in 0..self.count as usize {
- let edge = start + (i << LOG_BYTES_IN_ADDRESS);
+ let edge = (start + (i << LOG_BYTES_IN_ADDRESS)).into();
closure.visit_edge(edge);
}
}
@@ -66,7 +66,7 @@ impl OopIterate for InstanceMirrorKlass {
let len = Self::static_oop_field_count(oop);
let slice = unsafe { slice::from_raw_parts(start, len as _) };
for oop in slice {
- closure.visit_edge(Address::from_ref(oop as &Oop));
+ closure.visit_edge(Address::from_ref(oop as &Oop).into());
}
}
}
@@ -88,7 +88,7 @@ impl OopIterate for ObjArrayKlass {
    fn oop_iterate(&self, oop: Oop, closure: &mut impl EdgeVisitor<OpenJDKEdge>) {
let array = unsafe { oop.as_array_oop() };
        for oop in unsafe { array.data::<Oop>(BasicType::T_OBJECT) } {
- closure.visit_edge(Address::from_ref(oop as &Oop));
+ closure.visit_edge(Address::from_ref(oop as &Oop).into());
}
}
}
@@ -133,9 +133,9 @@ impl InstanceRefKlass {
}
    fn process_ref_as_strong(oop: Oop, closure: &mut impl EdgeVisitor<OpenJDKEdge>) {
let referent_addr = Self::referent_address(oop);
- closure.visit_edge(referent_addr);
+ closure.visit_edge(referent_addr.into());
let discovered_addr = Self::discovered_address(oop);
- closure.visit_edge(discovered_addr);
+ closure.visit_edge(discovered_addr.into());
}
}
diff --git a/mmtk/src/scanning.rs b/mmtk/src/scanning.rs
index 9d6955d1..d92d5dcd 100644
--- a/mmtk/src/scanning.rs
+++ b/mmtk/src/scanning.rs
@@ -20,7 +20,9 @@ extern "C" fn report_edges_and_renew_buffer>(
factory_ptr: *mut libc::c_void,
) -> NewBuffer {
if !ptr.is_null() {
-        let buf = unsafe { Vec::<Address>::from_raw_parts(ptr, length, capacity) };
+ // Note: Currently OpenJDKEdge has the same layout as Address. If the layout changes, we
+ // should fix the Rust-to-C interface.
+        let buf = unsafe { Vec::<OpenJDKEdge>::from_raw_parts(ptr as _, length, capacity) };
let factory: &mut F = unsafe { &mut *(factory_ptr as *mut F) };
factory.create_process_edge_roots_work(buf);
}