Commit c677418: Move edge types into a dedicated mod

1 parent: f95a693

2 files changed (+105, -99 lines)

mmtk/src/edges.rs

Lines changed: 103 additions & 0 deletions

@@ -0,0 +1,103 @@
use std::ops::Range;

use mmtk::{
    util::{Address, ObjectReference},
    vm::edge_shape::{AddressRangeIterator, Edge, MemorySlice},
};

/// The type of edges in OpenJDK.
/// Currently it has the same layout as `Address`, but we override its load and store methods.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[repr(transparent)]
pub struct OpenJDKEdge {
    pub addr: Address,
}

impl From<Address> for OpenJDKEdge {
    fn from(value: Address) -> Self {
        Self { addr: value }
    }
}

impl Edge for OpenJDKEdge {
    fn load(&self) -> ObjectReference {
        if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
            // Workaround: On x86 (including x86_64), machine instructions may contain pointers as
            // immediates, and they may be unaligned. It is undefined behavior in Rust to
            // dereference unaligned pointers. We have to explicitly use unaligned memory access
            // methods. On x86, ordinary MOV instructions can load and store memory at unaligned
            // addresses, so we expect `ptr.read_unaligned()` to have no performance penalty over
            // `ptr.read()` if `ptr` is actually aligned.
            unsafe {
                let ptr = self.addr.to_ptr::<ObjectReference>();
                ptr.read_unaligned()
            }
        } else {
            unsafe { self.addr.load() }
        }
    }

    fn store(&self, object: ObjectReference) {
        if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
            unsafe {
                let ptr = self.addr.to_mut_ptr::<ObjectReference>();
                ptr.write_unaligned(object)
            }
        } else {
            unsafe { self.addr.store(object) }
        }
    }
}

/// A range of `OpenJDKEdge`, usually used for arrays.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct OpenJDKEdgeRange {
    range: Range<Address>,
}

impl From<Range<Address>> for OpenJDKEdgeRange {
    fn from(value: Range<Address>) -> Self {
        Self { range: value }
    }
}

pub struct OpenJDKEdgeRangeIterator {
    inner: AddressRangeIterator,
}

impl Iterator for OpenJDKEdgeRangeIterator {
    type Item = OpenJDKEdge;

    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next().map(|a| a.into())
    }
}

// Note that we cannot implement MemorySlice for `Range<OpenJDKEdge>` because neither
// `MemorySlice` nor `Range<T>` is defined in the `mmtk-openjdk` crate. ("orphan rule")
impl MemorySlice for OpenJDKEdgeRange {
    type Edge = OpenJDKEdge;
    type EdgeIterator = OpenJDKEdgeRangeIterator;

    fn iter_edges(&self) -> Self::EdgeIterator {
        OpenJDKEdgeRangeIterator {
            inner: self.range.iter_edges(),
        }
    }

    fn object(&self) -> Option<ObjectReference> {
        self.range.object()
    }

    fn start(&self) -> Address {
        self.range.start()
    }

    fn bytes(&self) -> usize {
        self.range.bytes()
    }

    fn copy(src: &Self, tgt: &Self) {
        MemorySlice::copy(&src.range, &tgt.range)
    }
}
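
As context for the `load`/`store` workaround above: in Rust, dereferencing a misaligned pointer with `ptr.read()` is undefined behavior, while `ptr.read_unaligned()` is defined for any address. A minimal standalone sketch of the distinction (illustrative only, not part of this commit):

    // Read a usize from a deliberately misaligned address.
    // `read_unaligned` is defined behavior here; a plain `read` would be UB.
    fn main() {
        let buf = [0u8; 16];
        // Offset by one byte so the pointer is no longer usize-aligned.
        let ptr = unsafe { buf.as_ptr().add(1) } as *const usize;
        let value = unsafe { ptr.read_unaligned() };
        println!("{value:#x}"); // prints 0x0
    }

On x86, both reads compile to an ordinary MOV, which is why the comment expects no penalty from `read_unaligned` when the address happens to be aligned.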

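The "orphan rule" comment in `edges.rs` can also be made concrete: Rust's coherence rules reject an impl of a foreign trait for a foreign type, which is why the binding wraps `Range<Address>` in the local `OpenJDKEdgeRange` newtype instead. A self-contained sketch of the same pattern, with `Display` and `Range<u32>` standing in for `MemorySlice` and `Range<Address>`:

    use std::fmt::{self, Display};
    use std::ops::Range;

    // `impl Display for Range<u32>` would be rejected (error E0117): both the
    // trait and the type live in other crates. A local newtype makes it legal.
    struct MyRange(Range<u32>);

    impl Display for MyRange {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "[{}, {})", self.0.start, self.0.end)
        }
    }

    fn main() {
        println!("{}", MyRange(3..7)); // prints "[3, 7)"
    }
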
mmtk/src/lib.rs

Lines changed: 2 additions & 99 deletions

@@ -2,16 +2,15 @@
 extern crate lazy_static;

 use std::collections::HashMap;
-use std::ops::Range;
 use std::ptr::null_mut;
 use std::sync::atomic::AtomicUsize;
 use std::sync::Mutex;

+use edges::{OpenJDKEdge, OpenJDKEdgeRange};
 use libc::{c_char, c_void, uintptr_t};
 use mmtk::util::alloc::AllocationError;
 use mmtk::util::opaque_pointer::*;
 use mmtk::util::{Address, ObjectReference};
-use mmtk::vm::edge_shape::{AddressRangeIterator, Edge, MemorySlice};
 use mmtk::vm::VMBinding;
 use mmtk::{MMTKBuilder, Mutator, MMTK};

@@ -20,6 +19,7 @@ pub mod active_plan;
 pub mod api;
 mod build_info;
 pub mod collection;
+mod edges;
 mod gc_work;
 pub mod object_model;
 mod object_scanning;
@@ -136,103 +136,6 @@ pub static FREE_LIST_ALLOCATOR_SIZE: uintptr_t =
 #[derive(Default)]
 pub struct OpenJDK;

-/// The type of edges in OpenJDK.
-/// Currently it has the same layout as `Address`, but we override its load and store methods.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
-#[repr(transparent)]
-pub struct OpenJDKEdge {
-    pub addr: Address,
-}
-
-impl From<Address> for OpenJDKEdge {
-    fn from(value: Address) -> Self {
-        Self { addr: value }
-    }
-}
-
-impl Edge for OpenJDKEdge {
-    fn load(&self) -> ObjectReference {
-        if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
-            // Workaround: On x86 (including x86_64), machine instructions may contain pointers as
-            // immediates, and they may be unaligned. It is undefined behavior in Rust to
-            // dereference unaligned pointers. We have to explicitly use unaligned memory access
-            // methods. On x86, ordinary MOV instructions can load and store memory at unaligned
-            // addresses, so we expect `ptr.read_unaligned()` to have no performance penalty over
-            // `ptr.read()` if `ptr` is actually aligned.
-            unsafe {
-                let ptr = self.addr.to_ptr::<ObjectReference>();
-                ptr.read_unaligned()
-            }
-        } else {
-            unsafe { self.addr.load() }
-        }
-    }
-
-    fn store(&self, object: ObjectReference) {
-        if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
-            unsafe {
-                let ptr = self.addr.to_mut_ptr::<ObjectReference>();
-                ptr.write_unaligned(object)
-            }
-        } else {
-            unsafe { self.addr.store(object) }
-        }
-    }
-}
-
-/// A range of `OpenJDKEdge`, usually used for arrays.
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
-pub struct OpenJDKEdgeRange {
-    range: Range<Address>,
-}
-
-impl From<Range<Address>> for OpenJDKEdgeRange {
-    fn from(value: Range<Address>) -> Self {
-        Self { range: value }
-    }
-}
-
-pub struct OpenJDKEdgeRangeIterator {
-    inner: AddressRangeIterator,
-}
-
-impl Iterator for OpenJDKEdgeRangeIterator {
-    type Item = OpenJDKEdge;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        self.inner.next().map(|a| a.into())
-    }
-}
-
-// Note that we cannot implement MemorySlice for `Range<OpenJDKEdge>` because neither
-// `MemorySlice` nor `Range<T>` is defined in the `mmtk-openjdk` crate. ("orphan rule")
-impl MemorySlice for OpenJDKEdgeRange {
-    type Edge = OpenJDKEdge;
-    type EdgeIterator = OpenJDKEdgeRangeIterator;
-
-    fn iter_edges(&self) -> Self::EdgeIterator {
-        OpenJDKEdgeRangeIterator {
-            inner: self.range.iter_edges(),
-        }
-    }
-
-    fn object(&self) -> Option<ObjectReference> {
-        self.range.object()
-    }
-
-    fn start(&self) -> Address {
-        self.range.start()
-    }
-
-    fn bytes(&self) -> usize {
-        self.range.bytes()
-    }
-
-    fn copy(src: &Self, tgt: &Self) {
-        MemorySlice::copy(&src.range, &tgt.range)
-    }
-}
-
 impl VMBinding for OpenJDK {
     type VMObjectModel = object_model::VMObjectModel;
     type VMScanning = scanning::VMScanning;
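
The `impl VMBinding` block is truncated in the hunk above. Presumably the moved types are plugged back in through the binding's edge-related associated types, which is what the new `use edges::{OpenJDKEdge, OpenJDKEdgeRange};` import is for. A hedged sketch of that wiring, assuming the `VMEdge`/`VMMemorySlice` associated types of the mmtk-core API of this period:

    impl VMBinding for OpenJDK {
        type VMObjectModel = object_model::VMObjectModel;
        type VMScanning = scanning::VMScanning;
        // ... other associated types elided ...
        type VMEdge = OpenJDKEdge;             // now lives in the `edges` module
        type VMMemorySlice = OpenJDKEdgeRange; // ditto
        // ...
    }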
