
Commit 42262c2

Bump version to v0.10. Update Rust to nightly-2022-02-11 (#547)
1 parent 0c408d3 commit 42262c2

15 files changed: +75 -35 lines changed

CHANGELOG.md

Lines changed: 45 additions & 0 deletions
@@ -1,3 +1,48 @@
+0.10.0 (2022-02-14)
+===
+
+GC Plans
+---
+* Removed plan-specific copy contexts. Now each plan needs to provide a configuration for
+  `GCWorkerCopyContext` (similar to how they configure `Mutator`).
+* Fixed a bug where `needs_log_bit` was always set to `true` for generational plans, regardless of
+  whether their barrier used the log bit or not.
+* Fixed a bug where we may overflow when calculating `get_available_pages()`.
+
+Policies
+---
+* Refactored copy context. Now a copying policy provides its own copy context.
+* Mark sweep and mark compact now use `ObjectIterator` for linear scanning.
+
+Scheduler
+---
+* Introduced `GCController`, a counterpart of `GCWorker`, for the controller thread.
+* Refactored `GCWorker`. `GCWorker` is now separated into two parts: a thread-local part, `GCWorker`,
+  which is owned by GC threads, and a shared part, `GCWorkerShared`, that is shared between GC threads
+  and the scheduler.
+* Refactored the creation of the scheduler and the workers to remove some unnecessary `Option<T>` and `RwLock<T>`.
+
+API
+---
+* Added `process_bulk()`, which allows bindings to pass options as a string of key-value pairs.
+* `ObjectModel::copy()` now takes `CopySemantics` as a parameter.
+* Renamed `Collection::spawn_worker_thread()` to `spawn_gc_thread()`, which is now used to spawn both GC workers and
+  the GC controller.
+* `Collection::out_of_memory()` now takes `AllocationError` as a parameter, which hints to the binding
+  how to handle the OOM error.
+* `Collection::out_of_memory()` now allows a binding to return from the method in the case of a non-critical OOM.
+  If a binding returns, `alloc()` will return a zero address.
+
+Misc
+---
+* Added `ObjectIterator`, which provides linear scanning through a region to iterate over
+  objects using the alloc bit.
+* Added a feature, `work_packet_stats`, to optionally collect work packet statistics. Note that
+  MMTk used to always collect work packet statistics.
+* Optimized access to the SFT map.
+* Fixed a few issues with documentation.
+* The example header file `mmtk.h` now uses the prefix `mmtk_` for all the functions.
+
 0.9.0 (2021-12-16)
 ===
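The two `Collection::out_of_memory()` items in the API section above describe a control flow that is easier to see in code. The sketch below is a self-contained approximation based only on the changelog wording, not the actual mmtk-core trait: the `AllocationError` variant names, their critical/non-critical classification, and the `try_alloc` helper are all assumptions for illustration.

// Stand-ins mirroring the changelog entry; not the real mmtk-core items.
#[derive(Debug)]
enum AllocationError {
    HeapOutOfMemory, // treated here as the non-critical case (an assumption)
    MmapOutOfMemory, // treated here as the critical case (an assumption)
}

// A binding-side hook in the spirit of `Collection::out_of_memory()`.
// Returning from it is now permitted for a non-critical OOM.
fn out_of_memory(err: AllocationError) {
    match err {
        AllocationError::HeapOutOfMemory => {
            // Non-critical: log (or raise a catchable VM-level error) and return;
            // the in-flight allocation will then observe a zero address.
            eprintln!("non-critical OOM: {:?}", err);
        }
        AllocationError::MmapOutOfMemory => {
            // Critical: a real binding would abort or raise a fatal error here.
            eprintln!("critical OOM: {:?}", err);
        }
    }
}

// Placeholder for the binding's call into `alloc()`: on OOM, MMTk calls the
// hook above, and if the hook returns, the allocation yields a zero address.
fn try_alloc(_size: usize) -> usize {
    out_of_memory(AllocationError::HeapOutOfMemory);
    0
}

fn main() {
    let addr = try_alloc(64);
    if addr == 0 {
        // The binding chose to return from out_of_memory(), so the allocation
        // site must handle the zero address itself.
        eprintln!("allocation failed; got a zero address");
    }
}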

Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [package]
 name = "mmtk"
-version = "0.9.0"
+version = "0.10.0"
 authors = ["The MMTk Developers <>"]
 edition = "2018"
 license = "MIT OR Apache-2.0"

rust-toolchain

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-nightly-2021-12-05
+nightly-2022-02-11

src/lib.rs

Lines changed: 0 additions & 3 deletions
@@ -1,12 +1,9 @@
 #![allow(incomplete_features)]
-#![feature(asm)]
 #![feature(integer_atomics)]
 #![feature(is_sorted)]
 #![feature(drain_filter)]
 #![feature(nll)]
 #![feature(box_syntax)]
-#![feature(maybe_uninit_extra)]
-#![feature(get_mut_unchecked)]
 #![feature(arbitrary_self_types)]
 #![feature(associated_type_defaults)]
 #![feature(specialization)]

src/policy/copyspace.rs

Lines changed: 5 additions & 5 deletions
@@ -34,7 +34,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {
     }
 
     fn is_live(&self, object: ObjectReference) -> bool {
-        !self.from_space() || object_forwarding::is_forwarded::<VM>(object)
+        !self.is_from_space() || object_forwarding::is_forwarded::<VM>(object)
     }
 
     fn is_movable(&self) -> bool {
@@ -43,7 +43,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {
 
     #[cfg(feature = "sanity")]
     fn is_sane(&self) -> bool {
-        !self.from_space()
+        !self.is_from_space()
     }
 
     fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) {
@@ -53,7 +53,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {
 
     #[inline(always)]
     fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
-        if !self.from_space() {
+        if !self.is_from_space() {
             return None;
         }
 
@@ -179,7 +179,7 @@ impl<VM: VMBinding> CopySpace<VM> {
         }
     }
 
-    fn from_space(&self) -> bool {
+    fn is_from_space(&self) -> bool {
         self.from_space.load(Ordering::SeqCst)
     }
 
@@ -193,7 +193,7 @@ impl<VM: VMBinding> CopySpace<VM> {
     ) -> ObjectReference {
         trace!("copyspace.trace_object(, {:?}, {:?})", object, semantics,);
         debug_assert!(
-            self.from_space(),
+            self.is_from_space(),
            "Trace object called for object ({:?}) in to-space",
            object
        );

src/policy/largeobjectspace.rs

Lines changed: 2 additions & 2 deletions
@@ -153,7 +153,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
 
     pub fn prepare(&mut self, full_heap: bool) {
         if full_heap {
-            debug_assert!(self.treadmill.from_space_empty());
+            debug_assert!(self.treadmill.is_from_space_empty());
             self.mark_state = MARK_BIT - self.mark_state;
         }
         self.treadmill.flip(full_heap);
@@ -162,7 +162,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
 
     pub fn release(&mut self, full_heap: bool) {
         self.sweep_large_pages(true);
-        debug_assert!(self.treadmill.nursery_empty());
+        debug_assert!(self.treadmill.is_nursery_empty());
         if full_heap {
             self.sweep_large_pages(false);
         }
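The renames in the two policy files above (`from_space()` to `is_from_space()`, `nursery_empty()` to `is_nursery_empty()`, and so on) follow the common Rust convention that boolean predicates carry an `is_`/`has_` prefix. A minimal illustration with a simplified stand-in type, not the real `Treadmill`:

struct Treadmill {
    nursery: Vec<usize>, // simplified stand-in for the real treadmill's nursery set
}

impl Treadmill {
    // A boolean getter named `is_*` reads naturally at the call site:
    // `debug_assert!(treadmill.is_nursery_empty())`.
    fn is_nursery_empty(&self) -> bool {
        self.nursery.is_empty()
    }
}

fn main() {
    let treadmill = Treadmill { nursery: Vec::new() };
    debug_assert!(treadmill.is_nursery_empty());
}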

src/policy/space.rs

Lines changed: 4 additions & 5 deletions
@@ -578,14 +578,13 @@ impl<VM: VMBinding> CommonSpace<VM> {
             extent
         );
 
-        let start: Address;
-        if let VMRequest::Fixed { start: _start, .. } = vmrequest {
-            start = _start;
+        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
+            _start
         } else {
             // FIXME
             //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
-            start = heap.reserve(extent, top);
-        }
+            heap.reserve(extent, top)
+        };
         assert!(
             start == chunk_align_up(start),
             "{} starting on non-aligned boundary: {}",

src/scheduler/stat.rs

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ impl SchedulerStat {
     /// Extract the work-packet name from the full type name.
     /// i.e. simplifies `crate::scheduler::gc_work::SomeWorkPacket<Semispace>` to `SomeWorkPacket`.
     fn work_name(&self, name: &str) -> String {
-        let end_index = name.find('<').unwrap_or_else(|| name.len());
+        let end_index = name.find('<').unwrap_or(name.len());
         let name = name[..end_index].to_owned();
         match name.rfind(':') {
             Some(start_index) => name[(start_index + 1)..end_index].to_owned(),
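The `stat.rs` change swaps `unwrap_or_else(|| name.len())` for `unwrap_or(name.len())`, the form clippy suggests when the fallback is cheap: `unwrap_or` evaluates its argument eagerly, which costs nothing for `str::len`. A standalone sketch of the same pattern (the helper name below is ours, not from the crate):

// Returns the index where the work packet's generic parameters begin, or the
// end of the string if there are none; mirrors the diffed line above.
fn work_name_end(name: &str) -> usize {
    // `name.len()` is trivially cheap, so the eager `unwrap_or` is preferable;
    // the closure form `unwrap_or_else` only pays off for expensive fallbacks.
    name.find('<').unwrap_or(name.len())
}

fn main() {
    assert_eq!(work_name_end("SomeWorkPacket<Semispace>"), 14);
    assert_eq!(work_name_end("SomeWorkPacket"), 14);
}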

src/util/analysis/obj_size.rs

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ impl<VM: VMBinding> RtAnalysis<VM> for PerSizeClassObjectCounter {
         match c {
             None => {
                 // Create (and increment) the counter associated with the size class if it doesn't exist
-                let ctr = new_ctr!(stats, size_classes, &size_class);
+                let ctr = new_ctr!(stats, size_classes, size_class);
                 ctr.lock().unwrap().inc();
             }
             Some(ctr) => {

src/util/heap/freelistpageresource.rs

Lines changed: 1 addition & 2 deletions
@@ -271,11 +271,10 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
         self.free_list.set_uncoalescable(region_start as _);
         self.free_list.set_uncoalescable(region_end as i32 + 1);
         for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
-            let liberated;
             if p != region_start {
                 self.free_list.clear_uncoalescable(p as _);
             }
-            liberated = self.free_list.free(p as _, true); // add chunk to our free list
+            let liberated = self.free_list.free(p as _, true); // add chunk to our free list
             debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
             if self.meta_data_pages_per_region > 1 {
                 let meta_data_pages_per_region = self.meta_data_pages_per_region;
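The `space.rs` and `freelistpageresource.rs` hunks above share a theme: a variable that used to be declared first and assigned later (`let start: Address;`, `let liberated;`) is now bound at the point where its value is produced, in `space.rs` via an `if let`/`else` expression. A generic sketch of that idiom, using made-up types rather than MMTk's `VMRequest` and `Address`:

// Made-up stand-ins for the purpose of illustrating the idiom.
enum Request {
    Fixed { start: usize },
    Unrestricted,
}

fn choose_start(request: Request, next_free: usize) -> usize {
    // Binding through an `if let` expression: every branch must produce a
    // value, so the compiler proves `start` is initialized on all paths and
    // the separate uninitialized `let start;` declaration is unnecessary.
    let start = if let Request::Fixed { start } = request {
        start
    } else {
        next_free
    };
    start
}

fn main() {
    assert_eq!(choose_start(Request::Fixed { start: 0x1000 }, 0x2000), 0x1000);
    assert_eq!(choose_start(Request::Unrestricted, 0x2000), 0x2000);
}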

0 commit comments
