diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs
index bbbf6fe6f5..3c90f4d59d 100644
--- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs
+++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs
@@ -121,6 +121,12 @@ impl<VM: VMBinding> Plan for MyGC<VM> {
     }
     // ANCHOR_END: release
 
+    // ANCHOR: end_of_gc
+    fn end_of_gc(&mut self, tls: VMWorkerThread) {
+        self.common.end_of_gc(tls);
+    }
+    // ANCHOR_END: end_of_gc
+
     // Modify
     // ANCHOR: plan_get_collection_reserve
     fn get_collection_reserved_pages(&self) -> usize {
diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs b/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs
index 9cd110392f..5cd416069e 100644
--- a/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs
+++ b/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs
@@ -19,17 +19,17 @@ use enum_map::EnumMap;
 
 // Add
 pub fn mygc_mutator_prepare<VM: VMBinding>(
-    _mutator: &mut Mutator<VM>,
-    _tls: VMWorkerThread,
+    mutator: &mut Mutator<VM>,
+    tls: VMWorkerThread,
 ) {
-    // Do nothing
+    crate::plan::mutator_context::common_prepare_func::<VM>(mutator, tls);
 }
 
 // Add
 // ANCHOR: release
 pub fn mygc_mutator_release<VM: VMBinding>(
     mutator: &mut Mutator<VM>,
-    _tls: VMWorkerThread,
+    tls: VMWorkerThread,
 ) {
     // rebind the allocation bump pointer to the appropriate semispace
     let bump_allocator = unsafe {
@@ -46,6 +46,8 @@ pub fn mygc_mutator_release<VM: VMBinding>(
             .unwrap()
             .tospace(),
     );
+
+    crate::plan::mutator_context::common_release_func::<VM>(mutator, tls);
 }
 // ANCHOR_END: release
diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs
index faa4d73266..8bdd002d23 100644
--- a/src/plan/generational/copying/global.rs
+++ b/src/plan/generational/copying/global.rs
@@ -114,9 +114,10 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
         }
     }
 
-    fn end_of_gc(&mut self, _tls: VMWorkerThread) {
+    fn end_of_gc(&mut self, tls: VMWorkerThread) {
         self.gen
             .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self));
+        self.gen.common.end_of_gc(tls);
     }
 
     fn get_collection_reserved_pages(&self) -> usize {
diff --git a/src/plan/generational/copying/mutator.rs b/src/plan/generational/copying/mutator.rs
index 668ccb2cc9..31e1d341f1 100644
--- a/src/plan/generational/copying/mutator.rs
+++ b/src/plan/generational/copying/mutator.rs
@@ -3,9 +3,9 @@ use super::GenCopy;
 use crate::plan::barriers::ObjectBarrier;
 use crate::plan::generational::barrier::GenObjectBarrierSemantics;
 use crate::plan::generational::create_gen_space_mapping;
-use crate::plan::mutator_context::unreachable_prepare_func;
 use crate::plan::mutator_context::Mutator;
 use crate::plan::mutator_context::MutatorConfig;
+use crate::plan::mutator_context::{common_prepare_func, common_release_func};
 use crate::plan::AllocationSemantics;
 use crate::util::alloc::allocators::Allocators;
 use crate::util::alloc::BumpAllocator;
@@ -13,7 +13,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread};
 use crate::vm::VMBinding;
 use crate::MMTK;
 
-pub fn gencopy_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
+pub fn gencopy_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
     // reset nursery allocator
     let bump_allocator = unsafe {
         mutator
@@ -23,6 +23,8 @@ pub fn gencopy_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: V
     .downcast_mut::<BumpAllocator<VM>>()
     .unwrap();
     bump_allocator.reset();
+
+    common_release_func(mutator, tls);
 }
 
 pub fn create_gencopy_mutator<VM: VMBinding>(
@@ -36,7 +38,7 @@ pub fn create_gencopy_mutator<VM: VMBinding>(
             mmtk.get_plan(),
             &gencopy.gen.nursery,
         )),
-        prepare_func: &unreachable_prepare_func,
+        prepare_func: &common_prepare_func,
         release_func: &gencopy_mutator_release,
     };
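The pattern above repeats throughout this patch: each plan's release function first does its plan-specific work (resetting or rebinding its bump allocator), then forwards to the common hook so the shared non-moving allocator is handled in one place. A minimal standalone sketch of that shape — the types and names below are illustrative stand-ins, not MMTk APIs:

    // Sketch only: a MutatorConfig-style pair where the plan-specific release
    // runs first, then forwards to the common release, as this patch does.
    struct Mutator {
        bump_cursor: usize,
    }

    fn common_release(_m: &mut Mutator) {
        // In the real patch this releases the non-moving free-list allocator.
        println!("common release");
    }

    // Plan-specific work first, then forward, mirroring gencopy_mutator_release.
    fn gencopy_like_release(m: &mut Mutator) {
        m.bump_cursor = 0; // reset nursery allocator
        common_release(m);
    }

    struct MutatorConfig {
        release_func: &'static (dyn Fn(&mut Mutator) + Sync),
    }

    fn main() {
        let config = MutatorConfig { release_func: &gencopy_like_release };
        let mut m = Mutator { bump_cursor: 42 };
        (config.release_func)(&mut m);
    }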
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs
index 97b09b15fd..ff1d37dd85 100644
--- a/src/plan/generational/immix/global.rs
+++ b/src/plan/generational/immix/global.rs
@@ -146,9 +146,10 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
             .store(full_heap, Ordering::Relaxed);
     }
 
-    fn end_of_gc(&mut self, _tls: VMWorkerThread) {
+    fn end_of_gc(&mut self, tls: VMWorkerThread) {
         self.gen
             .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self));
+        self.gen.common.end_of_gc(tls);
     }
 
     fn get_collection_reserved_pages(&self) -> usize {
diff --git a/src/plan/generational/immix/mutator.rs b/src/plan/generational/immix/mutator.rs
index fdda23cd8b..3e0b062636 100644
--- a/src/plan/generational/immix/mutator.rs
+++ b/src/plan/generational/immix/mutator.rs
@@ -3,9 +3,9 @@ use crate::plan::barriers::ObjectBarrier;
 use crate::plan::generational::barrier::GenObjectBarrierSemantics;
 use crate::plan::generational::create_gen_space_mapping;
 use crate::plan::generational::immix::GenImmix;
-use crate::plan::mutator_context::unreachable_prepare_func;
 use crate::plan::mutator_context::Mutator;
 use crate::plan::mutator_context::MutatorConfig;
+use crate::plan::mutator_context::{common_prepare_func, common_release_func};
 use crate::plan::AllocationSemantics;
 use crate::util::alloc::allocators::Allocators;
 use crate::util::alloc::BumpAllocator;
@@ -13,7 +13,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread};
 use crate::vm::VMBinding;
 use crate::MMTK;
 
-pub fn genimmix_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
+pub fn genimmix_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
     // reset nursery allocator
     let bump_allocator = unsafe {
         mutator
@@ -23,6 +23,8 @@ pub fn genimmix_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls:
     .downcast_mut::<BumpAllocator<VM>>()
     .unwrap();
     bump_allocator.reset();
+
+    common_release_func(mutator, tls);
 }
 
 pub fn create_genimmix_mutator<VM: VMBinding>(
@@ -36,7 +38,7 @@ pub fn create_genimmix_mutator<VM: VMBinding>(
             mmtk.get_plan(),
             &genimmix.gen.nursery,
         )),
-        prepare_func: &unreachable_prepare_func,
+        prepare_func: &common_prepare_func,
         release_func: &genimmix_mutator_release,
     };
diff --git a/src/plan/global.rs b/src/plan/global.rs
index 5b28c4b720..7bd6723fe3 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -7,6 +7,7 @@ use crate::plan::tracing::ObjectQueue;
 use crate::plan::Mutator;
 use crate::policy::immortalspace::ImmortalSpace;
 use crate::policy::largeobjectspace::LargeObjectSpace;
+use crate::policy::marksweepspace::native_ms::MarkSweepSpace;
 use crate::policy::space::{PlanCreateSpaceArgs, Space};
 #[cfg(feature = "vm_space")]
 use crate::policy::vmspace::VMSpace;
@@ -197,7 +198,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
 
     /// Inform the plan about the end of a GC. It is guaranteed that there is no further work for this GC.
     /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method.
-    fn end_of_gc(&mut self, _tls: VMWorkerThread) {}
+    fn end_of_gc(&mut self, _tls: VMWorkerThread);
 
     /// Notify the plan that an emergency collection will happen. The plan should try to free as much memory as possible.
     /// The default implementation will force a full heap collection for generational plans.
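Removing the empty default body on `Plan::end_of_gc` is what drives the rest of this patch: a plan that forgets to propagate the call now fails to compile instead of silently skipping the non-moving space's sweep. A minimal standalone illustration of that trait change — not MMTk code, names are stand-ins:

    // Sketch only: a required trait method forces every plan to forward.
    trait Plan {
        // Before: `fn end_of_gc(&mut self) {}` let implementors forget it.
        // After: every implementor must decide what to do.
        fn end_of_gc(&mut self);
    }

    struct Common;
    impl Common {
        fn end_of_gc(&mut self) {
            println!("sweep the non-moving space");
        }
    }

    struct SemiSpaceLike {
        common: Common,
    }

    impl Plan for SemiSpaceLike {
        fn end_of_gc(&mut self) {
            self.common.end_of_gc(); // forward, as the plans in this patch do
        }
    }

    fn main() {
        let mut plan = SemiSpaceLike { common: Common };
        plan.end_of_gc();
    }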
@@ -521,6 +522,10 @@ impl<VM: VMBinding> BasePlan<VM> {
         self.vm_space.release();
     }
 
+    pub fn end_of_gc(&mut self, _tls: VMWorkerThread) {
+        // Not doing anything special here.
+    }
+
     pub(crate) fn collection_required<P: Plan>(&self, plan: &P, space_full: bool) -> bool {
         let stress_force_gc =
             crate::util::heap::gc_trigger::GCTrigger::<VM>::should_do_stress_gc_inner(
@@ -561,9 +566,8 @@ pub struct CommonPlan<VM: VMBinding> {
     pub immortal: ImmortalSpace<VM>,
     #[space]
     pub los: LargeObjectSpace<VM>,
-    // TODO: We should use a marksweep space for nonmoving.
     #[space]
-    pub nonmoving: ImmortalSpace<VM>,
+    pub nonmoving: MarkSweepSpace<VM>,
     #[parent]
     pub base: BasePlan<VM>,
 }
@@ -580,7 +584,7 @@ impl<VM: VMBinding> CommonPlan<VM> {
                 args.get_space_args("los", true, VMRequest::discontiguous()),
                 false,
             ),
-            nonmoving: ImmortalSpace::new(args.get_space_args(
+            nonmoving: MarkSweepSpace::new(args.get_space_args(
                 "nonmoving",
                 true,
                 VMRequest::discontiguous(),
@@ -631,6 +635,11 @@ impl<VM: VMBinding> CommonPlan<VM> {
         self.base.release(tls, full_heap)
     }
 
+    pub fn end_of_gc(&mut self, tls: VMWorkerThread) {
+        self.nonmoving.end_of_gc();
+        self.base.end_of_gc(tls);
+    }
+
     pub fn get_immortal(&self) -> &ImmortalSpace<VM> {
         &self.immortal
     }
@@ -639,7 +648,7 @@ impl<VM: VMBinding> CommonPlan<VM> {
         &self.los
     }
 
-    pub fn get_nonmoving(&self) -> &ImmortalSpace<VM> {
+    pub fn get_nonmoving(&self) -> &MarkSweepSpace<VM> {
         &self.nonmoving
     }
 }
diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs
index 0741ba3a99..981b2991bf 100644
--- a/src/plan/immix/global.rs
+++ b/src/plan/immix/global.rs
@@ -41,7 +41,6 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints {
     moves_objects: crate::policy::immix::DEFRAG,
     // Max immix object size is half of a block.
     max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
-    needs_prepare_mutator: false,
     ..PlanConstraints::default()
 };
 
@@ -97,6 +96,10 @@ impl<VM: VMBinding> Plan for Immix<VM> {
             .store(self.immix_space.release(true), Ordering::Relaxed);
     }
 
+    fn end_of_gc(&mut self, tls: VMWorkerThread) {
+        self.common.end_of_gc(tls);
+    }
+
     fn get_collection_reserved_pages(&self) -> usize {
         self.immix_space.defrag_headroom_pages()
     }
diff --git a/src/plan/immix/mutator.rs b/src/plan/immix/mutator.rs
index 0df443c9cd..9e38447698 100644
--- a/src/plan/immix/mutator.rs
+++ b/src/plan/immix/mutator.rs
@@ -1,10 +1,10 @@
 use super::Immix;
 use crate::plan::mutator_context::create_allocator_mapping;
 use crate::plan::mutator_context::create_space_mapping;
-use crate::plan::mutator_context::unreachable_prepare_func;
 use crate::plan::mutator_context::Mutator;
 use crate::plan::mutator_context::MutatorConfig;
 use crate::plan::mutator_context::ReservedAllocators;
+use crate::plan::mutator_context::{common_prepare_func, common_release_func};
 use crate::plan::AllocationSemantics;
 use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
 use crate::util::alloc::ImmixAllocator;
@@ -16,7 +16,7 @@ use crate::{
 };
 use enum_map::EnumMap;
 
-pub fn immix_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
+pub fn immix_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
     let immix_allocator = unsafe {
         mutator
             .allocators
@@ -25,6 +25,8 @@ pub fn immix_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VM
     .downcast_mut::<ImmixAllocator<VM>>()
     .unwrap();
     immix_allocator.reset();
+
+    common_release_func(mutator, tls)
 }
 
 pub(in crate::plan) const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators {
@@ -52,7 +54,7 @@ pub fn create_immix_mutator<VM: VMBinding>(
         vec.push((AllocatorSelector::Immix(0), &immix.immix_space));
         vec
     }),
-    prepare_func: &unreachable_prepare_func,
+    prepare_func: &common_prepare_func,
     release_func: &immix_mutator_release,
 };
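The `CommonPlan` change above is the core of the patch: the nonmoving space is now a `MarkSweepSpace`, and unlike the immortal space it replaces, a mark-sweep space has per-GC work that can only run once tracing is finished — which is exactly what the new `CommonPlan::end_of_gc` delegates to. A toy standalone model of that end-of-GC step; the types below are illustrative, not the real `MarkSweepSpace`:

    // Sketch only: block reclamation has to wait until all marking is done,
    // so a mark-sweep space needs an end-of-GC hook while an immortal space
    // does not.
    struct Block {
        marked: bool,
    }

    struct ToyMarkSweepSpace {
        blocks: Vec<Block>,
    }

    impl ToyMarkSweepSpace {
        /// Called once all GC work is done: reclaim unmarked blocks and clear
        /// mark state so the next GC starts from a clean slate.
        fn end_of_gc(&mut self) {
            self.blocks.retain(|b| b.marked);
            for b in &mut self.blocks {
                b.marked = false;
            }
        }
    }

    fn main() {
        let mut space = ToyMarkSweepSpace {
            blocks: vec![Block { marked: true }, Block { marked: false }],
        };
        space.end_of_gc();
        assert_eq!(space.blocks.len(), 1);
    }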
diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs
index e9a2ec037d..bf55c3da7b 100644
--- a/src/plan/markcompact/global.rs
+++ b/src/plan/markcompact/global.rs
@@ -42,7 +42,6 @@ pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints {
     needs_forward_after_liveness: true,
     max_non_los_default_alloc_bytes:
         crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN,
-    needs_prepare_mutator: false,
     ..PlanConstraints::default()
 };
 
@@ -73,6 +72,10 @@ impl<VM: VMBinding> Plan for MarkCompact<VM> {
         self.mc_space.release();
     }
 
+    fn end_of_gc(&mut self, tls: VMWorkerThread) {
+        self.common.end_of_gc(tls);
+    }
+
     fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
         &ALLOCATOR_MAPPING
     }
diff --git a/src/plan/markcompact/mutator.rs b/src/plan/markcompact/mutator.rs
index fc42d7ea7e..f62a5cc710 100644
--- a/src/plan/markcompact/mutator.rs
+++ b/src/plan/markcompact/mutator.rs
@@ -2,10 +2,10 @@ use super::MarkCompact;
 use crate::plan::barriers::NoBarrier;
 use crate::plan::mutator_context::create_allocator_mapping;
 use crate::plan::mutator_context::create_space_mapping;
-use crate::plan::mutator_context::unreachable_prepare_func;
 use crate::plan::mutator_context::Mutator;
 use crate::plan::mutator_context::MutatorConfig;
 use crate::plan::mutator_context::ReservedAllocators;
+use crate::plan::mutator_context::{common_prepare_func, common_release_func};
 use crate::plan::AllocationSemantics;
 use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
 use crate::util::alloc::MarkCompactAllocator;
@@ -39,7 +39,7 @@ pub fn create_markcompact_mutator<VM: VMBinding>(
         vec.push((AllocatorSelector::MarkCompact(0), markcompact.mc_space()));
         vec
     }),
-    prepare_func: &unreachable_prepare_func,
+    prepare_func: &common_prepare_func,
     release_func: &markcompact_mutator_release,
 };
@@ -52,17 +52,16 @@ pub fn create_markcompact_mutator<VM: VMBinding>(
     }
 }
 
-pub fn markcompact_mutator_release<VM: VMBinding>(
-    _mutator: &mut Mutator<VM>,
-    _tls: VMWorkerThread,
-) {
+pub fn markcompact_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
     // reset the thread-local allocation bump pointer
     let markcompact_allocator = unsafe {
-        _mutator
+        mutator
             .allocators
-            .get_allocator_mut(_mutator.config.allocator_mapping[AllocationSemantics::Default])
+            .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default])
     }
     .downcast_mut::<MarkCompactAllocator<VM>>()
     .unwrap();
     markcompact_allocator.reset();
+
+    common_release_func(mutator, tls);
 }
diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs
index 8715e34baf..1916e8b5ac 100644
--- a/src/plan/marksweep/global.rs
+++ b/src/plan/marksweep/global.rs
@@ -65,8 +65,9 @@ impl<VM: VMBinding> Plan for MarkSweep<VM> {
         self.common.release(tls, true);
     }
 
-    fn end_of_gc(&mut self, _tls: VMWorkerThread) {
+    fn end_of_gc(&mut self, tls: VMWorkerThread) {
         self.ms.end_of_gc();
+        self.common.end_of_gc(tls);
     }
 
     fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
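The release functions in this patch all rely on the same downcast idiom: allocators live behind a trait object, and each plan recovers the concrete allocator type it registered for a given semantic before resetting it. A standalone sketch of the idiom using `std::any::Any` (simplified; MMTk has its own downcast support, and the names here are stand-ins):

    // Sketch only: mirrors `.downcast_mut::<MarkCompactAllocator<VM>>().unwrap()` —
    // the plan knows which concrete allocator a semantic maps to.
    use std::any::Any;

    trait Allocator {
        fn as_any_mut(&mut self) -> &mut dyn Any;
    }

    struct BumpAllocator {
        cursor: usize,
    }

    impl Allocator for BumpAllocator {
        fn as_any_mut(&mut self) -> &mut dyn Any {
            self
        }
    }

    fn release(allocator: &mut dyn Allocator) {
        // Downcast to the concrete type, then do type-specific work.
        let bump = allocator
            .as_any_mut()
            .downcast_mut::<BumpAllocator>()
            .unwrap();
        bump.cursor = 0;
    }

    fn main() {
        let mut a = BumpAllocator { cursor: 128 };
        release(&mut a);
        assert_eq!(a.cursor, 0);
    }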
diff --git a/src/plan/marksweep/mutator.rs b/src/plan/marksweep/mutator.rs
index d93431b0fa..4f31b1c4c9 100644
--- a/src/plan/marksweep/mutator.rs
+++ b/src/plan/marksweep/mutator.rs
@@ -5,6 +5,7 @@ use crate::plan::mutator_context::Mutator;
 use crate::plan::mutator_context::MutatorConfig;
 use crate::plan::mutator_context::ReservedAllocators;
 use crate::plan::mutator_context::SpaceMapping;
+use crate::plan::mutator_context::{common_prepare_func, common_release_func};
 use crate::plan::AllocationSemantics;
 use crate::plan::Plan;
 use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
@@ -20,8 +21,12 @@ mod malloc_mark_sweep {
 
     // Do nothing for malloc mark sweep (malloc allocator)
 
-    pub fn ms_mutator_prepare<VM: VMBinding>(_mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {}
-    pub fn ms_mutator_release<VM: VMBinding>(_mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {}
+    pub fn ms_mutator_prepare<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
+        common_prepare_func(mutator, tls);
+    }
+    pub fn ms_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
+        common_release_func(mutator, tls);
+    }
 
     // malloc mark sweep uses 1 malloc allocator
 
@@ -69,13 +74,17 @@ mod native_mark_sweep {
 
     // We forward calls to the allocator prepare and release
 
     #[cfg(not(feature = "malloc_mark_sweep"))]
-    pub fn ms_mutator_prepare<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
+    pub fn ms_mutator_prepare<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
         get_freelist_allocator_mut::<VM>(mutator).prepare();
+
+        common_prepare_func(mutator, tls);
     }
 
     #[cfg(not(feature = "malloc_mark_sweep"))]
-    pub fn ms_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
+    pub fn ms_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
         get_freelist_allocator_mut::<VM>(mutator).release();
+
+        common_release_func(mutator, tls);
     }
 
     // native mark sweep uses 1 free list allocator
diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs
index 5b2ce60703..95de8ea30b 100644
--- a/src/plan/mutator_context.rs
+++ b/src/plan/mutator_context.rs
@@ -5,7 +5,7 @@ use crate::plan::global::Plan;
 use crate::plan::AllocationSemantics;
 use crate::policy::space::Space;
 use crate::util::alloc::allocators::{AllocatorSelector, Allocators};
-use crate::util::alloc::Allocator;
+use crate::util::alloc::{Allocator, FreeListAllocator};
 use crate::util::{Address, ObjectReference};
 use crate::util::{VMMutatorThread, VMWorkerThread};
 use crate::vm::VMBinding;
@@ -24,6 +24,19 @@ pub(crate) fn unreachable_prepare_func<VM: VMBinding>(
     unreachable!("`MutatorConfig::prepare_func` must not be called for the current plan.")
 }
 
+/// A mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`].
+pub(crate) fn common_prepare_func<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
+    // Prepare the free list allocator used for the non-moving space
+    unsafe {
+        mutator
+            .allocators
+            .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving])
+    }
+    .downcast_mut::<FreeListAllocator<VM>>()
+    .unwrap()
+    .prepare();
+}
+
 /// A place-holder implementation for `MutatorConfig::release_func` that should not be called.
 /// Currently only used by `NoGC`.
 pub(crate) fn unreachable_release_func<VM: VMBinding>(
@@ -33,6 +46,19 @@ pub(crate) fn unreachable_release_func<VM: VMBinding>(
     unreachable!("`MutatorConfig::release_func` must not be called for the current plan.")
 }
 
+/// A mutator release implementation for plans that use [`crate::plan::global::CommonPlan`].
+pub(crate) fn common_release_func<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
+    // Release the free list allocator used for the non-moving space
+    unsafe {
+        mutator
+            .allocators
+            .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving])
+    }
+    .downcast_mut::<FreeListAllocator<VM>>()
+    .unwrap()
+    .release();
+}
+
 /// A place-holder implementation for `MutatorConfig::release_func` that does nothing.
 pub(crate) fn no_op_release_func<VM: VMBinding>(_mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {}
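The two functions just added are the common hooks everything else forwards to: look up whichever allocator is mapped to `AllocationSemantics::NonMoving`, downcast it to a `FreeListAllocator`, and prepare or release it. A standalone sketch of that flow with the mapping lookup and downcast collapsed into a stub — all names below are illustrative, not the real MMTk types:

    // Sketch only: the prepare/release pair acting on the one allocator
    // mapped to the NonMoving semantic.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum AllocationSemantics {
        NonMoving,
    }

    struct FreeListAllocator {
        prepared: bool,
    }

    impl FreeListAllocator {
        fn prepare(&mut self) {
            self.prepared = true;
        }
        fn release(&mut self) {
            self.prepared = false;
        }
    }

    struct Mutator {
        nonmoving: FreeListAllocator,
    }

    impl Mutator {
        // Stand-in for the allocator_mapping lookup plus downcast.
        fn allocator_for(&mut self, semantic: AllocationSemantics) -> &mut FreeListAllocator {
            assert_eq!(semantic, AllocationSemantics::NonMoving);
            &mut self.nonmoving
        }
    }

    fn common_prepare_func(mutator: &mut Mutator) {
        mutator.allocator_for(AllocationSemantics::NonMoving).prepare();
    }

    fn common_release_func(mutator: &mut Mutator) {
        mutator.allocator_for(AllocationSemantics::NonMoving).release();
    }

    fn main() {
        let mut m = Mutator { nonmoving: FreeListAllocator { prepared: false } };
        common_prepare_func(&mut m);
        common_release_func(&mut m);
    }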
@@ -423,10 +449,8 @@ pub(crate) fn create_allocator_mapping(
         map[AllocationSemantics::Los] = AllocatorSelector::LargeObject(reserved.n_large_object);
         reserved.n_large_object += 1;
 
-        // TODO: This should be freelist allocator once we use marksweep for nonmoving space.
-        map[AllocationSemantics::NonMoving] =
-            AllocatorSelector::BumpPointer(reserved.n_bump_pointer);
-        reserved.n_bump_pointer += 1;
+        map[AllocationSemantics::NonMoving] = AllocatorSelector::FreeList(reserved.n_free_list);
+        reserved.n_free_list += 1;
     }
 
     reserved.validate();
@@ -488,12 +512,11 @@ pub(crate) fn create_space_mapping<VM: VMBinding>(
             plan.common().get_los(),
         ));
         reserved.n_large_object += 1;
-        // TODO: This should be freelist allocator once we use marksweep for nonmoving space.
         vec.push((
-            AllocatorSelector::BumpPointer(reserved.n_bump_pointer),
+            AllocatorSelector::FreeList(reserved.n_free_list),
             plan.common().get_nonmoving(),
         ));
-        reserved.n_bump_pointer += 1;
+        reserved.n_free_list += 1;
     }
 
     reserved.validate();
diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs
index b2d02f58d3..a0d40525f9 100644
--- a/src/plan/nogc/global.rs
+++ b/src/plan/nogc/global.rs
@@ -66,6 +66,10 @@ impl<VM: VMBinding> Plan for NoGC<VM> {
         unreachable!()
     }
 
+    fn end_of_gc(&mut self, _tls: VMWorkerThread) {
+        unreachable!()
+    }
+
     fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
         &ALLOCATOR_MAPPING
     }
diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs
index 9e9bcb1fdf..f534e5930e 100644
--- a/src/plan/pageprotect/global.rs
+++ b/src/plan/pageprotect/global.rs
@@ -31,7 +31,6 @@ pub struct PageProtect<VM: VMBinding> {
 /// The plan constraints for the page protect plan.
 pub const CONSTRAINTS: PlanConstraints = PlanConstraints {
     moves_objects: false,
-    needs_prepare_mutator: false,
     ..PlanConstraints::default()
 };
 
@@ -58,6 +57,10 @@ impl<VM: VMBinding> Plan for PageProtect<VM> {
         self.space.release(true);
     }
 
+    fn end_of_gc(&mut self, tls: VMWorkerThread) {
+        self.common.end_of_gc(tls);
+    }
+
     fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
         self.base().collection_required(self, space_full)
     }
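The mapping change above is mechanical: the NonMoving slot now draws from the free-list allocator pool, so the `n_free_list` counter advances instead of `n_bump_pointer`. A standalone sketch of the reserved-counter scheme (simplified; the real code fills an `EnumMap` keyed by semantics, and these names are stand-ins):

    // Sketch only: each semantic claims the next index from the pool for its
    // allocator kind, so indices stay dense per kind.
    #[derive(Debug, Clone, Copy)]
    enum AllocatorSelector {
        BumpPointer(u8),
        FreeList(u8),
    }

    #[derive(Default)]
    struct ReservedAllocators {
        n_bump_pointer: u8,
        n_free_list: u8,
    }

    fn create_mapping(reserved: &mut ReservedAllocators) -> Vec<AllocatorSelector> {
        let mut map = Vec::new();
        // Default semantic: still a bump-pointer allocator.
        map.push(AllocatorSelector::BumpPointer(reserved.n_bump_pointer));
        reserved.n_bump_pointer += 1;
        // NonMoving semantic: now a free-list allocator (was BumpPointer).
        map.push(AllocatorSelector::FreeList(reserved.n_free_list));
        reserved.n_free_list += 1;
        map
    }

    fn main() {
        let mut reserved = ReservedAllocators::default();
        println!("{:?}", create_mapping(&mut reserved));
    }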
diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs
index 85087d0dfc..d03363308d 100644
--- a/src/plan/plan_constraints.rs
+++ b/src/plan/plan_constraints.rs
@@ -55,11 +55,15 @@ impl PlanConstraints {
             needs_linear_scan: crate::util::constants::SUPPORT_CARD_SCANNING
                 || crate::util::constants::LAZY_SWEEP,
             needs_concurrent_workers: false,
-            may_trace_duplicate_edges: false,
+            // The nonmoving space (mark-sweep) may trace duplicate edges. However, with this
+            // defaulting to true, we effectively no longer check for duplicate edges anywhere.
+            // FIXME: Should we remove this field and stop checking for duplicate edges?
+            // Alternatively, we could ask the binding whether it will use the nonmoving space.
+            may_trace_duplicate_edges: true,
             needs_forward_after_liveness: false,
             needs_log_bit: false,
             barrier: BarrierSelector::NoBarrier,
-            needs_prepare_mutator: true,
+            needs_prepare_mutator: !cfg!(feature = "eager_sweeping"),
         }
     }
 }
diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs
index a55cbc0976..ffb45cd828 100644
--- a/src/plan/semispace/global.rs
+++ b/src/plan/semispace/global.rs
@@ -40,7 +40,6 @@ pub const SS_CONSTRAINTS: PlanConstraints = PlanConstraints {
     moves_objects: true,
     max_non_los_default_alloc_bytes:
         crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN,
-    needs_prepare_mutator: false,
     ..PlanConstraints::default()
 };
 
@@ -96,6 +95,10 @@ impl<VM: VMBinding> Plan for SemiSpace<VM> {
         self.fromspace().release();
     }
 
+    fn end_of_gc(&mut self, tls: VMWorkerThread) {
+        self.common.end_of_gc(tls);
+    }
+
     fn collection_required(&self, space_full: bool, _space: Option<SpaceStats<Self::VM>>) -> bool {
         self.base().collection_required(self, space_full)
     }
diff --git a/src/plan/semispace/mutator.rs b/src/plan/semispace/mutator.rs
index 2fe6474518..8a610057ae 100644
--- a/src/plan/semispace/mutator.rs
+++ b/src/plan/semispace/mutator.rs
@@ -1,8 +1,8 @@
 use super::SemiSpace;
 use crate::plan::barriers::NoBarrier;
-use crate::plan::mutator_context::unreachable_prepare_func;
 use crate::plan::mutator_context::Mutator;
 use crate::plan::mutator_context::MutatorConfig;
+use crate::plan::mutator_context::{common_prepare_func, common_release_func};
 use crate::plan::mutator_context::{
     create_allocator_mapping, create_space_mapping, ReservedAllocators,
 };
@@ -14,7 +14,7 @@ use crate::vm::VMBinding;
 use crate::MMTK;
 use enum_map::EnumMap;
 
-pub fn ss_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWorkerThread) {
+pub fn ss_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
     // rebind the allocation bump pointer to the appropriate semispace
     let bump_allocator = unsafe {
         mutator
@@ -30,6 +30,8 @@ pub fn ss_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, _tls: VMWor
             .unwrap()
             .tospace(),
     );
+
+    common_release_func(mutator, tls);
 }
 
 const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators {
@@ -57,7 +59,7 @@ pub fn create_ss_mutator<VM: VMBinding>(
         vec.push((AllocatorSelector::BumpPointer(0), ss.tospace()));
         vec
     }),
-    prepare_func: &unreachable_prepare_func,
+    prepare_func: &common_prepare_func,
     release_func: &ss_mutator_release,
 };
diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs
index 22e68d2f20..49f20b1428 100644
--- a/src/plan/sticky/immix/global.rs
+++ b/src/plan/sticky/immix/global.rs
@@ -135,11 +135,12 @@ impl<VM: VMBinding> Plan for StickyImmix<VM> {
         }
     }
 
-    fn end_of_gc(&mut self, _tls: crate::util::opaque_pointer::VMWorkerThread) {
+    fn end_of_gc(&mut self, tls: crate::util::opaque_pointer::VMWorkerThread) {
         let next_gc_full_heap =
             crate::plan::generational::global::CommonGenPlan::should_next_gc_be_full_heap(self);
         self.next_gc_full_heap
            .store(next_gc_full_heap, Ordering::Relaxed);
+        self.immix.common.end_of_gc(tls);
     }
 
     fn collection_required(&self, space_full: bool, space: Option<SpaceStats<Self::VM>>) -> bool {
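Because `PlanConstraints::default()` is a `const fn` used with struct update syntax, changing its `needs_prepare_mutator` default flips the value for every plan that does not set the field explicitly — which is why the plan constants in this patch simply delete their `needs_prepare_mutator: false` lines. A standalone sketch of that mechanism; the feature name and the lazy-sweeping rationale are taken from this diff, everything else is illustrative:

    // Sketch only: a const-fn default plus `..` update syntax, usable in
    // const items, so one default change propagates to all plans.
    struct PlanConstraints {
        moves_objects: bool,
        needs_prepare_mutator: bool,
    }

    impl PlanConstraints {
        const fn default() -> Self {
            PlanConstraints {
                moves_objects: false,
                // With lazy sweeping, mutators appear to need a per-GC prepare
                // for their free-list allocator; with eager_sweeping they do not.
                needs_prepare_mutator: !cfg!(feature = "eager_sweeping"),
            }
        }
    }

    const SS_CONSTRAINTS: PlanConstraints = PlanConstraints {
        moves_objects: true,
        ..PlanConstraints::default()
    };

    fn main() {
        println!(
            "moves={} prepare={}",
            SS_CONSTRAINTS.moves_objects, SS_CONSTRAINTS.needs_prepare_mutator
        );
    }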
diff --git a/src/plan/sticky/immix/mutator.rs b/src/plan/sticky/immix/mutator.rs
index 4957872d6a..d1e15480bb 100644
--- a/src/plan/sticky/immix/mutator.rs
+++ b/src/plan/sticky/immix/mutator.rs
@@ -1,7 +1,7 @@
 use crate::plan::barriers::ObjectBarrier;
 use crate::plan::generational::barrier::GenObjectBarrierSemantics;
 use crate::plan::immix;
-use crate::plan::mutator_context::{create_space_mapping, unreachable_prepare_func, MutatorConfig};
+use crate::plan::mutator_context::{common_prepare_func, create_space_mapping, MutatorConfig};
 use crate::plan::sticky::immix::global::StickyImmix;
 use crate::util::alloc::allocators::Allocators;
 use crate::util::alloc::AllocatorSelector;
@@ -29,7 +29,7 @@ pub fn create_stickyimmix_mutator<VM: VMBinding>(
         vec.push((AllocatorSelector::Immix(0), stickyimmix.get_immix_space()));
         vec
     }),
-    prepare_func: &unreachable_prepare_func,
+    prepare_func: &common_prepare_func,
     release_func: &stickyimmix_mutator_release,
 };
diff --git a/src/policy/marksweepspace/native_ms/block_list.rs b/src/policy/marksweepspace/native_ms/block_list.rs
index 944a45ba54..5bc2c813de 100644
--- a/src/policy/marksweepspace/native_ms/block_list.rs
+++ b/src/policy/marksweepspace/native_ms/block_list.rs
@@ -37,6 +37,7 @@ impl BlockList {
 
     /// Remove a block from the list
     pub fn remove(&mut self, block: Block) {
+        trace!("Blocklist {:?}: Remove {:?}", self as *const _, block);
         match (block.load_prev_block(), block.load_next_block()) {
             (None, None) => {
                 self.first = None;
@@ -45,12 +46,14 @@ impl BlockList {
             (None, Some(next)) => {
                 next.clear_prev_block();
                 self.first = Some(next);
-                next.store_block_list(self);
+                // next.store_block_list(self);
+                debug_assert_eq!(next.load_block_list(), self as *mut _);
             }
             (Some(prev), None) => {
                 prev.clear_next_block();
                 self.last = Some(prev);
-                prev.store_block_list(self);
+                // prev.store_block_list(self);
+                debug_assert_eq!(prev.load_block_list(), self as *mut _);
             }
             (Some(prev), Some(next)) => {
                 prev.store_next_block(next);
@@ -80,6 +83,7 @@ impl BlockList {
 
     /// Push block to the front of the list
     pub fn push(&mut self, block: Block) {
+        trace!("Blocklist {:?}: Push {:?}", self as *const _, block);
         if self.is_empty() {
             block.clear_next_block();
             block.clear_prev_block();
@@ -132,6 +136,7 @@ impl BlockList {
 
     /// Remove all blocks
     fn reset(&mut self) {
+        trace!("Blocklist {:?}: Reset", self as *const _);
         self.first = None;
         self.last = None;
     }
@@ -152,10 +157,12 @@ impl BlockList {
                 .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
                 .is_ok();
         }
+        trace!("Blocklist {:?}: locked", self as *const _);
     }
 
     /// Unlock list. See the comments on the lock method.
     pub fn unlock(&mut self) {
+        trace!("Blocklist {:?}: unlock", self as *const _);
         self.lock.store(false, Ordering::SeqCst);
     }
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 5db093574e..4c39ffa5b8 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -286,7 +286,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
         }
     }
 
-    fn trace_object<Q: ObjectQueue>(
+    pub fn trace_object<Q: ObjectQueue>(
         &self,
         queue: &mut Q,
         object: ObjectReference,
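The `debug_assert_eq!` substitution in `BlockList::remove` encodes an ownership invariant: a block must already point back at the list it is being removed from, so re-storing the owner was redundant, and asserting it instead surfaces any block that ends up on a list it does not belong to. A standalone sketch of the invariant — simplified, not the real `Block`/`BlockList` types:

    // Sketch only: ownership is stored exactly once, on insertion; removal
    // asserts it rather than redundantly re-storing it.
    struct Block {
        owner: *mut BlockList,
    }

    struct BlockList {
        blocks: Vec<Box<Block>>,
    }

    impl BlockList {
        fn push(&mut self, mut block: Box<Block>) {
            block.owner = self as *mut _; // the one place ownership is stored
            self.blocks.push(block);
        }

        fn remove_last(&mut self) -> Option<Box<Block>> {
            let block = self.blocks.pop()?;
            // Removal must find the ownership already correct.
            debug_assert_eq!(block.owner, self as *mut _);
            Some(block)
        }
    }

    fn main() {
        let mut list = BlockList { blocks: Vec::new() };
        list.push(Box::new(Block { owner: std::ptr::null_mut() }));
        assert!(list.remove_last().is_some());
    }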