From d15768a51b17ed0924d56772a7de4e0f5994f0c1 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Mar 2024 04:21:59 +0000 Subject: [PATCH 01/14] Use mark sweep space as the non moving space --- src/plan/global.rs | 8 ++++---- src/plan/mutator_context.rs | 11 ++++------- src/policy/marksweepspace/native_ms/global.rs | 2 +- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/src/plan/global.rs b/src/plan/global.rs index 5b28c4b720..0697a35919 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -7,6 +7,7 @@ use crate::plan::tracing::ObjectQueue; use crate::plan::Mutator; use crate::policy::immortalspace::ImmortalSpace; use crate::policy::largeobjectspace::LargeObjectSpace; +use crate::policy::marksweepspace::native_ms::MarkSweepSpace; use crate::policy::space::{PlanCreateSpaceArgs, Space}; #[cfg(feature = "vm_space")] use crate::policy::vmspace::VMSpace; @@ -561,9 +562,8 @@ pub struct CommonPlan { pub immortal: ImmortalSpace, #[space] pub los: LargeObjectSpace, - // TODO: We should use a marksweep space for nonmoving. #[space] - pub nonmoving: ImmortalSpace, + pub nonmoving: MarkSweepSpace, #[parent] pub base: BasePlan, } @@ -580,7 +580,7 @@ impl CommonPlan { args.get_space_args("los", true, VMRequest::discontiguous()), false, ), - nonmoving: ImmortalSpace::new(args.get_space_args( + nonmoving: MarkSweepSpace::new(args.get_space_args( "nonmoving", true, VMRequest::discontiguous(), @@ -639,7 +639,7 @@ impl CommonPlan { &self.los } - pub fn get_nonmoving(&self) -> &ImmortalSpace { + pub fn get_nonmoving(&self) -> &MarkSweepSpace { &self.nonmoving } } diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 5b2ce60703..759bf4e68d 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -423,10 +423,8 @@ pub(crate) fn create_allocator_mapping( map[AllocationSemantics::Los] = AllocatorSelector::LargeObject(reserved.n_large_object); reserved.n_large_object += 1; - // TODO: This should be freelist allocator once we use marksweep for nonmoving space. - map[AllocationSemantics::NonMoving] = - AllocatorSelector::BumpPointer(reserved.n_bump_pointer); - reserved.n_bump_pointer += 1; + map[AllocationSemantics::NonMoving] = AllocatorSelector::FreeList(reserved.n_free_list); + reserved.n_free_list += 1; } reserved.validate(); @@ -488,12 +486,11 @@ pub(crate) fn create_space_mapping( plan.common().get_los(), )); reserved.n_large_object += 1; - // TODO: This should be freelist allocator once we use marksweep for nonmoving space. 
vec.push(( - AllocatorSelector::BumpPointer(reserved.n_bump_pointer), + AllocatorSelector::FreeList(reserved.n_free_list), plan.common().get_nonmoving(), )); - reserved.n_bump_pointer += 1; + reserved.n_free_list += 1; } reserved.validate(); diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index a533fcde33..a5c2f809eb 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -236,7 +236,7 @@ impl MarkSweepSpace { } } - fn trace_object( + pub fn trace_object( &self, queue: &mut Q, object: ObjectReference, From 68afeccd4b2874f001084343c21a4a1355880c4c Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 27 Mar 2024 01:13:27 +0000 Subject: [PATCH 02/14] Run mutator prepare/release for plans that use CommonPlan --- .../tutorial/code/mygc_semispace/mutator.rs | 10 ++++--- src/plan/generational/copying/mutator.rs | 8 ++++-- src/plan/generational/immix/mutator.rs | 8 ++++-- src/plan/generational/mod.rs | 1 - src/plan/immix/global.rs | 1 - src/plan/immix/mutator.rs | 8 ++++-- src/plan/markcompact/global.rs | 1 - src/plan/markcompact/mutator.rs | 15 +++++----- src/plan/marksweep/mutator.rs | 17 ++++++++--- src/plan/mutator_context.rs | 28 ++++++++++++++++++- src/plan/pageprotect/global.rs | 1 - src/plan/plan_constraints.rs | 2 +- src/plan/semispace/global.rs | 1 - src/plan/semispace/mutator.rs | 8 ++++-- src/plan/sticky/immix/mutator.rs | 4 +-- 15 files changed, 76 insertions(+), 37 deletions(-) diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs b/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs index 9cd110392f..5cd416069e 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/mutator.rs @@ -19,17 +19,17 @@ use enum_map::EnumMap; // Add pub fn mygc_mutator_prepare( - _mutator: &mut Mutator, - _tls: VMWorkerThread, + mutator: &mut Mutator, + tls: VMWorkerThread, ) { - // Do nothing + crate::plan::mutator_context::common_prepare_func::(mutator, tls); } // Add // ANCHOR: release pub fn mygc_mutator_release( mutator: &mut Mutator, - _tls: VMWorkerThread, + tls: VMWorkerThread, ) { // rebind the allocation bump pointer to the appropriate semispace let bump_allocator = unsafe { @@ -46,6 +46,8 @@ pub fn mygc_mutator_release( .unwrap() .tospace(), ); + + crate::plan::mutator_context::common_release_func::(mutator, tls); } // ANCHOR_END: release diff --git a/src/plan/generational/copying/mutator.rs b/src/plan/generational/copying/mutator.rs index 668ccb2cc9..31e1d341f1 100644 --- a/src/plan/generational/copying/mutator.rs +++ b/src/plan/generational/copying/mutator.rs @@ -3,9 +3,9 @@ use super::GenCopy; use crate::plan::barriers::ObjectBarrier; use crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::generational::create_gen_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorConfig; +use crate::plan::mutator_context::{common_prepare_func, common_release_func}; use crate::plan::AllocationSemantics; use crate::util::alloc::allocators::Allocators; use crate::util::alloc::BumpAllocator; @@ -13,7 +13,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; use crate::MMTK; -pub fn gencopy_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn gencopy_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset nursery 
allocator let bump_allocator = unsafe { mutator @@ -23,6 +23,8 @@ pub fn gencopy_mutator_release(mutator: &mut Mutator, _tls: V .downcast_mut::>() .unwrap(); bump_allocator.reset(); + + common_release_func(mutator, tls); } pub fn create_gencopy_mutator( @@ -36,7 +38,7 @@ pub fn create_gencopy_mutator( mmtk.get_plan(), &gencopy.gen.nursery, )), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &gencopy_mutator_release, }; diff --git a/src/plan/generational/immix/mutator.rs b/src/plan/generational/immix/mutator.rs index fdda23cd8b..3e0b062636 100644 --- a/src/plan/generational/immix/mutator.rs +++ b/src/plan/generational/immix/mutator.rs @@ -3,9 +3,9 @@ use crate::plan::barriers::ObjectBarrier; use crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::generational::create_gen_space_mapping; use crate::plan::generational::immix::GenImmix; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorConfig; +use crate::plan::mutator_context::{common_prepare_func, common_release_func}; use crate::plan::AllocationSemantics; use crate::util::alloc::allocators::Allocators; use crate::util::alloc::BumpAllocator; @@ -13,7 +13,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; use crate::MMTK; -pub fn genimmix_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn genimmix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset nursery allocator let bump_allocator = unsafe { mutator @@ -23,6 +23,8 @@ pub fn genimmix_mutator_release(mutator: &mut Mutator, _tls: .downcast_mut::>() .unwrap(); bump_allocator.reset(); + + common_release_func(mutator, tls); } pub fn create_genimmix_mutator( @@ -36,7 +38,7 @@ pub fn create_genimmix_mutator( mmtk.get_plan(), &genimmix.gen.nursery, )), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &genimmix_mutator_release, }; diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index 95aaa48928..4cd162c10c 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -52,7 +52,6 @@ pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints { crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, crate::util::options::NURSERY_SIZE, ), - needs_prepare_mutator: false, ..PlanConstraints::default() }; diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 0741ba3a99..4adfbc6f45 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -41,7 +41,6 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: crate::policy::immix::DEFRAG, // Max immix object size is half of a block. 
max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, - needs_prepare_mutator: false, ..PlanConstraints::default() }; diff --git a/src/plan/immix/mutator.rs b/src/plan/immix/mutator.rs index 0df443c9cd..9e38447698 100644 --- a/src/plan/immix/mutator.rs +++ b/src/plan/immix/mutator.rs @@ -1,10 +1,10 @@ use super::Immix; use crate::plan::mutator_context::create_allocator_mapping; use crate::plan::mutator_context::create_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorConfig; use crate::plan::mutator_context::ReservedAllocators; +use crate::plan::mutator_context::{common_prepare_func, common_release_func}; use crate::plan::AllocationSemantics; use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; use crate::util::alloc::ImmixAllocator; @@ -16,7 +16,7 @@ use crate::{ }; use enum_map::EnumMap; -pub fn immix_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn immix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { let immix_allocator = unsafe { mutator .allocators @@ -25,6 +25,8 @@ pub fn immix_mutator_release(mutator: &mut Mutator, _tls: VMW .downcast_mut::>() .unwrap(); immix_allocator.reset(); + + common_release_func(mutator, tls) } pub(in crate::plan) const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { @@ -52,7 +54,7 @@ pub fn create_immix_mutator( vec.push((AllocatorSelector::Immix(0), &immix.immix_space)); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &immix_mutator_release, }; diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index e9a2ec037d..4c387a1a06 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -42,7 +42,6 @@ pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints { needs_forward_after_liveness: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false, ..PlanConstraints::default() }; diff --git a/src/plan/markcompact/mutator.rs b/src/plan/markcompact/mutator.rs index fc42d7ea7e..f62a5cc710 100644 --- a/src/plan/markcompact/mutator.rs +++ b/src/plan/markcompact/mutator.rs @@ -2,10 +2,10 @@ use super::MarkCompact; use crate::plan::barriers::NoBarrier; use crate::plan::mutator_context::create_allocator_mapping; use crate::plan::mutator_context::create_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorConfig; use crate::plan::mutator_context::ReservedAllocators; +use crate::plan::mutator_context::{common_prepare_func, common_release_func}; use crate::plan::AllocationSemantics; use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; use crate::util::alloc::MarkCompactAllocator; @@ -39,7 +39,7 @@ pub fn create_markcompact_mutator( vec.push((AllocatorSelector::MarkCompact(0), markcompact.mc_space())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &markcompact_mutator_release, }; @@ -52,17 +52,16 @@ pub fn create_markcompact_mutator( } } -pub fn markcompact_mutator_release( - _mutator: &mut Mutator, - _tls: VMWorkerThread, -) { +pub fn markcompact_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset the thread-local allocation bump pointer let markcompact_allocator = unsafe { - _mutator + 
mutator .allocators - .get_allocator_mut(_mutator.config.allocator_mapping[AllocationSemantics::Default]) + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) } .downcast_mut::>() .unwrap(); markcompact_allocator.reset(); + + common_release_func(mutator, tls); } diff --git a/src/plan/marksweep/mutator.rs b/src/plan/marksweep/mutator.rs index d93431b0fa..4f31b1c4c9 100644 --- a/src/plan/marksweep/mutator.rs +++ b/src/plan/marksweep/mutator.rs @@ -5,6 +5,7 @@ use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorConfig; use crate::plan::mutator_context::ReservedAllocators; use crate::plan::mutator_context::SpaceMapping; +use crate::plan::mutator_context::{common_prepare_func, common_release_func}; use crate::plan::AllocationSemantics; use crate::plan::Plan; use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; @@ -20,8 +21,12 @@ mod malloc_mark_sweep { // Do nothing for malloc mark sweep (malloc allocator) - pub fn ms_mutator_prepare(_mutator: &mut Mutator, _tls: VMWorkerThread) {} - pub fn ms_mutator_release(_mutator: &mut Mutator, _tls: VMWorkerThread) {} + pub fn ms_mutator_prepare(mutator: &mut Mutator, tls: VMWorkerThread) { + common_prepare_func(mutator, tls); + } + pub fn ms_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { + common_release_func(mutator, tls); + } // malloc mark sweep uses 1 malloc allocator @@ -69,13 +74,17 @@ mod native_mark_sweep { // We forward calls to the allocator prepare and release #[cfg(not(feature = "malloc_mark_sweep"))] - pub fn ms_mutator_prepare(mutator: &mut Mutator, _tls: VMWorkerThread) { + pub fn ms_mutator_prepare(mutator: &mut Mutator, tls: VMWorkerThread) { get_freelist_allocator_mut::(mutator).prepare(); + + common_prepare_func(mutator, tls); } #[cfg(not(feature = "malloc_mark_sweep"))] - pub fn ms_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { + pub fn ms_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { get_freelist_allocator_mut::(mutator).release(); + + common_release_func(mutator, tls); } // native mark sweep uses 1 free list allocator diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 759bf4e68d..95de8ea30b 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -5,7 +5,7 @@ use crate::plan::global::Plan; use crate::plan::AllocationSemantics; use crate::policy::space::Space; use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; -use crate::util::alloc::Allocator; +use crate::util::alloc::{Allocator, FreeListAllocator}; use crate::util::{Address, ObjectReference}; use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; @@ -24,6 +24,19 @@ pub(crate) fn unreachable_prepare_func( unreachable!("`MutatorConfig::prepare_func` must not be called for the current plan.") } +/// An mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`]. +pub(crate) fn common_prepare_func(mutator: &mut Mutator, _tls: VMWorkerThread) { + // Prepare the free list allocator used for non moving + unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap() + .prepare(); +} + /// A place-holder implementation for `MutatorConfig::release_func` that should not be called. /// Currently only used by `NoGC`. 
pub(crate) fn unreachable_release_func( @@ -33,6 +46,19 @@ pub(crate) fn unreachable_release_func( unreachable!("`MutatorConfig::release_func` must not be called for the current plan.") } +/// An mutator release implementation for plans that use [`crate::plan::global::CommonPlan`]. +pub(crate) fn common_release_func(mutator: &mut Mutator, _tls: VMWorkerThread) { + // Release the free list allocator used for non moving + unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap() + .release(); +} + /// A place-holder implementation for `MutatorConfig::release_func` that does nothing. pub(crate) fn no_op_release_func(_mutator: &mut Mutator, _tls: VMWorkerThread) {} diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 9e9bcb1fdf..65a002be14 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -31,7 +31,6 @@ pub struct PageProtect { /// The plan constraints for the page protect plan. pub const CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: false, - needs_prepare_mutator: false, ..PlanConstraints::default() }; diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 85087d0dfc..b273dff43d 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -59,7 +59,7 @@ impl PlanConstraints { needs_forward_after_liveness: false, needs_log_bit: false, barrier: BarrierSelector::NoBarrier, - needs_prepare_mutator: true, + needs_prepare_mutator: !cfg!(feature = "eager_sweeping"), } } } diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index a55cbc0976..76778846d6 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -40,7 +40,6 @@ pub const SS_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false, ..PlanConstraints::default() }; diff --git a/src/plan/semispace/mutator.rs b/src/plan/semispace/mutator.rs index 2fe6474518..8a610057ae 100644 --- a/src/plan/semispace/mutator.rs +++ b/src/plan/semispace/mutator.rs @@ -1,8 +1,8 @@ use super::SemiSpace; use crate::plan::barriers::NoBarrier; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorConfig; +use crate::plan::mutator_context::{common_prepare_func, common_release_func}; use crate::plan::mutator_context::{ create_allocator_mapping, create_space_mapping, ReservedAllocators, }; @@ -14,7 +14,7 @@ use crate::vm::VMBinding; use crate::MMTK; use enum_map::EnumMap; -pub fn ss_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn ss_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // rebind the allocation bump pointer to the appropriate semispace let bump_allocator = unsafe { mutator @@ -30,6 +30,8 @@ pub fn ss_mutator_release(mutator: &mut Mutator, _tls: VMWork .unwrap() .tospace(), ); + + common_release_func(mutator, tls); } const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { @@ -57,7 +59,7 @@ pub fn create_ss_mutator( vec.push((AllocatorSelector::BumpPointer(0), ss.tospace())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &ss_mutator_release, }; diff --git a/src/plan/sticky/immix/mutator.rs b/src/plan/sticky/immix/mutator.rs index 4957872d6a..d1e15480bb 100644 
--- a/src/plan/sticky/immix/mutator.rs +++ b/src/plan/sticky/immix/mutator.rs @@ -1,7 +1,7 @@ use crate::plan::barriers::ObjectBarrier; use crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::immix; -use crate::plan::mutator_context::{create_space_mapping, unreachable_prepare_func, MutatorConfig}; +use crate::plan::mutator_context::{common_prepare_func, create_space_mapping, MutatorConfig}; use crate::plan::sticky::immix::global::StickyImmix; use crate::util::alloc::allocators::Allocators; use crate::util::alloc::AllocatorSelector; @@ -29,7 +29,7 @@ pub fn create_stickyimmix_mutator( vec.push((AllocatorSelector::Immix(0), stickyimmix.get_immix_space())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &stickyimmix_mutator_release, }; From 4d19a49e1a4bb8a83592d7e39c9d54d2f8017099 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 3 Apr 2024 23:24:45 +0000 Subject: [PATCH 03/14] Default to allow tracing duplicate edges --- src/plan/plan_constraints.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index b273dff43d..d03363308d 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -55,7 +55,11 @@ impl PlanConstraints { needs_linear_scan: crate::util::constants::SUPPORT_CARD_SCANNING || crate::util::constants::LAZY_SWEEP, needs_concurrent_workers: false, - may_trace_duplicate_edges: false, + // The nonmoving space, marksweep, may trace duplicate edges. However, with this default to true, + // essentially, we are not checking any duplicated edges. + // FIXME: Should we remove this field and no longer check for duplicate edges? Or we could ask + // the binding if they would use nonmoving or not. + may_trace_duplicate_edges: true, needs_forward_after_liveness: false, needs_log_bit: false, barrier: BarrierSelector::NoBarrier, From 1e057526788599ff5627ee9efffc86326c596a7d Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 8 Apr 2024 23:55:39 +0000 Subject: [PATCH 04/14] Assert marksweep block list --- Cargo.toml | 3 + src/policy/marksweepspace/native_ms/block.rs | 12 +- .../marksweepspace/native_ms/block_list.rs | 132 +++++++++++++++++- src/util/alloc/free_list_allocator.rs | 8 +- 4 files changed, 138 insertions(+), 17 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e1f108df3f..e5f90ca368 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -125,6 +125,9 @@ immix_smaller_block = [] # Zero the unmarked lines after a GC cycle in immix. This helps debug untraced objects. immix_zero_on_release = [] +# Run sanity checks for BlockList in mark sweep. 
+ms_block_list_sanity = [] + # Run sanity GC sanity = [] # Run analysis diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 625a82d851..26ee399e43 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -136,35 +136,35 @@ impl Block { .is_ok() } - pub fn load_prev_block(&self) -> Option { + pub(in crate::policy::marksweepspace::native_ms) fn load_prev_block(&self) -> Option { let prev = unsafe { Block::PREV_BLOCK_TABLE.load::(self.start()) }; NonZeroUsize::new(prev).map(Block) } - pub fn load_next_block(&self) -> Option { + pub(in crate::policy::marksweepspace::native_ms) fn load_next_block(&self) -> Option { let next = unsafe { Block::NEXT_BLOCK_TABLE.load::(self.start()) }; NonZeroUsize::new(next).map(Block) } - pub fn store_next_block(&self, next: Block) { + pub(in crate::policy::marksweepspace::native_ms) fn store_next_block(&self, next: Block) { unsafe { Block::NEXT_BLOCK_TABLE.store::(self.start(), next.start().as_usize()); } } - pub fn clear_next_block(&self) { + pub(in crate::policy::marksweepspace::native_ms) fn clear_next_block(&self) { unsafe { Block::NEXT_BLOCK_TABLE.store::(self.start(), 0); } } - pub fn store_prev_block(&self, prev: Block) { + pub(in crate::policy::marksweepspace::native_ms) fn store_prev_block(&self, prev: Block) { unsafe { Block::PREV_BLOCK_TABLE.store::(self.start(), prev.start().as_usize()); } } - pub fn clear_prev_block(&self) { + pub(in crate::policy::marksweepspace::native_ms) fn clear_prev_block(&self) { unsafe { Block::PREV_BLOCK_TABLE.store::(self.start(), 0); } diff --git a/src/policy/marksweepspace/native_ms/block_list.rs b/src/policy/marksweepspace/native_ms/block_list.rs index 0aa6e03c7f..9eb7d26088 100644 --- a/src/policy/marksweepspace/native_ms/block_list.rs +++ b/src/policy/marksweepspace/native_ms/block_list.rs @@ -4,19 +4,23 @@ use crate::util::linear_scan::Region; use crate::vm::VMBinding; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; +#[cfg(feature = "ms_block_list_sanity")] +use std::sync::Mutex; /// List of blocks owned by the allocator #[repr(C)] pub struct BlockList { - pub first: Option, - pub last: Option, - pub size: usize, - pub lock: AtomicBool, + first: Option, + last: Option, + size: usize, + lock: AtomicBool, + #[cfg(feature = "ms_block_list_sanity")] + sanity_list: Mutex>, } impl std::fmt::Debug for BlockList { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "BlockList {:?}", self.iter().collect::>()) + write!(f, "{:?}", self.iter().collect::>()) } } @@ -27,12 +31,52 @@ impl BlockList { last: None, size, lock: AtomicBool::new(false), + #[cfg(feature = "ms_block_list_sanity")] + sanity_list: Mutex::new(vec![]), + } + } + + // fn access_block_list R>(&self, access_func: F) -> R { + // #[cfg(feature = "ms_block_list_sanity")] + // let mut sanity_list = self.sanity_list.lock().unwrap(); + + // let ret = access_func(); + + // // Verify the block list is the same as the sanity list + // #[cfg(feature = "ms_block_list_sanity")] + // { + // if !sanity_list.iter().map(|b| *b).eq(BlockListIterator { cursor: self.first }) { + // eprintln!("Sanity block list: {:?}", &mut sanity_list as &mut Vec); + // eprintln!("Actual block list: {:?}", self); + // panic!("Incorrect block list"); + // } + // } + + // ret + // } + #[cfg(feature = "ms_block_list_sanity")] + fn verify_block_list(&self, sanity_list: &mut Vec) { + if !sanity_list.iter().map(|b| *b).eq(BlockListIterator { 
cursor: self.first }) { + eprintln!("Sanity block list: {:?}", sanity_list); + eprintln!("First {:?}", sanity_list.get(0)); + eprintln!("Actual block list: {:?}", self); + eprintln!("First {:?}", self.first); + panic!("Incorrect block list"); } } /// List has no blocks pub fn is_empty(&self) -> bool { - self.first.is_none() + let ret = self.first.is_none(); + + #[cfg(feature = "ms_block_list_sanity")] + { + let mut sanity_list = self.sanity_list.lock().unwrap(); + self.verify_block_list(&mut sanity_list); + assert_eq!(sanity_list.is_empty(), ret); + } + + ret } /// Remove a block from the list @@ -57,11 +101,22 @@ impl BlockList { next.store_prev_block(prev); } } + + #[cfg(feature = "ms_block_list_sanity")] + { + let mut sanity_list = self.sanity_list.lock().unwrap(); + if let Some((index, _)) = sanity_list.iter().enumerate().find(|&(_, &val)| val == block) { + sanity_list.remove(index); + } else { + panic!("Cannot find {:?} in the block list", block); + } + self.verify_block_list(&mut sanity_list); + } } /// Pop the first block in the list pub fn pop(&mut self) -> Option { - if let Some(head) = self.first { + let ret = if let Some(head) = self.first { if let Some(next) = head.load_next_block() { self.first = Some(next); next.clear_prev_block(); @@ -75,7 +130,21 @@ impl BlockList { Some(head) } else { None + }; + + #[cfg(feature = "ms_block_list_sanity")] + { + let mut sanity_list = self.sanity_list.lock().unwrap(); + let sanity_ret = if sanity_list.is_empty() { + None + } else { + Some(sanity_list.remove(0)) // pop first + }; + self.verify_block_list(&mut sanity_list); + assert_eq!(sanity_ret, ret); } + + ret } /// Push block to the front of the list @@ -93,10 +162,28 @@ impl BlockList { self.first = Some(block); } block.store_block_list(self); + + #[cfg(feature = "ms_block_list_sanity")] + { + let mut sanity_list = self.sanity_list.lock().unwrap(); + sanity_list.insert(0, block); // push front + self.verify_block_list(&mut sanity_list); + } } /// Moves all the blocks of `other` into `self`, leaving `other` empty. pub fn append(&mut self, other: &mut BlockList) { + #[cfg(feature = "ms_block_list_sanity")] + { + // Check before merging + let mut sanity_list = self.sanity_list.lock().unwrap(); + self.verify_block_list(&mut sanity_list); + let mut sanity_list_other = other.sanity_list.lock().unwrap(); + other.verify_block_list(&mut sanity_list_other); + } + #[cfg(feature = "ms_block_list_sanity")] + let mut sanity_list_in_other = other.sanity_list.lock().unwrap().clone(); + debug_assert_eq!(self.size, other.size); if !other.is_empty() { debug_assert!( @@ -128,12 +215,25 @@ impl BlockList { } other.reset(); } + + #[cfg(feature = "ms_block_list_sanity")] + { + let mut sanity_list = self.sanity_list.lock().unwrap(); + sanity_list.append(&mut sanity_list_in_other); + self.verify_block_list(&mut sanity_list); + } } /// Remove all blocks fn reset(&mut self) { self.first = None; self.last = None; + + #[cfg(feature = "ms_block_list_sanity")] + { + let mut sanity_list = self.sanity_list.lock().unwrap(); + sanity_list.clear(); + } } /// Lock the list. The MiMalloc allocator mostly uses thread-local block lists, and those operations on the list @@ -172,6 +272,24 @@ impl BlockList { } } } + + /// Get the size of this block list. + pub fn size(&self) -> usize { + let ret = self.size; + + #[cfg(feature = "ms_block_list_sanity")] + { + let mut sanity_list = self.sanity_list.lock().unwrap(); + self.verify_block_list(&mut sanity_list); + } + + ret + } + + /// Get the first block in the list. 
+ pub fn first(&self) -> Option { + self.first + } } pub struct BlockListIterator { diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 50ce36bd27..7f27b7acd7 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -218,10 +218,10 @@ impl FreeListAllocator { debug_assert!(bin <= MAX_BIN); let available = &mut available_blocks[bin]; - debug_assert!(available.size >= size); + debug_assert!(available.size() >= size); if !available.is_empty() { - let mut cursor = available.first; + let mut cursor = available.first(); while let Some(block) = cursor { if block.has_free_cells() { @@ -230,7 +230,7 @@ impl FreeListAllocator { available.pop(); consumed_blocks.get_mut(bin).unwrap().push(block); - cursor = available.first; + cursor = available.first(); } } @@ -303,7 +303,7 @@ impl FreeListAllocator { crate::policy::marksweepspace::native_ms::BlockAcquireResult::Fresh(block) => { self.add_to_available_blocks(bin, block, stress_test); - self.init_block(block, self.available_blocks[bin].size); + self.init_block(block, self.available_blocks[bin].size()); return Some(block); } From 0b9942a12d3d3af144fd1876655149b37d02ae93 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 9 Apr 2024 06:10:53 +0000 Subject: [PATCH 05/14] Apply lock when we manipulate block lists during release --- .../marksweepspace/native_ms/block_list.rs | 30 ++++++++++++++++--- src/policy/marksweepspace/native_ms/global.rs | 4 +++ 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/src/policy/marksweepspace/native_ms/block_list.rs b/src/policy/marksweepspace/native_ms/block_list.rs index 9eb7d26088..552afc25f8 100644 --- a/src/policy/marksweepspace/native_ms/block_list.rs +++ b/src/policy/marksweepspace/native_ms/block_list.rs @@ -56,11 +56,16 @@ impl BlockList { // } #[cfg(feature = "ms_block_list_sanity")] fn verify_block_list(&self, sanity_list: &mut Vec) { - if !sanity_list.iter().map(|b| *b).eq(BlockListIterator { cursor: self.first }) { + if !sanity_list + .iter() + .map(|b| *b) + .eq(BlockListIterator { cursor: self.first }) + { eprintln!("Sanity block list: {:?}", sanity_list); eprintln!("First {:?}", sanity_list.get(0)); eprintln!("Actual block list: {:?}", self); eprintln!("First {:?}", self.first); + eprintln!("Block list {:?}", self as *const _); panic!("Incorrect block list"); } } @@ -81,6 +86,7 @@ impl BlockList { /// Remove a block from the list pub fn remove(&mut self, block: Block) { + trace!("Blocklist {:?}: Remove {:?}", self as *const _, block); match (block.load_prev_block(), block.load_next_block()) { (None, None) => { self.first = None; @@ -89,12 +95,14 @@ impl BlockList { (None, Some(next)) => { next.clear_prev_block(); self.first = Some(next); - next.store_block_list(self); + // next.store_block_list(self); + debug_assert_eq!(next.load_block_list(), self as *mut _); } (Some(prev), None) => { prev.clear_next_block(); self.last = Some(prev); - prev.store_block_list(self); + // prev.store_block_list(self); + debug_assert_eq!(prev.load_block_list(), self as *mut _); } (Some(prev), Some(next)) => { prev.store_next_block(next); @@ -105,7 +113,11 @@ impl BlockList { #[cfg(feature = "ms_block_list_sanity")] { let mut sanity_list = self.sanity_list.lock().unwrap(); - if let Some((index, _)) = sanity_list.iter().enumerate().find(|&(_, &val)| val == block) { + if let Some((index, _)) = sanity_list + .iter() + .enumerate() + .find(|&(_, &val)| val == block) + { sanity_list.remove(index); } else { panic!("Cannot find {:?} in the 
block list", block); @@ -144,11 +156,13 @@ impl BlockList { assert_eq!(sanity_ret, ret); } + trace!("Blocklist {:?}: Pop = {:?}", self as *const _, ret); ret } /// Push block to the front of the list pub fn push(&mut self, block: Block) { + trace!("Blocklist {:?}: Push {:?}", self as *const _, block); if self.is_empty() { block.clear_next_block(); block.clear_prev_block(); @@ -173,6 +187,11 @@ impl BlockList { /// Moves all the blocks of `other` into `self`, leaving `other` empty. pub fn append(&mut self, other: &mut BlockList) { + trace!( + "Blocklist {:?}: Append Blocklist {:?}", + self as *const _, + other as *const _ + ); #[cfg(feature = "ms_block_list_sanity")] { // Check before merging @@ -226,6 +245,7 @@ impl BlockList { /// Remove all blocks fn reset(&mut self) { + trace!("Blocklist {:?}: Reset", self as *const _); self.first = None; self.last = None; @@ -252,10 +272,12 @@ impl BlockList { .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) .is_ok(); } + trace!("Blocklist {:?}: locked", self as *const _); } /// Unlock list. See the comments on the lock method. pub fn unlock(&mut self) { + trace!("Blocklist {:?}: unlock", self as *const _); self.lock.store(false, Ordering::SeqCst); } diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index a5c2f809eb..cd95ce2298 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -65,9 +65,13 @@ impl AbandonedBlockLists { fn move_consumed_to_unswept(&mut self) { let mut i = 0; while i < MI_BIN_FULL { + self.consumed[i].lock(); + self.unswept[i].lock(); if !self.consumed[i].is_empty() { self.unswept[i].append(&mut self.consumed[i]); } + self.unswept[i].unlock(); + self.consumed[i].unlock(); i += 1; } } From 44fb3f4986952c4998ea04182f4c5b118814b56f Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 12 Apr 2024 05:12:09 +0000 Subject: [PATCH 06/14] Remove data race for marksweep block lists --- src/plan/marksweep/global.rs | 4 + src/policy/marksweepspace/malloc_ms/global.rs | 2 + src/policy/marksweepspace/native_ms/block.rs | 2 + src/policy/marksweepspace/native_ms/global.rs | 119 +++++++++++------- src/util/alloc/free_list_allocator.rs | 34 +++-- 5 files changed, 97 insertions(+), 64 deletions(-) diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 41dde00dab..8715e34baf 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -65,6 +65,10 @@ impl Plan for MarkSweep { self.common.release(tls, true); } + fn end_of_gc(&mut self, _tls: VMWorkerThread) { + self.ms.end_of_gc(); + } + fn collection_required(&self, space_full: bool, _space: Option>) -> bool { self.base().collection_required(self, space_full) } diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs index 8d42b74f0d..91d9b8a698 100644 --- a/src/policy/marksweepspace/malloc_ms/global.rs +++ b/src/policy/marksweepspace/malloc_ms/global.rs @@ -492,6 +492,8 @@ impl MallocSpace { self.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(work_packets); } + pub fn end_of_gc(&mut self) {} + pub fn sweep_chunk(&self, chunk_start: Address) { // Call the relevant sweep function depending on the location of the mark bits match *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC { diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 625a82d851..57f64ce1fe 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ 
b/src/policy/marksweepspace/native_ms/block.rs @@ -188,6 +188,7 @@ impl Block { } pub fn store_block_cell_size(&self, size: usize) { + debug_assert_ne!(size, 0); unsafe { Block::SIZE_TABLE.store::(self.start(), size) } } @@ -282,6 +283,7 @@ impl Block { /// that we need to use this method correctly. fn simple_sweep(&self) { let cell_size = self.load_block_cell_size(); + debug_assert_ne!(cell_size, 0); let mut cell = self.start(); let mut last = unsafe { Address::zero() }; while cell + cell_size <= self.start() + Block::BYTES { diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index a533fcde33..d2bc96f7ea 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -46,13 +46,18 @@ pub struct MarkSweepSpace { pub common: CommonSpace, pr: FreeListPageResource, /// Allocation status for all chunks in MS space - pub chunk_map: ChunkMap, + chunk_map: ChunkMap, /// Work packet scheduler scheduler: Arc>, /// Abandoned blocks. If a mutator dies, all its blocks go to this abandoned block - /// lists. In a GC, we also 'flush' all the local blocks to this global pool so they - /// can be used by allocators from other threads. - pub abandoned: Mutex, + /// lists. We reuse blocks in these lists in the mutator phase. + /// The space needs to do the release work for these block lists. + abandoned: Mutex, + /// Abandoned blocks during a GC. Each allocator finishes doing release work, and returns + /// their local blocks to the global lists. Thus we do not need to do release work for + /// these block lists in the space. These lists are only filled in the release phase, + /// and will be moved to the abandoned lists above at the end of a GC. + abandoned_in_gc: Mutex, } pub struct AbandonedBlockLists { @@ -62,6 +67,14 @@ pub struct AbandonedBlockLists { } impl AbandonedBlockLists { + fn new() -> Self { + Self { + available: new_empty_block_lists(), + unswept: new_empty_block_lists(), + consumed: new_empty_block_lists(), + } + } + fn move_consumed_to_unswept(&mut self) { let mut i = 0; while i < MI_BIN_FULL { @@ -88,6 +101,23 @@ impl AbandonedBlockLists { } } } + + fn merge(&mut self, other: &mut Self) { + for i in 0..MI_BIN_FULL { + self.available[i].append(&mut other.available[i]); + self.unswept[i].append(&mut other.unswept[i]); + self.consumed[i].append(&mut other.consumed[i]); + } + } + + #[cfg(debug_assertions)] + fn assert_empty(&self) { + for i in 0..MI_BIN_FULL { + assert!(self.available[i].is_empty()); + assert!(self.unswept[i].is_empty()); + assert!(self.consumed[i].is_empty()); + } + } } impl SFT for MarkSweepSpace { @@ -228,11 +258,8 @@ impl MarkSweepSpace { common, chunk_map: ChunkMap::new(), scheduler, - abandoned: Mutex::new(AbandonedBlockLists { - available: new_empty_block_lists(), - unswept: new_empty_block_lists(), - consumed: new_empty_block_lists(), - }), + abandoned: Mutex::new(AbandonedBlockLists::new()), + abandoned_in_gc: Mutex::new(AbandonedBlockLists::new()), } } @@ -261,27 +288,20 @@ impl MarkSweepSpace { self.chunk_map.set(block.chunk(), ChunkState::Allocated); } - pub fn get_next_metadata_spec(&self) -> SideMetadataSpec { - Block::NEXT_BLOCK_TABLE - } - pub fn prepare(&mut self) { - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC { - for chunk in self.chunk_map.all_chunks() { - side.bzero_metadata(chunk.start(), Chunk::BYTES); - } - } else { - unimplemented!("in header mark bit is not supported"); - } + #[cfg(debug_assertions)] + 
self.abandoned_in_gc.lock().unwrap().assert_empty(); + + // # Safety: ImmixSpace reference is always valid within this collection cycle. + let space = unsafe { &*(self as *const Self) }; + let work_packets = self + .chunk_map + .generate_tasks(|chunk| Box::new(PrepareChunkMap { space, chunk })); + self.scheduler.work_buckets[crate::scheduler::WorkBucketStage::Prepare] + .bulk_add(work_packets); } pub fn release(&mut self) { - // We sweep and release unmarked blocks here. For sweeping cells inside each block, we either - // do that when we release mutators (eager sweeping), or do that at allocation time (lazy sweeping). - use crate::scheduler::WorkBucketStage; - let work_packets = self.generate_sweep_tasks(); - self.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(work_packets); - if cfg!(feature = "eager_sweeping") { // For eager sweeping, we have to sweep the lists that are abandoned to these global lists. let mut abandoned = self.abandoned.lock().unwrap(); @@ -294,6 +314,15 @@ impl MarkSweepSpace { } } + pub fn end_of_gc(&mut self) { + let mut from = self.abandoned_in_gc.lock().unwrap(); + let mut to = self.abandoned.lock().unwrap(); + to.merge(&mut from); + + #[cfg(debug_assertions)] + from.assert_empty(); + } + /// Release a block. pub fn release_block(&self, block: Block) { self.block_clear_metadata(block); @@ -340,42 +369,42 @@ impl MarkSweepSpace { } } - pub fn generate_sweep_tasks(&self) -> Vec>> { - // # Safety: ImmixSpace reference is always valid within this collection cycle. - let space = unsafe { &*(self as *const Self) }; - self.chunk_map - .generate_tasks(|chunk| Box::new(SweepChunk { space, chunk })) + pub fn get_abandoned_block_lists(&self) -> &Mutex { + &self.abandoned + } + + pub fn get_abandoned_block_lists_in_gc(&self) -> &Mutex { + &self.abandoned_in_gc } } use crate::scheduler::GCWork; use crate::MMTK; -/// Chunk sweeping work packet. -struct SweepChunk { +struct PrepareChunkMap { space: &'static MarkSweepSpace, chunk: Chunk, } -impl GCWork for SweepChunk { +impl GCWork for PrepareChunkMap { fn do_work(&mut self, _worker: &mut GCWorker, _mmtk: &'static MMTK) { debug_assert!(self.space.chunk_map.get(self.chunk) == ChunkState::Allocated); // number of allocated blocks. - let mut allocated_blocks = 0; - // Iterate over all allocated blocks in this chunk. - for block in self + let occupied_blocks = self .chunk .iter_region::() .filter(|block| block.get_state() != BlockState::Unallocated) - { - if !block.attempt_release(self.space) { - // Block is live. Increment the allocated block count. - allocated_blocks += 1; - } - } - // Set this chunk as free if there is not live blocks. - if allocated_blocks == 0 { + .count(); + if occupied_blocks == 0 { + // Set this chunk as free if there is no live blocks. self.space.chunk_map.set(self.chunk, ChunkState::Free) + } else { + // Otherwise this chunk is occupied, and we reset the mark bit if it is on the side. 
+ if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC { + for chunk in self.space.chunk_map.all_chunks() { + side.bzero_metadata(chunk.start(), Chunk::BYTES); + } + } } } } diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 50ce36bd27..7ca592c682 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -123,7 +123,8 @@ impl Allocator for FreeListAllocator { } fn on_mutator_destroy(&mut self) { - self.abandon_blocks(); + let mut global = self.space.get_abandoned_block_lists().lock().unwrap(); + self.abandon_blocks(&mut global); } } @@ -333,6 +334,7 @@ impl FreeListAllocator { } fn init_block(&self, block: Block, cell_size: usize) { + debug_assert_ne!(cell_size, 0); self.space.record_new_block(block); // construct free list @@ -407,18 +409,9 @@ impl FreeListAllocator { block.store_tls(self.tls); } - pub(crate) fn prepare(&mut self) { - // For lazy sweeping, it doesn't matter whether we do it in prepare or release. - // However, in the release phase, we will do block-level sweeping. And that will cause - // race if we also reset the allocator in release (which will mutate on the block lists). - // So we just move reset to the prepare phase. - #[cfg(not(feature = "eager_sweeping"))] - self.reset(); - } + pub(crate) fn prepare(&mut self) {} pub(crate) fn release(&mut self) { - // For eager sweeping, we have to do this in the release phase when we know the liveness of the blocks - #[cfg(feature = "eager_sweeping")] self.reset(); } @@ -428,6 +421,7 @@ impl FreeListAllocator { /// for mark sweep. const ABANDON_BLOCKS_IN_RESET: bool = true; + /// Lazy sweeping. We just move all the blocks to the unswept block list. #[cfg(not(feature = "eager_sweeping"))] fn reset(&mut self) { trace!("reset"); @@ -450,10 +444,12 @@ impl FreeListAllocator { } if Self::ABANDON_BLOCKS_IN_RESET { - self.abandon_blocks(); + let mut global = self.space.get_abandoned_block_lists_in_gc().lock().unwrap(); + self.abandon_blocks(&mut global); } } + /// Eager sweeping. We sweep all the block lists, and move them to available block lists. #[cfg(feature = "eager_sweeping")] fn reset(&mut self) { debug!("reset"); @@ -479,31 +475,31 @@ impl FreeListAllocator { } if Self::ABANDON_BLOCKS_IN_RESET { - self.abandon_blocks(); + let mut global = self.space.get_abandoned_block_lists_in_gc().lock().unwrap(); + self.abandon_blocks(&mut global); } } - fn abandon_blocks(&mut self) { - let mut abandoned = self.space.abandoned.lock().unwrap(); + fn abandon_blocks(&mut self, global: &mut AbandonedBlockLists) { for i in 0..MI_BIN_FULL { let available = self.available_blocks.get_mut(i).unwrap(); if !available.is_empty() { - abandoned.available[i].append(available); + global.available[i].append(available); } let available_stress = self.available_blocks_stress.get_mut(i).unwrap(); if !available_stress.is_empty() { - abandoned.available[i].append(available_stress); + global.available[i].append(available_stress); } let consumed = self.consumed_blocks.get_mut(i).unwrap(); if !consumed.is_empty() { - abandoned.consumed[i].append(consumed); + global.consumed[i].append(consumed); } let unswept = self.unswept_blocks.get_mut(i).unwrap(); if !unswept.is_empty() { - abandoned.unswept[i].append(unswept); + global.unswept[i].append(unswept); } } } From 19e87aa72cf0e8a2ee2bcf2d26f09fa693961912 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 15 Apr 2024 00:49:53 +0000 Subject: [PATCH 07/14] Set block state to unmarked in prepare. Minor fixes. 
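
For context on why the last two patches restructure the release path, here is a minimal standalone sketch of the staging-pool pattern they move towards (hypothetical types for illustration only, not the real MarkSweepSpace / BlockList / AbandonedBlockLists API): during release, each allocator flushes its blocks into a pool that only GC workers touch, and the space merges that pool into the mutator-visible pool exactly once at the end of the GC, so mutator-phase and GC-phase code never contend on the same list. This also fits with moving the block-mark reset into the prepare packet in this patch: by release time a block should be either Marked (live) or still Unmarked (reclaimable).

    // Standalone sketch (hypothetical, simplified): two pools, merged at end of GC.
    use std::sync::Mutex;

    #[derive(Debug, Default)]
    struct BlockPool {
        blocks: Vec<usize>, // stand-in for the per-size-class block lists
    }

    #[derive(Default)]
    struct Space {
        abandoned: Mutex<BlockPool>,       // reused by mutators between GCs
        abandoned_in_gc: Mutex<BlockPool>, // only written during the release phase
    }

    impl Space {
        // Each allocator calls this while doing its release work.
        fn flush_local(&self, local: &mut Vec<usize>) {
            self.abandoned_in_gc.lock().unwrap().blocks.append(local);
        }

        // Called exactly once per GC, after all release packets are done.
        fn end_of_gc(&mut self) {
            // Disjoint field borrows: each pool is taken with get_mut, no locking needed
            // because no other thread touches the space at this point.
            let from = self.abandoned_in_gc.get_mut().unwrap();
            let to = self.abandoned.get_mut().unwrap();
            to.blocks.append(&mut from.blocks);
            debug_assert!(from.blocks.is_empty());
        }
    }

    fn main() {
        let mut space = Space::default();
        let mut local = vec![0x1000, 0x2000];
        space.flush_local(&mut local);
        space.end_of_gc();
        println!("abandoned: {:?}", space.abandoned.lock().unwrap().blocks);
    }
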
--- src/policy/marksweepspace/native_ms/global.rs | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index d2bc96f7ea..1b735f0083 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -292,7 +292,7 @@ impl MarkSweepSpace { #[cfg(debug_assertions)] self.abandoned_in_gc.lock().unwrap().assert_empty(); - // # Safety: ImmixSpace reference is always valid within this collection cycle. + // # Safety: MarkSweepSpace reference is always valid within this collection cycle. let space = unsafe { &*(self as *const Self) }; let work_packets = self .chunk_map @@ -315,9 +315,9 @@ impl MarkSweepSpace { } pub fn end_of_gc(&mut self) { - let mut from = self.abandoned_in_gc.lock().unwrap(); - let mut to = self.abandoned.lock().unwrap(); - to.merge(&mut from); + let from = self.abandoned_in_gc.get_mut().unwrap(); + let to = self.abandoned.get_mut().unwrap(); + to.merge(from); #[cfg(debug_assertions)] from.assert_empty(); @@ -390,12 +390,17 @@ impl GCWork for PrepareChunkMap { fn do_work(&mut self, _worker: &mut GCWorker, _mmtk: &'static MMTK) { debug_assert!(self.space.chunk_map.get(self.chunk) == ChunkState::Allocated); // number of allocated blocks. - let occupied_blocks = self - .chunk + let mut n_occupied_blocks = 0; + self.chunk .iter_region::() .filter(|block| block.get_state() != BlockState::Unallocated) - .count(); - if occupied_blocks == 0 { + .for_each(|block| { + // Clear block mark + block.set_state(BlockState::Unmarked); + // Count occupied blocks + n_occupied_blocks += 1 + }); + if n_occupied_blocks == 0 { // Set this chunk as free if there is no live blocks. self.space.chunk_map.set(self.chunk, ChunkState::Free) } else { From a920da3892c7c9b81189aee7a3ae0a6b7b7a4b71 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 15 Apr 2024 23:51:00 +0000 Subject: [PATCH 08/14] Release free blocks in lazy sweeping --- .../marksweepspace/native_ms/block_list.rs | 11 +++++++++-- src/policy/marksweepspace/native_ms/global.rs | 6 +++--- src/util/alloc/free_list_allocator.rs | 19 +++++++++++-------- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/src/policy/marksweepspace/native_ms/block_list.rs b/src/policy/marksweepspace/native_ms/block_list.rs index 0aa6e03c7f..2fb7b7fdc3 100644 --- a/src/policy/marksweepspace/native_ms/block_list.rs +++ b/src/policy/marksweepspace/native_ms/block_list.rs @@ -164,14 +164,21 @@ impl BlockList { BlockListIterator { cursor: self.first } } - /// Sweep all the blocks in the block list. - pub fn sweep_blocks(&self, space: &super::MarkSweepSpace) { + /// Release unmarked blocks, and sweep other blocks in the block list. Used by eager sweeping. + pub fn release_and_sweep_blocks(&self, space: &super::MarkSweepSpace) { for block in self.iter() { if !block.attempt_release(space) { block.sweep::(); } } } + + /// Release unmarked blocks, and do not sweep any blocks. 
Used by lazy sweeping + pub fn release_blocks(&self, space: &super::MarkSweepSpace) { + for block in self.iter() { + block.attempt_release(space); + } + } } pub struct BlockListIterator { diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 1b735f0083..103a9874a9 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -87,9 +87,9 @@ impl AbandonedBlockLists { fn sweep(&mut self, space: &MarkSweepSpace) { for i in 0..MI_BIN_FULL { - self.available[i].sweep_blocks(space); - self.consumed[i].sweep_blocks(space); - self.unswept[i].sweep_blocks(space); + self.available[i].release_and_sweep_blocks(space); + self.consumed[i].release_and_sweep_blocks(space); + self.unswept[i].release_and_sweep_blocks(space); // As we have swept blocks, move blocks in the unswept list to available or consumed list. while let Some(block) = self.unswept[i].pop() { diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 7ca592c682..843f413bc7 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -298,11 +298,13 @@ impl FreeListAllocator { loop { match self.space.acquire_block(self.tls, size, align) { crate::policy::marksweepspace::native_ms::BlockAcquireResult::Exhausted => { + debug!("Acquire global block: None"); // GC return None; } crate::policy::marksweepspace::native_ms::BlockAcquireResult::Fresh(block) => { + debug!("Acquire global block: Fresh {:?}", block); self.add_to_available_blocks(bin, block, stress_test); self.init_block(block, self.available_blocks[bin].size); @@ -310,6 +312,7 @@ impl FreeListAllocator { } crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedAvailable(block) => { + debug!("Acquire global block: AbandonedAvailable {:?}", block); block.store_tls(self.tls); if block.has_free_cells() { self.add_to_available_blocks(bin, block, stress_test); @@ -320,6 +323,7 @@ impl FreeListAllocator { } crate::policy::marksweepspace::native_ms::BlockAcquireResult::AbandonedUnswept(block) => { + debug!("Acquire global block: AbandonedUnswep {:?}", block); block.store_tls(self.tls); block.sweep::(); if block.has_free_cells() { @@ -428,19 +432,18 @@ impl FreeListAllocator { // consumed and available are now unswept for bin in 0..MI_BIN_FULL { let unswept = self.unswept_blocks.get_mut(bin).unwrap(); - unswept.lock(); + + // We should have no unswept blocks here. Blocks should be either in avialable, or consumed. + debug_assert!(unswept.is_empty()); let mut sweep_later = |list: &mut BlockList| { - list.lock(); + list.release_blocks(self.space); unswept.append(list); - list.unlock(); }; sweep_later(&mut self.available_blocks[bin]); sweep_later(&mut self.available_blocks_stress[bin]); sweep_later(&mut self.consumed_blocks[bin]); - - unswept.unlock(); } if Self::ABANDON_BLOCKS_IN_RESET { @@ -456,11 +459,11 @@ impl FreeListAllocator { // sweep all blocks and push consumed onto available list for bin in 0..MI_BIN_FULL { // Sweep available blocks - self.available_blocks[bin].sweep_blocks(self.space); - self.available_blocks_stress[bin].sweep_blocks(self.space); + self.available_blocks[bin].release_and_sweep_blocks(self.space); + self.available_blocks_stress[bin].release_and_sweep_blocks(self.space); // Sweep consumed blocks, and also push the blocks back to the available list. 
- self.consumed_blocks[bin].sweep_blocks(self.space); + self.consumed_blocks[bin].release_and_sweep_blocks(self.space); if *self.context.options.precise_stress && self.context.options.is_stress_test_gc_enabled() { From f09153482d6a58cf0ecbce059d564ba73a0ddbe0 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 16 Apr 2024 05:11:17 +0000 Subject: [PATCH 09/14] Add some comments for MarkSweepSpace about block lists. Also release global lists. --- src/policy/marksweepspace/native_ms/block.rs | 14 +------ src/policy/marksweepspace/native_ms/global.rs | 39 +++++++++++++++---- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 57f64ce1fe..13945b9ccb 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -225,18 +225,8 @@ impl Block { match self.get_state() { BlockState::Unallocated => false, BlockState::Unmarked => { - unsafe { - let block_list = loop { - let list = self.load_block_list(); - (*list).lock(); - if list == self.load_block_list() { - break list; - } - (*list).unlock(); - }; - (*block_list).remove(self); - (*block_list).unlock(); - } + let block_list = self.load_block_list(); + unsafe { &mut *block_list }.remove(self); space.release_block(self); true } diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 103a9874a9..24a2410ad6 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -42,6 +42,26 @@ pub enum BlockAcquireResult { } /// A mark sweep space. +/// +/// The space and each free list allocator own some block lists. +/// A block that is in use belongs to exactly one of the block lists. In this case, +/// whoever owns a block list has exclusive access on the blocks in the list. +/// There should be no data race to access blocks. A thread should NOT access a block list +/// if it does not own the block list. +/// +/// The table below roughly describes what we do in each phase. +/// +/// | Phase | Allocator local block lists | Global abandoned block lists | Chunk map | +/// |----------------|-------------------------------------------------|----------------------------------------------|-----------| +/// | Allocation | Alloc from local | Move blocks from global to local block lists | - | +/// | | Lazy: sweep local blocks | | | +/// | GC - Prepare | - | - | Find used chunks, reset block mark, bzero mark bit | +/// | GC - Trace | Trace object and mark blocks. | Trace object and mark blocks. | - | +/// | | No block list access. | No block list access. 
| | +/// | GC - Release | Lazy: Move blocks to local unswept list | Lazy: Move blocks to global unswept list | _ | +/// | | Eager: Sweep local blocks | Eager: Sweep global blocks | | +/// | | Both: Return local blocks to a temp global list | | | +/// | GC - End of GC | - | Merge the temp global lists | - | pub struct MarkSweepSpace { pub common: CommonSpace, pr: FreeListPageResource, @@ -75,13 +95,16 @@ impl AbandonedBlockLists { } } - fn move_consumed_to_unswept(&mut self) { - let mut i = 0; - while i < MI_BIN_FULL { - if !self.consumed[i].is_empty() { - self.unswept[i].append(&mut self.consumed[i]); - } - i += 1; + fn sweep_later(&mut self, space: &MarkSweepSpace) { + for i in 0..MI_BIN_FULL { + // Release free blocks + self.available[i].release_blocks(space); + self.consumed[i].release_blocks(space); + self.unswept[i].release_blocks(space); + + // Move remaining blocks to unswept + self.unswept[i].append(&mut self.available[i]); + self.unswept[i].append(&mut self.consumed[i]); } } @@ -310,7 +333,7 @@ impl MarkSweepSpace { // For lazy sweeping, we just move blocks from consumed to unswept. When an allocator tries // to use them, they will sweep the block. let mut abandoned = self.abandoned.lock().unwrap(); - abandoned.move_consumed_to_unswept(); + abandoned.sweep_later(self); } } From df034cc70de49c9ed5cba29fd869a60ced27014a Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 16 Apr 2024 05:38:27 +0000 Subject: [PATCH 10/14] Remove trailing spaces --- src/policy/marksweepspace/native_ms/global.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 24a2410ad6..5db093574e 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -42,15 +42,15 @@ pub enum BlockAcquireResult { } /// A mark sweep space. -/// +/// /// The space and each free list allocator own some block lists. /// A block that is in use belongs to exactly one of the block lists. In this case, /// whoever owns a block list has exclusive access on the blocks in the list. /// There should be no data race to access blocks. A thread should NOT access a block list /// if it does not own the block list. -/// +/// /// The table below roughly describes what we do in each phase. -/// +/// /// | Phase | Allocator local block lists | Global abandoned block lists | Chunk map | /// |----------------|-------------------------------------------------|----------------------------------------------|-----------| /// | Allocation | Alloc from local | Move blocks from global to local block lists | - | From f0d1f1a6ba5b93db1b5a66caf6be338486552d07 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 16 Apr 2024 07:00:37 +0000 Subject: [PATCH 11/14] Fix the assertion about unswept blocks --- src/util/alloc/free_list_allocator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 843f413bc7..27546fc337 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -433,8 +433,8 @@ impl FreeListAllocator { for bin in 0..MI_BIN_FULL { let unswept = self.unswept_blocks.get_mut(bin).unwrap(); - // We should have no unswept blocks here. Blocks should be either in avialable, or consumed. - debug_assert!(unswept.is_empty()); + // If we abandon all the local blocks, we should have no unswept blocks here. Blocks should be either in available, or consumed. 
+ debug_assert!(!Self::ABANDON_BLOCKS_IN_RESET || unswept.is_empty()); let mut sweep_later = |list: &mut BlockList| { list.release_blocks(self.space); From 2181ff88ad69edfd426cf7fff98b18cdfc31f64f Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 17 Apr 2024 00:17:00 +0000 Subject: [PATCH 12/14] Add assertions: we should not have unallocated blocks in a block list --- src/policy/marksweepspace/native_ms/block.rs | 5 +++-- src/policy/marksweepspace/native_ms/block_list.rs | 6 +++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 13945b9ccb..e9af1c8272 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -220,10 +220,11 @@ impl Block { Self::MARK_TABLE.store_atomic::(self.start(), state, Ordering::SeqCst); } - /// Release this block if it is unmarked. Return true if the block is release. + /// Release this block if it is unmarked. Return true if the block is released. pub fn attempt_release(self, space: &MarkSweepSpace) -> bool { match self.get_state() { - BlockState::Unallocated => false, + // We should not have unallocated blocks in a block list + BlockState::Unallocated => unreachable!(), BlockState::Unmarked => { let block_list = self.load_block_list(); unsafe { &mut *block_list }.remove(self); diff --git a/src/policy/marksweepspace/native_ms/block_list.rs b/src/policy/marksweepspace/native_ms/block_list.rs index 2fb7b7fdc3..944a45ba54 100644 --- a/src/policy/marksweepspace/native_ms/block_list.rs +++ b/src/policy/marksweepspace/native_ms/block_list.rs @@ -1,4 +1,4 @@ -use super::Block; +use super::{Block, BlockState}; use crate::util::alloc::allocator; use crate::util::linear_scan::Region; use crate::vm::VMBinding; @@ -167,6 +167,8 @@ impl BlockList { /// Release unmarked blocks, and sweep other blocks in the block list. Used by eager sweeping. pub fn release_and_sweep_blocks(&self, space: &super::MarkSweepSpace) { for block in self.iter() { + // We should not have unallocated blocks in a block list + debug_assert_ne!(block.get_state(), BlockState::Unallocated); if !block.attempt_release(space) { block.sweep::(); } @@ -176,6 +178,8 @@ impl BlockList { /// Release unmarked blocks, and do not sweep any blocks. Used by lazy sweeping pub fn release_blocks(&self, space: &super::MarkSweepSpace) { for block in self.iter() { + // We should not have unallocated blocks in a block list + debug_assert_ne!(block.get_state(), BlockState::Unallocated); block.attempt_release(space); } } From 6a8c3060a9413bc5bddbf7823de7386006c99fe9 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 17 Apr 2024 00:32:57 +0000 Subject: [PATCH 13/14] Revert "Assert marksweep block list" This reverts commit 1e057526788599ff5627ee9efffc86326c596a7d. --- Cargo.toml | 3 - src/policy/marksweepspace/native_ms/block.rs | 12 +- .../marksweepspace/native_ms/block_list.rs | 147 +----------------- src/util/alloc/free_list_allocator.rs | 8 +- 4 files changed, 17 insertions(+), 153 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e570b40e28..b12675979c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -125,9 +125,6 @@ immix_smaller_block = [] # Zero the unmarked lines after a GC cycle in immix. This helps debug untraced objects. immix_zero_on_release = [] -# Run sanity checks for BlockList in mark sweep. 
-ms_block_list_sanity = [] - # Run sanity GC sanity = [] # Run analysis diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 26ee399e43..625a82d851 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -136,35 +136,35 @@ impl Block { .is_ok() } - pub(in crate::policy::marksweepspace::native_ms) fn load_prev_block(&self) -> Option { + pub fn load_prev_block(&self) -> Option { let prev = unsafe { Block::PREV_BLOCK_TABLE.load::(self.start()) }; NonZeroUsize::new(prev).map(Block) } - pub(in crate::policy::marksweepspace::native_ms) fn load_next_block(&self) -> Option { + pub fn load_next_block(&self) -> Option { let next = unsafe { Block::NEXT_BLOCK_TABLE.load::(self.start()) }; NonZeroUsize::new(next).map(Block) } - pub(in crate::policy::marksweepspace::native_ms) fn store_next_block(&self, next: Block) { + pub fn store_next_block(&self, next: Block) { unsafe { Block::NEXT_BLOCK_TABLE.store::(self.start(), next.start().as_usize()); } } - pub(in crate::policy::marksweepspace::native_ms) fn clear_next_block(&self) { + pub fn clear_next_block(&self) { unsafe { Block::NEXT_BLOCK_TABLE.store::(self.start(), 0); } } - pub(in crate::policy::marksweepspace::native_ms) fn store_prev_block(&self, prev: Block) { + pub fn store_prev_block(&self, prev: Block) { unsafe { Block::PREV_BLOCK_TABLE.store::(self.start(), prev.start().as_usize()); } } - pub(in crate::policy::marksweepspace::native_ms) fn clear_prev_block(&self) { + pub fn clear_prev_block(&self) { unsafe { Block::PREV_BLOCK_TABLE.store::(self.start(), 0); } diff --git a/src/policy/marksweepspace/native_ms/block_list.rs b/src/policy/marksweepspace/native_ms/block_list.rs index 552afc25f8..50bd7a17a1 100644 --- a/src/policy/marksweepspace/native_ms/block_list.rs +++ b/src/policy/marksweepspace/native_ms/block_list.rs @@ -4,23 +4,19 @@ use crate::util::linear_scan::Region; use crate::vm::VMBinding; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; -#[cfg(feature = "ms_block_list_sanity")] -use std::sync::Mutex; /// List of blocks owned by the allocator #[repr(C)] pub struct BlockList { - first: Option, - last: Option, - size: usize, - lock: AtomicBool, - #[cfg(feature = "ms_block_list_sanity")] - sanity_list: Mutex>, + pub first: Option, + pub last: Option, + pub size: usize, + pub lock: AtomicBool, } impl std::fmt::Debug for BlockList { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:?}", self.iter().collect::>()) + write!(f, "BlockList {:?}", self.iter().collect::>()) } } @@ -31,57 +27,12 @@ impl BlockList { last: None, size, lock: AtomicBool::new(false), - #[cfg(feature = "ms_block_list_sanity")] - sanity_list: Mutex::new(vec![]), - } - } - - // fn access_block_list R>(&self, access_func: F) -> R { - // #[cfg(feature = "ms_block_list_sanity")] - // let mut sanity_list = self.sanity_list.lock().unwrap(); - - // let ret = access_func(); - - // // Verify the block list is the same as the sanity list - // #[cfg(feature = "ms_block_list_sanity")] - // { - // if !sanity_list.iter().map(|b| *b).eq(BlockListIterator { cursor: self.first }) { - // eprintln!("Sanity block list: {:?}", &mut sanity_list as &mut Vec); - // eprintln!("Actual block list: {:?}", self); - // panic!("Incorrect block list"); - // } - // } - - // ret - // } - #[cfg(feature = "ms_block_list_sanity")] - fn verify_block_list(&self, sanity_list: &mut Vec) { - if !sanity_list - .iter() - .map(|b| *b) - .eq(BlockListIterator 
{ cursor: self.first }) - { - eprintln!("Sanity block list: {:?}", sanity_list); - eprintln!("First {:?}", sanity_list.get(0)); - eprintln!("Actual block list: {:?}", self); - eprintln!("First {:?}", self.first); - eprintln!("Block list {:?}", self as *const _); - panic!("Incorrect block list"); } } /// List has no blocks pub fn is_empty(&self) -> bool { - let ret = self.first.is_none(); - - #[cfg(feature = "ms_block_list_sanity")] - { - let mut sanity_list = self.sanity_list.lock().unwrap(); - self.verify_block_list(&mut sanity_list); - assert_eq!(sanity_list.is_empty(), ret); - } - - ret + self.first.is_none() } /// Remove a block from the list @@ -109,26 +60,11 @@ impl BlockList { next.store_prev_block(prev); } } - - #[cfg(feature = "ms_block_list_sanity")] - { - let mut sanity_list = self.sanity_list.lock().unwrap(); - if let Some((index, _)) = sanity_list - .iter() - .enumerate() - .find(|&(_, &val)| val == block) - { - sanity_list.remove(index); - } else { - panic!("Cannot find {:?} in the block list", block); - } - self.verify_block_list(&mut sanity_list); - } } /// Pop the first block in the list pub fn pop(&mut self) -> Option { - let ret = if let Some(head) = self.first { + if let Some(head) = self.first { if let Some(next) = head.load_next_block() { self.first = Some(next); next.clear_prev_block(); @@ -142,22 +78,7 @@ impl BlockList { Some(head) } else { None - }; - - #[cfg(feature = "ms_block_list_sanity")] - { - let mut sanity_list = self.sanity_list.lock().unwrap(); - let sanity_ret = if sanity_list.is_empty() { - None - } else { - Some(sanity_list.remove(0)) // pop first - }; - self.verify_block_list(&mut sanity_list); - assert_eq!(sanity_ret, ret); } - - trace!("Blocklist {:?}: Pop = {:?}", self as *const _, ret); - ret } /// Push block to the front of the list @@ -176,33 +97,10 @@ impl BlockList { self.first = Some(block); } block.store_block_list(self); - - #[cfg(feature = "ms_block_list_sanity")] - { - let mut sanity_list = self.sanity_list.lock().unwrap(); - sanity_list.insert(0, block); // push front - self.verify_block_list(&mut sanity_list); - } } /// Moves all the blocks of `other` into `self`, leaving `other` empty. pub fn append(&mut self, other: &mut BlockList) { - trace!( - "Blocklist {:?}: Append Blocklist {:?}", - self as *const _, - other as *const _ - ); - #[cfg(feature = "ms_block_list_sanity")] - { - // Check before merging - let mut sanity_list = self.sanity_list.lock().unwrap(); - self.verify_block_list(&mut sanity_list); - let mut sanity_list_other = other.sanity_list.lock().unwrap(); - other.verify_block_list(&mut sanity_list_other); - } - #[cfg(feature = "ms_block_list_sanity")] - let mut sanity_list_in_other = other.sanity_list.lock().unwrap().clone(); - debug_assert_eq!(self.size, other.size); if !other.is_empty() { debug_assert!( @@ -234,13 +132,6 @@ impl BlockList { } other.reset(); } - - #[cfg(feature = "ms_block_list_sanity")] - { - let mut sanity_list = self.sanity_list.lock().unwrap(); - sanity_list.append(&mut sanity_list_in_other); - self.verify_block_list(&mut sanity_list); - } } /// Remove all blocks @@ -248,12 +139,6 @@ impl BlockList { trace!("Blocklist {:?}: Reset", self as *const _); self.first = None; self.last = None; - - #[cfg(feature = "ms_block_list_sanity")] - { - let mut sanity_list = self.sanity_list.lock().unwrap(); - sanity_list.clear(); - } } /// Lock the list. 
The MiMalloc allocator mostly uses thread-local block lists, and those operations on the list @@ -294,24 +179,6 @@ impl BlockList { } } } - - /// Get the size of this block list. - pub fn size(&self) -> usize { - let ret = self.size; - - #[cfg(feature = "ms_block_list_sanity")] - { - let mut sanity_list = self.sanity_list.lock().unwrap(); - self.verify_block_list(&mut sanity_list); - } - - ret - } - - /// Get the first block in the list. - pub fn first(&self) -> Option { - self.first - } } pub struct BlockListIterator { diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 7f27b7acd7..50ce36bd27 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -218,10 +218,10 @@ impl FreeListAllocator { debug_assert!(bin <= MAX_BIN); let available = &mut available_blocks[bin]; - debug_assert!(available.size() >= size); + debug_assert!(available.size >= size); if !available.is_empty() { - let mut cursor = available.first(); + let mut cursor = available.first; while let Some(block) = cursor { if block.has_free_cells() { @@ -230,7 +230,7 @@ impl FreeListAllocator { available.pop(); consumed_blocks.get_mut(bin).unwrap().push(block); - cursor = available.first(); + cursor = available.first; } } @@ -303,7 +303,7 @@ impl FreeListAllocator { crate::policy::marksweepspace::native_ms::BlockAcquireResult::Fresh(block) => { self.add_to_available_blocks(bin, block, stress_test); - self.init_block(block, self.available_blocks[bin].size()); + self.init_block(block, self.available_blocks[bin].size); return Some(block); } From b4424295ebfd146cf45c5b1568524a5c64440093 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 22 Apr 2024 01:53:39 +0000 Subject: [PATCH 14/14] Add end_of_gc for nonmoving/ms space --- .../src/tutorial/code/mygc_semispace/global.rs | 6 ++++++ src/plan/generational/copying/global.rs | 3 ++- src/plan/generational/immix/global.rs | 3 ++- src/plan/global.rs | 11 ++++++++++- src/plan/immix/global.rs | 4 ++++ src/plan/markcompact/global.rs | 4 ++++ src/plan/marksweep/global.rs | 3 ++- src/plan/nogc/global.rs | 4 ++++ src/plan/pageprotect/global.rs | 4 ++++ src/plan/semispace/global.rs | 4 ++++ src/plan/sticky/immix/global.rs | 3 ++- 11 files changed, 44 insertions(+), 5 deletions(-) diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index bbbf6fe6f5..3c90f4d59d 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -121,6 +121,12 @@ impl Plan for MyGC { } // ANCHOR_END: release + // ANCHOR: end_of_gc + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + // ANCHOR_END: end_of_gc + // Modify // ANCHOR: plan_get_collection_reserve fn get_collection_reserved_pages(&self) -> usize { diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index faa4d73266..8bdd002d23 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -114,9 +114,10 @@ impl Plan for GenCopy { } } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&mut self, tls: VMWorkerThread) { self.gen .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self)); + self.gen.common.end_of_gc(tls); } fn get_collection_reserved_pages(&self) -> usize { diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 97b09b15fd..ff1d37dd85 
100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -146,9 +146,10 @@ impl Plan for GenImmix { .store(full_heap, Ordering::Relaxed); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&mut self, tls: VMWorkerThread) { self.gen .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self)); + self.gen.common.end_of_gc(tls); } fn get_collection_reserved_pages(&self) -> usize { diff --git a/src/plan/global.rs b/src/plan/global.rs index 0697a35919..7bd6723fe3 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -198,7 +198,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// Inform the plan about the end of a GC. It is guaranteed that there is no further work for this GC. /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method. - fn end_of_gc(&mut self, _tls: VMWorkerThread) {} + fn end_of_gc(&mut self, _tls: VMWorkerThread); /// Notify the plan that an emergency collection will happen. The plan should try to free as much memory as possible. /// The default implementation will force a full heap collection for generational plans. @@ -522,6 +522,10 @@ impl BasePlan { self.vm_space.release(); } + pub fn end_of_gc(&mut self, _tls: VMWorkerThread) { + // Not doing anything special here. + } + pub(crate) fn collection_required(&self, plan: &P, space_full: bool) -> bool { let stress_force_gc = crate::util::heap::gc_trigger::GCTrigger::::should_do_stress_gc_inner( @@ -631,6 +635,11 @@ impl CommonPlan { self.base.release(tls, full_heap) } + pub fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.nonmoving.end_of_gc(); + self.base.end_of_gc(tls); + } + pub fn get_immortal(&self) -> &ImmortalSpace { &self.immortal } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 4adfbc6f45..981b2991bf 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -96,6 +96,10 @@ impl Plan for Immix { .store(self.immix_space.release(true), Ordering::Relaxed); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + fn get_collection_reserved_pages(&self) -> usize { self.immix_space.defrag_headroom_pages() } diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 4c387a1a06..bf55c3da7b 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -72,6 +72,10 @@ impl Plan for MarkCompact { self.mc_space.release(); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + fn get_allocator_mapping(&self) -> &'static EnumMap { &ALLOCATOR_MAPPING } diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 8715e34baf..1916e8b5ac 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -65,8 +65,9 @@ impl Plan for MarkSweep { self.common.release(tls, true); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&mut self, tls: VMWorkerThread) { self.ms.end_of_gc(); + self.common.end_of_gc(tls); } fn collection_required(&self, space_full: bool, _space: Option>) -> bool { diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index b2d02f58d3..a0d40525f9 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -66,6 +66,10 @@ impl Plan for NoGC { unreachable!() } + fn end_of_gc(&mut self, _tls: VMWorkerThread) { + unreachable!() + } + fn get_allocator_mapping(&self) -> &'static EnumMap { &ALLOCATOR_MAPPING } diff --git a/src/plan/pageprotect/global.rs 
b/src/plan/pageprotect/global.rs index 65a002be14..f534e5930e 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -57,6 +57,10 @@ impl Plan for PageProtect { self.space.release(true); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + fn collection_required(&self, space_full: bool, _space: Option>) -> bool { self.base().collection_required(self, space_full) } diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index 76778846d6..ffb45cd828 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -95,6 +95,10 @@ impl Plan for SemiSpace { self.fromspace().release(); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + fn collection_required(&self, space_full: bool, _space: Option>) -> bool { self.base().collection_required(self, space_full) } diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 22e68d2f20..49f20b1428 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -135,11 +135,12 @@ impl Plan for StickyImmix { } } - fn end_of_gc(&mut self, _tls: crate::util::opaque_pointer::VMWorkerThread) { + fn end_of_gc(&mut self, tls: crate::util::opaque_pointer::VMWorkerThread) { let next_gc_full_heap = crate::plan::generational::global::CommonGenPlan::should_next_gc_be_full_heap(self); self.next_gc_full_heap .store(next_gc_full_heap, Ordering::Relaxed); + self.immix.common.end_of_gc(tls); } fn collection_required(&self, space_full: bool, space: Option>) -> bool {
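
The final patch above threads `end_of_gc` from each plan through `CommonPlan` into the mark-sweep nonmoving space, which is where the temporary global block lists produced during release get merged back after a collection. The standalone Rust sketch below mirrors only that delegation chain; `ToyPlan`, and the simplified `Plan`, `CommonPlan`, `BasePlan`, and `MarkSweepSpace` types here are illustrative stand-ins (the `VMWorkerThread` argument is omitted), not MMTk's actual definitions or signatures.

    // Toy sketch of the end_of_gc delegation introduced in PATCH 14.
    // All types are simplified stand-ins, not MMTk's real API.

    struct MarkSweepSpace;

    impl MarkSweepSpace {
        // In the real space, this is the point where temporary global block
        // lists gathered during release are merged back into the abandoned lists.
        fn end_of_gc(&mut self) {
            println!("MarkSweepSpace: merge temporary global block lists");
        }
    }

    struct BasePlan;

    impl BasePlan {
        fn end_of_gc(&mut self) {
            // Nothing special at the base level.
        }
    }

    struct CommonPlan {
        nonmoving: MarkSweepSpace,
        base: BasePlan,
    }

    impl CommonPlan {
        fn end_of_gc(&mut self) {
            self.nonmoving.end_of_gc();
            self.base.end_of_gc();
        }
    }

    trait Plan {
        // Invoked once per GC, after all GC work for that collection is done.
        fn end_of_gc(&mut self);
    }

    // A plan that embeds CommonPlan simply forwards the call.
    struct ToyPlan {
        common: CommonPlan,
    }

    impl Plan for ToyPlan {
        fn end_of_gc(&mut self) {
            self.common.end_of_gc();
        }
    }

    fn main() {
        let mut plan = ToyPlan {
            common: CommonPlan {
                nonmoving: MarkSweepSpace,
                base: BasePlan,
            },
        };
        plan.end_of_gc();
    }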