From b7b5988a6e65aa977ee134084bc09aeeacd8246b Mon Sep 17 00:00:00 2001
From: Yi Lin <qinsoon@gmail.com>
Date: Tue, 15 Apr 2025 04:33:21 +0000
Subject: [PATCH 1/6] Refactor ChunkState to encode space index

---
 src/policy/immix/immixspace.rs                |  6 +-
 src/policy/marksweepspace/native_ms/global.rs | 10 +--
 src/util/heap/chunk_map.rs                    | 64 ++++++++++++++-----
 src/util/object_enum.rs                       |  2 +-
 .../mock_test_allocate_nonmoving.rs           | 44 +++++++++++++
 src/vm/tests/mock_tests/mod.rs                |  1 +
 6 files changed, 103 insertions(+), 24 deletions(-)
 create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs

diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index b932808d8f..cea999a1bf 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -524,7 +524,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
             self.defrag.notify_new_clean_block(copy);
             let block = Block::from_aligned_address(block_address);
             block.init(copy);
-            self.chunk_map.set(block.chunk(), ChunkState::Allocated);
+            self.chunk_map.set(block.chunk(), ChunkState::allocated(self.common().descriptor.get_index()));
             self.lines_consumed
                 .fetch_add(Block::LINES, Ordering::SeqCst);
             Some(block)
@@ -899,7 +899,7 @@ struct SweepChunk<VM: VMBinding> {

 impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
-        assert_eq!(self.space.chunk_map.get(self.chunk), ChunkState::Allocated);
+        assert!(self.space.chunk_map.get(self.chunk).is_allocated());

         let mut histogram = self.space.defrag.new_histogram();
         let line_mark_state = if super::BLOCK_ONLY {
@@ -950,7 +950,7 @@ impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
         probe!(mmtk, sweep_chunk, allocated_blocks);
         // Set this chunk as free if there are no live blocks.
         if allocated_blocks == 0 {
-            self.space.chunk_map.set(self.chunk, ChunkState::Free)
+            self.space.chunk_map.set(self.chunk, ChunkState::free())
         }
         self.space.defrag.add_completed_mark_histogram(histogram);
         self.epilogue.finish_one_work_packet();
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 0b349c4508..747149d23c 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -402,7 +402,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {

     pub fn record_new_block(&self, block: Block) {
         block.init();
-        self.chunk_map.set(block.chunk(), ChunkState::Allocated);
+        self.chunk_map.set(block.chunk(), ChunkState::allocated(self.common.descriptor.get_index()));
     }

     pub fn prepare(&mut self, full_heap: bool) {
@@ -567,7 +567,7 @@ struct PrepareChunkMap<VM: VMBinding> {

 impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
-        debug_assert!(self.space.chunk_map.get(self.chunk) == ChunkState::Allocated);
+        debug_assert!(self.space.chunk_map.get(self.chunk).is_allocated());
         // number of allocated blocks.
         let mut n_occupied_blocks = 0;
         self.chunk
@@ -581,7 +581,7 @@ impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
             });
         if n_occupied_blocks == 0 {
             // Set this chunk as free if there are no live blocks.
-            self.space.chunk_map.set(self.chunk, ChunkState::Free)
+            self.space.chunk_map.set(self.chunk, ChunkState::free())
         } else {
             // Otherwise this chunk is occupied, and we reset the mark bit if it is on the side.
             if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC {
@@ -617,7 +617,7 @@ struct SweepChunk<VM: VMBinding> {

 impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
-        assert_eq!(self.space.chunk_map.get(self.chunk), ChunkState::Allocated);
+        assert!(self.space.chunk_map.get(self.chunk).is_allocated());
         // number of allocated blocks.
         let mut allocated_blocks = 0;
@@ -636,7 +636,7 @@ impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
         probe!(mmtk, sweep_chunk, allocated_blocks);
         // Set this chunk as free if there are no live blocks.
         if allocated_blocks == 0 {
-            self.space.chunk_map.set(self.chunk, ChunkState::Free)
+            self.space.chunk_map.set(self.chunk, ChunkState::free())
         }
         self.epilogue.finish_one_work_packet();
     }
diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs
index 26912b3f89..7725f635d9 100644
--- a/src/util/heap/chunk_map.rs
+++ b/src/util/heap/chunk_map.rs
@@ -45,13 +45,44 @@ impl Chunk {
 }

 /// Chunk allocation state
-#[repr(u8)]
-#[derive(Debug, PartialEq, Clone, Copy)]
-pub enum ChunkState {
-    /// The chunk is not allocated.
-    Free = 0,
-    /// The chunk is allocated.
-    Allocated = 1,
+/// Highest bit: 0 = free, 1 = allocated
+/// Lower 4 bits: Space index (0-15)
+#[repr(transparent)]
+#[derive(PartialEq, Clone, Copy)]
+pub struct ChunkState(u8);
+
+impl ChunkState {
+    pub fn allocated(space_index: usize) -> ChunkState {
+        debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
+        let mut encode = space_index as u8;
+        encode |= 0x80;
+        ChunkState(encode)
+    }
+    pub fn free() -> ChunkState {
+        ChunkState(0)
+    }
+    pub fn is_free(&self) -> bool {
+        self.0 & 0x80 == 0
+    }
+    pub fn is_allocated(&self) -> bool {
+        !self.is_free()
+    }
+    pub fn get_space_index(&self) -> usize {
+        debug_assert!(self.is_allocated());
+        let index = (self.0 & 0x0F) as usize;
+        debug_assert!(index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
+        index
+    }
+}
+
+impl std::fmt::Debug for ChunkState {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.is_free() {
+            write!(f, "Free")
+        } else {
+            write!(f, "Allocated in space {}", self.get_space_index())
+        }
+    }
 }

 /// A byte-map to record all the allocated chunks.
@@ -78,10 +109,17 @@ impl ChunkMap {
         if self.get(chunk) == state {
             return;
         }
+        #[cfg(debug_assertions)]
+        {
+            let old_state = self.get(chunk);
+            if state.is_allocated() {
+                assert!(old_state.is_free() || old_state.get_space_index() == state.get_space_index(), "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.", chunk, old_state, state);
+            }
+        }
         // Update alloc byte
-        unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state as u8) };
+        unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state.0) };
         // If this is a newly allocated chunk, then expand the chunk range.
-        if state == ChunkState::Allocated {
+        if state.is_allocated() {
             debug_assert!(!chunk.start().is_zero());
             let mut range = self.chunk_range.lock();
             if range.start == Chunk::ZERO {
@@ -99,11 +137,7 @@ impl ChunkMap {
     /// Get chunk state
     pub fn get(&self, chunk: Chunk) -> ChunkState {
         let byte = unsafe { Self::ALLOC_TABLE.load::<u8>(chunk.start()) };
-        match byte {
-            0 => ChunkState::Free,
-            1 => ChunkState::Allocated,
-            _ => unreachable!(),
-        }
+        ChunkState(byte)
     }

     /// A range of all chunks in the heap.
@@ -120,7 +154,7 @@ impl ChunkMap {
         let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
         for chunk in self
             .all_chunks()
-            .filter(|c| self.get(*c) == ChunkState::Allocated)
+            .filter(|c| self.get(*c).is_allocated())
         {
             work_packets.push(func(chunk));
         }
diff --git a/src/util/object_enum.rs b/src/util/object_enum.rs
index 18508f088b..d3502a2985 100644
--- a/src/util/object_enum.rs
+++ b/src/util/object_enum.rs
@@ -85,7 +85,7 @@ pub(crate) fn enumerate_blocks_from_chunk_map<B>(
     B: BlockMayHaveObjects,
 {
     for chunk in chunk_map.all_chunks() {
-        if chunk_map.get(chunk) == ChunkState::Allocated {
+        if chunk_map.get(chunk).is_allocated() {
             for block in chunk.iter_region::<B>() {
                 if block.may_have_objects() {
                     enumerator.visit_address_range(block.start(), block.end());
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs
new file mode 100644
index 0000000000..2487b2b36f
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs
@@ -0,0 +1,44 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use lazy_static::lazy_static;
+
+use super::mock_test_prelude::*;
+use crate::plan::AllocationSemantics;
+
+#[test]
+pub fn allocate_alignment() {
+    with_mockvm(
+        || -> MockVM {
+            MockVM {
+                is_collection_enabled: MockMethod::new_fixed(Box::new(|_| false)),
+                ..MockVM::default()
+            }
+        },
+        || {
+            // 1MB heap
+            const MB: usize = 1024 * 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(MB);
+
+            // Normal alloc
+            let addr = memory_manager::alloc(
+                &mut fixture.mutator,
+                16,
+                8,
+                0,
+                AllocationSemantics::Default,
+            );
+            assert!(!addr.is_zero());
+
+            // Non moving alloc
+            let addr = memory_manager::alloc(
+                &mut fixture.mutator,
+                16,
+                8,
+                0,
+                AllocationSemantics::NonMoving,
+            );
+            assert!(!addr.is_zero());
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs
index aab9aafd8b..55b08b3b12 100644
--- a/src/vm/tests/mock_tests/mod.rs
+++ b/src/vm/tests/mock_tests/mod.rs
@@ -24,6 +24,7 @@ pub(crate) mod mock_test_prelude {
 }

 mod mock_test_allocate_align_offset;
+mod mock_test_allocate_nonmoving;
 mod mock_test_allocate_with_disable_collection;
 mod mock_test_allocate_with_initialize_collection;
 mod mock_test_allocate_with_re_enable_collection;

From 1c64dcb7e844f14583d0809bce8f866295d24dd8 Mon Sep 17 00:00:00 2001
From: Yi Lin <qinsoon@gmail.com>
Date: Thu, 17 Apr 2025 01:21:45 +0000
Subject: [PATCH 2/6] Make chunk map a global side metadata

---
 src/policy/immix/immixspace.rs                |  9 +-
 src/policy/marksweepspace/native_ms/global.rs | 13 +--
 src/util/heap/chunk_map.rs                    | 85 +++++++++++++------
 src/util/metadata/side_metadata/global.rs     |  5 ++
 src/util/metadata/side_metadata/spec_defs.rs  |  4 +-
 src/util/object_enum.rs                       | 15 ++--
 .../mock_test_allocate_nonmoving.rs           | 13 +--
 7 files changed, 85 insertions(+), 59 deletions(-)

diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index cea999a1bf..dfe86240aa 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -299,6 +299,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
         let scheduler = args.scheduler.clone();
         let common =
             CommonSpace::new(args.into_policy_args(true, false, Self::side_metadata_specs()));
+        let space_index = common.descriptor.get_index();
         ImmixSpace {
             pr: if common.vmrequest.is_discontiguous() {
                 BlockPageResource::new_discontiguous(
@@ -316,7 +317,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
                 )
             },
             common,
-            chunk_map: ChunkMap::new(),
+            chunk_map: ChunkMap::new(space_index),
             line_mark_state: AtomicU8::new(Line::RESET_MARK_STATE),
            line_unavail_state: AtomicU8::new(Line::RESET_MARK_STATE),
            lines_consumed: AtomicUsize::new(0),
@@ -524,7 +525,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
         self.defrag.notify_new_clean_block(copy);
         let block = Block::from_aligned_address(block_address);
         block.init(copy);
-        self.chunk_map.set(block.chunk(), ChunkState::allocated(self.common().descriptor.get_index()));
+        self.chunk_map.set_allocated(block.chunk(), true);
         self.lines_consumed
             .fetch_add(Block::LINES, Ordering::SeqCst);
         Some(block)
@@ -899,7 +900,7 @@ struct SweepChunk<VM: VMBinding> {

 impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
-        assert!(self.space.chunk_map.get(self.chunk).is_allocated());
+        assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());

         let mut histogram = self.space.defrag.new_histogram();
         let line_mark_state = if super::BLOCK_ONLY {
@@ -950,7 +951,7 @@ impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
         probe!(mmtk, sweep_chunk, allocated_blocks);
         // Set this chunk as free if there are no live blocks.
         if allocated_blocks == 0 {
-            self.space.chunk_map.set(self.chunk, ChunkState::free())
+            self.space.chunk_map.set_allocated(self.chunk, false)
         }
         self.space.defrag.add_completed_mark_histogram(histogram);
         self.epilogue.finish_one_work_packet();
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 747149d23c..7291ba9a76 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -305,6 +305,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
             ])
         };
         let common = CommonSpace::new(args.into_policy_args(false, false, local_specs));
+        let space_index = common.descriptor.get_index();
         MarkSweepSpace {
             pr: if is_discontiguous {
                 BlockPageResource::new_discontiguous(
@@ -322,7 +323,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
                 )
             },
             common,
-            chunk_map: ChunkMap::new(),
+            chunk_map: ChunkMap::new(space_index),
             scheduler,
             abandoned: Mutex::new(AbandonedBlockLists::new()),
             abandoned_in_gc: Mutex::new(AbandonedBlockLists::new()),
@@ -402,7 +403,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {

     pub fn record_new_block(&self, block: Block) {
         block.init();
-        self.chunk_map.set(block.chunk(), ChunkState::allocated(self.common.descriptor.get_index()));
+        self.chunk_map.set_allocated(block.chunk(), true);
     }

     pub fn prepare(&mut self, full_heap: bool) {
@@ -567,7 +568,7 @@ struct PrepareChunkMap<VM: VMBinding> {

 impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
-        debug_assert!(self.space.chunk_map.get(self.chunk).is_allocated());
+        debug_assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());
         // number of allocated blocks.
         let mut n_occupied_blocks = 0;
         self.chunk
@@ -581,7 +582,7 @@ impl<VM: VMBinding> GCWork<VM> for PrepareChunkMap<VM> {
             });
         if n_occupied_blocks == 0 {
             // Set this chunk as free if there are no live blocks.
-            self.space.chunk_map.set(self.chunk, ChunkState::free())
+            self.space.chunk_map.set_allocated(self.chunk, false)
         } else {
             // Otherwise this chunk is occupied, and we reset the mark bit if it is on the side.
             if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC {
@@ -617,7 +618,7 @@ struct SweepChunk<VM: VMBinding> {

 impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
     fn do_work(&mut self, _worker: &mut GCWorker<VM>, _mmtk: &'static MMTK<VM>) {
-        assert!(self.space.chunk_map.get(self.chunk).is_allocated());
+        assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated());

         // number of allocated blocks.
         let mut allocated_blocks = 0;
@@ -636,7 +637,7 @@ impl<VM: VMBinding> GCWork<VM> for SweepChunk<VM> {
         probe!(mmtk, sweep_chunk, allocated_blocks);
         // Set this chunk as free if there are no live blocks.
         if allocated_blocks == 0 {
-            self.space.chunk_map.set(self.chunk, ChunkState::free());
+            self.space.chunk_map.set_allocated(self.chunk, false);
         }
         self.epilogue.finish_one_work_packet();
     }
diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs
index 7725f635d9..a32a5f8645 100644
--- a/src/util/heap/chunk_map.rs
+++ b/src/util/heap/chunk_map.rs
@@ -44,7 +44,7 @@ impl Chunk {
     }
 }

-/// Chunk allocation state
+/// The allocation state for a chunk in the chunk map. It includes whether each chunk is allocated or free, and the space the chunk belongs to.
 /// Highest bit: 0 = free, 1 = allocated
 /// Lower 4 bits: Space index (0-15)
 #[repr(transparent)]
@@ -52,21 +52,27 @@ impl Chunk {
 pub struct ChunkState(u8);

 impl ChunkState {
+    /// Create a new ChunkState that represents being allocated in the given space
     pub fn allocated(space_index: usize) -> ChunkState {
         debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
         let mut encode = space_index as u8;
         encode |= 0x80;
         ChunkState(encode)
     }
-    pub fn free() -> ChunkState {
-        ChunkState(0)
+    /// Create a new ChunkState that represents being free in the given space
+    pub fn free(space_index: usize) -> ChunkState {
+        debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
+        ChunkState(space_index as u8)
     }
+    /// Is the chunk free?
     pub fn is_free(&self) -> bool {
         self.0 & 0x80 == 0
     }
+    /// Is the chunk allocated?
     pub fn is_allocated(&self) -> bool {
         !self.is_free()
     }
+    /// Get the space index of the chunk
     pub fn get_space_index(&self) -> usize {
         debug_assert!(self.is_allocated());
         let index = (self.0 & 0x0F) as usize;
@@ -78,17 +84,26 @@ impl ChunkState {
 impl std::fmt::Debug for ChunkState {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         if self.is_free() {
-            write!(f, "Free")
+            write!(f, "Free({})", self.get_space_index())
         } else {
-            write!(f, "Allocated in space {}", self.get_space_index())
+            write!(f, "Allocated({})", self.get_space_index())
         }
     }
 }

 /// A byte-map to record all the allocated chunks.
 /// A plan can use this to maintain records for the chunks that they used, and the states of the chunks.
-/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs
+/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs.
+///
+/// A chunk map is created for a space (identified by the space index), and will only update or list chunks for that space.
 pub struct ChunkMap {
+    /// The space that uses this chunk map.
+    space_index: usize,
+    /// The range of chunks that are used by the space. The range only records the lowest chunk and the highest chunk.
+    /// All the chunks that are used for the space are within the range, but not all the chunks in the range
+    /// necessarily belong to the space. Spaces may be discontiguous, thus the range may include chunks that do not belong to the space.
+    /// We need to use the space index in the chunk map and the space index encoded with the chunk state to know if
+    /// the chunk belongs to the current space.
     chunk_range: Mutex<Range<Chunk>>,
 }

@@ -97,24 +112,35 @@ impl ChunkMap {
     pub const ALLOC_TABLE: SideMetadataSpec =
         crate::util::metadata::side_metadata::spec_defs::CHUNK_MARK;

-    pub fn new() -> Self {
+    pub fn new(space_index: usize) -> Self {
         Self {
+            space_index,
             chunk_range: Mutex::new(Chunk::ZERO..Chunk::ZERO),
         }
     }

-    /// Set chunk state
-    pub fn set(&self, chunk: Chunk, state: ChunkState) {
+    /// Set a chunk as allocated, or as free.
+    pub fn set_allocated(&self, chunk: Chunk, allocated: bool) {
+        let state = if allocated {
+            ChunkState::allocated(self.space_index)
+        } else {
+            ChunkState::free(self.space_index)
+        };
         // Do nothing if the chunk is already in the expected state.
-        if self.get(chunk) == state {
+        if self.get_any(chunk) == state {
             return;
         }
         #[cfg(debug_assertions)]
         {
-            let old_state = self.get(chunk);
-            if state.is_allocated() {
-                assert!(old_state.is_free() || old_state.get_space_index() == state.get_space_index(), "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.", chunk, old_state, state);
-            }
+            let old_state = self.get_any(chunk);
+            // If a chunk is free, any space may use it. If a chunk is not free, only the current space may update its state.
+            assert!(
+                old_state.is_free() || old_state.get_space_index() == state.get_space_index(),
+                "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.",
+                chunk,
+                old_state,
+                state
+            );
         }
         // Update alloc byte
         unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state.0) };
         // If this is a newly allocated chunk, then expand the chunk range.
-        if state.is_allocated() {
+        if allocated {
             debug_assert!(!chunk.start().is_zero());
             let mut range = self.chunk_range.lock();
             if range.start == Chunk::ZERO {
@@ -134,16 +160,30 @@ impl ChunkMap {
         }
     }

-    /// Get chunk state
-    pub fn get(&self, chunk: Chunk) -> ChunkState {
+    /// Get chunk state. Return None if the chunk does not belong to the space.
+    pub fn get(&self, chunk: Chunk) -> Option<ChunkState> {
+        let state = self.get_any(chunk);
+        (state.get_space_index() == self.space_index).then_some(state)
+    }
+
+    /// Get chunk state, regardless of the space. This should always be private.
+    fn get_any(&self, chunk: Chunk) -> ChunkState {
         let byte = unsafe { Self::ALLOC_TABLE.load::<u8>(chunk.start()) };
         ChunkState(byte)
     }

     /// A range of all chunks in the heap.
-    pub fn all_chunks(&self) -> RegionIterator<Chunk> {
+    pub fn all_chunks(&self) -> impl Iterator<Item = Chunk> + use<'_> {
+        let chunk_range = self.chunk_range.lock();
+        RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
+            .filter(|c| self.get(*c).is_some())
+    }
+
+    /// A range of all allocated chunks in the heap.
+    pub fn all_allocated_chunks(&self) -> impl Iterator<Item = Chunk> + use<'_> {
         let chunk_range = self.chunk_range.lock();
         RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
+            .filter(|c| self.get(*c).is_some_and(|state| state.is_allocated()))
     }

     /// Helper function to create per-chunk processing work packets for each allocated chunk.
@@ -152,18 +192,9 @@ impl ChunkMap {
         func: impl Fn(Chunk) -> Box<dyn GCWork<VM>>,
     ) -> Vec<Box<dyn GCWork<VM>>> {
         let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
-        for chunk in self
-            .all_chunks()
-            .filter(|c| self.get(*c).is_allocated())
-        {
+        for chunk in self.all_allocated_chunks() {
             work_packets.push(func(chunk));
         }
         work_packets
     }
 }
-
-impl Default for ChunkMap {
-    fn default() -> Self {
-        Self::new()
-    }
-}
diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs
index 8730602a4b..387d2f1d75 100644
--- a/src/util/metadata/side_metadata/global.rs
+++ b/src/util/metadata/side_metadata/global.rs
@@ -1345,6 +1345,11 @@ impl SideMetadataContext {
             }
         }

+        // Any plan that uses the chunk map needs to reserve the chunk map table.
+        // As we use either the mark sweep or (non-moving) immix as the non-moving space,
+        // and both policies use the chunk map, we just add the chunk map table globally.
+        ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE);
+
         ret.extend_from_slice(specs);
         ret
     }
diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs
index be7a7730d3..621f11a3cf 100644
--- a/src/util/metadata/side_metadata/spec_defs.rs
+++ b/src/util/metadata/side_metadata/spec_defs.rs
@@ -60,6 +60,8 @@ define_side_metadata_specs!(
     MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
     // Track the index in SFT map for a chunk (only used for SFT sparse chunk map)
     SFT_DENSE_CHUNK_MAP_INDEX = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
+    // Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs)
+    CHUNK_MARK = (global: true, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES),
 );

 // This defines all LOCAL side metadata used by mmtk-core.
@@ -75,8 +77,6 @@ define_side_metadata_specs!(
     IX_BLOCK_DEFRAG = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES),
     // Mark blocks by immix
     IX_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES),
-    // Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs)
-    CHUNK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES),
     // Mark blocks by (native mimalloc) marksweep
     MS_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::marksweepspace::native_ms::Block::LOG_BYTES),
     // Next block in list for native mimalloc
diff --git a/src/util/object_enum.rs b/src/util/object_enum.rs
index d3502a2985..776885e11b 100644
--- a/src/util/object_enum.rs
+++ b/src/util/object_enum.rs
@@ -5,10 +5,7 @@ use std::marker::PhantomData;
 use crate::vm::VMBinding;

 use super::{
-    heap::{
-        chunk_map::{ChunkMap, ChunkState},
-        MonotonePageResource,
-    },
+    heap::{chunk_map::ChunkMap, MonotonePageResource},
     linear_scan::Region,
     metadata::{side_metadata::spec_defs::VO_BIT, vo_bit},
     Address, ObjectReference,
@@ -84,12 +81,10 @@ pub(crate) fn enumerate_blocks_from_chunk_map<B>(
 ) where
     B: BlockMayHaveObjects,
 {
-    for chunk in chunk_map.all_chunks() {
-        if chunk_map.get(chunk).is_allocated() {
-            for block in chunk.iter_region::<B>() {
-                if block.may_have_objects() {
-                    enumerator.visit_address_range(block.start(), block.end());
+    for chunk in chunk_map.all_allocated_chunks() {
+        for block in chunk.iter_region::<B>() {
+            if block.may_have_objects() {
+                enumerator.visit_address_range(block.start(), block.end());
             }
         }
     }
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs
index 2487b2b36f..82d1c5e958 100644
--- a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs
+++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs
@@ -1,12 +1,10 @@
 // GITHUB-CI: MMTK_PLAN=all

-use lazy_static::lazy_static;
-
 use super::mock_test_prelude::*;
 use crate::plan::AllocationSemantics;

 #[test]
-pub fn allocate_alignment() {
+pub fn allocate_nonmoving() {
     with_mockvm(
         || -> MockVM {
             MockVM {
@@ -20,13 +18,8 @@ pub fn allocate_nonmoving() {
             let mut fixture = MutatorFixture::create_with_heapsize(MB);

             // Normal alloc
-            let addr = memory_manager::alloc(
-                &mut fixture.mutator,
-                16,
-                8,
-                0,
-                AllocationSemantics::Default,
-            );
+            let addr =
+                memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default);
             assert!(!addr.is_zero());

             // Non moving alloc

From abbde8bc091000f61d068de3ab733a505117da20 Mon Sep 17 00:00:00 2001
From: Yi Lin <qinsoon@gmail.com>
Date: Thu, 17 Apr 2025 03:39:41 +0000
Subject: [PATCH 3/6] Remove ALLOC_TABLE from local specs. Fix build for 1.74.1

---
 src/policy/immix/immixspace.rs                | 2 --
 src/policy/marksweepspace/native_ms/global.rs | 1 -
 src/util/heap/chunk_map.rs                    | 4 ++--
 3 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index dfe86240aa..49b5691cd5 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -252,7 +252,6 @@ impl<VM: VMBinding> ImmixSpace<VM> {
             vec![
                 MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE),
                 MetadataSpec::OnSide(Block::MARK_TABLE),
-                MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE),
                 *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
                 *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC,
                 *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC,
@@ -264,7 +263,6 @@ impl<VM: VMBinding> ImmixSpace<VM> {
                 MetadataSpec::OnSide(Line::MARK_TABLE),
                 MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE),
                 MetadataSpec::OnSide(Block::MARK_TABLE),
-                MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE),
                 *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
                 *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC,
                 *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC,
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 7291ba9a76..8cb13d8a5e 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -300,7 +300,6 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
             MetadataSpec::OnSide(Block::BLOCK_LIST_TABLE),
             MetadataSpec::OnSide(Block::TLS_TABLE),
             MetadataSpec::OnSide(Block::MARK_TABLE),
-            MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE),
             *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC,
             ])
         };
diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs
index a32a5f8645..97759e07d3 100644
--- a/src/util/heap/chunk_map.rs
+++ b/src/util/heap/chunk_map.rs
@@ -173,14 +173,14 @@ impl ChunkMap {
     }

     /// A range of all chunks in the heap.
-    pub fn all_chunks(&self) -> impl Iterator<Item = Chunk> + use<'_> {
+    pub fn all_chunks(&self) -> impl Iterator<Item = Chunk> + '_ {
         let chunk_range = self.chunk_range.lock();
         RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
             .filter(|c| self.get(*c).is_some())
     }

     /// A range of all allocated chunks in the heap.
-    pub fn all_allocated_chunks(&self) -> impl Iterator<Item = Chunk> + use<'_> {
+    pub fn all_allocated_chunks(&self) -> impl Iterator<Item = Chunk> + '_ {
         let chunk_range = self.chunk_range.lock();
         RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
             .filter(|c| self.get(*c).is_some_and(|state| state.is_allocated()))
     }

From 4472263a6e96104bc7d471caff262c6bb6fb4d12 Mon Sep 17 00:00:00 2001
From: Yi Lin <qinsoon@gmail.com>
Date: Thu, 17 Apr 2025 04:32:13 +0000
Subject: [PATCH 4/6] Remove an outdated assertion

---
 src/policy/marksweepspace/native_ms/global.rs |  2 +-
 src/util/heap/chunk_map.rs                    | 10 ++++++----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 8cb13d8a5e..26570e41ba 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -288,7 +288,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
         let vm_map = args.vm_map;
         let is_discontiguous = args.vmrequest.is_discontiguous();
         let local_specs = {
-            metadata::extract_side_metadata(&vec![
+            metadata::extract_side_metadata(&[
                 MetadataSpec::OnSide(Block::NEXT_BLOCK_TABLE),
                 MetadataSpec::OnSide(Block::PREV_BLOCK_TABLE),
                 MetadataSpec::OnSide(Block::FREE_LIST_TABLE),
diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs
index 97759e07d3..d07601c1d7 100644
--- a/src/util/heap/chunk_map.rs
+++ b/src/util/heap/chunk_map.rs
@@ -52,11 +52,14 @@ impl Chunk {
 pub struct ChunkState(u8);

 impl ChunkState {
+    const ALLOC_BIT_MASK: u8 = 0x80;
+    const SPACE_INDEX_MASK: u8 = 0x0F;
+
     /// Create a new ChunkState that represents being allocated in the given space
     pub fn allocated(space_index: usize) -> ChunkState {
         debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
         let mut encode = space_index as u8;
-        encode |= 0x80;
+        encode |= Self::ALLOC_BIT_MASK;
         ChunkState(encode)
     }
     /// Create a new ChunkState that represents being free in the given space
@@ -66,7 +69,7 @@ impl ChunkState {
     }
     /// Is the chunk free?
     pub fn is_free(&self) -> bool {
-        self.0 & 0x80 == 0
+        self.0 & Self::ALLOC_BIT_MASK == 0
     }
     /// Is the chunk allocated?
     pub fn is_allocated(&self) -> bool {
@@ -74,8 +77,7 @@ impl ChunkState {
     }
     /// Get the space index of the chunk
     pub fn get_space_index(&self) -> usize {
-        debug_assert!(self.is_allocated());
-        let index = (self.0 & 0x0F) as usize;
+        let index = (self.0 & Self::SPACE_INDEX_MASK) as usize;
         debug_assert!(index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
         index
     }

From 5a724c27288fec54f3efe35f9940a178cc8e626b Mon Sep 17 00:00:00 2001
From: Yi Lin <qinsoon@gmail.com>
Date: Mon, 28 Apr 2025 04:36:22 +0000
Subject: [PATCH 5/6] No longer encode space index if a chunk is free. A few
 minor changes based on reviews

---
 src/util/heap/chunk_map.rs                   | 39 ++++++++------------
 src/util/metadata/side_metadata/spec_defs.rs |  2 +-
 src/util/object_enum.rs                      |  2 +-
 3 files changed, 18 insertions(+), 25 deletions(-)

diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs
index d07601c1d7..457516b9ce 100644
--- a/src/util/heap/chunk_map.rs
+++ b/src/util/heap/chunk_map.rs
@@ -46,7 +46,7 @@ impl Chunk {

 /// The allocation state for a chunk in the chunk map. It includes whether each chunk is allocated or free, and the space the chunk belongs to.
 /// Highest bit: 0 = free, 1 = allocated
-/// Lower 4 bits: Space index (0-15)
+/// Lower 4 bits: Space index (0-15) if the chunk is allocated.
 #[repr(transparent)]
 #[derive(PartialEq, Clone, Copy)]
 pub struct ChunkState(u8);
@@ -63,13 +63,12 @@ impl ChunkState {
         encode |= Self::ALLOC_BIT_MASK;
         ChunkState(encode)
     }
     /// Create a new ChunkState that represents being free in the given space
-    pub fn free(space_index: usize) -> ChunkState {
-        debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
-        ChunkState(space_index as u8)
+    pub fn free() -> ChunkState {
+        ChunkState(0u8)
     }
     /// Is the chunk free?
     pub fn is_free(&self) -> bool {
-        self.0 & Self::ALLOC_BIT_MASK == 0
+        self.0 == 0
     }
     /// Is the chunk allocated?
     pub fn is_allocated(&self) -> bool {
@@ -77,6 +76,7 @@ impl ChunkState {
     }
     /// Get the space index of the chunk
     pub fn get_space_index(&self) -> usize {
+        debug_assert!(self.is_allocated());
         let index = (self.0 & Self::SPACE_INDEX_MASK) as usize;
         debug_assert!(index < crate::util::heap::layout::heap_parameters::MAX_SPACES);
         index
@@ -86,7 +86,7 @@ impl std::fmt::Debug for ChunkState {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         if self.is_free() {
-            write!(f, "Free({})", self.get_space_index())
+            write!(f, "Free")
         } else {
             write!(f, "Allocated({})", self.get_space_index())
         }
@@ -95,7 +95,7 @@ impl std::fmt::Debug for ChunkState {

 /// A byte-map to record all the allocated chunks.
 /// A plan can use this to maintain records for the chunks that they used, and the states of the chunks.
-/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs.
+/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their global sidemetadata specs.
 ///
 /// A chunk map is created for a space (identified by the space index), and will only update or list chunks for that space.
 pub struct ChunkMap {
@@ -126,18 +126,18 @@ impl ChunkMap {
         let state = if allocated {
             ChunkState::allocated(self.space_index)
         } else {
-            ChunkState::free(self.space_index)
+            ChunkState::free()
         };
         // Do nothing if the chunk is already in the expected state.
-        if self.get_any(chunk) == state {
+        if self.get_internal(chunk) == state {
             return;
         }
         #[cfg(debug_assertions)]
         {
-            let old_state = self.get_any(chunk);
+            let old_state = self.get_internal(chunk);
             // If a chunk is free, any space may use it. If a chunk is not free, only the current space may update its state.
             assert!(
-                old_state.is_free() || old_state.get_space_index() == state.get_space_index(),
+                old_state.is_free() || old_state.get_space_index() == self.space_index,
                 "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.",
                 chunk,
                 old_state,
                 state
             );
         }
         // Update alloc byte
         unsafe { Self::ALLOC_TABLE.store::<u8>(chunk.start(), state.0) };
         // If this is a newly allocated chunk, then expand the chunk range.
-        if state.is_allocated() {
+        if allocated {
             debug_assert!(!chunk.start().is_zero());
             let mut range = self.chunk_range.lock();
             if range.start == Chunk::ZERO {
@@ -164,12 +164,12 @@ impl ChunkMap {

     /// Get chunk state. Return None if the chunk does not belong to the space.
     pub fn get(&self, chunk: Chunk) -> Option<ChunkState> {
-        let state = self.get_any(chunk);
-        (state.get_space_index() == self.space_index).then_some(state)
+        let state = self.get_internal(chunk);
+        (state.is_allocated() && state.get_space_index() == self.space_index).then_some(state)
     }

     /// Get chunk state, regardless of the space. This should always be private.
-    fn get_any(&self, chunk: Chunk) -> ChunkState {
+    fn get_internal(&self, chunk: Chunk) -> ChunkState {
         let byte = unsafe { Self::ALLOC_TABLE.load::<u8>(chunk.start()) };
         ChunkState(byte)
     }
@@ -181,20 +181,13 @@ impl ChunkMap {
             .filter(|c| self.get(*c).is_some())
     }

-    /// A range of all allocated chunks in the heap.
-    pub fn all_allocated_chunks(&self) -> impl Iterator<Item = Chunk> + '_ {
-        let chunk_range = self.chunk_range.lock();
-        RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
-            .filter(|c| self.get(*c).is_some_and(|state| state.is_allocated()))
-    }
-
     /// Helper function to create per-chunk processing work packets for each allocated chunk.
     pub fn generate_tasks<VM: VMBinding>(
         &self,
         func: impl Fn(Chunk) -> Box<dyn GCWork<VM>>,
     ) -> Vec<Box<dyn GCWork<VM>>> {
         let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
-        for chunk in self.all_allocated_chunks() {
+        for chunk in self.all_chunks() {
             work_packets.push(func(chunk));
         }
diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs
index 621f11a3cf..736bf4ccfd 100644
--- a/src/util/metadata/side_metadata/spec_defs.rs
+++ b/src/util/metadata/side_metadata/spec_defs.rs
@@ -60,7 +60,7 @@ define_side_metadata_specs!(
     MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
     // Track the index in SFT map for a chunk (only used for SFT sparse chunk map)
     SFT_DENSE_CHUNK_MAP_INDEX = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK),
-    // Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs)
+    // Mark chunks (any plan that uses the chunk map should include this spec in their global sidemetadata specs)
     CHUNK_MARK = (global: true, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES),
 );

diff --git a/src/util/object_enum.rs b/src/util/object_enum.rs
index 776885e11b..4748cf7565 100644
--- a/src/util/object_enum.rs
+++ b/src/util/object_enum.rs
@@ -81,7 +81,7 @@ pub(crate) fn enumerate_blocks_from_chunk_map<B>(
 ) where
     B: BlockMayHaveObjects,
 {
-    for chunk in chunk_map.all_allocated_chunks() {
+    for chunk in chunk_map.all_chunks() {
         for block in chunk.iter_region::<B>() {
             if block.may_have_objects() {
                 enumerator.visit_address_range(block.start(), block.end());

From 5bc0517999e73da8839e36fdb15228c4a447f32c Mon Sep 17 00:00:00 2001
From: Yi Lin <qinsoon@gmail.com>
Date: Tue, 29 Apr 2025 04:30:50 +0000
Subject: [PATCH 6/6] Update comments

---
 src/util/heap/chunk_map.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs
index 457516b9ce..69caf12b12 100644
--- a/src/util/heap/chunk_map.rs
+++ b/src/util/heap/chunk_map.rs
@@ -62,7 +62,7 @@ impl ChunkState {
         encode |= Self::ALLOC_BIT_MASK;
         ChunkState(encode)
     }
-    /// Create a new ChunkState that represents being free in the given space
+    /// Create a new ChunkState that represents being free
    pub fn free() -> ChunkState {
         ChunkState(0u8)
     }
     /// Is the chunk free?
     pub fn is_free(&self) -> bool {
@@ -174,7 +174,7 @@ impl ChunkMap {
         ChunkState(byte)
     }

-    /// A range of all chunks in the heap.
+    /// A range of all allocated chunks by this space in the heap.
     pub fn all_chunks(&self) -> impl Iterator<Item = Chunk> + '_ {
         let chunk_range = self.chunk_range.lock();
         RegionIterator::<Chunk>::new(chunk_range.start, chunk_range.end)
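
The series above settles on a one-byte encoding for the per-chunk state. For reference, here is a standalone, compilable sketch of that encoding. It is not part of the patches: MAX_SPACES is re-declared locally with the value implied by the 4-bit space index, and the names mirror mmtk-core's ChunkState rather than import it.

    // Standalone sketch of the ChunkState byte layout from this series.
    // High bit = allocated flag; low 4 bits = space index (meaningful only
    // when the chunk is allocated); a free chunk is the all-zero byte.
    const MAX_SPACES: usize = 16; // assumed limit for this sketch; mmtk-core defines its own

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct ChunkState(u8);

    impl ChunkState {
        const ALLOC_BIT_MASK: u8 = 0x80;
        const SPACE_INDEX_MASK: u8 = 0x0F;

        fn allocated(space_index: usize) -> Self {
            assert!(space_index < MAX_SPACES);
            ChunkState(space_index as u8 | Self::ALLOC_BIT_MASK)
        }
        fn free() -> Self {
            ChunkState(0)
        }
        fn is_allocated(self) -> bool {
            self.0 & Self::ALLOC_BIT_MASK != 0
        }
        fn space_index(self) -> usize {
            // Only valid for allocated chunks, matching the debug_assert in the patch.
            assert!(self.is_allocated());
            (self.0 & Self::SPACE_INDEX_MASK) as usize
        }
    }

    fn main() {
        let s = ChunkState::allocated(5);
        assert_eq!(s.0, 0x85); // 0x80 (allocated bit) | 0x05 (space index)
        assert_eq!(s.space_index(), 5);
        assert!(!ChunkState::free().is_allocated()); // free chunks are the zero byte
    }

Because the whole state still fits in one byte, the existing 8-bit CHUNK_MARK side metadata table can hold it unchanged; only the interpretation of the byte differs, which is why the series can move the spec from the local lists to the global list without resizing anything.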