From 18379df79f25b8c7837f48d6a7f0a9597a684dd2 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 29 Oct 2024 05:13:06 +0000 Subject: [PATCH 1/8] Add alloc_no_gc --- src/memory_manager.rs | 90 +++++++++++++++---- src/plan/mutator_context.rs | 82 +++++++++++++++-- src/policy/immix/immixspace.rs | 4 +- src/policy/largeobjectspace.rs | 4 +- src/policy/lockfreeimmortalspace.rs | 8 +- src/policy/marksweepspace/native_ms/global.rs | 10 ++- src/policy/space.rs | 41 ++++++--- src/policy/vmspace.rs | 2 +- src/util/alloc/allocator.rs | 74 +++++++++++++++ src/util/alloc/bumpallocator.rs | 6 +- src/util/alloc/free_list_allocator.rs | 2 +- src/util/alloc/immix_allocator.rs | 6 +- src/util/alloc/large_object_allocator.rs | 3 +- .../mock_tests/mock_test_allocate_no_gc.rs | 35 ++++++++ src/vm/tests/mock_tests/mod.rs | 1 + 15 files changed, 320 insertions(+), 48 deletions(-) create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 9c6b30034a..f6a0e3b1b8 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -18,7 +18,7 @@ use crate::plan::{Mutator, MutatorContext}; use crate::scheduler::WorkBucketStage; use crate::scheduler::{GCWork, GCWorker}; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::constants::{LOG_BYTES_IN_PAGE, MIN_OBJECT_SIZE}; +use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::opaque_pointer::*; use crate::util::{Address, ObjectReference}; @@ -140,7 +140,19 @@ pub fn flush_mutator(mutator: &mut Mutator) { mutator.flush() } -/// Allocate memory for an object. For performance reasons, a VM should +/// Allocate memory for an object. This function is a GC safepoint. If MMTk fails +/// to allocate memory, it will attempt a GC to free up some memory and retry +/// the allocation. +/// +/// This function in most cases returns a valid memory address. +/// This function may return a zero address iif 1. MMTk attempts at least one GC, +/// 2. the GC does not free up enough memory, 3. MMTk calls [`crate::vm::Collection::out_of_memory`] +/// to the binding, and 4. the binding returns from [`crate::vm::Collection::out_of_memory`] +/// instead of throwing an exception/error. +/// +/// * Note +/// +/// For performance reasons, a VM should /// implement the allocation fast-path on their side rather than just calling this function. /// /// If the VM provides a non-zero `offset` parameter, then the returned address will be @@ -159,24 +171,48 @@ pub fn alloc( offset: usize, semantics: AllocationSemantics, ) -> Address { - // MMTk has assumptions about minimal object size. - // We need to make sure that all allocations comply with the min object size. - // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min - // object size (the VM does not need to know this). However, for the VM bindings we support at the moment, - // their object sizes are all larger than MMTk's min object size, so we simply put an assertion here. - // If you plan to use MMTk with a VM with its object size smaller than MMTk's min object size, you should - // meet the min object size in the fastpath. 
- debug_assert!(size >= MIN_OBJECT_SIZE); - // Assert alignment - debug_assert!(align >= VM::MIN_ALIGNMENT); - debug_assert!(align <= VM::MAX_ALIGNMENT); - // Assert offset - debug_assert!(VM::USE_ALLOCATION_OFFSET || offset == 0); + #[cfg(debug_assertions)] + crate::util::alloc::allocator::asset_allocation_args::(size, align, offset); mutator.alloc(size, align, offset, semantics) } -/// Invoke the allocation slow path. This is only intended for use when a binding implements the fastpath on +/// Allocate memory for an object. This function is NOT a GC safepoint. If MMTk fails +/// to allocate memory, it will not attempt a GC, nor call [`crate::vm::Collection::out_of_memory`], +/// MMTk instead return a zero address. +/// +/// Generally [`alloc`] is preferred over this function. This function should only be used +/// when the binding does not want GCs to happen during the particular allocationq requests, +/// and is willing to deal with the allocation failure. +/// +/// Notes on [`alloc`] also apply to this function. +/// +/// Arguments: +/// * `mutator`: The mutator to perform this allocation request. +/// * `size`: The number of bytes required for the object. +/// * `align`: Required alignment for the object. +/// * `offset`: Offset associated with the alignment. +/// * `semantics`: The allocation semantic required for the allocation. +pub fn alloc_no_gc( + mutator: &mut Mutator, + size: usize, + align: usize, + offset: usize, + semantics: AllocationSemantics, +) -> Address { + #[cfg(debug_assertions)] + crate::util::alloc::allocator::asset_allocation_args::(size, align, offset); + + mutator.alloc_no_gc(size, align, offset, semantics) +} + +/// Invoke the allocation slow path. This function is a GC safepoint. If MMTk fails +/// to allocate memory, it will attempt a GC to free up some memory and retry +/// the allocation. See [`alloc`] for more details. +/// +/// * Notes +/// +/// This is only intended for use when a binding implements the fastpath on /// the binding side. When the binding handles fast path allocation and the fast path fails, it can use this /// method for slow path allocation. Calling before exhausting fast path allocaiton buffer will lead to bad /// performance. @@ -197,6 +233,28 @@ pub fn alloc_slow( mutator.alloc_slow(size, align, offset, semantics) } +/// Invoke the allocation slow path. This function is NOT a GC safepoint. If MMTk fails +/// to allocate memory, it will not attempt a GC, nor call [`crate::vm::Collection::out_of_memory`], +/// MMTk instead return a zero address. See [`alloc_no_gc`] for more details. +/// +/// Notes on [`alloc_slow_no_gc`] also apply to this function. +/// +/// Arguments: +/// * `mutator`: The mutator to perform this allocation request. +/// * `size`: The number of bytes required for the object. +/// * `align`: Required alignment for the object. +/// * `offset`: Offset associated with the alignment. +/// * `semantics`: The allocation semantic required for the allocation. +pub fn alloc_slow_no_gc( + mutator: &mut Mutator, + size: usize, + align: usize, + offset: usize, + semantics: AllocationSemantics, +) -> Address { + mutator.alloc_slow_no_gc(size, align, offset, semantics) +} + /// Perform post-allocation actions, usually initializing object metadata. For many allocators none are /// required. For performance reasons, a VM should implement the post alloc fast-path on their side /// rather than just calling this function. 
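The `alloc_no_gc` and `alloc_slow_no_gc` entry points added in the hunks above return `Address::ZERO` instead of blocking for GC or reporting OOM, so the binding must check the result itself. Below is a minimal binding-side sketch (not part of this patch series) of what that check might look like; the helper name and the `mmtk::` import paths are assumptions about how a binding typically uses mmtk-core.

// Hypothetical binding-side helper; only `alloc_no_gc` itself is MMTk API.
// `alloc_no_gc` never triggers a GC and never calls Collection::out_of_memory;
// it returns Address::ZERO when the request cannot be satisfied.
use mmtk::util::Address;
use mmtk::vm::VMBinding;
use mmtk::{memory_manager, AllocationSemantics, Mutator};

fn try_alloc_without_gc<VM: VMBinding>(
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
) -> Option<Address> {
    let addr =
        memory_manager::alloc_no_gc(mutator, size, align, 0, AllocationSemantics::Default);
    if addr.is_zero() {
        // The caller decides what to do next: retry with `memory_manager::alloc`
        // at a point where a GC safepoint is legal, or report the failure in a
        // VM-specific way.
        None
    } else {
        Some(addr)
    }
}

A runtime that compiles allocation sites without stack maps could call such a helper first and only fall back to the GC-triggering `alloc` once execution reaches a safepoint.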
diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 8bf266f430..9979f37b0c 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -113,11 +113,29 @@ impl MutatorContext for Mutator { offset: usize, allocator: AllocationSemantics, ) -> Address { - unsafe { + let allocator = unsafe { self.allocators .get_allocator_mut(self.config.allocator_mapping[allocator]) - } - .alloc(size, align, offset) + }; + // The value should be default/unset at the beginning of an allocation request. + debug_assert!(!allocator.get_context().is_no_gc_on_fail()); + allocator.alloc(size, align, offset) + } + + fn alloc_no_gc( + &mut self, + size: usize, + align: usize, + offset: usize, + allocator: AllocationSemantics, + ) -> Address { + let allocator = unsafe { + self.allocators + .get_allocator_mut(self.config.allocator_mapping[allocator]) + }; + // The value should be default/unset at the beginning of an allocation request. + debug_assert!(!allocator.get_context().is_no_gc_on_fail()); + allocator.alloc_no_gc(size, align, offset) } fn alloc_slow( @@ -127,11 +145,29 @@ impl MutatorContext for Mutator { offset: usize, allocator: AllocationSemantics, ) -> Address { - unsafe { + let allocator = unsafe { self.allocators .get_allocator_mut(self.config.allocator_mapping[allocator]) - } - .alloc_slow(size, align, offset) + }; + // The value should be default/unset at the beginning of an allocation request. + debug_assert!(!allocator.get_context().is_no_gc_on_fail()); + allocator.alloc_slow(size, align, offset) + } + + fn alloc_slow_no_gc( + &mut self, + size: usize, + align: usize, + offset: usize, + allocator: AllocationSemantics, + ) -> Address { + let allocator = unsafe { + self.allocators + .get_allocator_mut(self.config.allocator_mapping[allocator]) + }; + // The value should be default/unset at the beginning of an allocation request. + debug_assert!(!allocator.get_context().is_no_gc_on_fail()); + allocator.alloc_slow_no_gc(size, align, offset) } // Note that this method is slow, and we expect VM bindings that care about performance to implement allocation fastpath sequence in their bindings. @@ -264,7 +300,7 @@ pub trait MutatorContext: Send + 'static { fn prepare(&mut self, tls: VMWorkerThread); /// Do the release work for this mutator. fn release(&mut self, tls: VMWorkerThread); - /// Allocate memory for an object. + /// Allocate memory for an object. This is a GC safepoint. GC will be triggered if the allocation fails. /// /// Arguments: /// * `size`: the number of bytes required for the object. @@ -278,7 +314,24 @@ pub trait MutatorContext: Send + 'static { offset: usize, allocator: AllocationSemantics, ) -> Address; - /// The slow path allocation. This is only useful when the binding + /// Allocate memory for an object. This is NOT a GC safepoint. If the allocation fails, + /// the function returns a zero value without triggering a GC. + /// + /// Arguments: + /// * `size`: the number of bytes required for the object. + /// * `align`: required alignment for the object. + /// * `offset`: offset associated with the alignment. The result plus the offset will be aligned to the given alignment. + /// * `allocator`: the allocation semantic used for this object. + fn alloc_no_gc( + &mut self, + size: usize, + align: usize, + offset: usize, + allocator: AllocationSemantics, + ) -> Address; + /// The slow path allocation. This is a GC safepoint. GC will be triggered if the allocation fails. 
+ /// + /// This is only useful when the binding /// implements the fast path allocation, and would like to explicitly /// call the slow path after the fast path allocation fails. fn alloc_slow( @@ -288,6 +341,19 @@ pub trait MutatorContext: Send + 'static { offset: usize, allocator: AllocationSemantics, ) -> Address; + /// The slow path allocation. This is NOT a GC safepoint. If the allocation fails, + /// the function returns a zero value without triggering a GC. + /// + /// This is only useful when the binding + /// implements the fast path allocation, and would like to explicitly + /// call the slow path after the fast path allocation fails. + fn alloc_slow_no_gc( + &mut self, + size: usize, + align: usize, + offset: usize, + allocator: AllocationSemantics, + ) -> Address; /// Perform post-allocation actions. For many allocators none are /// required. /// diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 6ebd9c255e..e50a87d96f 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -516,8 +516,8 @@ impl ImmixSpace { } /// Allocate a clean block. - pub fn get_clean_block(&self, tls: VMThread, copy: bool) -> Option { - let block_address = self.acquire(tls, Block::PAGES); + pub fn get_clean_block(&self, tls: VMThread, copy: bool, no_gc_on_fail: bool) -> Option { + let block_address = self.acquire(tls, Block::PAGES, no_gc_on_fail); if block_address.is_zero() { return None; } diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 8906a50044..e88ffcacec 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -303,8 +303,8 @@ impl LargeObjectSpace { } /// Allocate an object - pub fn allocate_pages(&self, tls: VMThread, pages: usize) -> Address { - self.acquire(tls, pages) + pub fn allocate_pages(&self, tls: VMThread, pages: usize, no_gc_on_fail: bool) -> Address { + self.acquire(tls, pages, no_gc_on_fail) } /// Test if the object's mark bit is the same as the given value. 
If it is not the same, diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index 05cf448001..b75e33d91d 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -135,7 +135,7 @@ impl Space for LockFreeImmortalSpace { data_pages + meta_pages } - fn acquire(&self, _tls: VMThread, pages: usize) -> Address { + fn acquire(&self, _tls: VMThread, pages: usize, no_gc_on_fail: bool) -> Address { trace!("LockFreeImmortalSpace::acquire"); let bytes = conversions::pages_to_bytes(pages); let start = self @@ -145,7 +145,11 @@ impl Space for LockFreeImmortalSpace { }) .expect("update cursor failed"); if start + bytes > self.limit { - panic!("OutOfMemory") + if no_gc_on_fail { + return Address::ZERO; + } else { + panic!("OutOfMemory"); + } } if self.slow_path_zeroing { crate::util::memory::zero(start, bytes); diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index eaecbe3741..2a3997a6d2 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -402,7 +402,13 @@ impl MarkSweepSpace { crate::util::metadata::vo_bit::bzero_vo_bit(block.start(), Block::BYTES); } - pub fn acquire_block(&self, tls: VMThread, size: usize, align: usize) -> BlockAcquireResult { + pub fn acquire_block( + &self, + tls: VMThread, + size: usize, + align: usize, + no_gc_on_fail: bool, + ) -> BlockAcquireResult { { let mut abandoned = self.abandoned.lock().unwrap(); let bin = mi_bin::(size, align); @@ -424,7 +430,7 @@ impl MarkSweepSpace { } } - let acquired = self.acquire(tls, Block::BYTES >> LOG_BYTES_IN_PAGE); + let acquired = self.acquire(tls, Block::BYTES >> LOG_BYTES_IN_PAGE, no_gc_on_fail); if acquired.is_zero() { BlockAcquireResult::Exhausted } else { diff --git a/src/policy/space.rs b/src/policy/space.rs index 8048009b20..849d8eb787 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -80,8 +80,12 @@ pub trait Space: 'static + SFT + Sync + Downcast { false } - fn acquire(&self, tls: VMThread, pages: usize) -> Address { - trace!("Space.acquire, tls={:?}", tls); + fn acquire(&self, tls: VMThread, pages: usize, no_gc_on_fail: bool) -> Address { + trace!( + "Space.acquire, tls={:?}, no_gc_on_fail={:?}", + tls, + no_gc_on_fail + ); debug_assert!( !self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE), @@ -95,7 +99,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { VM::VMActivePlan::is_mutator(tls) && VM::VMCollection::is_collection_enabled(); // Is a GC allowed here? If we should poll but are not allowed to poll, we will panic. // initialize_collection() has to be called so we know GC is initialized. - let allow_gc = should_poll && self.common().global_state.is_initialized(); + let allow_gc = should_poll && self.common().global_state.is_initialized() && !no_gc_on_fail; trace!("Reserving pages"); let pr = self.get_page_resource(); @@ -104,17 +108,25 @@ pub trait Space: 'static + SFT + Sync + Downcast { trace!("Polling .."); if should_poll && self.get_gc_trigger().poll(false, Some(self.as_space())) { + // Clear the request + pr.clear_request(pages_reserved); + + // If we do not want GC on fail, just return zero. + if no_gc_on_fail { + return Address::ZERO; + } + + // Otherwise do GC here debug!("Collection required"); assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?)."); - - // Clear the request, and inform GC trigger about the pending allocation. 
- pr.clear_request(pages_reserved); + // Inform GC trigger about the pending allocation. self.get_gc_trigger() .policy .on_pending_allocation(pages_reserved); - VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator - unsafe { Address::zero() } + + // Return zero -- the caller will handle re-allocation + Address::ZERO } else { debug!("Collection not required"); @@ -211,6 +223,14 @@ pub trait Space: 'static + SFT + Sync + Downcast { Err(_) => { drop(lock); // drop the lock immediately + // Clear the request + pr.clear_request(pages_reserved); + + // If we do not want GC on fail, just return zero. + if no_gc_on_fail { + return Address::ZERO; + } + // We thought we had memory to allocate, but somehow failed the allocation. Will force a GC. assert!( allow_gc, @@ -220,14 +240,13 @@ pub trait Space: 'static + SFT + Sync + Downcast { let gc_performed = self.get_gc_trigger().poll(true, Some(self.as_space())); debug_assert!(gc_performed, "GC not performed when forced."); - // Clear the request, and inform GC trigger about the pending allocation. - pr.clear_request(pages_reserved); + // Inform GC trigger about the pending allocation. self.get_gc_trigger() .policy .on_pending_allocation(pages_reserved); VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We asserted that this is mutator. - unsafe { Address::zero() } + Address::ZERO } } } diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index 60199c0fde..86637ab478 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -136,7 +136,7 @@ impl Space for VMSpace { unreachable!() } - fn acquire(&self, _tls: VMThread, _pages: usize) -> Address { + fn acquire(&self, _tls: VMThread, _pages: usize, _no_gc_on_fail: bool) -> Address { unreachable!() } diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index a60d26935c..70e3a70c4f 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -6,6 +6,7 @@ use crate::util::heap::gc_trigger::GCTrigger; use crate::util::options::Options; use crate::MMTK; +use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; use std::sync::Arc; @@ -130,8 +131,31 @@ pub fn get_maximum_aligned_size_inner( } } +#[cfg(debug_assertions)] +pub(crate) fn asset_allocation_args(size: usize, align: usize, offset: usize) { + // MMTk has assumptions about minimal object size. + // We need to make sure that all allocations comply with the min object size. + // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min + // object size (the VM does not need to know this). However, for the VM bindings we support at the moment, + // their object sizes are all larger than MMTk's min object size, so we simply put an assertion here. + // If you plan to use MMTk with a VM with its object size smaller than MMTk's min object size, you should + // meet the min object size in the fastpath. + debug_assert!(size >= MIN_OBJECT_SIZE); + // Assert alignment + debug_assert!(align >= VM::MIN_ALIGNMENT); + debug_assert!(align <= VM::MAX_ALIGNMENT); + // Assert offset + debug_assert!(VM::USE_ALLOCATION_OFFSET || offset == 0); +} + /// The context an allocator needs to access in order to perform allocation. pub struct AllocatorContext { + /// Whether we should trigger a GC when the current alloccation fails. + /// The default value is `false`. This value is only set to `true` when + /// the binding requests a 'no-gc' alloc, and will be set back to `false` + /// when the allocation is done (successfully or not). 
+ /// As allocators are thread-local data structures, this should be fine. + pub no_gc_on_fail: AtomicBool, pub state: Arc, pub options: Arc, pub gc_trigger: Arc>, @@ -142,6 +166,7 @@ pub struct AllocatorContext { impl AllocatorContext { pub fn new(mmtk: &MMTK) -> Self { Self { + no_gc_on_fail: AtomicBool::new(false), state: mmtk.state.clone(), options: mmtk.options.clone(), gc_trigger: mmtk.gc_trigger.clone(), @@ -149,6 +174,15 @@ impl AllocatorContext { analysis_manager: mmtk.analysis_manager.clone(), } } + + pub fn set_no_gc_on_fail(&self, v: bool) { + trace!("set_no_gc_on_fail: {}", v); + self.no_gc_on_fail.store(v, Ordering::Relaxed); + } + + pub fn is_no_gc_on_fail(&self) -> bool { + self.no_gc_on_fail.load(Ordering::Relaxed) + } } /// A trait which implements allocation routines. Every allocator needs to implements this trait. @@ -180,9 +214,13 @@ pub trait Allocator: Downcast { /// If an allocator supports thread local allocations, then the allocation will be serviced /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`](Allocator::alloc_slow). /// + /// If the heap is full, we trigger a GC and attempt to free up + /// more memory, and re-attempt the allocation. + /// /// Note that in the case where the VM is out of memory, we invoke /// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back to /// it. We have no assumptions on whether the VM will continue executing or abort immediately. + /// If the VM continues execution, the function will return a null address. /// /// An allocator needs to make sure the object reference for the returned address is in the same /// chunk as the returned address (so the side metadata and the SFT for an object reference is valid). @@ -194,6 +232,20 @@ pub trait Allocator: Downcast { /// * `offset` the required offset in bytes. fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address; + /// An allocation attempt. Mostly the same as [`Allocator::alloc`], except that MMTk will + /// not trigger a GC on allocation failure. + /// + /// Arguments: + /// * `size`: the allocation size in bytes. + /// * `align`: the required alignment in bytes. + /// * `offset` the required offset in bytes. + fn alloc_no_gc(&mut self, size: usize, align: usize, offset: usize) -> Address { + self.get_context().set_no_gc_on_fail(true); + let ret = self.alloc(size, align, offset); + self.get_context().set_no_gc_on_fail(false); + ret + } + /// Slowpath allocation attempt. This function is explicitly not inlined for performance /// considerations. /// @@ -206,6 +258,24 @@ pub trait Allocator: Downcast { self.alloc_slow_inline(size, align, offset) } + /// Slowpath allocation attempt. Mostly the same as [`Allocator::alloc_slow`], except that MMTk will + /// not trigger a GC on allocation failure. + /// + /// This function is not used internally. It is mostly for the bindings. + /// [`Allocator::alloc_no_gc`] still calls the normal [`Allocator::alloc_slow`]. + /// + /// Arguments: + /// * `size`: the allocation size in bytes. + /// * `align`: the required alignment in bytes. + /// * `offset` the required offset in bytes. + fn alloc_slow_no_gc(&mut self, size: usize, align: usize, offset: usize) -> Address { + // The function is not used internally. We won't set no_gc_on_fail redundantly. + self.get_context().set_no_gc_on_fail(true); + let ret = self.alloc_slow(size, align, offset); + self.get_context().set_no_gc_on_fail(false); + ret + } + /// Slowpath allocation attempt. 
This function executes the actual slowpath allocation. A /// slowpath allocation in MMTk attempts to allocate the object using the per-allocator /// definition of [`alloc_slow_once`](Allocator::alloc_slow_once). This function also accounts for increasing the @@ -256,6 +326,10 @@ pub trait Allocator: Downcast { return result; } + if result.is_zero() && self.get_context().is_no_gc_on_fail() { + return result; + } + if !result.is_zero() { // Report allocation success to assist OutOfMemory handling. if !self diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 76cc628c89..a0bfa1ee72 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -199,7 +199,11 @@ impl BumpAllocator { } let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK); - let acquired_start = self.space.acquire(self.tls, bytes_to_pages_up(block_size)); + let acquired_start = self.space.acquire( + self.tls, + bytes_to_pages_up(block_size), + self.get_context().is_no_gc_on_fail(), + ); if acquired_start.is_zero() { trace!("Failed to acquire a new block"); acquired_start diff --git a/src/util/alloc/free_list_allocator.rs b/src/util/alloc/free_list_allocator.rs index 28958a1759..72c6955dd9 100644 --- a/src/util/alloc/free_list_allocator.rs +++ b/src/util/alloc/free_list_allocator.rs @@ -296,7 +296,7 @@ impl FreeListAllocator { ) -> Option { let bin = mi_bin::(size, align); loop { - match self.space.acquire_block(self.tls, size, align) { + match self.space.acquire_block(self.tls, size, align, self.get_context().is_no_gc_on_fail()) { crate::policy::marksweepspace::native_ms::BlockAcquireResult::Exhausted => { debug!("Acquire global block: None"); // GC diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index abff840937..51722aac8a 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -289,7 +289,11 @@ impl ImmixAllocator { // Get a clean block from ImmixSpace. fn acquire_clean_block(&mut self, size: usize, align: usize, offset: usize) -> Address { - match self.immix_space().get_clean_block(self.tls, self.copy) { + match self.immix_space().get_clean_block( + self.tls, + self.copy, + self.get_context().is_no_gc_on_fail(), + ) { None => Address::ZERO, Some(block) => { trace!( diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs index baf0738274..b0b720eca7 100644 --- a/src/util/alloc/large_object_allocator.rs +++ b/src/util/alloc/large_object_allocator.rs @@ -55,7 +55,8 @@ impl Allocator for LargeObjectAllocator { let maxbytes = allocator::get_maximum_aligned_size::(size, align); let pages = crate::util::conversions::bytes_to_pages_up(maxbytes); - self.space.allocate_pages(self.tls, pages) + self.space + .allocate_pages(self.tls, pages, self.get_context().is_no_gc_on_fail()) } } diff --git a/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs b/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs new file mode 100644 index 0000000000..6138717097 --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs @@ -0,0 +1,35 @@ +use super::mock_test_prelude::*; + +use crate::AllocationSemantics; + +/// This test will do alloc_no_gc in a loop, and evetually fill up the heap. +/// As alloc_no_gc will not trigger a GC, we expect to see a return value of zero, and no GC is triggered. 
+#[test] +pub fn allocate_no_gc() { + // 1MB heap + with_mockvm( + default_setup, + || { + const MB: usize = 1024 * 1024; + let mut fixture = MutatorFixture::create_with_heapsize(MB); + + // Attempt allocation: allocate 1024 bytes. We should fill up the heap by 10 allocations or fewer (some plans reserves more memory, such as semispace and generational GCs) + // Run a few more times to test if we set/unset no_gc_on_fail properly. + for _ in 0..20 { + let addr = memory_manager::alloc_no_gc( + &mut fixture.mutator, + 1024, + 8, + 0, + AllocationSemantics::Default, + ); + if addr.is_zero() { + read_mockvm(|mock| { + assert!(!mock.block_for_gc.is_called()); + }); + } + } + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs index 114d8f1859..cd6ab1ecbe 100644 --- a/src/vm/tests/mock_tests/mod.rs +++ b/src/vm/tests/mock_tests/mod.rs @@ -24,6 +24,7 @@ pub(crate) mod mock_test_prelude { } mod mock_test_allocate_align_offset; +mod mock_test_allocate_no_gc; mod mock_test_allocate_with_disable_collection; mod mock_test_allocate_with_initialize_collection; mod mock_test_allocate_with_re_enable_collection; From 6b1ac05f6faced0045ebef4990914cee5b507fdb Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 30 Oct 2024 23:21:52 +0000 Subject: [PATCH 2/8] Change comments according to the reviews --- src/memory_manager.rs | 66 +++++++++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 27 deletions(-) diff --git a/src/memory_manager.rs b/src/memory_manager.rs index f6a0e3b1b8..ef6c7cbfb4 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -140,9 +140,25 @@ pub fn flush_mutator(mutator: &mut Mutator) { mutator.flush() } -/// Allocate memory for an object. This function is a GC safepoint. If MMTk fails -/// to allocate memory, it will attempt a GC to free up some memory and retry -/// the allocation. +/// Allocate memory for an object. +/// +/// When the allocation is successful, it returns the starting address of the new object. The +/// memory range for the new object is `size` bytes starting from the returned address, and +/// `RETURNED_ADDRESS + offset` is guaranteed to be aligned to the `align` parameter. The returned +/// address of a successful allocation will never be zero. +/// +/// If MMTk fails to allocate memory, it will attempt a GC to free up some memory and retry the +/// allocation. After triggering GC, it will call [`crate::vm::Collection::block_for_gc`] to suspend +/// the current thread that is allocating. Callers of this function must be aware of this behavior. +/// For example, JIT compilers that support +/// precise stack scanning need to make the call site a GC-safe point by generating stack maps. See +/// [`alloc_no_gc`] if it is undesirable to trigger GC at this allocation site. +/// +/// If MMTk has attempted at least one GC, and still cannot free up enough memory, it will call +/// [`crate::vm::Collection::out_of_memory`] to inform the binding. The VM binding +/// can implement that method to handle the out-of-memory event in a VM-specific way, including but +/// not limited to throwing exceptions or errors. If [`crate::vm::Collection::out_of_memory`] returns +/// normally without panicking or throwing exceptions, this function will return zero. /// /// This function in most cases returns a valid memory address. /// This function may return a zero address iif 1. MMTk attempts at least one GC, @@ -150,13 +166,8 @@ pub fn flush_mutator(mutator: &mut Mutator) { /// to the binding, and 4. 
the binding returns from [`crate::vm::Collection::out_of_memory`] /// instead of throwing an exception/error. /// -/// * Note -/// -/// For performance reasons, a VM should -/// implement the allocation fast-path on their side rather than just calling this function. -/// -/// If the VM provides a non-zero `offset` parameter, then the returned address will be -/// such that the `RETURNED_ADDRESS + offset` is aligned to the `align` parameter. +/// For performance reasons, a VM should implement the allocation fast-path on their side rather +/// than just calling this function. /// /// Arguments: /// * `mutator`: The mutator to perform this allocation request. @@ -177,15 +188,15 @@ pub fn alloc( mutator.alloc(size, align, offset, semantics) } -/// Allocate memory for an object. This function is NOT a GC safepoint. If MMTk fails -/// to allocate memory, it will not attempt a GC, nor call [`crate::vm::Collection::out_of_memory`], -/// MMTk instead return a zero address. +/// Allocate memory for an object. /// -/// Generally [`alloc`] is preferred over this function. This function should only be used -/// when the binding does not want GCs to happen during the particular allocationq requests, -/// and is willing to deal with the allocation failure. +/// The semantics of this function is the same as [`alloc`], except that when MMTk fails to allocate +/// memory, it will simply return zero. This function is guaranteed not to trigger GC and not to +/// call [`crate::vm::Collection::block_for_gc`] or [`crate::vm::Collection::out_of_memory`]. /// -/// Notes on [`alloc`] also apply to this function. +/// Generally [`alloc`] is preferred over this function. This function should only be used +/// when the binding does not want GCs to happen at certain allocation sites (for example, places +/// where stack maps cannot be generated), and is able to handle allocation failure if that happens. /// /// Arguments: /// * `mutator`: The mutator to perform this allocation request. @@ -206,13 +217,11 @@ pub fn alloc_no_gc( mutator.alloc_no_gc(size, align, offset, semantics) } -/// Invoke the allocation slow path. This function is a GC safepoint. If MMTk fails -/// to allocate memory, it will attempt a GC to free up some memory and retry -/// the allocation. See [`alloc`] for more details. -/// -/// * Notes +/// Invoke the allocation slow path of [`alloc`]. +/// Like [`alloc`], this function may trigger GC and call [`crate::vm::Collection::block_for_gc`] or +/// [`crate::vm::Collection::out_of_memory`]. The caller needs to be aware of that. /// -/// This is only intended for use when a binding implements the fastpath on +/// *Notes*: This is only intended for use when a binding implements the fastpath on /// the binding side. When the binding handles fast path allocation and the fast path fails, it can use this /// method for slow path allocation. Calling before exhausting fast path allocaiton buffer will lead to bad /// performance. @@ -233,11 +242,14 @@ pub fn alloc_slow( mutator.alloc_slow(size, align, offset, semantics) } -/// Invoke the allocation slow path. This function is NOT a GC safepoint. If MMTk fails -/// to allocate memory, it will not attempt a GC, nor call [`crate::vm::Collection::out_of_memory`], -/// MMTk instead return a zero address. See [`alloc_no_gc`] for more details. +/// Invoke the allocation slow path of [`alloc_no_gc`]. 
+/// +/// Like [`alloc_no_gc`], this function is guaranteed not to trigger GC and not to call +/// [`crate::vm::Collection::block_for_gc`] or [`crate::vm::Collection::out_of_memory`]. It returns zero on +/// allocation failure. /// -/// Notes on [`alloc_slow_no_gc`] also apply to this function. +/// Like [`alloc_slow`], this function is also only intended for use when a binding implements the +/// fastpath on the binding side. /// /// Arguments: /// * `mutator`: The mutator to perform this allocation request. From c120f115412d6f4bceae03613693aa3c2d463366 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 30 Oct 2024 23:31:24 +0000 Subject: [PATCH 3/8] Chagne the test case to make sure it triggers the failure case --- .../tests/mock_tests/mock_test_allocate_no_gc.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs b/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs index 6138717097..b6bd070567 100644 --- a/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs +++ b/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs @@ -13,22 +13,30 @@ pub fn allocate_no_gc() { const MB: usize = 1024 * 1024; let mut fixture = MutatorFixture::create_with_heapsize(MB); - // Attempt allocation: allocate 1024 bytes. We should fill up the heap by 10 allocations or fewer (some plans reserves more memory, such as semispace and generational GCs) + let mut last_result = crate::util::Address::MAX; + + // Attempt allocation: allocate 1024 bytes. We should fill up the heap by 1024 allocations or fewer (some plans reserves more memory, such as semispace and generational GCs) // Run a few more times to test if we set/unset no_gc_on_fail properly. - for _ in 0..20 { - let addr = memory_manager::alloc_no_gc( + for _ in 0..1100 { + last_result = memory_manager::alloc_no_gc( &mut fixture.mutator, 1024, 8, 0, AllocationSemantics::Default, ); - if addr.is_zero() { + if last_result.is_zero() { read_mockvm(|mock| { assert!(!mock.block_for_gc.is_called()); }); + read_mockvm(|mock| { + assert!(!mock.out_of_memory.is_called()); + }); } } + + // The allocation should consume all the heap, and the last result should be zero (failure). + assert!(last_result.is_zero()); }, no_cleanup, ) From 03dfc5aafa08faf8f3ef14118e69d1757afc5004 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 31 Oct 2024 00:30:18 +0000 Subject: [PATCH 4/8] alloc_no_gc will not call out_of_memory when allocating an erroneously large object. Minor fixes for comments --- src/memory_manager.rs | 10 ++---- src/plan/mutator_context.rs | 10 +++--- src/policy/space.rs | 14 ++++---- src/util/alloc/allocator.rs | 4 +-- src/util/alloc/bumpallocator.rs | 5 ++- src/util/alloc/large_object_allocator.rs | 5 ++- ...mock_test_allocate_no_gc_oom_on_acquire.rs | 35 +++++++++++++++++++ ....rs => mock_test_allocate_no_gc_simple.rs} | 2 +- src/vm/tests/mock_tests/mod.rs | 3 +- 9 files changed, 62 insertions(+), 26 deletions(-) create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs rename src/vm/tests/mock_tests/{mock_test_allocate_no_gc.rs => mock_test_allocate_no_gc_simple.rs} (97%) diff --git a/src/memory_manager.rs b/src/memory_manager.rs index ef6c7cbfb4..db107d6789 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -160,12 +160,6 @@ pub fn flush_mutator(mutator: &mut Mutator) { /// not limited to throwing exceptions or errors. 
If [`crate::vm::Collection::out_of_memory`] returns /// normally without panicking or throwing exceptions, this function will return zero. /// -/// This function in most cases returns a valid memory address. -/// This function may return a zero address iif 1. MMTk attempts at least one GC, -/// 2. the GC does not free up enough memory, 3. MMTk calls [`crate::vm::Collection::out_of_memory`] -/// to the binding, and 4. the binding returns from [`crate::vm::Collection::out_of_memory`] -/// instead of throwing an exception/error. -/// /// For performance reasons, a VM should implement the allocation fast-path on their side rather /// than just calling this function. /// @@ -183,7 +177,7 @@ pub fn alloc( semantics: AllocationSemantics, ) -> Address { #[cfg(debug_assertions)] - crate::util::alloc::allocator::asset_allocation_args::(size, align, offset); + crate::util::alloc::allocator::assert_allocation_args::(size, align, offset); mutator.alloc(size, align, offset, semantics) } @@ -212,7 +206,7 @@ pub fn alloc_no_gc( semantics: AllocationSemantics, ) -> Address { #[cfg(debug_assertions)] - crate::util::alloc::allocator::asset_allocation_args::(size, align, offset); + crate::util::alloc::allocator::assert_allocation_args::(size, align, offset); mutator.alloc_no_gc(size, align, offset, semantics) } diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 9979f37b0c..295d8af91f 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -300,7 +300,7 @@ pub trait MutatorContext: Send + 'static { fn prepare(&mut self, tls: VMWorkerThread); /// Do the release work for this mutator. fn release(&mut self, tls: VMWorkerThread); - /// Allocate memory for an object. This is a GC safepoint. GC will be triggered if the allocation fails. + /// Allocate memory for an object. This function will trigger a GC on failed allocation. /// /// Arguments: /// * `size`: the number of bytes required for the object. @@ -314,8 +314,7 @@ pub trait MutatorContext: Send + 'static { offset: usize, allocator: AllocationSemantics, ) -> Address; - /// Allocate memory for an object. This is NOT a GC safepoint. If the allocation fails, - /// the function returns a zero value without triggering a GC. + /// Allocate memory for an object. This function will not trigger a GC on failed allocation. /// /// Arguments: /// * `size`: the number of bytes required for the object. @@ -329,7 +328,7 @@ pub trait MutatorContext: Send + 'static { offset: usize, allocator: AllocationSemantics, ) -> Address; - /// The slow path allocation. This is a GC safepoint. GC will be triggered if the allocation fails. + /// The slow path allocation for [`MutatorContext::alloc`]. This function will trigger a GC on failed allocation. /// /// This is only useful when the binding /// implements the fast path allocation, and would like to explicitly @@ -341,8 +340,7 @@ pub trait MutatorContext: Send + 'static { offset: usize, allocator: AllocationSemantics, ) -> Address; - /// The slow path allocation. This is NOT a GC safepoint. If the allocation fails, - /// the function returns a zero value without triggering a GC. + /// The slow path allocation for [`MutatorContext::alloc_no_gc`]. This function will not trigger a GC on failed allocation. 
/// /// This is only useful when the binding /// implements the fast path allocation, and would like to explicitly diff --git a/src/policy/space.rs b/src/policy/space.rs index 849d8eb787..1f769ad186 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -67,14 +67,16 @@ pub trait Space: 'static + SFT + Sync + Downcast { /// avoid arithmatic overflow. If we have to do computation in the allocation fastpath and /// overflow happens there, there is nothing we can do about it. /// Return a boolean to indicate if we will be out of memory, determined by the check. - fn will_oom_on_acquire(&self, tls: VMThread, size: usize) -> bool { + fn will_oom_on_acquire(&self, tls: VMThread, size: usize, allow_oom_call: bool) -> bool { let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages(); let requested_pages = size >> LOG_BYTES_IN_PAGE; if requested_pages > max_pages { - VM::VMCollection::out_of_memory( - tls, - crate::util::alloc::AllocationError::HeapOutOfMemory, - ); + if allow_oom_call { + VM::VMCollection::out_of_memory( + tls, + crate::util::alloc::AllocationError::HeapOutOfMemory, + ); + } return true; } false @@ -88,7 +90,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { ); debug_assert!( - !self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE), + !self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE, !no_gc_on_fail), "The requested pages is larger than the max heap size. Is will_go_oom_on_acquire used before acquring memory?" ); diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index 70e3a70c4f..b0a95a187e 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -132,7 +132,7 @@ pub fn get_maximum_aligned_size_inner( } #[cfg(debug_assertions)] -pub(crate) fn asset_allocation_args(size: usize, align: usize, offset: usize) { +pub(crate) fn assert_allocation_args(size: usize, align: usize, offset: usize) { // MMTk has assumptions about minimal object size. // We need to make sure that all allocations comply with the min object size. // Ideally, we check the allocation size, and if it is smaller, we transparently allocate the min @@ -150,7 +150,7 @@ pub(crate) fn asset_allocation_args(size: usize, align: usize, of /// The context an allocator needs to access in order to perform allocation. pub struct AllocatorContext { - /// Whether we should trigger a GC when the current alloccation fails. + /// Whether we should avoid trigger a GC when the current allocation fails. /// The default value is `false`. This value is only set to `true` when /// the binding requests a 'no-gc' alloc, and will be set back to `false` /// when the allocation is done (successfully or not). 
diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index a0bfa1ee72..cc73917dee 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -194,7 +194,10 @@ impl BumpAllocator { offset: usize, stress_test: bool, ) -> Address { - if self.space.will_oom_on_acquire(self.tls, size) { + if self + .space + .will_oom_on_acquire(self.tls, size, !self.get_context().is_no_gc_on_fail()) + { return Address::ZERO; } diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs index b0b720eca7..b51ec00330 100644 --- a/src/util/alloc/large_object_allocator.rs +++ b/src/util/alloc/large_object_allocator.rs @@ -49,7 +49,10 @@ impl Allocator for LargeObjectAllocator { } fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address { - if self.space.will_oom_on_acquire(self.tls, size) { + if self + .space + .will_oom_on_acquire(self.tls, size, !self.get_context().is_no_gc_on_fail()) + { return Address::ZERO; } diff --git a/src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs b/src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs new file mode 100644 index 0000000000..553636e3ee --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs @@ -0,0 +1,35 @@ +use super::mock_test_prelude::*; + +use crate::AllocationSemantics; + +/// This test will allocate an object that is larger than the heap size. The call will fail. +#[test] +pub fn allocate_no_gc_oom_on_acquire() { + // 1MB heap + with_mockvm( + default_setup, + || { + const KB: usize = 1024; + let mut fixture = MutatorFixture::create_with_heapsize(KB); + + // Attempt to allocate an object that is larger than the heap size. + let addr = memory_manager::alloc_no_gc( + &mut fixture.mutator, + 1024 * 10, + 8, + 0, + AllocationSemantics::Default, + ); + // We should get zero. + assert!(addr.is_zero()); + // block_for_gc and out_of_memory won't be called. + read_mockvm(|mock| { + assert!(!mock.block_for_gc.is_called()); + }); + read_mockvm(|mock| { + assert!(!mock.out_of_memory.is_called()); + }); + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs b/src/vm/tests/mock_tests/mock_test_allocate_no_gc_simple.rs similarity index 97% rename from src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs rename to src/vm/tests/mock_tests/mock_test_allocate_no_gc_simple.rs index b6bd070567..8cb9c93c94 100644 --- a/src/vm/tests/mock_tests/mock_test_allocate_no_gc.rs +++ b/src/vm/tests/mock_tests/mock_test_allocate_no_gc_simple.rs @@ -5,7 +5,7 @@ use crate::AllocationSemantics; /// This test will do alloc_no_gc in a loop, and evetually fill up the heap. /// As alloc_no_gc will not trigger a GC, we expect to see a return value of zero, and no GC is triggered. 
#[test] -pub fn allocate_no_gc() { +pub fn allocate_no_gc_simple() { // 1MB heap with_mockvm( default_setup, diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs index cd6ab1ecbe..71f60f2be9 100644 --- a/src/vm/tests/mock_tests/mod.rs +++ b/src/vm/tests/mock_tests/mod.rs @@ -24,7 +24,8 @@ pub(crate) mod mock_test_prelude { } mod mock_test_allocate_align_offset; -mod mock_test_allocate_no_gc; +mod mock_test_allocate_no_gc_oom_on_acquire; +mod mock_test_allocate_no_gc_simple; mod mock_test_allocate_with_disable_collection; mod mock_test_allocate_with_initialize_collection; mod mock_test_allocate_with_re_enable_collection; From 0971cda6710bb4d689c97015875e66b644c8c017 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 31 Oct 2024 13:49:51 +0800 Subject: [PATCH 5/8] Split will_oom_on_acquire --- src/policy/space.rs | 13 ++++++++++--- src/util/alloc/bumpallocator.rs | 2 +- src/util/alloc/large_object_allocator.rs | 9 +++++---- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/policy/space.rs b/src/policy/space.rs index 1f769ad186..7ed78322b5 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -67,10 +67,17 @@ pub trait Space: 'static + SFT + Sync + Downcast { /// avoid arithmatic overflow. If we have to do computation in the allocation fastpath and /// overflow happens there, there is nothing we can do about it. /// Return a boolean to indicate if we will be out of memory, determined by the check. - fn will_oom_on_acquire(&self, tls: VMThread, size: usize, allow_oom_call: bool) -> bool { + fn will_oom_on_acquire(&self, size: usize) -> bool { let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages(); let requested_pages = size >> LOG_BYTES_IN_PAGE; - if requested_pages > max_pages { + requested_pages > max_pages + } + + /// Check if the requested `size` is an obvious out-of-memory case using + /// [`Self::will_oom_on_acquire`] and, if it is, call `Collection::out_of_memory`. Return the + /// result of `will_oom_on_acquire`. + fn handle_obvious_oom_request(&self, tls: VMThread, size: usize, allow_oom_call: bool) -> bool { + if self.will_oom_on_acquire(size) { if allow_oom_call { VM::VMCollection::out_of_memory( tls, @@ -90,7 +97,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { ); debug_assert!( - !self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE, !no_gc_on_fail), + !self.will_oom_on_acquire(pages << LOG_BYTES_IN_PAGE), "The requested pages is larger than the max heap size. Is will_go_oom_on_acquire used before acquring memory?" 
); diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index cc73917dee..30d8f868fa 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -196,7 +196,7 @@ impl BumpAllocator { ) -> Address { if self .space - .will_oom_on_acquire(self.tls, size, !self.get_context().is_no_gc_on_fail()) + .handle_obvious_oom_request(self.tls, size, !self.get_context().is_no_gc_on_fail()) { return Address::ZERO; } diff --git a/src/util/alloc/large_object_allocator.rs b/src/util/alloc/large_object_allocator.rs index b51ec00330..9e924553e2 100644 --- a/src/util/alloc/large_object_allocator.rs +++ b/src/util/alloc/large_object_allocator.rs @@ -49,10 +49,11 @@ impl Allocator for LargeObjectAllocator { } fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address { - if self - .space - .will_oom_on_acquire(self.tls, size, !self.get_context().is_no_gc_on_fail()) - { + if self.space.handle_obvious_oom_request( + self.tls, + size, + !self.get_context().is_no_gc_on_fail(), + ) { return Address::ZERO; } From 07a01c7839bfd0c9c5f5a8796b6ff31aa9ff8250 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 31 Oct 2024 06:54:10 +0000 Subject: [PATCH 6/8] Cargo fmt --- src/util/alloc/bumpallocator.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/util/alloc/bumpallocator.rs b/src/util/alloc/bumpallocator.rs index 30d8f868fa..b869b5e607 100644 --- a/src/util/alloc/bumpallocator.rs +++ b/src/util/alloc/bumpallocator.rs @@ -194,10 +194,11 @@ impl BumpAllocator { offset: usize, stress_test: bool, ) -> Address { - if self - .space - .handle_obvious_oom_request(self.tls, size, !self.get_context().is_no_gc_on_fail()) - { + if self.space.handle_obvious_oom_request( + self.tls, + size, + !self.get_context().is_no_gc_on_fail(), + ) { return Address::ZERO; } From 875968add133257185f1f0e6308badc991718ecb Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 1 Nov 2024 15:01:30 +1300 Subject: [PATCH 7/8] Update src/memory_manager.rs Co-authored-by: Kunshan Wang --- src/memory_manager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/memory_manager.rs b/src/memory_manager.rs index db107d6789..cb0fc79a41 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -149,9 +149,9 @@ pub fn flush_mutator(mutator: &mut Mutator) { /// /// If MMTk fails to allocate memory, it will attempt a GC to free up some memory and retry the /// allocation. After triggering GC, it will call [`crate::vm::Collection::block_for_gc`] to suspend -/// the current thread that is allocating. Callers of this function must be aware of this behavior. +/// the current thread that is allocating. Callers of `alloc` must be aware of this behavior. /// For example, JIT compilers that support -/// precise stack scanning need to make the call site a GC-safe point by generating stack maps. See +/// precise stack scanning need to make the call site of `alloc` a GC-safe point by generating stack maps. See /// [`alloc_no_gc`] if it is undesirable to trigger GC at this allocation site. 
/// /// If MMTk has attempted at least one GC, and still cannot free up enough memory, it will call From 03515ce1eb53343c79e9abafdd872243e79c194b Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 6 Nov 2024 16:51:30 +1300 Subject: [PATCH 8/8] Update src/policy/space.rs Co-authored-by: Ben Gamari --- src/policy/space.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/policy/space.rs b/src/policy/space.rs index 7ed78322b5..f205928a0c 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -134,7 +134,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { .on_pending_allocation(pages_reserved); VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator - // Return zero -- the caller will handle re-allocation + // Return zero -- the caller will handle re-attempting allocation Address::ZERO } else { debug!("Collection not required");
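For completeness, the series also adds `alloc_slow_no_gc`, intended for bindings that implement their own allocation fastpath. The sketch below is an illustration, not code from this patch series: the `Tlab` struct and its `cursor`/`limit` fields are made-up binding-side state, and the only MMTk calls are the ones introduced or documented above.

// Hypothetical binding fastpath. Real code would also handle alignment and
// large-object cutoffs in the fast path; this only shows the slowpath handoff.
use mmtk::util::Address;
use mmtk::vm::VMBinding;
use mmtk::{memory_manager, AllocationSemantics, Mutator};

// Assumed binding-side thread-local allocation buffer.
struct Tlab {
    cursor: Address,
    limit: Address,
}

fn fastpath_alloc<VM: VMBinding>(
    tlab: &mut Tlab,
    mutator: &mut Mutator<VM>,
    size: usize,
    align: usize,
) -> Address {
    let result = tlab.cursor;
    let new_cursor = result + size;
    if new_cursor <= tlab.limit {
        // Common case: bump the thread-local cursor, no MMTk call at all.
        tlab.cursor = new_cursor;
        result
    } else {
        // Buffer exhausted at a site with no stack map: use the slowpath that is
        // guaranteed not to block for GC. It returns Address::ZERO on failure,
        // and the runtime can retry with `memory_manager::alloc` at a safepoint.
        memory_manager::alloc_slow_no_gc(mutator, size, align, 0, AllocationSemantics::Default)
    }
}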