From 12edfca2e75a4dacc03b2c5c49a3d8427eadf1bb Mon Sep 17 00:00:00 2001 From: "Gang Zhao (Hermes)" Date: Thu, 7 Nov 2024 17:13:56 -0800 Subject: [PATCH] Move memory layout and common methods of AlignedHeapSegment to AlignedHeapSegmentBase (#1510) Summary: The large heap segment type should have the same storage layout as current AlignedHeapSegment, and share a few common methods. Abstract these to a base class, and make both AlignedHeapSegment and JumboHeapSegment inherit from the base type. Differential Revision: D61675022 --- include/hermes/VM/AlignedHeapSegment.h | 392 +++++++++++---------- lib/VM/gcs/AlignedHeapSegment.cpp | 32 +- lib/VM/gcs/HadesGC.cpp | 48 +-- unittests/VMRuntime/MarkBitArrayNCTest.cpp | 37 +- 4 files changed, 263 insertions(+), 246 deletions(-) diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h index 4a7d96b197e..0bd40713554 100644 --- a/include/hermes/VM/AlignedHeapSegment.h +++ b/include/hermes/VM/AlignedHeapSegment.h @@ -36,9 +36,9 @@ class StorageProvider; // TODO (T25527350): Debug Dump // TODO (T25527350): Heap Moving -/// An \c AlignedHeapSegment is a contiguous chunk of memory aligned to its own -/// storage size (which is a fixed power of two number of bytes). The storage -/// is further split up according to the diagram below: +/// An \c AlignedHeapSegmentBase manages a contiguous chunk of memory aligned to +/// kSegmentUnitSize. The storage is further split up according to the diagram +/// below: /// /// +----------------------------------------+ /// | (1) Card Table | @@ -52,83 +52,23 @@ class StorageProvider; /// | (End) | /// +----------------------------------------+ /// -/// The tables in (1), and (2) cover the contiguous allocation space (3) -/// into which GCCells are bump allocated. -class AlignedHeapSegment { +/// The tables in (1), and (2) cover the contiguous allocation space (3) into +/// which GCCells are bump allocated. They have fixed size computed from +/// kSegmentUnitSize. For segments with larger size (which must be multiples of +/// kSegmentUnitSize), card table allocates its internal arrays separately +/// instead. Any segment size smaller than kSegmentUnitSize is not supported. +class AlignedHeapSegmentBase { public: - /// @name Constants and utility functions for the aligned storage of \c - /// AlignedHeapSegment. - /// - /// @{ - /// The size and the alignment of the storage, in bytes. - static constexpr unsigned kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; - static constexpr size_t kSize{1 << kLogSize}; - /// Mask for isolating the offset into a storage for a pointer. - static constexpr size_t kLowMask{kSize - 1}; - /// Mask for isolating the storage being pointed into by a pointer. - static constexpr size_t kHighMask{~kLowMask}; - - /// Returns the storage size, in bytes, of an \c AlignedHeapSegment. - static constexpr size_t storageSize() { - return kSize; - } - - /// Returns the pointer to the beginning of the storage containing \p ptr - /// (inclusive). Assuming such a storage exists. Note that - /// - /// storageStart(seg.hiLim()) != seg.lowLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageStart(const void *ptr) { - return reinterpret_cast( - reinterpret_cast(ptr) & kHighMask); - } - - /// Returns the pointer to the end of the storage containing \p ptr - /// (exclusive). Assuming such a storage exists. 
Note that - /// - /// storageEnd(seg.hiLim()) != seg.hiLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageEnd(const void *ptr) { - return reinterpret_cast(storageStart(ptr)) + kSize; - } - - /// Returns the offset in bytes to \p ptr from the start of its containing - /// storage. Assuming such a storage exists. Note that - /// - /// offset(seg.hiLim()) != seg.size() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static size_t offset(const char *ptr) { - return reinterpret_cast(ptr) & kLowMask; - } - /// @} - - /// Construct a null AlignedHeapSegment (one that does not own memory). - AlignedHeapSegment() = default; - /// \c AlignedHeapSegment is movable and assignable, but not copyable. - AlignedHeapSegment(AlignedHeapSegment &&); - AlignedHeapSegment &operator=(AlignedHeapSegment &&); - AlignedHeapSegment(const AlignedHeapSegment &) = delete; - - ~AlignedHeapSegment(); - - /// Create a AlignedHeapSegment by allocating memory with \p provider. - static llvh::ErrorOr create(StorageProvider *provider); - static llvh::ErrorOr create( - StorageProvider *provider, - const char *name); + static constexpr size_t kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; + static constexpr size_t kSegmentUnitSize = (1 << kLogSize); /// Contents of the memory region managed by this segment. class Contents { public: /// The number of bits representing the total number of heap-aligned /// addresses in the segment storage. - static constexpr size_t kMarkBitArraySize = kSize >> LogHeapAlign; + static constexpr size_t kMarkBitArraySize = + kSegmentUnitSize >> LogHeapAlign; /// BitArray for marking allocation region of a segment. using MarkBitArray = BitArray; @@ -138,6 +78,7 @@ class AlignedHeapSegment { private: friend class AlignedHeapSegment; + friend class AlignedHeapSegmentBase; /// Note that because of the Contents object, the first few bytes of the /// card table are unused, we instead use them to store a small @@ -179,10 +120,11 @@ class AlignedHeapSegment { "SHSegmentInfo does not fit in available unused CardTable space."); /// The offset from the beginning of a segment of the allocatable region. - static constexpr size_t offsetOfAllocRegion{offsetof(Contents, allocRegion_)}; + static constexpr size_t kOffsetOfAllocRegion{ + offsetof(Contents, allocRegion_)}; static_assert( - isSizeHeapAligned(offsetOfAllocRegion), + isSizeHeapAligned(kOffsetOfAllocRegion), "Allocation region must start at a heap aligned offset"); static_assert( @@ -215,6 +157,189 @@ class AlignedHeapSegment { GCCell *cell_{nullptr}; }; + /// Returns the address that is the lower bound of the segment. + /// \post The returned pointer is guaranteed to be aligned to + /// kSegmentUnitSize. + char *lowLim() const { + return lowLim_; + } + + /// Returns the address at which the first allocation in this segment would + /// occur. + /// Disable UB sanitization because 'this' may be null during the tests. + char *start() const LLVM_NO_SANITIZE("undefined") { + return contents()->allocRegion_; + } + + /// Returns the address at which the next allocation, if any, will occur. + char *level() const { + return level_; + } + + /// Return a reference to the card table covering the memory region managed by + /// this segment. 
+ CardTable &cardTable() const { + return contents()->cardTable_; + } + + /// Return a reference to the mark bit array covering the memory region + /// managed by this segment. + Contents::MarkBitArray &markBitArray() const { + return contents()->markBitArray_; + } + + /// Mark the given \p cell. Assumes the given address is a valid heap object. + static void setCellMarkBit(const GCCell *cell) { + auto *markBits = markBitArrayCovering(cell); + size_t ind = addressToMarkBitArrayIndex(cell); + markBits->set(ind, true); + } + + /// Return whether the given \p cell is marked. Assumes the given address is + /// a valid heap object. + static bool getCellMarkBit(const GCCell *cell) { + auto *markBits = markBitArrayCovering(cell); + size_t ind = addressToMarkBitArrayIndex(cell); + return markBits->at(ind); + } + + protected: + AlignedHeapSegmentBase() = default; + + /// Construct Contents() at the address of \p lowLim. + AlignedHeapSegmentBase(void *lowLim) + : lowLim_(reinterpret_cast(lowLim)) { + new (contents()) Contents(); + contents()->protectGuardPage(oscompat::ProtectMode::None); + } + + /// Return a pointer to the contents of the memory region managed by this + /// segment. + Contents *contents() const { + return reinterpret_cast(lowLim_); + } + + /// Given the \p lowLim of some valid segment's memory region, returns a + /// pointer to the Contents laid out in the storage, assuming it exists. + static Contents *contents(void *lowLim) { + return reinterpret_cast(lowLim); + } + + /// The start of the aligned segment. + char *lowLim_{nullptr}; + + /// The current address in this segment to allocate new object. This must be + /// positioned after lowLim_ to be correctly initialized. + char *level_{start()}; + + private: + /// Return the starting address for aligned region of size kSegmentUnitSize + /// that \p cell resides in. If \c cell resides in a JumboSegment, it's the + /// only cell there, this essentially returns its segment starting address. + static char *alignedStorageStart(const GCCell *cell) { + return reinterpret_cast( + reinterpret_cast(cell) & ~(kSegmentUnitSize - 1)); + } + + /// Given a \p cell, returns a pointer to the MarkBitArray covering the + /// segment that \p cell resides in. + /// + /// \pre There exists a currently alive heap that claims to contain \c cell. + static Contents::MarkBitArray *markBitArrayCovering(const GCCell *cell) { + auto *segStart = alignedStorageStart(cell); + return &contents(segStart)->markBitArray_; + } + + /// Translate the given address to a 0-based index in the MarkBitArray of its + /// segment. The base address is the start of the storage of this segment. For + /// JumboSegment, this should always return a constant index + /// kOffsetOfAllocRegion >> LogHeapAlign. + static size_t addressToMarkBitArrayIndex(const GCCell *cell) { + auto *cp = reinterpret_cast(cell); + auto *base = reinterpret_cast(alignedStorageStart(cell)); + return (cp - base) >> LogHeapAlign; + } +}; + +/// JumboHeapSegment has custom storage size that must be a multiple of +/// kSegmentUnitSize. Each such segment can only allocate a single object that +/// occupies the entire allocation space. Therefore, the inline MarkBitArray is +/// large enough, while CardTable needs to allocate its cards and boundaries +/// arrays separately. +class JumboHeapSegment : public AlignedHeapSegmentBase {}; + +/// AlignedHeapSegment has fixed storage size kSegmentUnitSize. Its CardTable +/// and MarkBitArray are stored inline right before the allocation space. 
This +/// is used for all allocations in YoungGen and normal object allocations in +/// OldGen. +class AlignedHeapSegment : public AlignedHeapSegmentBase { + public: + /// @name Constants and utility functions for the aligned storage of \c + /// AlignedHeapSegment. + /// + /// @{ + /// The size and the alignment of the storage, in bytes. + static constexpr size_t kSize = kSegmentUnitSize; + /// Mask for isolating the offset into a storage for a pointer. + static constexpr size_t kLowMask{kSize - 1}; + /// Mask for isolating the storage being pointed into by a pointer. + static constexpr size_t kHighMask{~kLowMask}; + + /// Returns the storage size, in bytes, of an \c AlignedHeapSegment. + static constexpr size_t storageSize() { + return kSize; + } + + /// Returns the pointer to the beginning of the storage containing \p ptr + /// (inclusive). Assuming such a storage exists. Note that + /// + /// storageStart(seg.hiLim()) != seg.lowLim() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static void *storageStart(const void *ptr) { + return reinterpret_cast( + reinterpret_cast(ptr) & kHighMask); + } + + /// Returns the pointer to the end of the storage containing \p ptr + /// (exclusive). Assuming such a storage exists. Note that + /// + /// storageEnd(seg.hiLim()) != seg.hiLim() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static void *storageEnd(const void *ptr) { + return reinterpret_cast(storageStart(ptr)) + kSize; + } + + /// Returns the offset in bytes to \p ptr from the start of its containing + /// storage. Assuming such a storage exists. Note that + /// + /// offset(seg.hiLim()) != seg.size() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static size_t offset(const char *ptr) { + return reinterpret_cast(ptr) & kLowMask; + } + /// @} + + /// Construct a null AlignedHeapSegment (one that does not own memory). + AlignedHeapSegment() = default; + /// \c AlignedHeapSegment is movable and assignable, but not copyable. + AlignedHeapSegment(AlignedHeapSegment &&); + AlignedHeapSegment &operator=(AlignedHeapSegment &&); + AlignedHeapSegment(const AlignedHeapSegment &) = delete; + + ~AlignedHeapSegment(); + + /// Create a AlignedHeapSegment by allocating memory with \p provider. + static llvh::ErrorOr create(StorageProvider *provider); + static llvh::ErrorOr create( + StorageProvider *provider, + const char *name); + /// Returns the index of the segment containing \p lowLim, which is required /// to be the start of its containing segment. (This can allow extra /// efficiency, in cases where the segment start has already been computed.) @@ -238,40 +363,12 @@ class AlignedHeapSegment { /// space, returns {nullptr, false}. inline AllocResult alloc(uint32_t size); - /// Given the \p lowLim of some valid segment's memory region, returns a - /// pointer to the AlignedHeapSegment::Contents laid out in that storage, - /// assuming it exists. - inline static Contents *contents(void *lowLim); - inline static const Contents *contents(const void *lowLim); - /// Given a \p ptr into the memory region of some valid segment \c s, returns /// a pointer to the CardTable covering the segment containing the pointer. /// /// \pre There exists a currently alive heap that claims to contain \c ptr. 
inline static CardTable *cardTableCovering(const void *ptr); - /// Given a \p ptr into the memory region of some valid segment \c s, returns - /// a pointer to the MarkBitArray covering the segment containing the - /// pointer. - /// - /// \pre There exists a currently alive heap that claims to contain \c ptr. - inline static Contents::MarkBitArray *markBitArrayCovering(const void *ptr); - - /// Translate the given address to a 0-based index in the MarkBitArray of its - /// segment. The base address is the start of the storage of this segment. - static size_t addressToMarkBitArrayIndex(const void *ptr) { - auto *cp = reinterpret_cast(ptr); - auto *base = reinterpret_cast(storageStart(cp)); - return (cp - base) >> LogHeapAlign; - } - - /// Mark the given \p cell. Assumes the given address is a valid heap object. - inline static void setCellMarkBit(const GCCell *cell); - - /// Return whether the given \p cell is marked. Assumes the given address is - /// a valid heap object. - inline static bool getCellMarkBit(const GCCell *cell); - /// Find the head of the first cell that extends into the card at index /// \p cardIdx. /// \return A cell such that @@ -294,23 +391,11 @@ class AlignedHeapSegment { /// The number of bytes in the segment that are available for allocation. inline size_t available() const; - /// Returns the address that is the lower bound of the segment. - /// \post The returned pointer is guaranteed to be aligned to a segment - /// boundary. - char *lowLim() const { - return lowLim_; - } - /// Returns the address that is the upper bound of the segment. char *hiLim() const { return lowLim() + storageSize(); } - /// Returns the address at which the first allocation in this segment would - /// occur. - /// Disable UB sanitization because 'this' may be null during the tests. - inline char *start() const LLVM_NO_SANITIZE("undefined"); - /// Returns the first address after the region in which allocations can occur, /// taking external memory credits into a account (they decrease the effective /// end). @@ -330,9 +415,6 @@ class AlignedHeapSegment { /// ignoring external memory credits. inline char *end() const; - /// Returns the address at which the next allocation, if any, will occur. - inline char *level() const; - /// Returns an iterator range corresponding to the cells in this segment. inline llvh::iterator_range cells(); @@ -340,15 +422,6 @@ class AlignedHeapSegment { /// AlignedHeapSegment. inline static bool containedInSame(const void *a, const void *b); - /// Return a reference to the card table covering the memory region managed by - /// this segment. - /// Disable sanitization because 'this' may be null in the tests. - inline CardTable &cardTable() const LLVM_NO_SANITIZE("null"); - - /// Return a reference to the mark bit array covering the memory region - /// managed by this segment. - inline Contents::MarkBitArray &markBitArray() const; - explicit operator bool() const { return lowLim(); } @@ -390,26 +463,15 @@ class AlignedHeapSegment { /// Set the contents of the segment to a dead value. void clear(); - /// Set the given range [start, end) to a dead value. - static void clear(char *start, char *end); /// Checks that dead values are present in the [start, end) range. static void checkUnwritten(char *start, char *end); #endif - protected: - /// Return a pointer to the contents of the memory region managed by this - /// segment. - inline Contents *contents() const; - - /// The start of the aligned segment. 
- char *lowLim_{nullptr}; - + private: /// The provider that created this segment. It will be used to properly /// destroy this. StorageProvider *provider_{nullptr}; - char *level_{start()}; - /// The upper limit of the space that we can currently allocated into; /// this may be decreased when externally allocated memory is credited to /// the generation owning this space. @@ -419,7 +481,6 @@ class AlignedHeapSegment { /// and swap idiom. friend void swap(AlignedHeapSegment &a, AlignedHeapSegment &b); - private: AlignedHeapSegment(StorageProvider *provider, void *lowLim); }; @@ -459,26 +520,6 @@ AllocResult AlignedHeapSegment::alloc(uint32_t size) { return {cell, true}; } -/*static*/ -AlignedHeapSegment::Contents::MarkBitArray * -AlignedHeapSegment::markBitArrayCovering(const void *ptr) { - return &contents(storageStart(ptr))->markBitArray_; -} - -/*static*/ -void AlignedHeapSegment::setCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - markBits->set(ind, true); -} - -/*static*/ -bool AlignedHeapSegment::getCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - return markBits->at(ind); -} - GCCell *AlignedHeapSegment::getFirstCellHead(size_t cardIdx) { CardTable &cards = cardTable(); GCCell *cell = cards.firstObjForCard(cardIdx); @@ -499,16 +540,6 @@ void AlignedHeapSegment::setCellHead(const GCCell *cellStart, const size_t sz) { } } -/* static */ AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - void *lowLim) { - return reinterpret_cast(lowLim); -} - -/* static */ const AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - const void *lowLim) { - return reinterpret_cast(lowLim); -} - /* static */ CardTable *AlignedHeapSegment::cardTableCovering(const void *ptr) { return &AlignedHeapSegment::contents(storageStart(ptr))->cardTable_; } @@ -529,10 +560,6 @@ size_t AlignedHeapSegment::available() const { return effectiveEnd() - level(); } -char *AlignedHeapSegment::start() const { - return contents()->allocRegion_; -} - char *AlignedHeapSegment::effectiveEnd() const { return effectiveEnd_; } @@ -541,10 +568,6 @@ char *AlignedHeapSegment::end() const { return start() + maxSize(); } -char *AlignedHeapSegment::level() const { - return level_; -} - llvh::iterator_range AlignedHeapSegment::cells() { return { @@ -558,19 +581,6 @@ bool AlignedHeapSegment::containedInSame(const void *a, const void *b) { storageSize(); } -CardTable &AlignedHeapSegment::cardTable() const { - return contents()->cardTable_; -} - -AlignedHeapSegment::Contents::MarkBitArray &AlignedHeapSegment::markBitArray() - const { - return contents()->markBitArray_; -} - -AlignedHeapSegment::Contents *AlignedHeapSegment::contents() const { - return contents(lowLim()); -} - } // namespace vm } // namespace hermes diff --git a/lib/VM/gcs/AlignedHeapSegment.cpp b/lib/VM/gcs/AlignedHeapSegment.cpp index 1509168194d..24d360f78c7 100644 --- a/lib/VM/gcs/AlignedHeapSegment.cpp +++ b/lib/VM/gcs/AlignedHeapSegment.cpp @@ -22,6 +22,17 @@ namespace hermes { namespace vm { +#ifndef NDEBUG +/// Set the given range [start, end) to a dead value. 
+static void clearRange(char *start, char *end) { +#if LLVM_ADDRESS_SANITIZER_BUILD + __asan_poison_memory_region(start, end - start); +#else + std::memset(start, kInvalidHeapValue, end - start); +#endif +} +#endif + void AlignedHeapSegment::Contents::protectGuardPage( oscompat::ProtectMode mode) { char *begin = &paddedGuardPage_[kGuardPagePadding]; @@ -45,11 +56,12 @@ llvh::ErrorOr AlignedHeapSegment::create( if (!result) { return result.getError(); } + assert(*result && "Heap segment storage allocation failure"); return AlignedHeapSegment{provider, *result}; } AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) - : lowLim_(static_cast(lowLim)), provider_(provider) { + : AlignedHeapSegmentBase(lowLim), provider_(provider) { assert( storageStart(lowLim_) == lowLim_ && "The lower limit of this storage must be aligned"); @@ -58,13 +70,9 @@ AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) assert( reinterpret_cast(hiLim()) % oscompat::page_size() == 0 && "The higher limit must be page aligned"); - if (*this) { - new (contents()) Contents(); - contents()->protectGuardPage(oscompat::ProtectMode::None); #ifndef NDEBUG - clear(); + clear(); #endif - } } void swap(AlignedHeapSegment &a, AlignedHeapSegment &b) { @@ -120,7 +128,7 @@ void AlignedHeapSegment::setLevel(char *lvl) { assert(dbgContainsLevel(lvl)); if (lvl < level_) { #ifndef NDEBUG - clear(lvl, level_); + clearRange(lvl, level_); #else if (MU == AdviseUnused::Yes) { const size_t PS = oscompat::page_size(); @@ -172,15 +180,7 @@ bool AlignedHeapSegment::validPointer(const void *p) const { } void AlignedHeapSegment::clear() { - clear(start(), end()); -} - -/* static */ void AlignedHeapSegment::clear(char *start, char *end) { -#if LLVM_ADDRESS_SANITIZER_BUILD - __asan_poison_memory_region(start, end - start); -#else - std::memset(start, kInvalidHeapValue, end - start); -#endif + clearRange(start(), end()); } /* static */ void AlignedHeapSegment::checkUnwritten(char *start, char *end) { diff --git a/lib/VM/gcs/HadesGC.cpp b/lib/VM/gcs/HadesGC.cpp index e9bf33f4b28..3e9ab6ba591 100644 --- a/lib/VM/gcs/HadesGC.cpp +++ b/lib/VM/gcs/HadesGC.cpp @@ -51,7 +51,7 @@ GCCell *HadesGC::OldGen::finishAlloc(GCCell *cell, uint32_t sz) { // Track the number of allocated bytes in a segment. incrementAllocatedBytes(sz); // Write a mark bit so this entry doesn't get free'd by the sweeper. - AlignedHeapSegment::setCellMarkBit(cell); + AlignedHeapSegmentBase::setCellMarkBit(cell); // Could overwrite the VTable, but the allocator will write a new one in // anyway. 
return cell; @@ -408,7 +408,7 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, LLVM_NODISCARD GCCell *acceptHeap(GCCell *ptr, void *heapLoc) { if (shouldForward(ptr)) { assert( - AlignedHeapSegment::getCellMarkBit(ptr) && + AlignedHeapSegmentBase::getCellMarkBit(ptr) && "Should only evacuate marked objects."); return forwardCell(ptr); } @@ -426,7 +426,7 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, if (shouldForward(cptr)) { GCCell *ptr = cptr.getNonNull(pointerBase_); assert( - AlignedHeapSegment::getCellMarkBit(ptr) && + AlignedHeapSegmentBase::getCellMarkBit(ptr) && "Should only evacuate marked objects."); return forwardCell(ptr); } @@ -442,7 +442,7 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, template LLVM_NODISCARD T forwardCell(GCCell *const cell) { assert( - AlignedHeapSegment::getCellMarkBit(cell) && + AlignedHeapSegmentBase::getCellMarkBit(cell) && "Cannot forward unmarked object"); if (cell->hasMarkedForwardingPointer()) { // Get the forwarding pointer from the header of the object. @@ -459,7 +459,7 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, HERMES_SLOW_ASSERT( gc.inOldGen(newCell) && "Evacuated cell not in the old gen"); assert( - AlignedHeapSegment::getCellMarkBit(newCell) && + AlignedHeapSegmentBase::getCellMarkBit(newCell) && "Cell must be marked when it is allocated into the old gen"); // Copy the contents of the existing cell over before modifying it. std::memcpy(newCell, cell, cellSize); @@ -547,7 +547,7 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, CopyListCell *const cell = static_cast(copyListHead_.getNonNull(pointerBase_)); assert( - AlignedHeapSegment::getCellMarkBit(cell) && + AlignedHeapSegmentBase::getCellMarkBit(cell) && "Discovered unmarked object"); copyListHead_ = cell->next_; return cell; @@ -649,7 +649,7 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { AlignedHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( heapLoc); } - if (AlignedHeapSegment::getCellMarkBit(cell)) { + if (AlignedHeapSegmentBase::getCellMarkBit(cell)) { // Points to an already marked object, do nothing. return; } @@ -661,7 +661,7 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { void acceptRoot(GCCell *cell) { assert(cell->isValid() && "Encountered an invalid cell"); - if (!AlignedHeapSegment::getCellMarkBit(cell)) + if (!AlignedHeapSegmentBase::getCellMarkBit(cell)) push(cell); } @@ -794,7 +794,7 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { "Shouldn't ever traverse a YG object in this loop"); HERMES_SLOW_ASSERT( gc.dbgContains(cell) && "Non-heap cell found in global worklist"); - if (!AlignedHeapSegment::getCellMarkBit(cell)) { + if (!AlignedHeapSegmentBase::getCellMarkBit(cell)) { // Cell has not yet been marked. 
push(cell); } @@ -807,7 +807,7 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { localWorklist_.pop(); assert(cell->isValid() && "Invalid cell in marking"); assert( - AlignedHeapSegment::getCellMarkBit(cell) && + AlignedHeapSegmentBase::getCellMarkBit(cell) && "Discovered unmarked object"); assert( !gc.inYoungGen(cell) && @@ -881,12 +881,12 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { void push(GCCell *cell) { assert( - !AlignedHeapSegment::getCellMarkBit(cell) && + !AlignedHeapSegmentBase::getCellMarkBit(cell) && "A marked object should never be pushed onto a worklist"); assert( !gc.inYoungGen(cell) && "Shouldn't ever push a YG object onto the worklist"); - AlignedHeapSegment::setCellMarkBit(cell); + AlignedHeapSegmentBase::setCellMarkBit(cell); // There could be a race here: however, the mutator will never change a // cell's kind after initialization. The GC thread might to a free cell, but // only during sweeping, not concurrently with this operation. Therefore @@ -957,7 +957,7 @@ class HadesGC::MarkWeakRootsAcceptor final : public WeakRootAcceptor { } GCCell *const cell = wr.getNoBarrierUnsafe(gc_.getPointerBase()); HERMES_SLOW_ASSERT(gc_.dbgContains(cell) && "ptr not in heap"); - if (AlignedHeapSegment::getCellMarkBit(cell)) { + if (AlignedHeapSegmentBase::getCellMarkBit(cell)) { // If the cell is marked, no need to do any writes. return; } @@ -1054,7 +1054,7 @@ bool HadesGC::OldGen::sweepNext(bool backgroundThread) { int32_t segmentSweptBytes = 0; for (GCCell *cell : segments_[sweepIterator_.segNumber].cells()) { assert(cell->isValid() && "Invalid cell in sweeping"); - if (AlignedHeapSegment::getCellMarkBit(cell)) { + if (AlignedHeapSegmentBase::getCellMarkBit(cell)) { // Cannot concurrently trim storage. Technically just checking // backgroundThread would suffice, but the kConcurrentGC lets us compile // away this check in incremental mode. @@ -1080,7 +1080,7 @@ bool HadesGC::OldGen::sweepNext(bool backgroundThread) { // Just create a FillerCell, the next iteration will free it. constructCell(newCell, trimmableBytes); assert( - !AlignedHeapSegment::getCellMarkBit(newCell) && + !AlignedHeapSegmentBase::getCellMarkBit(newCell) && "Trimmed space cannot be marked"); AlignedHeapSegment::setCellHead(newCell, trimmableBytes); #ifndef NDEBUG @@ -1769,12 +1769,12 @@ void HadesGC::markWeakMapEntrySlots() { return; GCCell *ownerMapCell = slot.owner.getNoBarrierUnsafe(getPointerBase()); // If the owner structure isn't reachable, no need to mark the values. - if (!AlignedHeapSegment::getCellMarkBit(ownerMapCell)) + if (!AlignedHeapSegmentBase::getCellMarkBit(ownerMapCell)) return; GCCell *cell = slot.key.getNoBarrierUnsafe(getPointerBase()); // The WeakRef object must be marked for the mapped value to // be marked (unless there are other strong refs to the value). 
- if (!AlignedHeapSegment::getCellMarkBit(cell)) + if (!AlignedHeapSegmentBase::getCellMarkBit(cell)) return; oldGenMarker_->accept(slot.mappedValue); }); @@ -1790,8 +1790,8 @@ void HadesGC::markWeakMapEntrySlots() { } GCCell *cell = slot.key.getNoBarrierUnsafe(getPointerBase()); GCCell *ownerMapCell = slot.owner.getNoBarrierUnsafe(getPointerBase()); - if (!AlignedHeapSegment::getCellMarkBit(cell) || - !AlignedHeapSegment::getCellMarkBit(ownerMapCell)) { + if (!AlignedHeapSegmentBase::getCellMarkBit(cell) || + !AlignedHeapSegmentBase::getCellMarkBit(ownerMapCell)) { slot.mappedValue = HermesValue::encodeEmptyValue(); } }); @@ -2094,7 +2094,7 @@ void HadesGC::forAllObjs(const std::function &callback) { // compaction, there might be some objects that are dead, and could // potentially have garbage in them. There's no need to check the // pointers of those objects. - if (AlignedHeapSegment::getCellMarkBit(cell)) { + if (AlignedHeapSegmentBase::getCellMarkBit(cell)) { callback(cell); } }; @@ -2230,7 +2230,7 @@ GCCell *HadesGC::OldGen::alloc(uint32_t sz) { // free list. addSegment(std::move(seg.get())); GCCell *newObj = static_cast(res.ptr); - AlignedHeapSegment::setCellMarkBit(newObj); + AlignedHeapSegmentBase::setCellMarkBit(newObj); return newObj; } @@ -2730,7 +2730,7 @@ void HadesGC::scanDirtyCardsForSegment( // expensive. // Mark the first object with respect to the dirty card boundaries. - if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) + if (visitUnmarked || AlignedHeapSegmentBase::getCellMarkBit(obj)) markCellWithinRange(acceptor, obj, begin, end); obj = obj->nextCell(); @@ -2742,7 +2742,7 @@ void HadesGC::scanDirtyCardsForSegment( // object where next is within the card. for (GCCell *next = obj->nextCell(); next < boundary; next = next->nextCell()) { - if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) + if (visitUnmarked || AlignedHeapSegmentBase::getCellMarkBit(obj)) markCell(acceptor, obj); obj = next; } @@ -2752,7 +2752,7 @@ void HadesGC::scanDirtyCardsForSegment( assert( obj < boundary && obj->nextCell() >= boundary && "Last object in card must touch or cross cross the card boundary"); - if (visitUnmarked || AlignedHeapSegment::getCellMarkBit(obj)) + if (visitUnmarked || AlignedHeapSegmentBase::getCellMarkBit(obj)) markCellWithinRange(acceptor, obj, begin, end); } diff --git a/unittests/VMRuntime/MarkBitArrayNCTest.cpp b/unittests/VMRuntime/MarkBitArrayNCTest.cpp index 455c1996fb1..a46536155d2 100644 --- a/unittests/VMRuntime/MarkBitArrayNCTest.cpp +++ b/unittests/VMRuntime/MarkBitArrayNCTest.cpp @@ -27,6 +27,13 @@ namespace { struct MarkBitArrayTest : public ::testing::Test { MarkBitArrayTest(); + static size_t addressToMarkBitArrayIndex(const void *addr) { + auto *cp = reinterpret_cast(addr); + auto *base = + reinterpret_cast(AlignedHeapSegment::storageStart(addr)); + return (cp - base) >> LogHeapAlign; + } + protected: std::unique_ptr provider; AlignedHeapSegment seg; @@ -66,7 +73,7 @@ TEST_F(MarkBitArrayTest, AddressToIndex) { char *addr = addrs.at(i); size_t ind = indices.at(i); - EXPECT_EQ(ind, AlignedHeapSegment::addressToMarkBitArrayIndex(addr)) + EXPECT_EQ(ind, addressToMarkBitArrayIndex(addr)) << "0x" << std::hex << (void *)addr << " -> " << ind; char *toAddr = seg.lowLim() + (ind << LogHeapAlign); EXPECT_EQ(toAddr, addr) @@ -78,7 +85,7 @@ TEST_F(MarkBitArrayTest, MarkGet) { const size_t lastIx = mba.size() - 1; for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = 
addressToMarkBitArrayIndex(addr); EXPECT_FALSE(ind > 0 && mba.at(ind - 1)) << "initial " << ind << " - 1"; EXPECT_FALSE(mba.at(ind)) << "initial " << ind; @@ -97,37 +104,37 @@ TEST_F(MarkBitArrayTest, MarkGet) { TEST_F(MarkBitArrayTest, Initial) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, Clear) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_FALSE(mba.at(ind)); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_TRUE(mba.at(ind)); } mba.reset(); for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, NextMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findNextSetBitFrom(ind)); @@ -140,7 +147,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextSetBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); indices.push(ind); } @@ -154,7 +161,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { TEST_F(MarkBitArrayTest, NextUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findNextZeroBitFrom(ind)); @@ -167,7 +174,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextZeroBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, false); indices.push(ind); } @@ -182,7 +189,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { TEST_F(MarkBitArrayTest, PrevMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findPrevSetBitFrom(ind + 1)); } @@ -196,7 +203,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, true); indices.push(ind); } @@ -209,7 +216,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { TEST_F(MarkBitArrayTest, PrevUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findPrevZeroBitFrom(ind + 1)); @@ -225,7 +232,7 @@ TEST_F(MarkBitArrayTest, PrevUnmarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while 
(addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, false); indices.push(ind); }
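
---
Illustrative note (not part of the patch): the sketch below shows how the refactored hierarchy from this diff is intended to be used. It relies only on what the diff above declares: AlignedHeapSegmentBase now owns the shared mark-bit and card-table accessors, AlignedHeapSegment keeps the fixed kSegmentUnitSize bump-allocation path, and JumboHeapSegment is still an empty placeholder. The function name `sketch`, the allocation size, and the StorageProvider plumbing are assumptions for illustration, not code from the commit.

  #include <cassert>
  #include <utility>
  #include "hermes/VM/AlignedHeapSegment.h"
  #include "hermes/VM/StorageProvider.h"

  using namespace hermes::vm;

  // A minimal sketch, assuming a valid StorageProvider from the caller.
  void sketch(StorageProvider *provider) {
    // Fixed-size segments are still created and bump-allocated through
    // AlignedHeapSegment, exactly as before this refactor.
    auto segOrErr = AlignedHeapSegment::create(provider, "example-segment");
    if (!segOrErr)
      return;
    AlignedHeapSegment seg = std::move(*segOrErr);

    // Hypothetical allocation of one heap-aligned cell in the segment.
    AllocResult res = seg.alloc(64);
    if (!res.success)
      return;
    auto *cell = static_cast<GCCell *>(res.ptr);

    // Mark-bit queries now live on the base class: they only need the
    // kSegmentUnitSize-aligned start of the region containing the cell,
    // so the same calls will work for future JumboHeapSegment objects.
    AlignedHeapSegmentBase::setCellMarkBit(cell);
    assert(AlignedHeapSegmentBase::getCellMarkBit(cell));
  }

This is also why the HadesGC.cpp hunks above only rename the qualifier from AlignedHeapSegment:: to AlignedHeapSegmentBase:: for setCellMarkBit/getCellMarkBit: the call sites are unchanged, only the declaring class moved.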