diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h
index 81acde4377e..63e25bda695 100644
--- a/include/hermes/VM/AlignedHeapSegment.h
+++ b/include/hermes/VM/AlignedHeapSegment.h
@@ -194,6 +194,14 @@ class AlignedHeapSegmentBase {
     return contents()->cardTable_;
   }
 
+  /// Given a \p cell into the memory region of some valid segment \c s, returns
+  /// a pointer to the CardTable covering the segment containing the cell.
+  ///
+  /// \pre There exists a currently alive heap in which \p cell is allocated.
+  static CardTable *cardTableCovering(const GCCell *cell) {
+    return &contents(alignedStorageStart(cell))->cardTable_;
+  }
+
   /// Return a reference to the mark bit array covering the memory region
   /// managed by this segment.
   Contents::MarkBitArray &markBitArray() const {
@@ -215,6 +223,15 @@ class AlignedHeapSegmentBase {
     return markBits->at(ind);
   }
 
+#ifndef NDEBUG
+  /// Get the storage end of segment that \p cell resides in.
+  static char *storageEnd(const GCCell *cell) {
+    auto *start = alignedStorageStart(cell);
+    auto *segmentInfo = reinterpret_cast<const SHSegmentInfo *>(start);
+    return start + segmentInfo->segmentSize;
+  }
+#endif
+
  protected:
   AlignedHeapSegmentBase() = default;
 
diff --git a/include/hermes/VM/ArrayStorage.h b/include/hermes/VM/ArrayStorage.h
index 15d90f83e3b..6247d0e2c84 100644
--- a/include/hermes/VM/ArrayStorage.h
+++ b/include/hermes/VM/ArrayStorage.h
@@ -237,7 +237,7 @@ class ArrayStorageBase final
     auto *fromStart = other->data();
     auto *fromEnd = fromStart + otherSz;
     GCHVType::uninitialized_copy(
-        fromStart, fromEnd, data() + sz, runtime.getHeap());
+        fromStart, fromEnd, data() + sz, runtime.getHeap(), this);
     size_.store(sz + otherSz, std::memory_order_release);
   }
 
diff --git a/include/hermes/VM/GCBase.h b/include/hermes/VM/GCBase.h
index 4a3c89ecbab..a10b63d15c3 100644
--- a/include/hermes/VM/GCBase.h
+++ b/include/hermes/VM/GCBase.h
@@ -1162,6 +1162,7 @@ class GCBase {
   void constructorWriteBarrier(const GCPointerBase *loc, const GCCell *value);
   template <typename HVType>
   void constructorWriteBarrierRange(
+      const GCCell *cell,
       const GCHermesValueBase<HVType> *start,
       uint32_t numHVs);
   template <typename HVType>
diff --git a/include/hermes/VM/HadesGC.h b/include/hermes/VM/HadesGC.h
index cb52aff734d..f8e0227f0e1 100644
--- a/include/hermes/VM/HadesGC.h
+++ b/include/hermes/VM/HadesGC.h
@@ -216,27 +216,30 @@ class HadesGC final : public GCBase {
 
   template <typename HVType>
   void constructorWriteBarrierRange(
+      const GCCell *cell,
       const GCHermesValueBase<HVType> *start,
       uint32_t numHVs) {
     // A pointer that lives in YG never needs any write barriers.
     if (LLVM_UNLIKELY(!inYoungGen(start)))
-      constructorWriteBarrierRangeSlow(start, numHVs);
+      constructorWriteBarrierRangeSlow(cell, start, numHVs);
   }
 
   template <typename HVType>
   void constructorWriteBarrierRangeSlow(
+      const GCCell *cell,
       const GCHermesValueBase<HVType> *start,
       uint32_t numHVs) {
     assert(
-        AlignedHeapSegment::containedInSame(start, start + numHVs) &&
+        reinterpret_cast<const char *>(start + numHVs) <
+            AlignedHeapSegmentBase::storageEnd(cell) &&
         "Range must start and end within a heap segment.");
 
-    // Most constructors should be running in the YG, so in the common case, we
-    // can avoid doing anything for the whole range. If the range is in the OG,
-    // then just dirty all the cards corresponding to it, and we can scan them
-    // for pointers later. This is less precise but makes the write barrier
-    // faster.
+    // Most constructors should be running in the YG, so in the common case,
+    // we can avoid doing anything for the whole range. If the range is in
+    // the OG, then just dirty all the cards corresponding to it, and we can
+    // scan them for pointers later. This is less precise but makes the
+    // write barrier faster.
-    AlignedHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange(
+    AlignedHeapSegmentBase::cardTableCovering(cell)->dirtyCardsForAddressRange(
         start, start + numHVs);
   }
 
diff --git a/include/hermes/VM/HermesValue-inline.h b/include/hermes/VM/HermesValue-inline.h
index d38a9219879..4bd456aeca1 100644
--- a/include/hermes/VM/HermesValue-inline.h
+++ b/include/hermes/VM/HermesValue-inline.h
@@ -182,7 +182,8 @@ inline GCHermesValueBase *GCHermesValueBase::uninitialized_copy(
     GCHermesValueBase *first,
     GCHermesValueBase *last,
     GCHermesValueBase *result,
-    GC &gc) {
+    GC &gc,
+    const GCCell *cell) {
 #ifndef NDEBUG
   uintptr_t fromFirst = reinterpret_cast<uintptr_t>(first),
             fromLast = reinterpret_cast<uintptr_t>(last);
@@ -194,7 +195,7 @@ inline GCHermesValueBase *GCHermesValueBase::uninitialized_copy(
       "Uninitialized range cannot overlap with an initialized one.");
 #endif
 
-  gc.constructorWriteBarrierRange(result, last - first);
+  gc.constructorWriteBarrierRange(cell, result, last - first);
   // memcpy is fine for an uninitialized copy.
   std::memcpy(
       reinterpret_cast<void *>(result), first, (last - first) * sizeof(HVType));
diff --git a/include/hermes/VM/HermesValue.h b/include/hermes/VM/HermesValue.h
index 13bc13bb69c..9a892e90cc7 100644
--- a/include/hermes/VM/HermesValue.h
+++ b/include/hermes/VM/HermesValue.h
@@ -590,7 +590,8 @@ class GCHermesValueBase final : public HVType {
       GCHermesValueBase *first,
       GCHermesValueBase *last,
       GCHermesValueBase *result,
-      GC &gc);
+      GC &gc,
+      const GCCell *cell);
 
   /// Copies a range of values and performs a write barrier on each.
   template <typename InputIt>
diff --git a/include/hermes/VM/MallocGC.h b/include/hermes/VM/MallocGC.h
index b77c51d7615..8b41a33331b 100644
--- a/include/hermes/VM/MallocGC.h
+++ b/include/hermes/VM/MallocGC.h
@@ -243,6 +243,7 @@ class MallocGC final : public GCBase {
   void writeBarrierRange(const GCHermesValueBase<HVType> *, uint32_t) {}
   template <typename HVType>
   void constructorWriteBarrierRange(
+      const GCCell *cell,
       const GCHermesValueBase<HVType> *,
       uint32_t) {}
   template <typename HVType>
diff --git a/lib/VM/ArrayStorage.cpp b/lib/VM/ArrayStorage.cpp
index a4fc8acba0c..c655bf0166e 100644
--- a/lib/VM/ArrayStorage.cpp
+++ b/lib/VM/ArrayStorage.cpp
@@ -103,7 +103,8 @@ ExecutionStatus ArrayStorageBase::reallocateToLarger(
   {
     GCHVType *from = self->data() + fromFirst;
     GCHVType *to = newSelf->data() + toFirst;
-    GCHVType::uninitialized_copy(from, from + copySize, to, runtime.getHeap());
+    GCHVType::uninitialized_copy(
+        from, from + copySize, to, runtime.getHeap(), self);
   }
 
   // Initialize the elements before the first copied element.
diff --git a/lib/VM/GCBase.cpp b/lib/VM/GCBase.cpp
index 6b29d1979fc..0ed5aa8e4b7 100644
--- a/lib/VM/GCBase.cpp
+++ b/lib/VM/GCBase.cpp
@@ -979,9 +979,14 @@ GCBASE_BARRIER_2(
     const GCCell *);
 GCBASE_BARRIER_2(writeBarrierRange, const GCHermesValue *, uint32_t);
 GCBASE_BARRIER_2(writeBarrierRange, const GCSmallHermesValue *, uint32_t);
-GCBASE_BARRIER_2(constructorWriteBarrierRange, const GCHermesValue *, uint32_t);
 GCBASE_BARRIER_2(
     constructorWriteBarrierRange,
+    const GCCell *,
+    const GCHermesValue *,
+    uint32_t);
+GCBASE_BARRIER_2(
+    constructorWriteBarrierRange,
+    const GCCell *,
     const GCSmallHermesValue *,
     uint32_t);
 GCBASE_BARRIER_1(snapshotWriteBarrier, const GCHermesValue *);
diff --git a/lib/VM/SegmentedArray.cpp b/lib/VM/SegmentedArray.cpp
index 93c4af99437..1ef68f43a1d 100644
--- a/lib/VM/SegmentedArray.cpp
+++ b/lib/VM/SegmentedArray.cpp
@@ -292,7 +292,8 @@ ExecutionStatus SegmentedArrayBase::growRight(
       self->inlineStorage(),
       self->inlineStorage() + numSlotsUsed,
       newSegmentedArray->inlineStorage(),
-      runtime.getHeap());
+      runtime.getHeap(),
+      *self);
   // Set the size of the new array to be the same as the old array's size.
   newSegmentedArray->numSlotsUsed_.store(
       numSlotsUsed, std::memory_order_release);