Move memory layout and common methods of AlignedHeapSegment to AlignedHeapSegmentBase #1510

Open · wants to merge 2 commits into base: static_h
552 changes: 274 additions & 278 deletions include/hermes/VM/AlignedHeapSegment.h

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion include/hermes/VM/CardTableNC.h
@@ -23,7 +23,7 @@ namespace vm {

/// The card table optimizes young gen collections by restricting the amount of
/// heap belonging to the old gen that must be scanned. The card table expects
/// to be constructed inside an AlignedHeapSegment's storage, at some position
to be constructed inside a FixedSizeHeapSegment's storage, at some position
/// before the allocation region, and covers the extent of that storage's
/// memory.
///
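Note on the comment above: because each FixedSizeHeapSegment's storage is aligned to its own size, code can recover the storage start (and with it the card table placed near the front of that storage) by masking an interior pointer. A minimal sketch, assuming kLogSize is log2 of FixedSizeHeapSegment::storageSize() and kCardTableOffset is a hypothetical offset of the card table within the storage:

  inline char *storageStart(const void *p) {
    // Clear the low bits to round down to the storage boundary.
    return reinterpret_cast<char *>(
        reinterpret_cast<uintptr_t>(p) & ~((uintptr_t{1} << kLogSize) - 1));
  }
  inline CardTable *cardTableCovering(const void *p) {
    // The card table sits at a fixed position before the allocation region.
    return reinterpret_cast<CardTable *>(storageStart(p) + kCardTableOffset);
  }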
2 changes: 1 addition & 1 deletion include/hermes/VM/GCBase.h
@@ -226,7 +226,7 @@ enum XorPtrKeyID {
/// Return the maximum amount of bytes holdable by this heap.
/// gcheapsize_t max() const;
/// Return the total amount of bytes of storage this GC will require.
/// This will be a multiple of AlignedHeapSegment::storageSize().
/// This will be a multiple of FixedSizeHeapSegment::storageSize().
/// gcheapsize_t storageFootprint() const;
///
class GCBase {
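As a concrete reading of the storageFootprint() contract above: a GC that owns N fixed-size segment storages would report N times the storage size, e.g. (numStorages_ is a hypothetical member, shown for illustration only):

  gcheapsize_t storageFootprint() const {
    return numStorages_ * FixedSizeHeapSegment::storageSize();
  }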
50 changes: 25 additions & 25 deletions include/hermes/VM/HadesGC.h
@@ -76,7 +76,7 @@ class HadesGC final : public GCBase {
static constexpr uint32_t maxAllocationSizeImpl() {
// The largest allocation allowable in Hades is the max size a single
// segment supports.
return AlignedHeapSegment::maxSize();
return FixedSizeHeapSegment::maxSize();
}

static constexpr uint32_t minAllocationSizeImpl() {
@@ -297,7 +297,7 @@ class HadesGC final : public GCBase {

/// \return true if the pointer lives in the young generation.
bool inYoungGen(const void *p) const override {
return youngGen_.lowLim() == AlignedHeapSegment::storageStart(p);
return youngGen_.lowLim() == FixedSizeHeapSegment::storageStart(p);
}
bool inYoungGen(CompressedPointer p) const {
return p.getSegmentStart() == youngGenCP_;
@@ -361,34 +361,34 @@ class HadesGC final : public GCBase {
/// Call \p callback on every non-freelist cell allocated in this segment.
template <typename CallbackFunction>
static void forAllObjsInSegment(
AlignedHeapSegment &seg,
FixedSizeHeapSegment &seg,
CallbackFunction callback);
/// Only call the callback on cells without forwarding pointers.
template <typename CallbackFunction>
static void forCompactedObjsInSegment(
AlignedHeapSegment &seg,
FixedSizeHeapSegment &seg,
CallbackFunction callback,
PointerBase &base);

class OldGen final {
public:
explicit OldGen(HadesGC &gc);

std::deque<AlignedHeapSegment>::iterator begin();
std::deque<AlignedHeapSegment>::iterator end();
std::deque<AlignedHeapSegment>::const_iterator begin() const;
std::deque<AlignedHeapSegment>::const_iterator end() const;
std::deque<FixedSizeHeapSegment>::iterator begin();
std::deque<FixedSizeHeapSegment>::iterator end();
std::deque<FixedSizeHeapSegment>::const_iterator begin() const;
std::deque<FixedSizeHeapSegment>::const_iterator end() const;

size_t numSegments() const;

AlignedHeapSegment &operator[](size_t i);
FixedSizeHeapSegment &operator[](size_t i);

/// Take ownership of the given segment.
void addSegment(AlignedHeapSegment seg);
void addSegment(FixedSizeHeapSegment seg);

/// Remove the last segment from the OG.
/// \return the segment that was removed.
AlignedHeapSegment popSegment();
FixedSizeHeapSegment popSegment();

/// Indicate that OG should target having a size of \p targetSizeBytes.
void setTargetSizeBytes(size_t targetSizeBytes);
@@ -507,7 +507,7 @@ class HadesGC final : public GCBase {
static constexpr size_t kMinSizeForLargeBlock = 1
<< kLogMinSizeForLargeBlock;
static constexpr size_t kNumLargeFreelistBuckets =
llvh::detail::ConstantLog2<AlignedHeapSegment::maxSize()>::value -
llvh::detail::ConstantLog2<FixedSizeHeapSegment::maxSize()>::value -
kLogMinSizeForLargeBlock + 1;
static constexpr size_t kNumFreelistBuckets =
kNumSmallFreelistBuckets + kNumLargeFreelistBuckets;
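// Worked example of the bucket arithmetic above, with illustrative values
// only (the real constants are defined elsewhere in this header): if
// FixedSizeHeapSegment::maxSize() were 4 MiB (1 << 22) and
// kLogMinSizeForLargeBlock were 9, kNumLargeFreelistBuckets would be
// 22 - 9 + 1 = 14, one bucket per power-of-two size class from 512 bytes up
// to the 4 MiB maximum allocation.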
@@ -578,7 +578,7 @@ class HadesGC final : public GCBase {

/// Use a std::deque instead of a std::vector so that references into it
/// remain valid across a push_back.
std::deque<AlignedHeapSegment> segments_;
std::deque<FixedSizeHeapSegment> segments_;

/// See \c targetSizeBytes() above.
ExponentialMovingAverage targetSizeBytes_{0, 0};
@@ -660,9 +660,9 @@ class HadesGC final : public GCBase {
/// Keeps the storage provider alive until after the GC is fully destructed.
std::shared_ptr<StorageProvider> provider_;

/// youngGen is a bump-pointer space, so it can re-use AlignedHeapSegment.
/// youngGen is a bump-pointer space, so it can re-use FixedSizeHeapSegment.
/// Protected by gcMutex_.
AlignedHeapSegment youngGen_;
FixedSizeHeapSegment youngGen_;
AssignableCompressedPointer youngGenCP_;
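// Illustration (not a member of this class): bump-pointer allocation in a
// fixed-size segment is just a bounds check plus a pointer bump,
//
//   char *tryAlloc(char *&level, char *end, uint32_t sz) {
//     char *next = level + sz;
//     if (next > end)
//       return nullptr;                  // segment exhausted; caller collects
//     return std::exchange(level, next); // old level is the new object
//   }
//
// which is why the YG can reuse FixedSizeHeapSegment directly.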

/// List of cells in YG that have finalizers. Iterate through this to clean
@@ -672,7 +672,7 @@

/// Since YG collection times are the primary driver of pause times, it is
/// useful to have a knob to reduce the effective size of the YG. This number
/// is the fraction of AlignedHeapSegment::maxSize() that we should use for
/// is the fraction of FixedSizeHeapSegment::maxSize() that we should use for
/// the YG. Note that we only set the YG size using this at the end of the
/// first real YG, since doing it for direct promotions would waste OG memory
/// without a pause time benefit.
@@ -772,7 +772,7 @@ class HadesGC final : public GCBase {
/// \return true if the pointer lives in the segment that is being marked or
/// evacuated for compaction.
bool contains(const void *p) const {
return start == AlignedHeapSegment::storageStart(p);
return start == FixedSizeHeapSegment::storageStart(p);
}
bool contains(CompressedPointer p) const {
return p.getSegmentStart() == startCP;
@@ -781,7 +781,7 @@
/// \return true if the pointer lives in the segment that is currently being
/// evacuated for compaction.
bool evacContains(const void *p) const {
return evacStart == AlignedHeapSegment::storageStart(p);
return evacStart == FixedSizeHeapSegment::storageStart(p);
}
bool evacContains(CompressedPointer p) const {
return p.getSegmentStart() == evacStartCP;
@@ -829,7 +829,7 @@ class HadesGC final : public GCBase {
/// The segment being compacted. This should be removed from the OG right
/// after it is identified, and freed entirely once the compaction is
/// complete.
std::shared_ptr<AlignedHeapSegment> segment;
std::shared_ptr<FixedSizeHeapSegment> segment;
} compactee_;

/// The number of compactions this GC has performed.
@@ -964,7 +964,7 @@ class HadesGC final : public GCBase {
template <bool CompactionEnabled>
void scanDirtyCardsForSegment(
EvacAcceptor<CompactionEnabled> &acceptor,
AlignedHeapSegment &segment);
FixedSizeHeapSegment &segment);

/// Find all pointers from OG into the YG/compactee during a YG collection.
/// This is done quickly through use of write barriers that detect the
@@ -1011,19 +1011,19 @@ class HadesGC final : public GCBase {
uint64_t heapFootprint() const;

/// Accessor for the YG.
AlignedHeapSegment &youngGen() {
FixedSizeHeapSegment &youngGen() {
return youngGen_;
}
const AlignedHeapSegment &youngGen() const {
const FixedSizeHeapSegment &youngGen() const {
return youngGen_;
}

/// Create a new segment (to be used by either YG or OG).
llvh::ErrorOr<AlignedHeapSegment> createSegment();
llvh::ErrorOr<FixedSizeHeapSegment> createSegment();

/// Set a given segment as the YG segment.
/// \return the previous YG segment.
AlignedHeapSegment setYoungGen(AlignedHeapSegment seg);
FixedSizeHeapSegment setYoungGen(FixedSizeHeapSegment seg);

/// Get/set the current number of external bytes used by the YG.
size_t getYoungGenExternalBytes() const;
@@ -1048,7 +1048,7 @@
/// \param extraName append this to the name of the segment. Must be
/// non-empty.
void addSegmentExtentToCrashManager(
const AlignedHeapSegment &seg,
const FixedSizeHeapSegment &seg,
const std::string &extraName);

/// Deletes a segment from the CrashManager's custom data.
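Taken together, the declarations above give the segment hand-off between the YG and the OG roughly this shape (a sketch only: oldGen_ is assumed to be the OldGen member, and error handling is elided):

  llvh::ErrorOr<FixedSizeHeapSegment> seg = createSegment();
  if (seg) {
    // Install the fresh segment as the YG; setYoungGen() returns the previous
    // YG segment, and the OG takes ownership of it.
    FixedSizeHeapSegment prevYG = setYoungGen(std::move(*seg));
    oldGen_.addSegment(std::move(prevYG));
  }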
3 changes: 2 additions & 1 deletion include/hermes/VM/HeapRuntime.h
@@ -40,7 +40,8 @@ class HeapRuntime {
if (!ptrOrError)
hermes_fatal("Cannot initialize Runtime storage.", ptrOrError.getError());
static_assert(
sizeof(RT) < AlignedHeapSegment::storageSize(), "Segments too small.");
sizeof(RT) < FixedSizeHeapSegment::storageSize(),
"Segments too small.");
runtime_ = static_cast<RT *>(*ptrOrError);
}

7 changes: 4 additions & 3 deletions include/hermes/VM/StorageProvider.h
@@ -43,13 +43,14 @@ class StorageProvider {
}
/// Create a new segment memory space and give this memory the name \p name.
/// \return A pointer to a block of memory that has
/// AlignedHeapSegment::storageSize() bytes, and is aligned on
/// AlignedHeapSegment::storageSize().
/// FixedSizeHeapSegment::storageSize() bytes, and is aligned on
/// FixedSizeHeapSegment::storageSize().
llvh::ErrorOr<void *> newStorage(const char *name);

/// Delete the given segment's memory space, and make it available for re-use.
/// \post Nothing in the range [storage, storage +
/// AlignedHeapSegment::storageSize()) is valid memory to be read or written.
/// FixedSizeHeapSegment::storageSize()) is valid memory to be read or
/// written.
void deleteStorage(void *storage);

/// The number of storages this provider has allocated in its lifetime.
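A minimal usage sketch of this interface (the provider construction and the size are illustrative; the assert restates the documented alignment postcondition):

  auto provider = StorageProvider::contiguousVAProvider(
      16 * FixedSizeHeapSegment::storageSize());
  llvh::ErrorOr<void *> storage = provider->newStorage("example-segment");
  if (storage) {
    assert(
        reinterpret_cast<uintptr_t>(*storage) %
                FixedSizeHeapSegment::storageSize() ==
            0 &&
        "newStorage() must return storageSize()-aligned memory");
    provider->deleteStorage(*storage);
  }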
6 changes: 3 additions & 3 deletions lib/VM/LimitedStorageProvider.cpp
@@ -14,10 +14,10 @@ namespace hermes {
namespace vm {

llvh::ErrorOr<void *> LimitedStorageProvider::newStorageImpl(const char *name) {
if (limit_ < AlignedHeapSegment::storageSize()) {
if (limit_ < FixedSizeHeapSegment::storageSize()) {
return make_error_code(OOMError::TestVMLimitReached);
}
limit_ -= AlignedHeapSegment::storageSize();
limit_ -= FixedSizeHeapSegment::storageSize();
return delegate_->newStorage(name);
}

Expand All @@ -26,7 +26,7 @@ void LimitedStorageProvider::deleteStorageImpl(void *storage) {
return;
}
delegate_->deleteStorage(storage);
limit_ += AlignedHeapSegment::storageSize();
limit_ += FixedSizeHeapSegment::storageSize();
}

} // namespace vm
9 changes: 7 additions & 2 deletions lib/VM/Runtime.cpp
@@ -159,7 +159,7 @@ std::shared_ptr<Runtime> Runtime::create(const RuntimeConfig &runtimeConfig) {
uint64_t maxHeapSize = runtimeConfig.getGCConfig().getMaxHeapSize();
// Allow some extra segments for the runtime, and as a buffer for the GC.
uint64_t providerSize = std::min<uint64_t>(
1ULL << 32, maxHeapSize + AlignedHeapSegment::storageSize() * 4);
1ULL << 32, maxHeapSize + FixedSizeHeapSegment::storageSize() * 4);
std::shared_ptr<StorageProvider> sp =
StorageProvider::contiguousVAProvider(providerSize);
auto rt = HeapRuntime<Runtime>::create(sp);
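// Illustration of the sizing above (numbers are made up; storageSize() is a
// compile-time constant): with a 512 MiB max heap and a 4 MiB segment
// storage, providerSize = min(2^32, 512 MiB + 4 * 4 MiB) = 528 MiB, i.e. the
// buffer adds four segments' worth of address space beyond the configured
// maximum.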
@@ -252,7 +252,12 @@ void RuntimeBase::registerHeapSegment(unsigned idx, void *lowLim) {
reinterpret_cast<char *>(lowLim) - (idx << AlignedHeapSegment::kLogSize);
segmentMap[idx] = bias;
#endif
assert(lowLim == AlignedHeapSegment::storageStart(lowLim) && "Precondition");
// Ideally we would assert that lowLim is the start address of the segment,
// but the approach for computing the segment start address does not work for
// JumboHeapSegment.
assert(
(uintptr_t)(lowLim) % AlignedHeapSegment::kSegmentUnitSize == 0 &&
"Segment start address should be aligned to kSegmentUnitSize");
AlignedHeapSegment::setSegmentIndexFromStart(lowLim, idx);
}
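// Sketch of what the bias above buys (assuming the compressed form packs the
// segment index into the high bits, compressed = (idx << kLogSize) | offset):
//   decoded = segmentMap[idx] + compressed
//           = (lowLim - (idx << kLogSize)) + (idx << kLogSize) + offset
//           = lowLim + offset
// so decoding a pointer is a single table lookup plus an add.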

35 changes: 18 additions & 17 deletions lib/VM/StorageProvider.cpp
@@ -57,12 +57,12 @@ namespace {

bool isAligned(void *p) {
return (reinterpret_cast<uintptr_t>(p) &
(AlignedHeapSegment::storageSize() - 1)) == 0;
(FixedSizeHeapSegment::storageSize() - 1)) == 0;
}

char *alignAlloc(void *p) {
return reinterpret_cast<char *>(llvh::alignTo(
reinterpret_cast<uintptr_t>(p), AlignedHeapSegment::storageSize()));
reinterpret_cast<uintptr_t>(p), FixedSizeHeapSegment::storageSize()));
}

void *getMmapHint() {
@@ -85,9 +85,9 @@ class VMAllocateStorageProvider final : public StorageProvider {
class ContiguousVAStorageProvider final : public StorageProvider {
public:
ContiguousVAStorageProvider(size_t size)
: size_(llvh::alignTo<AlignedHeapSegment::storageSize()>(size)) {
: size_(llvh::alignTo<FixedSizeHeapSegment::storageSize()>(size)) {
auto result = oscompat::vm_reserve_aligned(
size_, AlignedHeapSegment::storageSize(), getMmapHint());
size_, FixedSizeHeapSegment::storageSize(), getMmapHint());
if (!result)
hermes_fatal("Contiguous storage allocation failed.", result.getError());
level_ = start_ = static_cast<char *>(*result);
@@ -104,26 +104,27 @@ class ContiguousVAStorageProvider final : public StorageProvider {
freelist_.pop_back();
} else if (level_ < start_ + size_) {
storage =
std::exchange(level_, level_ + AlignedHeapSegment::storageSize());
std::exchange(level_, level_ + FixedSizeHeapSegment::storageSize());
} else {
return make_error_code(OOMError::MaxStorageReached);
}
auto res = oscompat::vm_commit(storage, AlignedHeapSegment::storageSize());
auto res =
oscompat::vm_commit(storage, FixedSizeHeapSegment::storageSize());
if (res) {
oscompat::vm_name(storage, AlignedHeapSegment::storageSize(), name);
oscompat::vm_name(storage, FixedSizeHeapSegment::storageSize(), name);
}
return res;
}

void deleteStorageImpl(void *storage) override {
assert(
!llvh::alignmentAdjustment(
storage, AlignedHeapSegment::storageSize()) &&
storage, FixedSizeHeapSegment::storageSize()) &&
"Storage not aligned");
assert(storage >= start_ && storage < level_ && "Storage not in region");
oscompat::vm_name(
storage, AlignedHeapSegment::storageSize(), kFreeRegionName);
oscompat::vm_uncommit(storage, AlignedHeapSegment::storageSize());
storage, FixedSizeHeapSegment::storageSize(), kFreeRegionName);
oscompat::vm_uncommit(storage, FixedSizeHeapSegment::storageSize());
freelist_.push_back(storage);
}
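// Life cycle recap for this provider: the constructor reserves the whole VA
// range up front; newStorageImpl() hands out storageSize()-sized blocks,
// reusing freelist_ entries when available and otherwise bumping level_, and
// commits pages only at that point; deleteStorageImpl() uncommits the pages
// and pushes the block onto freelist_ for later reuse.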

@@ -149,11 +149,11 @@ class MallocStorageProvider final : public StorageProvider {

llvh::ErrorOr<void *> VMAllocateStorageProvider::newStorageImpl(
const char *name) {
assert(AlignedHeapSegment::storageSize() % oscompat::page_size() == 0);
assert(FixedSizeHeapSegment::storageSize() % oscompat::page_size() == 0);
// Allocate the space, hoping it will be the correct alignment.
auto result = oscompat::vm_allocate_aligned(
AlignedHeapSegment::storageSize(),
AlignedHeapSegment::storageSize(),
FixedSizeHeapSegment::storageSize(),
FixedSizeHeapSegment::storageSize(),
getMmapHint());
if (!result) {
return result;
@@ -162,25 +162,25 @@ llvh::ErrorOr<void *> VMAllocateStorageProvider::newStorageImpl(
assert(isAligned(mem));
(void)&isAligned;
#ifdef HERMESVM_ALLOW_HUGE_PAGES
oscompat::vm_hugepage(mem, AlignedHeapSegment::storageSize());
oscompat::vm_hugepage(mem, FixedSizeHeapSegment::storageSize());
#endif

// Name the memory region on platforms that support naming.
oscompat::vm_name(mem, AlignedHeapSegment::storageSize(), name);
oscompat::vm_name(mem, FixedSizeHeapSegment::storageSize(), name);
return mem;
}

void VMAllocateStorageProvider::deleteStorageImpl(void *storage) {
if (!storage) {
return;
}
oscompat::vm_free_aligned(storage, AlignedHeapSegment::storageSize());
oscompat::vm_free_aligned(storage, FixedSizeHeapSegment::storageSize());
}

llvh::ErrorOr<void *> MallocStorageProvider::newStorageImpl(const char *name) {
// name is unused, can't name malloc memory.
(void)name;
void *mem = checkedMalloc2(AlignedHeapSegment::storageSize(), 2u);
void *mem = checkedMalloc2(FixedSizeHeapSegment::storageSize(), 2u);
void *lowLim = alignAlloc(mem);
assert(isAligned(lowLim) && "New storage should be aligned");
lowLimToAllocHandle_[lowLim] = mem;
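// Note on the allocation above: requesting two storageSize() units guarantees
// that one fully aligned storageSize() block fits somewhere in the malloc'd
// region; alignAlloc() rounds up to that block, and the original malloc
// pointer is stashed in lowLimToAllocHandle_ so it can be freed later.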