Add support for allocating different sizes of storage in StorageProvider (#1504)

Summary:
Pull Request resolved: #1504

Large segments need to be backed by a larger storage size.
StorageProvider currently always allocates a fixed storage size
determined by HERMESVM_LOG_HEAP_SEGMENT_SIZE.

This diff adds support for allocating larger storage with the following
changes:
1. `newStorage()` and `deleteStorage()` take an additional `sz` parameter
(a usage sketch follows this list).
2. For `MallocStorageProvider` and `VMAllocateStorageProvider`, simply
replace the previous fixed storage size with the passed-in `sz`.
3. For `ContiguousVAStorageProvider`, use a BitVector to manage
allocations and deallocations (a first-fit sketch appears at the end of
this summary). This can be improved later if we observe fragmentation.

Support for enabling different heap segment sizes will be added later.
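To illustrate change 3, below is a minimal, self-contained sketch of the
first-fit bit scan that the BitVector bookkeeping performs: one bit per
kSegmentUnitSize unit of the reserved range, set while the unit is in use. It
is a simplified stand-in that uses std::vector<bool> instead of llvh::BitVector
and omits the cached firstFreeBit_ optimization from the actual change.

#include <cassert>
#include <cstdio>
#include <vector>

// Simplified stand-in for the provider's statusBits_: one bit per
// kSegmentUnitSize unit of the reserved VA range; true means "in use".
class SegmentBitmap {
 public:
  explicit SegmentBitmap(size_t numUnits) : used_(numUnits, false) {}

  // First-fit search for a run of numUnits clear bits. Returns the start
  // unit index, or -1 if no large enough run exists (the real provider
  // returns OOMError::MaxStorageReached in that case).
  int allocate(size_t numUnits) {
    size_t i = 0;
    while (i + numUnits <= used_.size()) {
      // Skip over units that are already in use.
      while (i < used_.size() && used_[i])
        ++i;
      // Measure the free run starting at i, up to numUnits long.
      size_t j = i;
      while (j < used_.size() && !used_[j] && j - i < numUnits)
        ++j;
      if (j - i == numUnits) {
        for (size_t k = i; k < j; ++k)
          used_[k] = true; // statusBits_.set(i, j) in the diff
        return static_cast<int>(i);
      }
      i = j; // Run too short; continue scanning after it.
    }
    return -1;
  }

  // Clear the bits for a freed storage, as deleteStorageImpl does.
  void release(size_t startUnit, size_t numUnits) {
    for (size_t k = startUnit; k < startUnit + numUnits; ++k) {
      assert(used_[k] && "releasing a unit that is not in use");
      used_[k] = false;
    }
  }

 private:
  std::vector<bool> used_;
};

int main() {
  SegmentBitmap bits(8);              // 8 units of reserved address space
  int a = bits.allocate(1);           // 0
  int b = bits.allocate(3);           // 1: first run of 3 free units
  bits.release(a, 1);
  int c = bits.allocate(2);           // 4: the hole at unit 0 is too small
  std::printf("%d %d %d\n", a, b, c); // prints: 0 1 4
}

The real provider additionally tracks firstFreeBit_ so that repeated small
allocations do not rescan the vector from the start, and it commits and
uncommits the corresponding pages on allocation and deletion.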

Differential Revision: D61676721
lavenzg authored and facebook-github-bot committed Nov 20, 2024
1 parent 9bdff60 commit 3a3cca1
Showing 8 changed files with 253 additions and 132 deletions.
9 changes: 4 additions & 5 deletions include/hermes/VM/HeapRuntime.h
@@ -22,7 +22,7 @@ class HeapRuntime {
public:
~HeapRuntime() {
runtime_->~RT();
sp_->deleteStorage(runtime_);
sp_->deleteStorage(runtime_, kHeapRuntimeStorageSize);
}

/// Allocate a segment and create an aliased shared_ptr that points to the
@@ -36,17 +36,16 @@

private:
HeapRuntime(std::shared_ptr<StorageProvider> sp) : sp_{std::move(sp)} {
auto ptrOrError = sp_->newStorage("hermes-rt");
auto ptrOrError = sp_->newStorage(kHeapRuntimeStorageSize, "hermes-rt");
if (!ptrOrError)
hermes_fatal("Cannot initialize Runtime storage.", ptrOrError.getError());
static_assert(
sizeof(RT) < FixedSizeHeapSegment::storageSize(),
"Segments too small.");
static_assert(sizeof(RT) < kHeapRuntimeStorageSize, "Segments too small.");
runtime_ = static_cast<RT *>(*ptrOrError);
}

std::shared_ptr<StorageProvider> sp_;
RT *runtime_;
static constexpr size_t kHeapRuntimeStorageSize = FixedSizeHeapSegment::kSize;
};
} // namespace vm
} // namespace hermes
4 changes: 2 additions & 2 deletions include/hermes/VM/LimitedStorageProvider.h
@@ -29,9 +29,9 @@ class LimitedStorageProvider final : public StorageProvider {
: delegate_(std::move(provider)), limit_(limit) {}

protected:
llvh::ErrorOr<void *> newStorageImpl(const char *name) override;
llvh::ErrorOr<void *> newStorageImpl(size_t sz, const char *name) override;

void deleteStorageImpl(void *storage) override;
void deleteStorageImpl(void *storage, size_t sz) override;
};

} // namespace vm
30 changes: 15 additions & 15 deletions include/hermes/VM/StorageProvider.h
@@ -37,21 +37,17 @@ class StorageProvider {

/// @}

/// Create a new segment memory space.
llvh::ErrorOr<void *> newStorage() {
return newStorage(nullptr);
}
/// Create a new segment memory space and give this memory the name \p name.
/// \return A pointer to a block of memory that has
/// FixedSizeHeapSegment::storageSize() bytes, and is aligned on
/// FixedSizeHeapSegment::storageSize().
llvh::ErrorOr<void *> newStorage(const char *name);
/// \return A pointer to a block of memory that has \p sz bytes, and is
/// aligned on AlignedHeapSegment::kSegmentUnitSize. Note that \p sz must
/// be non-zero and a multiple of
/// AlignedHeapSegment::kSegmentUnitSize.
llvh::ErrorOr<void *> newStorage(size_t sz, const char *name = nullptr);

/// Delete the given segment's memory space, and make it available for re-use.
/// \post Nothing in the range [storage, storage +
/// FixedSizeHeapSegment::storageSize()) is valid memory to be read or
/// written.
void deleteStorage(void *storage);
/// Note that \p sz must be the same size that was used when allocating
/// \p storage.
/// \post Nothing in the range [storage, storage + sz) is valid memory to be
/// read or written.
void deleteStorage(void *storage, size_t sz);

/// The number of storages this provider has allocated in its lifetime.
size_t numSucceededAllocs() const;
@@ -68,8 +64,12 @@
size_t numLiveAllocs() const;

protected:
virtual llvh::ErrorOr<void *> newStorageImpl(const char *name) = 0;
virtual void deleteStorageImpl(void *storage) = 0;
/// \pre \p sz is non-zero and a multiple of
/// AlignedHeapSegment::kSegmentUnitSize.
virtual llvh::ErrorOr<void *> newStorageImpl(size_t sz, const char *name) = 0;
/// \pre \p sz is non-zero and a multiple of
/// AlignedHeapSegment::kSegmentUnitSize.
virtual void deleteStorageImpl(void *storage, size_t sz) = 0;

private:
size_t numSucceededAllocs_{0};
14 changes: 8 additions & 6 deletions lib/VM/LimitedStorageProvider.cpp
@@ -13,20 +13,22 @@
namespace hermes {
namespace vm {

llvh::ErrorOr<void *> LimitedStorageProvider::newStorageImpl(const char *name) {
llvh::ErrorOr<void *> LimitedStorageProvider::newStorageImpl(
size_t sz,
const char *name) {
if (limit_ < FixedSizeHeapSegment::storageSize()) {
return make_error_code(OOMError::TestVMLimitReached);
}
limit_ -= FixedSizeHeapSegment::storageSize();
return delegate_->newStorage(name);
limit_ -= sz;
return delegate_->newStorage(sz, name);
}

void LimitedStorageProvider::deleteStorageImpl(void *storage) {
void LimitedStorageProvider::deleteStorageImpl(void *storage, size_t sz) {
if (!storage) {
return;
}
delegate_->deleteStorage(storage);
limit_ += FixedSizeHeapSegment::storageSize();
delegate_->deleteStorage(storage, sz);
limit_ += sz;
}

} // namespace vm
158 changes: 106 additions & 52 deletions lib/VM/StorageProvider.cpp
@@ -7,11 +7,13 @@

#include "hermes/VM/StorageProvider.h"

#include "hermes/ADT/BitArray.h"
#include "hermes/Support/CheckedMalloc.h"
#include "hermes/Support/Compiler.h"
#include "hermes/Support/OSCompat.h"
#include "hermes/VM/AlignedHeapSegment.h"

#include "llvh/ADT/BitVector.h"
#include "llvh/ADT/DenseMap.h"
#include "llvh/Support/ErrorHandling.h"
#include "llvh/Support/MathExtras.h"
@@ -55,14 +57,17 @@ namespace vm {

namespace {

/// Minimum segment storage size. Any larger segment size should be a multiple
/// of it.
constexpr auto kSegmentUnitSize = AlignedHeapSegment::kSegmentUnitSize;

bool isAligned(void *p) {
return (reinterpret_cast<uintptr_t>(p) &
(FixedSizeHeapSegment::storageSize() - 1)) == 0;
return (reinterpret_cast<uintptr_t>(p) & (kSegmentUnitSize - 1)) == 0;
}

char *alignAlloc(void *p) {
return reinterpret_cast<char *>(llvh::alignTo(
reinterpret_cast<uintptr_t>(p), FixedSizeHeapSegment::storageSize()));
return reinterpret_cast<char *>(
llvh::alignTo(reinterpret_cast<uintptr_t>(p), kSegmentUnitSize));
}

void *getMmapHint() {
@@ -78,68 +83,108 @@

class VMAllocateStorageProvider final : public StorageProvider {
public:
llvh::ErrorOr<void *> newStorageImpl(const char *name) override;
void deleteStorageImpl(void *storage) override;
llvh::ErrorOr<void *> newStorageImpl(size_t sz, const char *name) override;
void deleteStorageImpl(void *storage, size_t sz) override;
};

class ContiguousVAStorageProvider final : public StorageProvider {
public:
ContiguousVAStorageProvider(size_t size)
: size_(llvh::alignTo<FixedSizeHeapSegment::storageSize()>(size)) {
auto result = oscompat::vm_reserve_aligned(
size_, FixedSizeHeapSegment::storageSize(), getMmapHint());
: size_(llvh::alignTo<kSegmentUnitSize>(size)),
statusBits_(size_ / kSegmentUnitSize) {
auto result =
oscompat::vm_reserve_aligned(size_, kSegmentUnitSize, getMmapHint());
if (!result)
hermes_fatal("Contiguous storage allocation failed.", result.getError());
level_ = start_ = static_cast<char *>(*result);
start_ = static_cast<char *>(*result);
oscompat::vm_name(start_, size_, kFreeRegionName);
}
~ContiguousVAStorageProvider() override {
oscompat::vm_release_aligned(start_, size_);
}

llvh::ErrorOr<void *> newStorageImpl(const char *name) override {
llvh::ErrorOr<void *> newStorageImpl(size_t sz, const char *name) override {
// No available space to use.
if (LLVM_UNLIKELY(firstFreeBit_ == -1)) {
return make_error_code(OOMError::MaxStorageReached);
}

assert(
statusBits_.find_first_unset() == firstFreeBit_ &&
"firstFreeBit_ should always be the first unset bit");

void *storage;
if (!freelist_.empty()) {
storage = freelist_.back();
freelist_.pop_back();
} else if (level_ < start_ + size_) {
storage =
std::exchange(level_, level_ + FixedSizeHeapSegment::storageSize());
} else {
int numUnits = sz / kSegmentUnitSize;
int nextUsedBit = statusBits_.find_next(firstFreeBit_);
int curFreeBit = firstFreeBit_;
// Search for a large enough contiguous range of free bits.
while (nextUsedBit != -1 && (nextUsedBit - curFreeBit < numUnits)) {
curFreeBit = statusBits_.find_next_unset(nextUsedBit);
if (curFreeBit == -1) {
return make_error_code(OOMError::MaxStorageReached);
}
nextUsedBit = statusBits_.find_next(curFreeBit);
}
// nextUsedBit could be -1, so check if there is enough space left.
if (nextUsedBit == -1 && curFreeBit + numUnits > (int)statusBits_.size()) {
return make_error_code(OOMError::MaxStorageReached);
}
auto res =
oscompat::vm_commit(storage, FixedSizeHeapSegment::storageSize());

storage = start_ + curFreeBit * kSegmentUnitSize;
statusBits_.set(curFreeBit, curFreeBit + numUnits);
// Reset it to the new leftmost free bit.
// If curFreeBit != firstFreeBit_, it means the current firstFreeBit_ is
// still unset, and is certainly the leftmost free bit.
if (curFreeBit == firstFreeBit_) {
// Subtract 1 since curFreeBit + numUnits might be the end of the bit vector.
firstFreeBit_ = statusBits_.find_next_unset(curFreeBit + numUnits - 1);
}

auto res = oscompat::vm_commit(storage, sz);
if (res) {
oscompat::vm_name(storage, FixedSizeHeapSegment::storageSize(), name);
oscompat::vm_name(storage, sz, name);
}
return res;
}

void deleteStorageImpl(void *storage) override {
void deleteStorageImpl(void *storage, size_t sz) override {
assert(
!llvh::alignmentAdjustment(
storage, FixedSizeHeapSegment::storageSize()) &&
!llvh::alignmentAdjustment(storage, kSegmentUnitSize) &&
"Storage not aligned");
assert(storage >= start_ && storage < level_ && "Storage not in region");
oscompat::vm_name(
storage, FixedSizeHeapSegment::storageSize(), kFreeRegionName);
oscompat::vm_uncommit(storage, FixedSizeHeapSegment::storageSize());
freelist_.push_back(storage);
assert(
storage >= start_ && storage < start_ + size_ &&
"Storage not in region");
oscompat::vm_name(storage, sz, kFreeRegionName);
oscompat::vm_uncommit(storage, sz);
size_t numUnits = sz / kSegmentUnitSize;
// Reset all bits for this storage.
int startIndex = (static_cast<char *>(storage) - start_) / kSegmentUnitSize;
statusBits_.reset(startIndex, startIndex + numUnits);
if (startIndex < firstFreeBit_)
firstFreeBit_ = startIndex;
}

private:
static constexpr const char *kFreeRegionName = "hermes-free-heap";
size_t size_;
char *start_;
char *level_;
llvh::SmallVector<void *, 0> freelist_;
/// First free bit in \c statusBits_. We always make new allocations
/// starting from the leftmost free bit, based on these heuristics:
/// 1. Usually the reserved address space is not full.
/// 2. Storage with size kSegmentUnitSize is allocated and deleted more
/// frequently than larger storage.
/// 3. Small storages will likely find available space at the leftmost free
/// bit, leaving enough space on the right side for large storages.
int firstFreeBit_{0};
/// One bit for each kSegmentUnitSize space in the entire reserved virtual
/// address space. A bit is set if the corresponding space is used.
llvh::BitVector statusBits_;
};

class MallocStorageProvider final : public StorageProvider {
public:
llvh::ErrorOr<void *> newStorageImpl(const char *name) override;
void deleteStorageImpl(void *storage) override;
llvh::ErrorOr<void *> newStorageImpl(size_t sz, const char *name) override;
void deleteStorageImpl(void *storage, size_t sz) override;

private:
/// Map aligned starts to actual starts for freeing.
@@ -149,46 +194,48 @@ class MallocStorageProvider final : public StorageProvider {
};

llvh::ErrorOr<void *> VMAllocateStorageProvider::newStorageImpl(
size_t sz,
const char *name) {
assert(FixedSizeHeapSegment::storageSize() % oscompat::page_size() == 0);
assert(kSegmentUnitSize % oscompat::page_size() == 0);
// Allocate the space, hoping it will be the correct alignment.
auto result = oscompat::vm_allocate_aligned(
FixedSizeHeapSegment::storageSize(),
FixedSizeHeapSegment::storageSize(),
getMmapHint());
auto result =
oscompat::vm_allocate_aligned(sz, kSegmentUnitSize, getMmapHint());
if (!result) {
return result;
}
void *mem = *result;
assert(isAligned(mem));
(void)&isAligned;
#ifdef HERMESVM_ALLOW_HUGE_PAGES
oscompat::vm_hugepage(mem, FixedSizeHeapSegment::storageSize());
#endif

oscompat::vm_hugepage(mem, sz);
// Name the memory region on platforms that support naming.
oscompat::vm_name(mem, FixedSizeHeapSegment::storageSize(), name);
oscompat::vm_name(mem, sz, name);
return mem;
}

void VMAllocateStorageProvider::deleteStorageImpl(void *storage) {
void VMAllocateStorageProvider::deleteStorageImpl(void *storage, size_t sz) {
if (!storage) {
return;
}
oscompat::vm_free_aligned(storage, FixedSizeHeapSegment::storageSize());
oscompat::vm_free_aligned(storage, sz);
}

llvh::ErrorOr<void *> MallocStorageProvider::newStorageImpl(const char *name) {
llvh::ErrorOr<void *> MallocStorageProvider::newStorageImpl(
size_t sz,
const char *name) {
// name is unused, can't name malloc memory.
(void)name;
void *mem = checkedMalloc2(FixedSizeHeapSegment::storageSize(), 2u);
// Allocate sz + kSegmentUnitSize bytes so that we can get an address
// aligned to kSegmentUnitSize.
void *mem = checkedMalloc2(/*count*/ 1u, sz + kSegmentUnitSize);
void *lowLim = alignAlloc(mem);
assert(isAligned(lowLim) && "New storage should be aligned");
lowLimToAllocHandle_[lowLim] = mem;
return lowLim;
}

void MallocStorageProvider::deleteStorageImpl(void *storage) {
void MallocStorageProvider::deleteStorageImpl(void *storage, size_t sz) {
// free() does not need the memory size.
(void)sz;
if (!storage) {
return;
}
@@ -218,8 +265,11 @@ std::unique_ptr<StorageProvider> StorageProvider::mallocProvider() {
return std::unique_ptr<StorageProvider>(new MallocStorageProvider);
}

llvh::ErrorOr<void *> StorageProvider::newStorage(const char *name) {
auto res = newStorageImpl(name);
llvh::ErrorOr<void *> StorageProvider::newStorage(size_t sz, const char *name) {
assert(
sz && (sz % kSegmentUnitSize == 0) &&
"Allocated storage size must be multiples of kSegmentUnitSize");
auto res = newStorageImpl(sz, name);

if (res) {
numSucceededAllocs_++;
@@ -230,13 +280,17 @@ llvh::ErrorOr<void *> StorageProvider::newStorage(const char *name) {
return res;
}

void StorageProvider::deleteStorage(void *storage) {
void StorageProvider::deleteStorage(void *storage, size_t sz) {
if (!storage) {
return;
}

assert(
sz && (sz % kSegmentUnitSize == 0) &&
"Allocated storage size must be multiples of kSegmentUnitSize");

numDeletedAllocs_++;
deleteStorageImpl(storage);
return deleteStorageImpl(storage, sz);
}

llvh::ErrorOr<std::pair<void *, size_t>>