From c437c6e966a3fa7677e46d490c0ea6158b1b95c6 Mon Sep 17 00:00:00 2001
From: "Gang Zhao (Hermes)"
Date: Mon, 21 Oct 2024 16:20:39 -0700
Subject: [PATCH 1/5] Move memory layout and common methods of
 AlignedHeapSegment to AlignedHeapSegmentBase (#1510)

Summary:
The large heap segment type should have the same storage layout as the current
AlignedHeapSegment, and share a few common methods. Abstract these into a base
class, and make both AlignedHeapSegment and JumboHeapSegment inherit from the
base type.

Differential Revision: D61675022
---
 include/hermes/VM/AlignedHeapSegment.h     | 372 +++++++++++----------
 lib/VM/gcs/AlignedHeapSegment.cpp          |  32 +-
 unittests/VMRuntime/MarkBitArrayNCTest.cpp |  37 +-
 3 files changed, 228 insertions(+), 213 deletions(-)

diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h
index 4a7d96b197e..19901e6e88d 100644
--- a/include/hermes/VM/AlignedHeapSegment.h
+++ b/include/hermes/VM/AlignedHeapSegment.h
@@ -36,9 +36,9 @@ class StorageProvider;
 // TODO (T25527350): Debug Dump
 // TODO (T25527350): Heap Moving
 
-/// An \c AlignedHeapSegment is a contiguous chunk of memory aligned to its own
-/// storage size (which is a fixed power of two number of bytes). The storage
-/// is further split up according to the diagram below:
+/// An \c AlignedHeapSegmentBase manages a contiguous chunk of memory aligned
+/// to kSegmentUnitSize. The storage is further split up according to the
+/// diagram below:
 ///
 /// +----------------------------------------+
 /// | (1) Card Table                         |
 /// +----------------------------------------+
 /// | (2) Mark Bit Array                     |
 /// +----------------------------------------+
 /// | (3) Allocation space                   |
 /// |                                        |
 /// |                  ...                   |
 /// |                 (End)                  |
 /// +----------------------------------------+
 ///
-/// The tables in (1), and (2) cover the contiguous allocation space (3)
-/// into which GCCells are bump allocated.
-class AlignedHeapSegment {
+/// The tables in (1) and (2) cover the contiguous allocation space (3) into
+/// which GCCells are bump allocated. They have a fixed size computed from
+/// kSegmentUnitSize. For segments with larger sizes (which must be multiples
+/// of kSegmentUnitSize), the card table allocates its internal arrays
+/// separately instead. Any segment size smaller than kSegmentUnitSize is not
+/// supported.
+class AlignedHeapSegmentBase {
 public:
-  /// @name Constants and utility functions for the aligned storage of \c
-  /// AlignedHeapSegment.
-  ///
-  /// @{
-  /// The size and the alignment of the storage, in bytes.
-  static constexpr unsigned kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE;
-  static constexpr size_t kSize{1 << kLogSize};
-  /// Mask for isolating the offset into a storage for a pointer.
-  static constexpr size_t kLowMask{kSize - 1};
-  /// Mask for isolating the storage being pointed into by a pointer.
-  static constexpr size_t kHighMask{~kLowMask};
-
-  /// Returns the storage size, in bytes, of an \c AlignedHeapSegment.
-  static constexpr size_t storageSize() {
-    return kSize;
-  }
-
-  /// Returns the pointer to the beginning of the storage containing \p ptr
-  /// (inclusive). Assuming such a storage exists. Note that
-  ///
-  ///     storageStart(seg.hiLim()) != seg.lowLim()
-  ///
-  /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
-  /// is the first address not in the bounds.
-  static void *storageStart(const void *ptr) {
-    return reinterpret_cast<char *>(
-        reinterpret_cast<uintptr_t>(ptr) & kHighMask);
-  }
-
-  /// Returns the pointer to the end of the storage containing \p ptr
-  /// (exclusive). Assuming such a storage exists.
Note that - /// - /// storageEnd(seg.hiLim()) != seg.hiLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageEnd(const void *ptr) { - return reinterpret_cast(storageStart(ptr)) + kSize; - } - - /// Returns the offset in bytes to \p ptr from the start of its containing - /// storage. Assuming such a storage exists. Note that - /// - /// offset(seg.hiLim()) != seg.size() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static size_t offset(const char *ptr) { - return reinterpret_cast(ptr) & kLowMask; - } - /// @} - - /// Construct a null AlignedHeapSegment (one that does not own memory). - AlignedHeapSegment() = default; - /// \c AlignedHeapSegment is movable and assignable, but not copyable. - AlignedHeapSegment(AlignedHeapSegment &&); - AlignedHeapSegment &operator=(AlignedHeapSegment &&); - AlignedHeapSegment(const AlignedHeapSegment &) = delete; - - ~AlignedHeapSegment(); - - /// Create a AlignedHeapSegment by allocating memory with \p provider. - static llvh::ErrorOr create(StorageProvider *provider); - static llvh::ErrorOr create( - StorageProvider *provider, - const char *name); + static constexpr size_t kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; + static constexpr size_t kSegmentUnitSize = (1 << kLogSize); /// Contents of the memory region managed by this segment. class Contents { public: /// The number of bits representing the total number of heap-aligned /// addresses in the segment storage. - static constexpr size_t kMarkBitArraySize = kSize >> LogHeapAlign; + static constexpr size_t kMarkBitArraySize = + kSegmentUnitSize >> LogHeapAlign; /// BitArray for marking allocation region of a segment. using MarkBitArray = BitArray; @@ -138,6 +78,7 @@ class AlignedHeapSegment { private: friend class AlignedHeapSegment; + friend class AlignedHeapSegmentBase; /// Note that because of the Contents object, the first few bytes of the /// card table are unused, we instead use them to store a small @@ -179,10 +120,11 @@ class AlignedHeapSegment { "SHSegmentInfo does not fit in available unused CardTable space."); /// The offset from the beginning of a segment of the allocatable region. - static constexpr size_t offsetOfAllocRegion{offsetof(Contents, allocRegion_)}; + static constexpr size_t kOffsetOfAllocRegion{ + offsetof(Contents, allocRegion_)}; static_assert( - isSizeHeapAligned(offsetOfAllocRegion), + isSizeHeapAligned(kOffsetOfAllocRegion), "Allocation region must start at a heap aligned offset"); static_assert( @@ -215,6 +157,178 @@ class AlignedHeapSegment { GCCell *cell_{nullptr}; }; + /// Returns the address that is the lower bound of the segment. + /// \post The returned pointer is guaranteed to be aligned to + /// kSegmentUnitSize. + char *lowLim() const { + return lowLim_; + } + + /// Returns the address at which the first allocation in this segment would + /// occur. + /// Disable UB sanitization because 'this' may be null during the tests. + char *start() const LLVM_NO_SANITIZE("undefined") { + return contents()->allocRegion_; + } + + /// Return a reference to the card table covering the memory region managed by + /// this segment. + CardTable &cardTable() const { + return contents()->cardTable_; + } + + /// Return a reference to the mark bit array covering the memory region + /// managed by this segment. 
+  Contents::MarkBitArray &markBitArray() const {
+    return contents()->markBitArray_;
+  }
+
+  /// Mark the given \p cell. Assumes the given address is a valid heap object.
+  static void setCellMarkBit(const GCCell *cell) {
+    auto *markBits = markBitArrayCovering(cell);
+    size_t ind = addressToMarkBitArrayIndex(cell);
+    markBits->set(ind, true);
+  }
+
+  /// Return whether the given \p cell is marked. Assumes the given address is
+  /// a valid heap object.
+  static bool getCellMarkBit(const GCCell *cell) {
+    auto *markBits = markBitArrayCovering(cell);
+    size_t ind = addressToMarkBitArrayIndex(cell);
+    return markBits->at(ind);
+  }
+
+ protected:
+  AlignedHeapSegmentBase() = default;
+
+  /// Construct Contents() at the address of \p lowLim.
+  AlignedHeapSegmentBase(void *lowLim)
+      : lowLim_(reinterpret_cast<char *>(lowLim)) {
+    new (contents()) Contents();
+    contents()->protectGuardPage(oscompat::ProtectMode::None);
+  }
+
+  /// Return a pointer to the contents of the memory region managed by this
+  /// segment.
+  Contents *contents() const {
+    return reinterpret_cast<Contents *>(lowLim_);
+  }
+
+  /// Given the \p lowLim of some valid segment's memory region, returns a
+  /// pointer to the Contents laid out in the storage, assuming it exists.
+  static Contents *contents(void *lowLim) {
+    return reinterpret_cast<Contents *>(lowLim);
+  }
+
+  /// The start of the aligned segment.
+  char *lowLim_{nullptr};
+
+ private:
+  /// Return the starting address of the aligned region of size
+  /// kSegmentUnitSize that \p cell resides in. If \p cell resides in a
+  /// JumboSegment, it is the only cell there, so this essentially returns the
+  /// segment's starting address.
+  static char *alignedStorageStart(const GCCell *cell) {
+    return reinterpret_cast<char *>(
+        reinterpret_cast<uintptr_t>(cell) & ~(kSegmentUnitSize - 1));
+  }
+
+  /// Given a \p cell, returns a pointer to the MarkBitArray covering the
+  /// segment that \p cell resides in.
+  ///
+  /// \pre There exists a currently alive heap that claims to contain \p cell.
+  static Contents::MarkBitArray *markBitArrayCovering(const GCCell *cell) {
+    auto *segStart = alignedStorageStart(cell);
+    return &contents(segStart)->markBitArray_;
+  }
+
+  /// Translate the given address to a 0-based index in the MarkBitArray of
+  /// its segment. The base address is the start of the storage of this
+  /// segment. For a JumboSegment, this should always return the constant
+  /// index kOffsetOfAllocRegion >> LogHeapAlign.
+  static size_t addressToMarkBitArrayIndex(const GCCell *cell) {
+    auto *cp = reinterpret_cast<const char *>(cell);
+    auto *base = reinterpret_cast<const char *>(alignedStorageStart(cell));
+    return (cp - base) >> LogHeapAlign;
+  }
+};
+
+/// JumboHeapSegment has a custom storage size that must be a multiple of
+/// kSegmentUnitSize. Each such segment can only allocate a single object that
+/// occupies the entire allocation space. Therefore, the inline MarkBitArray
+/// is large enough, while the CardTable is stored separately.
+class JumboHeapSegment : public AlignedHeapSegmentBase {};
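Both segment flavors rely on the masking arithmetic in alignedStorageStart()
above. A minimal, self-contained sketch of that computation, assuming a
hypothetical 1 MiB unit rather than the build's configured kSegmentUnitSize:

#include <cassert>
#include <cstdint>

// Stand-in for AlignedHeapSegmentBase::kSegmentUnitSize (hypothetical value).
constexpr uintptr_t kUnit = uintptr_t{1} << 20;

// Mirrors alignedStorageStart(): clear the low bits of the address to find
// the enclosing kUnit-aligned region.
uintptr_t unitStart(uintptr_t addr) {
  return addr & ~(kUnit - 1);
}

int main() {
  uintptr_t segBase = kUnit * 7; // some unit-aligned segment base
  // Any address in the first unit maps back to the base. A jumbo segment's
  // single cell starts in the first unit, so the same mask finds its header.
  assert(unitStart(segBase + 123) == segBase);
  return 0;
}

This is why the mark-bit helpers work for both segment types without virtual
dispatch: the cell pointer alone determines the segment start.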
+/// AlignedHeapSegment has a fixed storage size kSegmentUnitSize. Its
+/// CardTable and MarkBitArray are stored inline right before the allocation
+/// space. This is used for all normal object allocations in YoungGen and
+/// OldGen.
+class AlignedHeapSegment : public AlignedHeapSegmentBase {
+ public:
+  /// @name Constants and utility functions for the aligned storage of \c
+  /// AlignedHeapSegment.
+  ///
+  /// @{
+  /// The size and the alignment of the storage, in bytes.
+  static constexpr size_t kSize = kSegmentUnitSize;
+  /// Mask for isolating the offset into a storage for a pointer.
+  static constexpr size_t kLowMask{kSize - 1};
+  /// Mask for isolating the storage being pointed into by a pointer.
+  static constexpr size_t kHighMask{~kLowMask};
+
+  /// Returns the storage size, in bytes, of an \c AlignedHeapSegment.
+  static constexpr size_t storageSize() {
+    return kSize;
+  }
+
+  /// Returns the pointer to the beginning of the storage containing \p ptr
+  /// (inclusive). Assuming such a storage exists. Note that
+  ///
+  ///     storageStart(seg.hiLim()) != seg.lowLim()
+  ///
+  /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
+  /// is the first address not in the bounds.
+  static void *storageStart(const void *ptr) {
+    return reinterpret_cast<char *>(
+        reinterpret_cast<uintptr_t>(ptr) & kHighMask);
+  }
+
+  /// Returns the pointer to the end of the storage containing \p ptr
+  /// (exclusive). Assuming such a storage exists. Note that
+  ///
+  ///     storageEnd(seg.hiLim()) != seg.hiLim()
+  ///
+  /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
+  /// is the first address not in the bounds.
+  static void *storageEnd(const void *ptr) {
+    return reinterpret_cast<char *>(storageStart(ptr)) + kSize;
+  }
+
+  /// Returns the offset in bytes to \p ptr from the start of its containing
+  /// storage. Assuming such a storage exists. Note that
+  ///
+  ///     offset(seg.hiLim()) != seg.size()
+  ///
+  /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
+  /// is the first address not in the bounds.
+  static size_t offset(const char *ptr) {
+    return reinterpret_cast<uintptr_t>(ptr) & kLowMask;
+  }
+  /// @}
+
+  /// Construct a null AlignedHeapSegment (one that does not own memory).
+  AlignedHeapSegment() = default;
+  /// \c AlignedHeapSegment is movable and assignable, but not copyable.
+  AlignedHeapSegment(AlignedHeapSegment &&);
+  AlignedHeapSegment &operator=(AlignedHeapSegment &&);
+  AlignedHeapSegment(const AlignedHeapSegment &) = delete;
+
+  ~AlignedHeapSegment();
+
+  /// Create an AlignedHeapSegment by allocating memory with \p provider.
+  static llvh::ErrorOr<AlignedHeapSegment> create(StorageProvider *provider);
+  static llvh::ErrorOr<AlignedHeapSegment> create(
+      StorageProvider *provider,
+      const char *name);
+
   /// Returns the index of the segment containing \p lowLim, which is required
   /// to be the start of its containing segment. (This can allow extra
   /// efficiency, in cases where the segment start has already been computed.)
@@ -238,40 +352,12 @@ class AlignedHeapSegment {
   /// space, returns {nullptr, false}.
   inline AllocResult alloc(uint32_t size);
 
-  /// Given the \p lowLim of some valid segment's memory region, returns a
-  /// pointer to the AlignedHeapSegment::Contents laid out in that storage,
-  /// assuming it exists.
-  inline static Contents *contents(void *lowLim);
-  inline static const Contents *contents(const void *lowLim);
-
   /// Given a \p ptr into the memory region of some valid segment \c s, returns
   /// a pointer to the CardTable covering the segment containing the pointer.
   ///
   /// \pre There exists a currently alive heap that claims to contain \c ptr.
   inline static CardTable *cardTableCovering(const void *ptr);
 
-  /// Given a \p ptr into the memory region of some valid segment \c s, returns
-  /// a pointer to the MarkBitArray covering the segment containing the
-  /// pointer.
-  ///
-  /// \pre There exists a currently alive heap that claims to contain \c ptr.
- inline static Contents::MarkBitArray *markBitArrayCovering(const void *ptr); - - /// Translate the given address to a 0-based index in the MarkBitArray of its - /// segment. The base address is the start of the storage of this segment. - static size_t addressToMarkBitArrayIndex(const void *ptr) { - auto *cp = reinterpret_cast(ptr); - auto *base = reinterpret_cast(storageStart(cp)); - return (cp - base) >> LogHeapAlign; - } - - /// Mark the given \p cell. Assumes the given address is a valid heap object. - inline static void setCellMarkBit(const GCCell *cell); - - /// Return whether the given \p cell is marked. Assumes the given address is - /// a valid heap object. - inline static bool getCellMarkBit(const GCCell *cell); - /// Find the head of the first cell that extends into the card at index /// \p cardIdx. /// \return A cell such that @@ -294,23 +380,11 @@ class AlignedHeapSegment { /// The number of bytes in the segment that are available for allocation. inline size_t available() const; - /// Returns the address that is the lower bound of the segment. - /// \post The returned pointer is guaranteed to be aligned to a segment - /// boundary. - char *lowLim() const { - return lowLim_; - } - /// Returns the address that is the upper bound of the segment. char *hiLim() const { return lowLim() + storageSize(); } - /// Returns the address at which the first allocation in this segment would - /// occur. - /// Disable UB sanitization because 'this' may be null during the tests. - inline char *start() const LLVM_NO_SANITIZE("undefined"); - /// Returns the first address after the region in which allocations can occur, /// taking external memory credits into a account (they decrease the effective /// end). @@ -340,15 +414,6 @@ class AlignedHeapSegment { /// AlignedHeapSegment. inline static bool containedInSame(const void *a, const void *b); - /// Return a reference to the card table covering the memory region managed by - /// this segment. - /// Disable sanitization because 'this' may be null in the tests. - inline CardTable &cardTable() const LLVM_NO_SANITIZE("null"); - - /// Return a reference to the mark bit array covering the memory region - /// managed by this segment. - inline Contents::MarkBitArray &markBitArray() const; - explicit operator bool() const { return lowLim(); } @@ -390,20 +455,11 @@ class AlignedHeapSegment { /// Set the contents of the segment to a dead value. void clear(); - /// Set the given range [start, end) to a dead value. - static void clear(char *start, char *end); /// Checks that dead values are present in the [start, end) range. static void checkUnwritten(char *start, char *end); #endif - protected: - /// Return a pointer to the contents of the memory region managed by this - /// segment. - inline Contents *contents() const; - - /// The start of the aligned segment. - char *lowLim_{nullptr}; - + private: /// The provider that created this segment. It will be used to properly /// destroy this. StorageProvider *provider_{nullptr}; @@ -419,7 +475,6 @@ class AlignedHeapSegment { /// and swap idiom. 
friend void swap(AlignedHeapSegment &a, AlignedHeapSegment &b); - private: AlignedHeapSegment(StorageProvider *provider, void *lowLim); }; @@ -459,26 +514,6 @@ AllocResult AlignedHeapSegment::alloc(uint32_t size) { return {cell, true}; } -/*static*/ -AlignedHeapSegment::Contents::MarkBitArray * -AlignedHeapSegment::markBitArrayCovering(const void *ptr) { - return &contents(storageStart(ptr))->markBitArray_; -} - -/*static*/ -void AlignedHeapSegment::setCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - markBits->set(ind, true); -} - -/*static*/ -bool AlignedHeapSegment::getCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - return markBits->at(ind); -} - GCCell *AlignedHeapSegment::getFirstCellHead(size_t cardIdx) { CardTable &cards = cardTable(); GCCell *cell = cards.firstObjForCard(cardIdx); @@ -499,16 +534,6 @@ void AlignedHeapSegment::setCellHead(const GCCell *cellStart, const size_t sz) { } } -/* static */ AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - void *lowLim) { - return reinterpret_cast(lowLim); -} - -/* static */ const AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - const void *lowLim) { - return reinterpret_cast(lowLim); -} - /* static */ CardTable *AlignedHeapSegment::cardTableCovering(const void *ptr) { return &AlignedHeapSegment::contents(storageStart(ptr))->cardTable_; } @@ -529,10 +554,6 @@ size_t AlignedHeapSegment::available() const { return effectiveEnd() - level(); } -char *AlignedHeapSegment::start() const { - return contents()->allocRegion_; -} - char *AlignedHeapSegment::effectiveEnd() const { return effectiveEnd_; } @@ -558,19 +579,6 @@ bool AlignedHeapSegment::containedInSame(const void *a, const void *b) { storageSize(); } -CardTable &AlignedHeapSegment::cardTable() const { - return contents()->cardTable_; -} - -AlignedHeapSegment::Contents::MarkBitArray &AlignedHeapSegment::markBitArray() - const { - return contents()->markBitArray_; -} - -AlignedHeapSegment::Contents *AlignedHeapSegment::contents() const { - return contents(lowLim()); -} - } // namespace vm } // namespace hermes diff --git a/lib/VM/gcs/AlignedHeapSegment.cpp b/lib/VM/gcs/AlignedHeapSegment.cpp index 1509168194d..24d360f78c7 100644 --- a/lib/VM/gcs/AlignedHeapSegment.cpp +++ b/lib/VM/gcs/AlignedHeapSegment.cpp @@ -22,6 +22,17 @@ namespace hermes { namespace vm { +#ifndef NDEBUG +/// Set the given range [start, end) to a dead value. 
+static void clearRange(char *start, char *end) { +#if LLVM_ADDRESS_SANITIZER_BUILD + __asan_poison_memory_region(start, end - start); +#else + std::memset(start, kInvalidHeapValue, end - start); +#endif +} +#endif + void AlignedHeapSegment::Contents::protectGuardPage( oscompat::ProtectMode mode) { char *begin = &paddedGuardPage_[kGuardPagePadding]; @@ -45,11 +56,12 @@ llvh::ErrorOr AlignedHeapSegment::create( if (!result) { return result.getError(); } + assert(*result && "Heap segment storage allocation failure"); return AlignedHeapSegment{provider, *result}; } AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) - : lowLim_(static_cast(lowLim)), provider_(provider) { + : AlignedHeapSegmentBase(lowLim), provider_(provider) { assert( storageStart(lowLim_) == lowLim_ && "The lower limit of this storage must be aligned"); @@ -58,13 +70,9 @@ AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) assert( reinterpret_cast(hiLim()) % oscompat::page_size() == 0 && "The higher limit must be page aligned"); - if (*this) { - new (contents()) Contents(); - contents()->protectGuardPage(oscompat::ProtectMode::None); #ifndef NDEBUG - clear(); + clear(); #endif - } } void swap(AlignedHeapSegment &a, AlignedHeapSegment &b) { @@ -120,7 +128,7 @@ void AlignedHeapSegment::setLevel(char *lvl) { assert(dbgContainsLevel(lvl)); if (lvl < level_) { #ifndef NDEBUG - clear(lvl, level_); + clearRange(lvl, level_); #else if (MU == AdviseUnused::Yes) { const size_t PS = oscompat::page_size(); @@ -172,15 +180,7 @@ bool AlignedHeapSegment::validPointer(const void *p) const { } void AlignedHeapSegment::clear() { - clear(start(), end()); -} - -/* static */ void AlignedHeapSegment::clear(char *start, char *end) { -#if LLVM_ADDRESS_SANITIZER_BUILD - __asan_poison_memory_region(start, end - start); -#else - std::memset(start, kInvalidHeapValue, end - start); -#endif + clearRange(start(), end()); } /* static */ void AlignedHeapSegment::checkUnwritten(char *start, char *end) { diff --git a/unittests/VMRuntime/MarkBitArrayNCTest.cpp b/unittests/VMRuntime/MarkBitArrayNCTest.cpp index 455c1996fb1..a46536155d2 100644 --- a/unittests/VMRuntime/MarkBitArrayNCTest.cpp +++ b/unittests/VMRuntime/MarkBitArrayNCTest.cpp @@ -27,6 +27,13 @@ namespace { struct MarkBitArrayTest : public ::testing::Test { MarkBitArrayTest(); + static size_t addressToMarkBitArrayIndex(const void *addr) { + auto *cp = reinterpret_cast(addr); + auto *base = + reinterpret_cast(AlignedHeapSegment::storageStart(addr)); + return (cp - base) >> LogHeapAlign; + } + protected: std::unique_ptr provider; AlignedHeapSegment seg; @@ -66,7 +73,7 @@ TEST_F(MarkBitArrayTest, AddressToIndex) { char *addr = addrs.at(i); size_t ind = indices.at(i); - EXPECT_EQ(ind, AlignedHeapSegment::addressToMarkBitArrayIndex(addr)) + EXPECT_EQ(ind, addressToMarkBitArrayIndex(addr)) << "0x" << std::hex << (void *)addr << " -> " << ind; char *toAddr = seg.lowLim() + (ind << LogHeapAlign); EXPECT_EQ(toAddr, addr) @@ -78,7 +85,7 @@ TEST_F(MarkBitArrayTest, MarkGet) { const size_t lastIx = mba.size() - 1; for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(ind > 0 && mba.at(ind - 1)) << "initial " << ind << " - 1"; EXPECT_FALSE(mba.at(ind)) << "initial " << ind; @@ -97,37 +104,37 @@ TEST_F(MarkBitArrayTest, MarkGet) { TEST_F(MarkBitArrayTest, Initial) { for (char *addr : addrs) { - size_t ind = 
AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, Clear) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_FALSE(mba.at(ind)); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_TRUE(mba.at(ind)); } mba.reset(); for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, NextMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findNextSetBitFrom(ind)); @@ -140,7 +147,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextSetBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); indices.push(ind); } @@ -154,7 +161,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { TEST_F(MarkBitArrayTest, NextUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findNextZeroBitFrom(ind)); @@ -167,7 +174,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextZeroBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, false); indices.push(ind); } @@ -182,7 +189,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { TEST_F(MarkBitArrayTest, PrevMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findPrevSetBitFrom(ind + 1)); } @@ -196,7 +203,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, true); indices.push(ind); } @@ -209,7 +216,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { TEST_F(MarkBitArrayTest, PrevUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findPrevZeroBitFrom(ind + 1)); @@ -225,7 +232,7 @@ TEST_F(MarkBitArrayTest, PrevUnmarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, false); indices.push(ind); } From c0d290043361e220709b7f69c5671e6c1f4f3b51 Mon Sep 17 00:00:00 2001 From: "Gang Zhao 
(Hermes)" Date: Mon, 21 Oct 2024 16:20:39 -0700 Subject: [PATCH 2/5] Add support of allocating different sizes of storage in StorageProvider (#1504) Summary: Large segment needs to be backed by a large storage size. StorageProvider currently always allocate fixed size of storage determined by HERMESVM_LOG_HEAP_SEGMENT_SIZE. This diffs adds support of allocating larger storage with below changes: 1. `newStorage()` and `deleteStoragr()` takes addition `sz` parameter. 2. For `MallocStorageProvider` and `VMAllocateStorageProvider`, simply change the previous fixed storage size to passed in `sz`. 3. For `ContiguousVAStorageProvider`, use a BitVector to manage allocations and deallocations. This can be improved later if we observe fragmentations. The support of enabling different sizes of heap segment will be added later. Differential Revision: D61676721 --- include/hermes/VM/HeapRuntime.h | 9 +- include/hermes/VM/LimitedStorageProvider.h | 4 +- include/hermes/VM/StorageProvider.h | 27 +-- lib/VM/LimitedStorageProvider.cpp | 14 +- lib/VM/StorageProvider.cpp | 147 ++++++++++------ lib/VM/gcs/AlignedHeapSegment.cpp | 4 +- .../VMRuntime/AlignedHeapSegmentTest.cpp | 5 +- unittests/VMRuntime/StorageProviderTest.cpp | 158 ++++++++++++------ 8 files changed, 242 insertions(+), 126 deletions(-) diff --git a/include/hermes/VM/HeapRuntime.h b/include/hermes/VM/HeapRuntime.h index c87aed40d76..a6fbbe55a9d 100644 --- a/include/hermes/VM/HeapRuntime.h +++ b/include/hermes/VM/HeapRuntime.h @@ -22,7 +22,7 @@ class HeapRuntime { public: ~HeapRuntime() { runtime_->~RT(); - sp_->deleteStorage(runtime_); + sp_->deleteStorage(runtime_, kHeapRuntimeStorageSize); } /// Allocate a segment and create an aliased shared_ptr that points to the @@ -36,16 +36,17 @@ class HeapRuntime { private: HeapRuntime(std::shared_ptr sp) : sp_{std::move(sp)} { - auto ptrOrError = sp_->newStorage("hermes-rt"); + auto ptrOrError = sp_->newStorage("hermes-rt", kHeapRuntimeStorageSize); if (!ptrOrError) hermes_fatal("Cannot initialize Runtime storage.", ptrOrError.getError()); - static_assert( - sizeof(RT) < AlignedHeapSegment::storageSize(), "Segments too small."); + static_assert(sizeof(RT) < kHeapRuntimeStorageSize, "Segments too small."); runtime_ = static_cast(*ptrOrError); } std::shared_ptr sp_; RT *runtime_; + static constexpr size_t kHeapRuntimeStorageSize = + AlignedHeapSegment::storageSize(); }; } // namespace vm } // namespace hermes diff --git a/include/hermes/VM/LimitedStorageProvider.h b/include/hermes/VM/LimitedStorageProvider.h index a060435027b..44d7c8adf39 100644 --- a/include/hermes/VM/LimitedStorageProvider.h +++ b/include/hermes/VM/LimitedStorageProvider.h @@ -29,9 +29,9 @@ class LimitedStorageProvider final : public StorageProvider { : delegate_(std::move(provider)), limit_(limit) {} protected: - llvh::ErrorOr newStorageImpl(const char *name) override; + llvh::ErrorOr newStorageImpl(const char *name, size_t sz) override; - void deleteStorageImpl(void *storage) override; + void deleteStorageImpl(void *storage, size_t sz) override; }; } // namespace vm diff --git a/include/hermes/VM/StorageProvider.h b/include/hermes/VM/StorageProvider.h index 41d87f82ac5..be3887c755a 100644 --- a/include/hermes/VM/StorageProvider.h +++ b/include/hermes/VM/StorageProvider.h @@ -37,20 +37,21 @@ class StorageProvider { /// @} - /// Create a new segment memory space. - llvh::ErrorOr newStorage() { - return newStorage(nullptr); + /// Create a new segment memory space with given size \p sz. 
---
 include/hermes/VM/HeapRuntime.h             |   9 +-
 include/hermes/VM/LimitedStorageProvider.h  |   4 +-
 include/hermes/VM/StorageProvider.h         |  27 +--
 lib/VM/LimitedStorageProvider.cpp           |  14 +-
 lib/VM/StorageProvider.cpp                  | 147 ++++++++++------
 lib/VM/gcs/AlignedHeapSegment.cpp           |   4 +-
 .../VMRuntime/AlignedHeapSegmentTest.cpp    |   5 +-
 unittests/VMRuntime/StorageProviderTest.cpp | 158 ++++++++++++------
 8 files changed, 242 insertions(+), 126 deletions(-)

diff --git a/include/hermes/VM/HeapRuntime.h b/include/hermes/VM/HeapRuntime.h
index c87aed40d76..a6fbbe55a9d 100644
--- a/include/hermes/VM/HeapRuntime.h
+++ b/include/hermes/VM/HeapRuntime.h
@@ -22,7 +22,7 @@ class HeapRuntime {
  public:
   ~HeapRuntime() {
     runtime_->~RT();
-    sp_->deleteStorage(runtime_);
+    sp_->deleteStorage(runtime_, kHeapRuntimeStorageSize);
   }
 
   /// Allocate a segment and create an aliased shared_ptr that points to the
@@ -36,16 +36,17 @@ class HeapRuntime {
 
  private:
   HeapRuntime(std::shared_ptr<StorageProvider> sp) : sp_{std::move(sp)} {
-    auto ptrOrError = sp_->newStorage("hermes-rt");
+    auto ptrOrError = sp_->newStorage("hermes-rt", kHeapRuntimeStorageSize);
     if (!ptrOrError)
       hermes_fatal("Cannot initialize Runtime storage.", ptrOrError.getError());
-    static_assert(
-        sizeof(RT) < AlignedHeapSegment::storageSize(), "Segments too small.");
+    static_assert(sizeof(RT) < kHeapRuntimeStorageSize, "Segments too small.");
     runtime_ = static_cast<RT *>(*ptrOrError);
   }
 
   std::shared_ptr<StorageProvider> sp_;
   RT *runtime_;
+  static constexpr size_t kHeapRuntimeStorageSize =
+      AlignedHeapSegment::storageSize();
 };
 } // namespace vm
 } // namespace hermes

diff --git a/include/hermes/VM/LimitedStorageProvider.h b/include/hermes/VM/LimitedStorageProvider.h
index a060435027b..44d7c8adf39 100644
--- a/include/hermes/VM/LimitedStorageProvider.h
+++ b/include/hermes/VM/LimitedStorageProvider.h
@@ -29,9 +29,9 @@ class LimitedStorageProvider final : public StorageProvider {
       : delegate_(std::move(provider)), limit_(limit) {}
 
  protected:
-  llvh::ErrorOr<void *> newStorageImpl(const char *name) override;
+  llvh::ErrorOr<void *> newStorageImpl(const char *name, size_t sz) override;
 
-  void deleteStorageImpl(void *storage) override;
+  void deleteStorageImpl(void *storage, size_t sz) override;
 };
 
 } // namespace vm

diff --git a/include/hermes/VM/StorageProvider.h b/include/hermes/VM/StorageProvider.h
index 41d87f82ac5..be3887c755a 100644
--- a/include/hermes/VM/StorageProvider.h
+++ b/include/hermes/VM/StorageProvider.h
@@ -37,20 +37,21 @@ class StorageProvider {
 
   /// @}
 
-  /// Create a new segment memory space.
-  llvh::ErrorOr<void *> newStorage() {
-    return newStorage(nullptr);
+  /// Create a new segment memory space with the given size \p sz.
+  llvh::ErrorOr<void *> newStorage(size_t sz) {
+    return newStorage(nullptr, sz);
   }
-  /// Create a new segment memory space and give this memory the name \p name.
-  /// \return A pointer to a block of memory that has
-  /// AlignedHeapSegment::storageSize() bytes, and is aligned on
-  /// AlignedHeapSegment::storageSize().
-  llvh::ErrorOr<void *> newStorage(const char *name);
+  /// \return A pointer to a block of memory that has \p sz bytes, and is
+  /// aligned on AlignedHeapSegmentBase::kSegmentUnitSize. Note that \p sz
+  /// must be equal to, or a multiple of,
+  /// AlignedHeapSegmentBase::kSegmentUnitSize.
+  llvh::ErrorOr<void *> newStorage(const char *name, size_t sz);
 
   /// Delete the given segment's memory space, and make it available for re-use.
-  /// \post Nothing in the range [storage, storage +
-  /// AlignedHeapSegment::storageSize()) is valid memory to be read or written.
-  void deleteStorage(void *storage);
+  /// Note that \p sz must be the same as was used when allocating \p storage.
+  /// \post Nothing in the range [storage, storage + sz) is valid memory to be
+  /// read or written.
+  void deleteStorage(void *storage, size_t sz);
 
   /// The number of storages this provider has allocated in its lifetime.
   size_t numSucceededAllocs() const;
@@ -67,8 +68,8 @@ class StorageProvider {
   size_t numLiveAllocs() const;
 
  protected:
-  virtual llvh::ErrorOr<void *> newStorageImpl(const char *name) = 0;
-  virtual void deleteStorageImpl(void *storage) = 0;
+  virtual llvh::ErrorOr<void *> newStorageImpl(const char *name, size_t sz) = 0;
+  virtual void deleteStorageImpl(void *storage, size_t sz) = 0;
 
  private:
   size_t numSucceededAllocs_{0};

diff --git a/lib/VM/LimitedStorageProvider.cpp b/lib/VM/LimitedStorageProvider.cpp
index 90e3e6138b5..e9e0146a809 100644
--- a/lib/VM/LimitedStorageProvider.cpp
+++ b/lib/VM/LimitedStorageProvider.cpp
@@ -13,20 +13,22 @@
 namespace hermes {
 namespace vm {
 
-llvh::ErrorOr<void *> LimitedStorageProvider::newStorageImpl(const char *name) {
+llvh::ErrorOr<void *> LimitedStorageProvider::newStorageImpl(
+    const char *name,
+    size_t sz) {
   if (limit_ < AlignedHeapSegment::storageSize()) {
     return make_error_code(OOMError::TestVMLimitReached);
   }
-  limit_ -= AlignedHeapSegment::storageSize();
-  return delegate_->newStorage(name);
+  limit_ -= sz;
+  return delegate_->newStorage(name, sz);
 }
 
-void LimitedStorageProvider::deleteStorageImpl(void *storage) {
+void LimitedStorageProvider::deleteStorageImpl(void *storage, size_t sz) {
   if (!storage) {
     return;
   }
-  delegate_->deleteStorage(storage);
-  limit_ += AlignedHeapSegment::storageSize();
+  delegate_->deleteStorage(storage, sz);
+  limit_ += sz;
 }
 
 } // namespace vm

diff --git a/lib/VM/StorageProvider.cpp b/lib/VM/StorageProvider.cpp
index 67fed1eb8d3..b79cab1325a 100644
--- a/lib/VM/StorageProvider.cpp
+++ b/lib/VM/StorageProvider.cpp
@@ -7,11 +7,13 @@
 
 #include "hermes/VM/StorageProvider.h"
 
+#include "hermes/ADT/BitArray.h"
 #include "hermes/Support/CheckedMalloc.h"
 #include "hermes/Support/Compiler.h"
 #include "hermes/Support/OSCompat.h"
 #include "hermes/VM/AlignedHeapSegment.h"
 
+#include "llvh/ADT/BitVector.h"
 #include "llvh/ADT/DenseMap.h"
 #include "llvh/Support/ErrorHandling.h"
 #include "llvh/Support/MathExtras.h"
@@ -55,14 +57,18 @@ namespace vm {
 
 namespace {
 
+/// Minimum segment storage size. Any larger segment size should be a multiple
+/// of it.
+static constexpr size_t kSegmentUnitSize = + AlignedHeapSegmentBase::kSegmentUnitSize; + bool isAligned(void *p) { - return (reinterpret_cast(p) & - (AlignedHeapSegment::storageSize() - 1)) == 0; + return (reinterpret_cast(p) & (kSegmentUnitSize - 1)) == 0; } char *alignAlloc(void *p) { - return reinterpret_cast(llvh::alignTo( - reinterpret_cast(p), AlignedHeapSegment::storageSize())); + return reinterpret_cast( + llvh::alignTo(reinterpret_cast(p), kSegmentUnitSize)); } void *getMmapHint() { @@ -78,67 +84,104 @@ void *getMmapHint() { class VMAllocateStorageProvider final : public StorageProvider { public: - llvh::ErrorOr newStorageImpl(const char *name) override; - void deleteStorageImpl(void *storage) override; + llvh::ErrorOr newStorageImpl(const char *name, size_t sz) override; + void deleteStorageImpl(void *storage, size_t sz) override; }; class ContiguousVAStorageProvider final : public StorageProvider { public: ContiguousVAStorageProvider(size_t size) - : size_(llvh::alignTo(size)) { - auto result = oscompat::vm_reserve_aligned( - size_, AlignedHeapSegment::storageSize(), getMmapHint()); + : size_(llvh::alignTo(size)), + statusBits_(size_ / kSegmentUnitSize) { + auto result = + oscompat::vm_reserve_aligned(size_, kSegmentUnitSize, getMmapHint()); if (!result) hermes_fatal("Contiguous storage allocation failed.", result.getError()); - level_ = start_ = static_cast(*result); + start_ = static_cast(*result); oscompat::vm_name(start_, size_, kFreeRegionName); } ~ContiguousVAStorageProvider() override { oscompat::vm_release_aligned(start_, size_); } - llvh::ErrorOr newStorageImpl(const char *name) override { + private: + llvh::ErrorOr newStorageImpl(const char *name, size_t sz) override { + // No available space to use. + if (LLVM_UNLIKELY(firstFreeBit_ == -1)) { + return make_error_code(OOMError::MaxStorageReached); + } + + assert( + statusBits_.find_first_unset() == firstFreeBit_ && + "firstFreeBit_ should always be the first unset bit"); + void *storage; - if (!freelist_.empty()) { - storage = freelist_.back(); - freelist_.pop_back(); - } else if (level_ < start_ + size_) { - storage = - std::exchange(level_, level_ + AlignedHeapSegment::storageSize()); - } else { + int numUnits = sz / kSegmentUnitSize; + int nextUsedBit = statusBits_.find_next(firstFreeBit_); + int curFreeBit = firstFreeBit_; + // Search for a large enough continuous bit range. + while (nextUsedBit != -1 && (nextUsedBit - curFreeBit < numUnits)) { + curFreeBit = statusBits_.find_next_unset(nextUsedBit); + if (curFreeBit == -1) { + return make_error_code(OOMError::MaxStorageReached); + } + nextUsedBit = statusBits_.find_next(curFreeBit); + } + // nextUsedBit could be -1, so check if there is enough space left. + if (nextUsedBit == -1 && curFreeBit + numUnits > (int)statusBits_.size()) { return make_error_code(OOMError::MaxStorageReached); } - auto res = oscompat::vm_commit(storage, AlignedHeapSegment::storageSize()); + + storage = start_ + curFreeBit * kSegmentUnitSize; + statusBits_.set(curFreeBit, curFreeBit + numUnits); + // Reset it to the new leftmost free bit. 
+ firstFreeBit_ = statusBits_.find_first_unset(); + + auto res = oscompat::vm_commit(storage, sz); if (res) { - oscompat::vm_name(storage, AlignedHeapSegment::storageSize(), name); + oscompat::vm_name(storage, sz, name); } return res; } - void deleteStorageImpl(void *storage) override { + void deleteStorageImpl(void *storage, size_t sz) override { assert( - !llvh::alignmentAdjustment( - storage, AlignedHeapSegment::storageSize()) && + !llvh::alignmentAdjustment(storage, kSegmentUnitSize) && "Storage not aligned"); - assert(storage >= start_ && storage < level_ && "Storage not in region"); - oscompat::vm_name( - storage, AlignedHeapSegment::storageSize(), kFreeRegionName); - oscompat::vm_uncommit(storage, AlignedHeapSegment::storageSize()); - freelist_.push_back(storage); + assert( + storage >= start_ && storage < start_ + size_ && + "Storage not in region"); + size_t numUnits = sz / kSegmentUnitSize; + oscompat::vm_name(storage, sz, kFreeRegionName); + oscompat::vm_uncommit(storage, sz); + // Reset all bits for this storage. + int startIndex = (static_cast(storage) - start_) / kSegmentUnitSize; + statusBits_.reset(startIndex, startIndex + numUnits); + if (startIndex < firstFreeBit_) + firstFreeBit_ = startIndex; } private: static constexpr const char *kFreeRegionName = "hermes-free-heap"; size_t size_; char *start_; - char *level_; - llvh::SmallVector freelist_; + /// First free bit in \c statusBits_. We always make new allocation from the + /// leftmost free bit, based on heuristics: + /// 1. Usually the reserved address space is not full. + /// 2. Storage with size kSegmentUnitSize is allocated and deleted more + /// frequently than larger storage. + /// 3. Likely small storage will find space available from leftmost free bit, + /// leaving enough space at the right side for large storage. + int firstFreeBit_{0}; + /// One bit for each kSegmentUnitSize space in the entire reserved virtual + /// address space. A bit is set if the corresponding space is used. + llvh::BitVector statusBits_; }; class MallocStorageProvider final : public StorageProvider { public: - llvh::ErrorOr newStorageImpl(const char *name) override; - void deleteStorageImpl(void *storage) override; + llvh::ErrorOr newStorageImpl(const char *name, size_t sz) override; + void deleteStorageImpl(void *storage, size_t sz) override; private: /// Map aligned starts to actual starts for freeing. @@ -148,13 +191,12 @@ class MallocStorageProvider final : public StorageProvider { }; llvh::ErrorOr VMAllocateStorageProvider::newStorageImpl( - const char *name) { - assert(AlignedHeapSegment::storageSize() % oscompat::page_size() == 0); + const char *name, + size_t sz) { + assert(kSegmentUnitSize % oscompat::page_size() == 0); // Allocate the space, hoping it will be the correct alignment. - auto result = oscompat::vm_allocate_aligned( - AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageSize(), - getMmapHint()); + auto result = + oscompat::vm_allocate_aligned(sz, kSegmentUnitSize, getMmapHint()); if (!result) { return result; } @@ -162,32 +204,36 @@ llvh::ErrorOr VMAllocateStorageProvider::newStorageImpl( assert(isAligned(mem)); (void)&isAligned; #ifdef HERMESVM_ALLOW_HUGE_PAGES - oscompat::vm_hugepage(mem, AlignedHeapSegment::storageSize()); + oscompat::vm_hugepage(mem, sz); #endif // Name the memory region on platforms that support naming. 
- oscompat::vm_name(mem, AlignedHeapSegment::storageSize(), name); + oscompat::vm_name(mem, sz, name); return mem; } -void VMAllocateStorageProvider::deleteStorageImpl(void *storage) { +void VMAllocateStorageProvider::deleteStorageImpl(void *storage, size_t sz) { if (!storage) { return; } - oscompat::vm_free_aligned(storage, AlignedHeapSegment::storageSize()); + oscompat::vm_free_aligned(storage, sz); } -llvh::ErrorOr MallocStorageProvider::newStorageImpl(const char *name) { +llvh::ErrorOr MallocStorageProvider::newStorageImpl( + const char *name, + size_t sz) { // name is unused, can't name malloc memory. (void)name; - void *mem = checkedMalloc2(AlignedHeapSegment::storageSize(), 2u); + void *mem = checkedMalloc2(2u, sz); void *lowLim = alignAlloc(mem); assert(isAligned(lowLim) && "New storage should be aligned"); lowLimToAllocHandle_[lowLim] = mem; return lowLim; } -void MallocStorageProvider::deleteStorageImpl(void *storage) { +void MallocStorageProvider::deleteStorageImpl(void *storage, size_t sz) { + // free() does not need the memory size. + (void)sz; if (!storage) { return; } @@ -217,8 +263,11 @@ std::unique_ptr StorageProvider::mallocProvider() { return std::unique_ptr(new MallocStorageProvider); } -llvh::ErrorOr StorageProvider::newStorage(const char *name) { - auto res = newStorageImpl(name); +llvh::ErrorOr StorageProvider::newStorage(const char *name, size_t sz) { + assert( + sz && (sz % kSegmentUnitSize == 0) && + "Allocated storage size must be multiples of kSegmentUnitSize"); + auto res = newStorageImpl(name, sz); if (res) { numSucceededAllocs_++; @@ -229,13 +278,13 @@ llvh::ErrorOr StorageProvider::newStorage(const char *name) { return res; } -void StorageProvider::deleteStorage(void *storage) { +void StorageProvider::deleteStorage(void *storage, size_t sz) { if (!storage) { return; } numDeletedAllocs_++; - deleteStorageImpl(storage); + return deleteStorageImpl(storage, sz); } llvh::ErrorOr> diff --git a/lib/VM/gcs/AlignedHeapSegment.cpp b/lib/VM/gcs/AlignedHeapSegment.cpp index 24d360f78c7..62b0c416904 100644 --- a/lib/VM/gcs/AlignedHeapSegment.cpp +++ b/lib/VM/gcs/AlignedHeapSegment.cpp @@ -52,7 +52,7 @@ llvh::ErrorOr AlignedHeapSegment::create( llvh::ErrorOr AlignedHeapSegment::create( StorageProvider *provider, const char *name) { - auto result = provider->newStorage(name); + auto result = provider->newStorage(name, storageSize()); if (!result) { return result.getError(); } @@ -103,7 +103,7 @@ AlignedHeapSegment::~AlignedHeapSegment() { __asan_unpoison_memory_region(start(), end() - start()); if (provider_) { - provider_->deleteStorage(lowLim_); + provider_->deleteStorage(lowLim_, storageSize()); } } diff --git a/unittests/VMRuntime/AlignedHeapSegmentTest.cpp b/unittests/VMRuntime/AlignedHeapSegmentTest.cpp index 6362b80d6f1..29a34106d85 100644 --- a/unittests/VMRuntime/AlignedHeapSegmentTest.cpp +++ b/unittests/VMRuntime/AlignedHeapSegmentTest.cpp @@ -115,7 +115,8 @@ TEST_F(AlignedHeapSegmentTest, AdviseUnused) { // We can't use the storage of s here since it contains guard pages and also // s.start() may not align to actual page boundary. 
- void *storage = provider_->newStorage().get(); + void *storage = + provider_->newStorage(AlignedHeapSegment::storageSize()).get(); char *start = reinterpret_cast(storage); char *end = start + AlignedHeapSegment::storageSize(); @@ -139,7 +140,7 @@ TEST_F(AlignedHeapSegmentTest, AdviseUnused) { EXPECT_EQ(*initial + TOTAL_PAGES, *touched); EXPECT_EQ(*touched - FREED_PAGES, *marked); - provider_->deleteStorage(storage); + provider_->deleteStorage(storage, AlignedHeapSegment::storageSize()); #endif } diff --git a/unittests/VMRuntime/StorageProviderTest.cpp b/unittests/VMRuntime/StorageProviderTest.cpp index e189bcabce0..5de5b8a0b69 100644 --- a/unittests/VMRuntime/StorageProviderTest.cpp +++ b/unittests/VMRuntime/StorageProviderTest.cpp @@ -12,8 +12,6 @@ #include "hermes/VM/AlignedHeapSegment.h" #include "hermes/VM/LimitedStorageProvider.h" -#include "llvh/ADT/STLExtras.h" - using namespace hermes; using namespace hermes::vm; @@ -24,8 +22,8 @@ struct NullStorageProvider : public StorageProvider { static std::unique_ptr create(); protected: - llvh::ErrorOr newStorageImpl(const char *) override; - void deleteStorageImpl(void *) override; + llvh::ErrorOr newStorageImpl(const char *, size_t sz) override; + void deleteStorageImpl(void *, size_t sz) override; }; /* static */ @@ -33,7 +31,9 @@ std::unique_ptr NullStorageProvider::create() { return std::make_unique(); } -llvh::ErrorOr NullStorageProvider::newStorageImpl(const char *) { +llvh::ErrorOr NullStorageProvider::newStorageImpl( + const char *, + size_t sz) { // Doesn't matter what code is returned here. return make_error_code(OOMError::TestVMLimitReached); } @@ -43,33 +43,43 @@ enum StorageProviderType { ContiguousVAProvider, }; +struct StorageProviderParam { + StorageProviderType providerType; + size_t storageSize; + size_t vaSize; +}; + static std::unique_ptr GetStorageProvider( - StorageProviderType type) { + StorageProviderType type, + size_t vaSize) { switch (type) { case MmapProvider: return StorageProvider::mmapProvider(); case ContiguousVAProvider: - return StorageProvider::contiguousVAProvider( - AlignedHeapSegment::storageSize()); + return StorageProvider::contiguousVAProvider(vaSize); default: return nullptr; } } class StorageProviderTest - : public ::testing::TestWithParam {}; + : public ::testing::TestWithParam {}; -void NullStorageProvider::deleteStorageImpl(void *) {} +void NullStorageProvider::deleteStorageImpl(void *, size_t sz) {} + +/// Minimum segment storage size. 
+static constexpr size_t SIZE = AlignedHeapSegment::storageSize();
 
 TEST_P(StorageProviderTest, StorageProviderSucceededAllocsLogCount) {
-  auto provider{GetStorageProvider(GetParam())};
+  auto &params = GetParam();
+  auto provider{GetStorageProvider(params.providerType, params.vaSize)};
 
   ASSERT_EQ(0, provider->numSucceededAllocs());
   ASSERT_EQ(0, provider->numFailedAllocs());
   ASSERT_EQ(0, provider->numDeletedAllocs());
   ASSERT_EQ(0, provider->numLiveAllocs());
 
-  auto result = provider->newStorage("Test");
+  auto result = provider->newStorage("Test", params.storageSize);
   ASSERT_TRUE(result);
 
   void *s = result.get();
@@ -78,7 +88,7 @@ TEST_P(StorageProviderTest, StorageProviderSucceededAllocsLogCount) {
   EXPECT_EQ(0, provider->numDeletedAllocs());
   EXPECT_EQ(1, provider->numLiveAllocs());
 
-  provider->deleteStorage(s);
+  provider->deleteStorage(s, params.storageSize);
 
   EXPECT_EQ(1, provider->numSucceededAllocs());
   EXPECT_EQ(0, provider->numFailedAllocs());
@@ -94,7 +104,7 @@ TEST(StorageProviderTest, StorageProviderFailedAllocsLogCount) {
   ASSERT_EQ(0, provider->numDeletedAllocs());
   ASSERT_EQ(0, provider->numLiveAllocs());
 
-  auto result = provider->newStorage("Test");
+  auto result = provider->newStorage("Test", SIZE);
   ASSERT_FALSE(result);
 
   EXPECT_EQ(0, provider->numSucceededAllocs());
@@ -107,20 +117,20 @@ TEST(StorageProviderTest, LimitedStorageProviderEnforce) {
   constexpr size_t LIM = 2;
   LimitedStorageProvider provider{
       StorageProvider::mmapProvider(),
-      AlignedHeapSegment::storageSize() * LIM,
+      SIZE * LIM,
   };
   void *live[LIM];
   for (size_t i = 0; i < LIM; ++i) {
-    auto result = provider.newStorage("Live");
+    auto result = provider.newStorage("Live", SIZE);
     ASSERT_TRUE(result);
     live[i] = result.get();
   }
 
-  EXPECT_FALSE(provider.newStorage("Dead"));
+  EXPECT_FALSE(provider.newStorage("Dead", SIZE));
 
   // Clean-up
   for (auto s : live) {
-    provider.deleteStorage(s);
+    provider.deleteStorage(s, SIZE);
   }
 }
 
@@ -128,16 +138,16 @@ TEST(StorageProviderTest, LimitedStorageProviderTrackDelete) {
   constexpr size_t LIM = 2;
   LimitedStorageProvider provider{
       StorageProvider::mmapProvider(),
-      AlignedHeapSegment::storageSize() * LIM,
+      SIZE * LIM,
   };
 
   // If the storage gets deleted, we should be able to re-allocate it, even if
   // the total number of allocations exceeds the limit.
   for (size_t i = 0; i < LIM + 1; ++i) {
-    auto result = provider.newStorage("Live");
+    auto result = provider.newStorage("Live", SIZE);
     ASSERT_TRUE(result);
     auto *s = result.get();
-    provider.deleteStorage(s);
+    provider.deleteStorage(s, SIZE);
   }
 }
 
@@ -145,13 +155,13 @@ TEST(StorageProviderTest, LimitedStorageProviderDeleteNull) {
   constexpr size_t LIM = 2;
   LimitedStorageProvider provider{
       StorageProvider::mmapProvider(),
-      AlignedHeapSegment::storageSize() * LIM,
+      SIZE * LIM,
   };
 
   void *live[LIM];
 
   for (size_t i = 0; i < LIM; ++i) {
-    auto result = provider.newStorage("Live");
+    auto result = provider.newStorage("Live", SIZE);
     ASSERT_TRUE(result);
     live[i] = result.get();
   }
 
   // The allocations should fail because we have hit the limit, and the
   // deletions should not affect the limit, because they are of null storages.
for (size_t i = 0; i < 2; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage("Live", SIZE); EXPECT_FALSE(result); } // Clean-up for (auto s : live) { - provider.deleteStorage(s); + provider.deleteStorage(s, SIZE); } } TEST(StorageProviderTest, StorageProviderAllocsCount) { constexpr size_t LIM = 2; - auto provider = - std::unique_ptr{new LimitedStorageProvider{ - StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM}}; + auto provider = std::unique_ptr{ + new LimitedStorageProvider{StorageProvider::mmapProvider(), SIZE * LIM}}; constexpr size_t FAILS = 3; void *storages[LIM]; for (size_t i = 0; i < LIM; ++i) { - auto result = provider->newStorage(); + auto result = provider->newStorage(SIZE); ASSERT_TRUE(result); storages[i] = result.get(); } @@ -188,7 +196,7 @@ TEST(StorageProviderTest, StorageProviderAllocsCount) { EXPECT_EQ(LIM, provider->numLiveAllocs()); for (size_t i = 0; i < FAILS; ++i) { - auto result = provider->newStorage(); + auto result = provider->newStorage(SIZE); ASSERT_FALSE(result); } @@ -196,21 +204,63 @@ TEST(StorageProviderTest, StorageProviderAllocsCount) { // Clean-up for (auto s : storages) { - provider->deleteStorage(s); + provider->deleteStorage(s, SIZE); } EXPECT_EQ(0, provider->numLiveAllocs()); EXPECT_EQ(LIM, provider->numDeletedAllocs()); } +TEST(StorageProviderTest, ContinuousProviderTest) { + auto provider = + GetStorageProvider(StorageProviderType::ContiguousVAProvider, SIZE * 10); + + size_t sz1 = SIZE * 5; + auto result = provider->newStorage(sz1); + ASSERT_TRUE(result); + auto *s1 = *result; + + size_t sz2 = SIZE * 3; + result = provider->newStorage(sz2); + ASSERT_TRUE(result); + auto *s2 = *result; + + size_t sz3 = SIZE * 3; + result = provider->newStorage(sz3); + ASSERT_FALSE(result); + + provider->deleteStorage(s1, sz1); + + result = provider->newStorage(sz3); + ASSERT_TRUE(result); + auto *s3 = *result; + + size_t sz4 = SIZE * 2; + result = provider->newStorage(sz4); + ASSERT_TRUE(result); + auto *s4 = *result; + + result = provider->newStorage(sz4); + ASSERT_TRUE(result); + auto *s5 = *result; + + provider->deleteStorage(s2, sz2); + provider->deleteStorage(s3, sz3); + provider->deleteStorage(s4, sz4); + provider->deleteStorage(s5, sz4); +} + /// StorageGuard will free storage on scope exit. class StorageGuard final { public: - StorageGuard(std::shared_ptr provider, void *storage) - : provider_(std::move(provider)), storage_(storage) {} + StorageGuard( + std::shared_ptr provider, + void *storage, + size_t sz) + : provider_(std::move(provider)), storage_(storage), sz_(sz) {} ~StorageGuard() { - provider_->deleteStorage(storage_); + provider_->deleteStorage(storage_, sz_); } void *raw() const { @@ -220,6 +270,7 @@ class StorageGuard final { private: std::shared_ptr provider_; void *storage_; + size_t sz_; }; #ifndef NDEBUG @@ -235,8 +286,8 @@ class SetVALimit final { } }; -static const size_t KB = 1 << 10; -static const size_t MB = KB * KB; +static constexpr size_t KB = 1 << 10; +static constexpr size_t MB = KB * KB; TEST(StorageProviderTest, SucceedsWithoutReducing) { // Should succeed without reducing the size at all. 
@@ -261,16 +312,13 @@ TEST(StorageProviderTest, SucceedsAfterReducing) {
   }
   {
     // Test using the aligned storage alignment
-    SetVALimit limit{50 * AlignedHeapSegment::storageSize()};
-    auto result = vmAllocateAllowLess(
-        100 * AlignedHeapSegment::storageSize(),
-        30 * AlignedHeapSegment::storageSize(),
-        AlignedHeapSegment::storageSize());
+    SetVALimit limit{50 * SIZE};
+    auto result = vmAllocateAllowLess(100 * SIZE, 30 * SIZE, SIZE);
     ASSERT_TRUE(result);
     auto memAndSize = result.get();
     EXPECT_TRUE(memAndSize.first != nullptr);
-    EXPECT_GE(memAndSize.second, 30 * AlignedHeapSegment::storageSize());
-    EXPECT_LE(memAndSize.second, 50 * AlignedHeapSegment::storageSize());
+    EXPECT_GE(memAndSize.second, 30 * SIZE);
+    EXPECT_LE(memAndSize.second, 50 * SIZE);
   }
 }
 
 TEST(StorageProviderTest, FailsDueToLimitLowerThanMin) {
   SetVALimit limit{MB};
   llvh::ErrorOr<std::pair<void *, size_t>> result =
       vmAllocateAllowLess(100 * MB, 30 * MB, MB);
   ASSERT_FALSE(result);
 }
 
 TEST_P(StorageProviderTest, VirtualMemoryFreed) {
-  SetVALimit limit{10 * MB};
+  SetVALimit limit{25 * MB};
+  auto &params = GetParam();
 
   for (size_t i = 0; i < 20; i++) {
-    std::shared_ptr<StorageProvider> sp = GetStorageProvider(GetParam());
-    StorageGuard sg{sp, *sp->newStorage()};
+    std::shared_ptr<StorageProvider> sp =
+        GetStorageProvider(params.providerType, params.vaSize);
+    StorageGuard sg{
+        sp, *sp->newStorage(params.storageSize), params.storageSize};
   }
 }
 
 #endif
 
 INSTANTIATE_TEST_CASE_P(
     StorageProviderTests,
     StorageProviderTest,
-    ::testing::Values(MmapProvider, ContiguousVAProvider));
+    ::testing::Values(
+        StorageProviderParam{
+            MmapProvider,
+            SIZE,
+            0,
+        },
+        StorageProviderParam{
+            ContiguousVAProvider,
+            SIZE,
+            SIZE,
+        },
+        StorageProviderParam{ContiguousVAProvider, SIZE * 5, SIZE * 5}));
 
 } // namespace

From 7f06c3390f7dc4e4e175bc6100940540338410b6 Mon Sep 17 00:00:00 2001
From: "Gang Zhao (Hermes)"
Date: Mon, 21 Oct 2024 16:20:39 -0700
Subject: [PATCH 3/5] Make SHSegmentInfo explicit in CardTable (#1505)

Summary:
Currently `SHSegmentInfo` lives in the prefix of the CardTable's inline
storage (specifically, the prefix of the `cards_` array), but this is only
documented in a comment. Put it in a union with the `cards_` array to make the
aliasing explicit. This also simplifies the reasoning in the following diffs,
where we need to add more fields to `SHSegmentInfo`.

In addition, `kFirstUsedIndex` should take the size of `SHSegmentInfo` into
account, since sizeof(SHSegmentInfo) could be larger than
`(2 * kCardTableSize) >> kLogCardSize` for small segment sizes.

Differential Revision: D61747499
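The union introduced below makes that aliasing explicit in the type system. A
standalone sketch of the kFirstUsedIndex computation, with illustrative
constants in place of the real configuration:

#include <algorithm>
#include <cstddef>

// Hypothetical values; in the real header these derive from the segment size.
constexpr size_t kCardTableSize = 2048;
constexpr size_t kLogCardSize = 9; // 512-byte cards
struct SHSegmentInfo { unsigned index; };

// First card index whose byte is not shadowed by the two table arrays
// themselves or by the SHSegmentInfo stored in the unused prefix.
constexpr size_t kFirstUsedIndex =
    std::max(sizeof(SHSegmentInfo), (2 * kCardTableSize) >> kLogCardSize);

Clearing or dirtying cards from kFirstUsedIndex onward therefore can never
touch the bytes of segmentInfo_.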
---
 include/hermes/VM/CardTableNC.h         | 40 ++++++++++++++-----------
 unittests/VMRuntime/CardTableNCTest.cpp |  7 +++--
 2 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/include/hermes/VM/CardTableNC.h b/include/hermes/VM/CardTableNC.h
index 5bfa40f2102..f65e73c0581 100644
--- a/include/hermes/VM/CardTableNC.h
+++ b/include/hermes/VM/CardTableNC.h
@@ -77,21 +77,22 @@ class CardTable {
   /// guaranteed by a static_assert below.
   static constexpr size_t kHeapBytesPerCardByte = kCardSize;
 
-  /// A prefix of every segment is occupied by auxilary data
-  /// structures. The card table is the first such data structure.
-  /// The card table maps to the segment. Only the suffix of the card
-  /// table that maps to the suffix of entire segment that is used for
-  /// allocation is ever used; the prefix that maps to the card table
-  /// itself is not used. (Nor is the portion that of the card table
-  /// that maps to the other auxiliary data structure, the mark bit
-  /// array, but we don't attempt to calculate that here.)
-  /// It is useful to know the size of this unused region of
-  /// the card table, so it can be used for other purposes.
-  /// Note that the total size of the card table is 2 times
-  /// kCardTableSize, since the CardTable contains two byte arrays of
-  /// that size (cards_ and _boundaries_).
+  /// A prefix of every segment is occupied by auxiliary data structures. The
+  /// card table is the first such data structure. The card table maps to the
+  /// segment. Only the suffix of the card table that maps to the suffix of
+  /// the entire segment that is used for allocation is ever used; the prefix
+  /// that maps to the card table itself is not used, nor is the portion of
+  /// the card table that maps to the other auxiliary data structures: the
+  /// mark bit array and the guard pages. This small space can be used for
+  /// other purposes, such as storing the SHSegmentInfo. The actual first used
+  /// index should take this into account; here we only account for the
+  /// CardTable and the size of SHSegmentInfo. It is only used as the starting
+  /// index for clearing/dirtying a range of bits.
+  /// Note that the total size of the card table is 2 times kCardTableSize,
+  /// since the CardTable contains two byte arrays of that size (cards_ and
+  /// boundaries_).
   static constexpr size_t kFirstUsedIndex =
-      (2 * kCardTableSize) >> kLogCardSize;
+      std::max(sizeof(SHSegmentInfo), (2 * kCardTableSize) >> kLogCardSize);
 
   CardTable() = default;
   /// CardTable is not copyable or movable: It must be constructed in-place.
   CardTable(const CardTable &) = delete;
   CardTable(CardTable &&) = delete;
@@ -255,9 +256,14 @@ class CardTable {
 
   void cleanOrDirtyRange(size_t from, size_t to, CardStatus cleanOrDirty);
 
-  /// This needs to be atomic so that the background thread in Hades can safely
-  /// dirty cards when compacting.
-  std::array<AtomicIfConcurrentGC<CardStatus>, kCardTableSize> cards_{};
+  union {
+    /// The bytes occupied by segmentInfo_ are guaranteed not to be overwritten
+    /// by writes to the cards_ array. See static assertions in
+    /// AlignedHeapSegmentBase.
+    SHSegmentInfo segmentInfo_;
+    /// This needs to be atomic so that the background thread in Hades can
+    /// safely dirty cards when compacting.
+    std::array<AtomicIfConcurrentGC<CardStatus>, kCardTableSize> cards_{};
+  };
 
   /// See the comment at kHeapBytesPerCardByte above to see why this is
   /// necessary.

diff --git a/unittests/VMRuntime/CardTableNCTest.cpp b/unittests/VMRuntime/CardTableNCTest.cpp
index adaffe0651d..745aa4300b3 100644
--- a/unittests/VMRuntime/CardTableNCTest.cpp
+++ b/unittests/VMRuntime/CardTableNCTest.cpp
@@ -58,9 +58,10 @@ void CardTableNCTest::dirtyRangeTest(
 
 CardTableNCTest::CardTableNCTest() {
   // For purposes of this test, we'll assume the first writeable byte of
-  // the segment comes just after the card table (which is at the
-  // start of the segment).
-  auto first = seg.lowLim() + sizeof(CardTable);
+  // the segment comes just after the memory region that can be mapped by
+  // kFirstUsedIndex bytes.
+  auto first = seg.lowLim() +
+      CardTable::kFirstUsedIndex * CardTable::kHeapBytesPerCardByte;
   auto last = reinterpret_cast<char *>(llvh::alignDown(
       reinterpret_cast<uintptr_t>(seg.hiLim() - 1), CardTable::kCardSize));

From d7a07aa869d96c081ae75818552e598922cae51b Mon Sep 17 00:00:00 2001
From: "Gang Zhao (Hermes)"
Date: Mon, 21 Oct 2024 16:20:39 -0700
Subject: [PATCH 4/5] Store segment size in SHSegmentInfo (#1506)

Summary:
We need the segment size in several places, such as the CardTable, the heap
segment itself, and when getting the sizes of large GCCells. Add this size to
`SHSegmentInfo`.

In addition, in CardTableNCTest, when searching for dirty bits, we should
start from kFirstUsedIndex instead of 0, since writing the `SHSegmentInfo` may
set some bytes to non-zero values.

Differential Revision: D61807366
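A small sketch of the pattern this diff introduces: a bound that used to be a
compile-time constant is now derived from the size recorded in the segment
prefix. The field layout and card-size constant below are illustrative,
abbreviated from the diff:

#include <cstddef>

struct SHSegmentInfo {
  unsigned index;
  unsigned segmentSize; // added by this diff
};

constexpr size_t kLogCardSize = 9; // hypothetical 512-byte cards

// Mirrors CardTable::getEndIndex(): all valid card indices for a segment are
// below segmentSize >> kLogCardSize, whatever the segment's size is.
size_t endIndex(const SHSegmentInfo &info) {
  return info.segmentSize >> kLogCardSize;
}

Because the size lives in the segment itself, jumbo segments get correct
bounds from the same code path as normal segments.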
In addition, in CardTableNCTest, when searching for dirty bits, we should start from kFirstUsedIndex instead of 0, since writing the `SHSegmentInfo` may leave some of the prefix bytes non-zero.

Differential Revision: D61807366
---
 include/hermes/VM/AlignedHeapSegment.h  | 22 +++++++++----
 include/hermes/VM/CardTableNC.h         | 43 ++++++++++++++---------
 include/hermes/VM/sh_segment_info.h     |  3 ++
 lib/VM/gcs/CardTableNC.cpp              | 18 ++++-------
 unittests/VMRuntime/CardTableNCTest.cpp | 18 ++++++-----
 5 files changed, 63 insertions(+), 41 deletions(-)

diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h
index 19901e6e88d..8278fc0a8be 100644
--- a/include/hermes/VM/AlignedHeapSegment.h
+++ b/include/hermes/VM/AlignedHeapSegment.h
@@ -164,6 +164,19 @@ class AlignedHeapSegmentBase {
     return lowLim_;
   }
 
+  /// Read the storage size from SHSegmentInfo.
+  size_t storageSize() const {
+    auto *segmentInfo = reinterpret_cast<SHSegmentInfo *>(lowLim_);
+    return segmentInfo->segmentSize;
+  }
+
+  /// Returns the address that is the upper bound of the segment.
+  /// This is only used in debugging code and computing memory footprint, so
+  /// just read the segment size from SHSegmentInfo.
+  char *hiLim() const {
+    return lowLim_ + storageSize();
+  }
+
   /// Returns the address at which the first allocation in this segment would
   /// occur.
   /// Disable UB sanitization because 'this' may be null during the tests.
@@ -274,7 +287,9 @@ class AlignedHeapSegment : public AlignedHeapSegmentBase {
   /// Mask for isolating the storage being pointed into by a pointer.
   static constexpr size_t kHighMask{~kLowMask};
 
-  /// Returns the storage size, in bytes, of an \c AlignedHeapSegment.
+  /// Returns the storage size, in bytes, of an \c AlignedHeapSegment. This
+  /// replaces AlignedHeapSegmentBase::storageSize, which reads the size from
+  /// SHSegmentInfo.
   static constexpr size_t storageSize() {
     return kSize;
   }
@@ -380,11 +395,6 @@ class AlignedHeapSegment : public AlignedHeapSegmentBase {
   /// The number of bytes in the segment that are available for allocation.
   inline size_t available() const;
 
-  /// Returns the address that is the upper bound of the segment.
-  char *hiLim() const {
-    return lowLim() + storageSize();
-  }
-
   /// Returns the first address after the region in which allocations can occur,
   /// taking external memory credits into a account (they decrease the effective
   /// end).
diff --git a/include/hermes/VM/CardTableNC.h b/include/hermes/VM/CardTableNC.h
index f65e73c0581..f4b2c62a5cf 100644
--- a/include/hermes/VM/CardTableNC.h
+++ b/include/hermes/VM/CardTableNC.h
@@ -63,11 +63,9 @@ class CardTable {
   static constexpr size_t kCardSize = 1 << kLogCardSize; // ==> 512-byte cards.
   static constexpr size_t kSegmentSize = 1 << HERMESVM_LOG_HEAP_SEGMENT_SIZE;
 
-  /// The number of valid indices into the card table.
-  static constexpr size_t kValidIndices = kSegmentSize >> kLogCardSize;
-
-  /// The size of the card table.
-  static constexpr size_t kCardTableSize = kValidIndices;
+  /// The size of the maximal inline card table. The CardStatus and boundary
+  /// arrays for larger segments are larger and are allocated separately.
+  static constexpr size_t kInlineCardTableSize = kSegmentSize >> kLogCardSize;
 
   /// For convenience, this is a conversion factor to determine how many bytes
   /// in the heap correspond to a single byte in the card table. This is
@@ -91,10 +89,14 @@ class CardTable {
   /// Note that the total size of the card table is 2 times kCardTableSize,
   /// since the CardTable contains two byte arrays of that size (cards_ and
   /// boundaries_).
-  static constexpr size_t kFirstUsedIndex =
-      std::max(sizeof(SHSegmentInfo), (2 * kCardTableSize) >> kLogCardSize);
+  static constexpr size_t kFirstUsedIndex = std::max(
+      sizeof(SHSegmentInfo),
+      (2 * kInlineCardTableSize) >> kLogCardSize);
 
-  CardTable() = default;
+  CardTable() {
+    // Preserve the segment size.
+    segmentInfo_.segmentSize = kSegmentSize;
+  }
   /// CardTable is not copyable or movable: It must be constructed in-place.
   CardTable(const CardTable &) = delete;
   CardTable(CardTable &&) = delete;
@@ -185,6 +187,11 @@ class CardTable {
   /// is the first object.)
   GCCell *firstObjForCard(unsigned index) const;
 
+  /// The end index of the card table (all valid indices should be smaller).
+  size_t getEndIndex() const {
+    return getSegmentSize() >> kLogCardSize;
+  }
+
 #ifdef HERMES_EXTRA_DEBUG
   /// Temporary debugging hack: yield the numeric value of the boundaries_ array
   /// for the given \p index.
@@ -215,10 +222,16 @@ class CardTable {
 #endif // HERMES_SLOW_DEBUG
 
 private:
+  unsigned getSegmentSize() const {
+    return segmentInfo_.segmentSize;
+  }
+
 #ifndef NDEBUG
-  /// Returns the pointer to the end of the storage containing \p ptr
-  /// (exclusive).
-  static void *storageEnd(const void *ptr);
+  /// Returns the pointer to the end of the storage starting at \p lowLim.
+  void *storageEnd(const void *lowLim) const {
+    return reinterpret_cast<void *>(
+        reinterpret_cast<uintptr_t>(lowLim) + getSegmentSize());
+  }
 #endif
 
   enum class CardStatus : char { Clean = 0, Dirty = 1 };
@@ -262,7 +275,7 @@ class CardTable {
     SHSegmentInfo segmentInfo_;
     /// This needs to be atomic so that the background thread in Hades can
     /// safely dirty cards when compacting.
-    std::array<AtomicIfConcurrentGC<CardStatus>, kCardTableSize> cards_{};
+    std::array<AtomicIfConcurrentGC<CardStatus>, kInlineCardTableSize> cards_{};
   };
 
   /// See the comment at kHeapBytesPerCardByte above to see why this is
@@ -281,7 +294,7 @@ class CardTable {
   /// time: If we allocate a large object that crosses many cards, the first
   /// crossed cards gets a non-negative value, and each subsequent one uses the
   /// maximum exponent that stays within the card range for the object.
-  int8_t boundaries_[kCardTableSize];
+  int8_t boundaries_[kInlineCardTableSize];
 };
 
 /// Implementations of inlines.
@@ -311,7 +324,7 @@ inline size_t CardTable::addressToIndex(const void *addr) const {
 }
 
 inline const char *CardTable::indexToAddress(size_t index) const {
-  assert(index <= kValidIndices && "index must be within the index range");
+  assert(index <= getEndIndex() && "index must be within the index range");
   const char *res = base() + (index << kLogCardSize);
   assert(
       base() <= res && res <= storageEnd(base()) &&
@@ -329,7 +342,7 @@ inline bool CardTable::isCardForAddressDirty(const void *addr) const {
 }
 
 inline bool CardTable::isCardForIndexDirty(size_t index) const {
-  assert(index < kValidIndices && "index is required to be in range.");
+  assert(index < getEndIndex() && "index is required to be in range.");
   return cards_[index].load(std::memory_order_relaxed) == CardStatus::Dirty;
 }
 
diff --git a/include/hermes/VM/sh_segment_info.h b/include/hermes/VM/sh_segment_info.h
index ae4c7ebdf51..0bc4539c52d 100644
--- a/include/hermes/VM/sh_segment_info.h
+++ b/include/hermes/VM/sh_segment_info.h
@@ -12,6 +12,9 @@
 /// contain segment-specific information.
 typedef struct SHSegmentInfo {
   unsigned index;
+  /// The storage size for this segment. We practically don't support segments
+  /// with size larger than UINT32_MAX.
+  unsigned segmentSize;
 } SHSegmentInfo;
 
 #endif
diff --git a/lib/VM/gcs/CardTableNC.cpp b/lib/VM/gcs/CardTableNC.cpp
index ec94d5e5710..a3d94078364 100644
--- a/lib/VM/gcs/CardTableNC.cpp
+++ b/lib/VM/gcs/CardTableNC.cpp
@@ -20,12 +20,6 @@
 namespace hermes {
 namespace vm {
 
-#ifndef NDEBUG
-/* static */ void *CardTable::storageEnd(const void *ptr) {
-  return AlignedHeapSegment::storageEnd(ptr);
-}
-#endif
-
 void CardTable::dirtyCardsForAddressRange(const void *low, const void *high) {
   // If high is in the middle of some card, ensure that we dirty that card.
   high = reinterpret_cast<const char *>(high) + kCardSize - 1;
@@ -44,19 +38,19 @@ OptValue<size_t> CardTable::findNextCardWithStatus(
 }
 
 void CardTable::clear() {
-  cleanRange(kFirstUsedIndex, kValidIndices);
+  cleanRange(kFirstUsedIndex, getEndIndex());
 }
 
 void CardTable::updateAfterCompaction(const void *newLevel) {
   const char *newLevelPtr = static_cast<const char *>(newLevel);
   size_t firstCleanCardIndex = addressToIndex(newLevelPtr + kCardSize - 1);
   assert(
-      firstCleanCardIndex <= kValidIndices &&
+      firstCleanCardIndex <= getEndIndex() &&
       firstCleanCardIndex >= kFirstUsedIndex && "Invalid index.");
   // Dirty the occupied cards (below the level), and clean the cards above the
   // level.
   dirtyRange(kFirstUsedIndex, firstCleanCardIndex);
-  cleanRange(firstCleanCardIndex, kValidIndices);
+  cleanRange(firstCleanCardIndex, getEndIndex());
 }
 
 void CardTable::cleanRange(size_t from, size_t to) {
@@ -147,12 +141,12 @@ protectBoundaryTableWork(void *table, size_t sz, oscompat::ProtectMode mode) {
 
 void CardTable::protectBoundaryTable() {
   protectBoundaryTableWork(
-      &boundaries_[0], kValidIndices, oscompat::ProtectMode::None);
+      &boundaries_[0], getEndIndex(), oscompat::ProtectMode::None);
 }
 
 void CardTable::unprotectBoundaryTable() {
   protectBoundaryTableWork(
-      &boundaries_[0], kValidIndices, oscompat::ProtectMode::ReadWrite);
+      &boundaries_[0], getEndIndex(), oscompat::ProtectMode::ReadWrite);
 }
 #endif // HERMES_EXTRA_DEBUG
 
@@ -160,7 +154,7 @@ void CardTable::unprotectBoundaryTable() {
 void CardTable::verifyBoundaries(char *start, char *level) const {
   // Start should be card-aligned.
   assert(isCardAligned(start));
-  for (unsigned index = addressToIndex(start); index < kValidIndices; index++) {
+  for (unsigned index = addressToIndex(start); index < getEndIndex(); index++) {
     const char *boundary = indexToAddress(index);
     if (level <= boundary) {
       break;
diff --git a/unittests/VMRuntime/CardTableNCTest.cpp b/unittests/VMRuntime/CardTableNCTest.cpp
index 745aa4300b3..67d1450e566 100644
--- a/unittests/VMRuntime/CardTableNCTest.cpp
+++ b/unittests/VMRuntime/CardTableNCTest.cpp
@@ -80,7 +80,7 @@ CardTableNCTest::CardTableNCTest() {
 TEST_F(CardTableNCTest, AddressToIndex) {
   // Expected indices in the card table corresponding to the probe
   // addresses into the storage.
-  const size_t lastIx = CardTable::kValidIndices - 1;
+  const size_t lastIx = table->getEndIndex() - 1;
   std::vector<size_t> indices{
       CardTable::kFirstUsedIndex,
       CardTable::kFirstUsedIndex + 1,
@@ -105,13 +105,13 @@ TEST_F(CardTableNCTest, AddressToIndexBoundary) {
   // the storage.
   ASSERT_EQ(seg.lowLim(), reinterpret_cast<char *>(table));
 
-  const size_t hiLim = CardTable::kValidIndices;
+  const size_t hiLim = table->getEndIndex();
   EXPECT_EQ(0, table->addressToIndex(seg.lowLim()));
   EXPECT_EQ(hiLim, table->addressToIndex(seg.hiLim()));
 }
 
 TEST_F(CardTableNCTest, DirtyAddress) {
-  const size_t lastIx = CardTable::kValidIndices - 1;
+  const size_t lastIx = table->getEndIndex() - 1;
 
   for (char *addr : addrs) {
     size_t ind = table->addressToIndex(addr);
@@ -138,7 +138,8 @@ TEST_F(CardTableNCTest, DirtyAddress) {
 TEST_F(CardTableNCTest, DirtyAddressRangeEmpty) {
   char *addr = addrs.at(0);
   table->dirtyCardsForAddressRange(addr, addr);
-  EXPECT_FALSE(table->findNextDirtyCard(0, CardTable::kValidIndices));
+  EXPECT_FALSE(table->findNextDirtyCard(
+      CardTable::kFirstUsedIndex, table->getEndIndex()));
 }
 
 /// Dirty an address range smaller than a single card.
@@ -215,7 +216,7 @@ TEST_F(CardTableNCTest, NextDirtyCardImmediate) {
   size_t ind = table->addressToIndex(addr);
   table->dirtyCardForAddress(addr);
 
-  auto dirty = table->findNextDirtyCard(ind, CardTable::kValidIndices);
+  auto dirty = table->findNextDirtyCard(ind, table->getEndIndex());
   ASSERT_TRUE(dirty);
   EXPECT_EQ(ind, *dirty);
 
@@ -223,9 +224,10 @@ TEST_F(CardTableNCTest, NextDirtyCardImmediate) {
 
 TEST_F(CardTableNCTest, NextDirtyCard) {
   /// Empty case: No dirty cards
-  EXPECT_FALSE(table->findNextDirtyCard(0, CardTable::kValidIndices));
+  EXPECT_FALSE(table->findNextDirtyCard(
+      CardTable::kFirstUsedIndex, table->getEndIndex()));
 
-  size_t from = 0;
+  size_t from = CardTable::kFirstUsedIndex;
   for (char *addr : addrs) {
     table->dirtyCardForAddress(addr);
 
@@ -233,7 +235,7 @@ TEST_F(CardTableNCTest, NextDirtyCard) {
     EXPECT_FALSE(table->findNextDirtyCard(from, ind));
 
     auto atEnd = table->findNextDirtyCard(from, ind + 1);
-    auto inMiddle = table->findNextDirtyCard(from, CardTable::kValidIndices);
+    auto inMiddle = table->findNextDirtyCard(from, table->getEndIndex());
 
     ASSERT_TRUE(atEnd);
     EXPECT_EQ(ind, *atEnd);
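Before moving to the final patch, it may help to sanity-check the index arithmetic that getEndIndex() now performs. A small self-contained check (the 4 MiB unit size is an assumed value for illustration; the real unit comes from HERMESVM_LOG_HEAP_SEGMENT_SIZE):

    #include <cstddef>

    constexpr size_t kLogCardSize = 9;        // 512-byte cards, as in the diff
    constexpr size_t kAssumedUnit = 4u << 20; // assume a 4 MiB segment unit

    static_assert(
        (kAssumedUnit >> kLogCardSize) == 8192,
        "the inline card table would hold 8192 entries");
    static_assert(
        ((8 * kAssumedUnit) >> kLogCardSize) == 65536,
        "an 8x jumbo segment needs 65536 entries, so the end index must come "
        "from the stored segment size, not from a compile-time constant");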
From 35204a91e6a0605d61d4754585e52a7f990f98e4 Mon Sep 17 00:00:00 2001
From: "Gang Zhao (Hermes)"
Date: Mon, 21 Oct 2024 16:20:39 -0700
Subject: [PATCH 5/5] Allocate cards and boundaries array separately in
 CardTable for large segment (#1507)

Summary:
For large segments, the fixed-size `cards` and `boundaries` arrays are not large enough to manage the entire segment. This diff adds pointers to the two arrays in SHSegmentInfo. For a normal segment, each pointer simply points to the corresponding inline array in the CardTable; for a large segment, it points to a separately allocated array.
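A condensed sketch of that allocation strategy (the real code stores the pointers in SHSegmentInfo and uses AtomicIfConcurrentGC<CardStatus> for the card entries; names and the inline capacity below are illustrative):

    #include <cstddef>
    #include <cstdint>

    struct CardArrays {
      static constexpr size_t kInlineSize = 8192; // assumed inline capacity

      explicit CardArrays(size_t cardTableSize) {
        if (cardTableSize <= kInlineSize) {
          // Normal segment: point at the inline storage.
          cards_ = inlineCards_;
          boundaries_ = inlineBoundaries_;
        } else {
          // Jumbo segment: value-initialize so every card starts Clean (0).
          cards_ = new char[cardTableSize]{};
          boundaries_ = new int8_t[cardTableSize];
        }
      }

      ~CardArrays() {
        // Only free what was heap-allocated; inline storage is part of *this.
        if (cards_ != inlineCards_)
          delete[] cards_;
        if (boundaries_ != inlineBoundaries_)
          delete[] boundaries_;
      }

     private:
      char *cards_;
      int8_t *boundaries_;
      char inlineCards_[kInlineSize]{};
      int8_t inlineBoundaries_[kInlineSize];
    };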
Differential Revision: D61747510
---
 include/hermes/VM/AlignedHeapSegment.h  |  8 +-
 include/hermes/VM/CardTableNC.h         | 99 +++++++++++++++++++------
 include/hermes/VM/sh_segment_info.h     |  6 ++
 lib/VM/gcs/AlignedHeapSegment.cpp       |  2 +-
 lib/VM/gcs/CardTableNC.cpp              | 16 ++--
 unittests/VMRuntime/CardTableNCTest.cpp | 43 +++++++----
 6 files changed, 127 insertions(+), 47 deletions(-)

diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h
index 8278fc0a8be..81acde4377e 100644
--- a/include/hermes/VM/AlignedHeapSegment.h
+++ b/include/hermes/VM/AlignedHeapSegment.h
@@ -80,6 +80,10 @@ class AlignedHeapSegmentBase {
     friend class AlignedHeapSegment;
     friend class AlignedHeapSegmentBase;
 
+    /// Pass the segment size to the CardTable constructor, so that it can
+    /// allocate its data separately if \p segmentSize > kSegmentUnitSize.
+    Contents(size_t segmentSize) : cardTable_(segmentSize) {}
+
     /// Note that because of the Contents object, the first few bytes of the
     /// card table are unused, we instead use them to store a small
     /// SHSegmentInfo struct.
@@ -215,9 +219,9 @@ class AlignedHeapSegmentBase {
   AlignedHeapSegmentBase() = default;
 
   /// Construct Contents() at the address of \p lowLim.
-  AlignedHeapSegmentBase(void *lowLim)
+  AlignedHeapSegmentBase(void *lowLim, size_t segmentSize)
       : lowLim_(reinterpret_cast<char *>(lowLim)) {
-    new (contents()) Contents();
+    new (contents()) Contents(segmentSize);
    contents()->protectGuardPage(oscompat::ProtectMode::None);
   }
 
diff --git a/include/hermes/VM/CardTableNC.h b/include/hermes/VM/CardTableNC.h
index f4b2c62a5cf..f332954a5bb 100644
--- a/include/hermes/VM/CardTableNC.h
+++ b/include/hermes/VM/CardTableNC.h
@@ -22,10 +22,16 @@ namespace hermes {
 namespace vm {
 
 /// The card table optimizes young gen collections by restricting the amount of
-/// heap belonging to the old gen that must be scanned. The card table expects
-/// to be constructed inside an AlignedHeapSegment's storage, at some position
-/// before the allocation region, and covers the extent of that storage's
-/// memory.
+/// heap belonging to the old gen that must be scanned. The card table expects
+/// to be constructed at the beginning of a segment's storage, and covers the
+/// extent of that storage's memory. There are two cases:
+/// 1. For AlignedHeapSegment, the inline CardStatus array and Boundary array
+/// in the card table are large enough.
+/// 2. For JumboHeapSegment, the two arrays are allocated separately.
+/// In either case, the pointers to the CardStatus array and Boundary array are
+/// stored in the \c cards and \c boundaries fields of SHSegmentInfo, which
+/// occupies the prefix bytes of the card table that are mapped to auxiliary
+/// data structures for a segment.
 ///
 /// Also supports the following query: Given a card in the heap that intersects
 /// with the used portion of its segment, find its "crossing object" -- the
@@ -58,14 +64,19 @@ class CardTable {
     const char *address_{nullptr};
   };
 
+  enum class CardStatus : char { Clean = 0, Dirty = 1 };
+
   /// The size (and base-two log of the size) of cards used in the card table.
   static constexpr size_t kLogCardSize = 9; // ==> 512-byte cards.
   static constexpr size_t kCardSize = 1 << kLogCardSize; // ==> 512-byte cards.
-  static constexpr size_t kSegmentSize = 1 << HERMESVM_LOG_HEAP_SEGMENT_SIZE;
+  /// Maximum size of a segment that can have inline cards and boundaries
+  /// arrays.
+  static constexpr size_t kSegmentUnitSize = 1
+      << HERMESVM_LOG_HEAP_SEGMENT_SIZE;
 
   /// The size of the maximal inline card table. The CardStatus and boundary
   /// arrays for larger segments are larger and are allocated separately.
-  static constexpr size_t kInlineCardTableSize = kSegmentSize >> kLogCardSize;
+  static constexpr size_t kInlineCardTableSize =
+      kSegmentUnitSize >> kLogCardSize;
 
   /// For convenience, this is a conversion factor to determine how many bytes
   /// in the heap correspond to a single byte in the card table. This is
@@ -93,9 +104,26 @@ class CardTable {
   static constexpr size_t kFirstUsedIndex = std::max(
       sizeof(SHSegmentInfo),
      (2 * kInlineCardTableSize) >> kLogCardSize);
 
-  CardTable() {
-    // Preserve the segment size.
-    segmentInfo_.segmentSize = kSegmentSize;
-  }
+  CardTable(size_t segmentSize) {
+    assert(
+        segmentSize && segmentSize % kSegmentUnitSize == 0 &&
+        "segmentSize must be a multiple of kSegmentUnitSize");
+
+    segmentInfo_.segmentSize = segmentSize;
+    if (segmentSize == kSegmentUnitSize) {
+      // Just use the inline storage.
+      setCards(inlineCardStatusArray);
+      setBoundaries(inlineBoundaryArray_);
+    } else {
+      size_t cardTableSize = segmentSize >> kLogCardSize;
+      // CardStatus is clean by default, so we must zero-initialize it.
+      setCards(new AtomicIfConcurrentGC<CardStatus>[cardTableSize] {});
+      setBoundaries(new int8_t[cardTableSize]);
+    }
+  }
   /// CardTable is not copyable or movable: It must be constructed in-place.
   CardTable(const CardTable &) = delete;
   CardTable(CardTable &&) = delete;
@@ -103,6 +127,16 @@ class CardTable {
   CardTable &operator=(const CardTable &) = delete;
   CardTable &operator=(CardTable &&) = delete;
 
+  ~CardTable() {
+    // If the CardStatus/boundary arrays were allocated separately, free them.
+    if (cards() != inlineCardStatusArray) {
+      delete[] cards();
+    }
+    if (boundaries() != inlineBoundaryArray_) {
+      delete[] boundaries();
+    }
+  }
+
   /// Returns the card table index corresponding to a byte at the given address.
   /// \pre \p addr must be within the bounds of the segment owning this card
   /// table or at most 1 card after it, that is to say
@@ -115,8 +149,7 @@ class CardTable {
   /// of how this is used.
   inline size_t addressToIndex(const void *addr) const
       LLVM_NO_SANITIZE("null");
 
-  /// Returns the address corresponding to the given card table
-  /// index.
+  /// Returns the address corresponding to the given card table index.
   ///
   /// \pre \p index is bounded:
   ///
@@ -146,7 +179,7 @@ class CardTable {
   inline OptValue<size_t> findNextDirtyCard(size_t fromIndex, size_t endIndex)
       const;
 
-  /// If there is a card card at or after \p fromIndex, at an index less than
+  /// If there is a card at or after \p fromIndex, at an index less than
   /// \p endIndex, returns the index of the clean card, else returns none.
   inline OptValue<size_t> findNextCleanCard(size_t fromIndex, size_t endIndex)
       const;
@@ -197,7 +230,7 @@ class CardTable {
   /// for the given \p index.
   /// TODO(T48709128): remove this when the problem is diagnosed.
   int8_t cardObjectTableValue(unsigned index) const {
-    return boundaries_[index];
+    return boundaries()[index];
   }
 
   /// These methods protect and unprotect, respectively, the memory
@@ -234,7 +267,22 @@ class CardTable {
   }
 #endif
 
-  enum class CardStatus : char { Clean = 0, Dirty = 1 };
+  void setCards(AtomicIfConcurrentGC<CardStatus> *cards) {
+    segmentInfo_.cards = cards;
+  }
+
+  AtomicIfConcurrentGC<CardStatus> *cards() const {
+    return static_cast<AtomicIfConcurrentGC<CardStatus> *>(segmentInfo_.cards);
+  }
+
+  void setBoundaries(int8_t *boundaries) {
+    segmentInfo_.boundaries = boundaries;
+  }
+
+  int8_t *boundaries() const {
+    return segmentInfo_.boundaries;
+  }
 
   /// \return The lowest address whose card can be dirtied in this array. i.e.
   /// The smallest address such that
@@ -272,16 +320,24 @@ class CardTable {
   union {
     /// The bytes occupied by segmentInfo_ are guaranteed to never be overwritten
     /// by writes to the cards_ array. See static assertions in AlignedHeapSegmentBase.
+    /// Pointers to the underlying CardStatus array and boundary array are
+    /// stored in it. Note that we could also store the boundary array in a
+    /// union along with inlineBoundaryArray_, since that array has unused
+    /// prefix bytes as well, which would save 8 bytes here. But it makes the
+    /// size check more complex, as we would need to ensure that the segment
+    /// size is large enough that inlineBoundaryArray_ has enough unused
+    /// prefix bytes to store the pointer.
     SHSegmentInfo segmentInfo_;
     /// This needs to be atomic so that the background thread in Hades can
     /// safely dirty cards when compacting.
-    std::array<AtomicIfConcurrentGC<CardStatus>, kInlineCardTableSize> cards_{};
+    AtomicIfConcurrentGC<CardStatus>
+        inlineCardStatusArray[kInlineCardTableSize]{};
   };
 
   /// See the comment at kHeapBytesPerCardByte above to see why this is
   /// necessary.
   static_assert(
-      sizeof(cards_[0]) == 1,
+      sizeof(inlineCardStatusArray[0]) == 1,
       "Validate assumption that card table entries are one byte");
 
@@ -294,7 +350,7 @@ class CardTable {
   /// time: If we allocate a large object that crosses many cards, the first
   /// crossed cards gets a non-negative value, and each subsequent one uses the
   /// maximum exponent that stays within the card range for the object.
-  int8_t boundaries_[kInlineCardTableSize];
+  int8_t inlineBoundaryArray_[kInlineCardTableSize];
 };
 
 /// Implementations of inlines.
@@ -333,7 +388,7 @@ inline const char *CardTable::indexToAddress(size_t index) const {
 }
 
 inline void CardTable::dirtyCardForAddress(const void *addr) {
-  cards_[addressToIndex(addr)].store(
+  cards()[addressToIndex(addr)].store(
       CardStatus::Dirty, std::memory_order_relaxed);
 }
 
@@ -343,7 +398,7 @@ inline bool CardTable::isCardForAddressDirty(const void *addr) const {
 }
 
 inline bool CardTable::isCardForIndexDirty(size_t index) const {
   assert(index < getEndIndex() && "index is required to be in range.");
-  return cards_[index].load(std::memory_order_relaxed) == CardStatus::Dirty;
+  return cards()[index].load(std::memory_order_relaxed) == CardStatus::Dirty;
 }
 
@@ -367,9 +422,9 @@ inline CardTable::Boundary CardTable::nextBoundary(const char *level) const {
 }
 
 inline const char *CardTable::base() const {
-  // As we know the card table is laid out inline before the allocation region
-  // of its aligned heap segment, we can use its own this pointer as the base
-  // address.
+  // As we know the card table is laid out inline at the beginning of the
+  // segment storage, which is before the allocation region, we can use its own
+  // this pointer as the base address.
   return reinterpret_cast<const char *>(this);
 }
 
diff --git a/include/hermes/VM/sh_segment_info.h b/include/hermes/VM/sh_segment_info.h
index 0bc4539c52d..7683afc7b9e 100644
--- a/include/hermes/VM/sh_segment_info.h
+++ b/include/hermes/VM/sh_segment_info.h
@@ -15,6 +15,12 @@ typedef struct SHSegmentInfo {
   /// The storage size for this segment. We practically don't support segments
   /// with size larger than UINT32_MAX.
   unsigned segmentSize;
+  /// Pointer to the CardStatus array for this segment. The actual element
+  /// type, AtomicIfConcurrentGC<CardStatus>, is erased here to avoid using a
+  /// C++ type (and forward-declaring a nested type) in this header.
+  void *cards;
+  /// Pointer to the boundary array for this segment.
+  int8_t *boundaries;
 } SHSegmentInfo;
 
 #endif
diff --git a/lib/VM/gcs/AlignedHeapSegment.cpp b/lib/VM/gcs/AlignedHeapSegment.cpp
index 62b0c416904..a1220b60d1c 100644
--- a/lib/VM/gcs/AlignedHeapSegment.cpp
+++ b/lib/VM/gcs/AlignedHeapSegment.cpp
@@ -61,7 +61,7 @@ llvh::ErrorOr<AlignedHeapSegment> AlignedHeapSegment::create(
 }
 
 AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim)
-    : AlignedHeapSegmentBase(lowLim), provider_(provider) {
+    : AlignedHeapSegmentBase(lowLim, kSize), provider_(provider) {
   assert(
       storageStart(lowLim_) == lowLim_ &&
       "The lower limit of this storage must be aligned");
diff --git a/lib/VM/gcs/CardTableNC.cpp b/lib/VM/gcs/CardTableNC.cpp
index a3d94078364..10937e15192 100644
--- a/lib/VM/gcs/CardTableNC.cpp
+++ b/lib/VM/gcs/CardTableNC.cpp
@@ -31,7 +31,7 @@ OptValue<size_t> CardTable::findNextCardWithStatus(
     size_t fromIndex,
     size_t endIndex) const {
   for (size_t idx = fromIndex; idx < endIndex; idx++)
-    if (cards_[idx].load(std::memory_order_relaxed) == status)
+    if (cards()[idx].load(std::memory_order_relaxed) == status)
       return idx;
 
   return llvh::None;
@@ -66,7 +66,7 @@ void CardTable::cleanOrDirtyRange(
     size_t to,
     CardStatus cleanOrDirty) {
   for (size_t index = from; index < to; index++) {
-    cards_[index].store(cleanOrDirty, std::memory_order_relaxed);
+    cards()[index].store(cleanOrDirty, std::memory_order_relaxed);
   }
 }
 
@@ -87,7 +87,7 @@ void CardTable::updateBoundaries(
       "Precondition: must have crossed boundary.");
   // The object may be large, and may cross multiple cards, but first
   // handle the first card.
-  boundaries_[boundary->index()] =
+  boundaries()[boundary->index()] =
       (boundary->address() - start) >> LogHeapAlign;
 
   boundary->bump();
@@ -100,7 +100,7 @@ void CardTable::updateBoundaries(
   unsigned currentIndexDelta = 1;
   unsigned numWithCurrentExp = 0;
   while (boundary->address() < end) {
-    boundaries_[boundary->index()] = encodeExp(currentExp);
+    boundaries()[boundary->index()] = encodeExp(currentExp);
     numWithCurrentExp++;
     if (numWithCurrentExp == currentIndexDelta) {
       numWithCurrentExp = 0;
@@ -114,14 +114,14 @@ void CardTable::updateBoundaries(
 }
 
 GCCell *CardTable::firstObjForCard(unsigned index) const {
-  int8_t val = boundaries_[index];
+  int8_t val = boundaries()[index];
 
   // If val is negative, it means skip backwards some number of cards.
   // In general, for an object crossing 2^N cards, a query for one of
   // those cards will examine at most N entries in the table.
   while (val < 0) {
     index -= 1 << decodeExp(val);
-    val = boundaries_[index];
+    val = boundaries()[index];
   }
 
   char *boundary = const_cast<char *>(indexToAddress(index));
@@ -141,12 +141,12 @@ protectBoundaryTableWork(void *table, size_t sz, oscompat::ProtectMode mode) {
 
 void CardTable::protectBoundaryTable() {
   protectBoundaryTableWork(
-      &boundaries_[0], getEndIndex(), oscompat::ProtectMode::None);
+      boundaries(), getEndIndex(), oscompat::ProtectMode::None);
 }
 
 void CardTable::unprotectBoundaryTable() {
   protectBoundaryTableWork(
-      &boundaries_[0], getEndIndex(), oscompat::ProtectMode::ReadWrite);
+      boundaries(), getEndIndex(), oscompat::ProtectMode::ReadWrite);
 }
 #endif // HERMES_EXTRA_DEBUG
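The skip-backwards encoding that firstObjForCard() walks can be seen end-to-end in a toy example. encodeExp/decodeExp below are assumed stand-ins for the private helpers, and the head offset is simplified to zero:

    #include <cassert>
    #include <cstdint>

    static int8_t encodeExp(int8_t exp) { return -(exp + 1); } // assumed scheme
    static int8_t decodeExp(int8_t enc) { return -enc - 1; }   // its inverse

    int main() {
      int8_t boundaries[16] = {};
      // Pretend one large object starts on card 2 and covers cards 2..9,
      // mirroring the fill loop in updateBoundaries().
      boundaries[2] = 0; // head offset within card 2 (0 for simplicity)
      unsigned currentExp = 0, delta = 1, withExp = 0;
      for (unsigned i = 3; i <= 9; i++) {
        boundaries[i] = encodeExp(currentExp);
        if (++withExp == delta) {
          withExp = 0;
          delta *= 2;
          currentExp++;
        }
      }
      // Query: walk back from card 9 to the card holding the object's head.
      // Each negative entry says "skip back 2^N cards", so the walk takes
      // at most N steps for an object crossing 2^N cards.
      unsigned index = 9;
      int8_t val = boundaries[index];
      while (val < 0) {
        index -= 1u << decodeExp(val);
        val = boundaries[index];
      }
      assert(index == 2 && "found the crossing object's card");
      return 0;
    }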
diff --git a/unittests/VMRuntime/CardTableNCTest.cpp b/unittests/VMRuntime/CardTableNCTest.cpp
index 67d1450e566..c5bdf04e4a6 100644
--- a/unittests/VMRuntime/CardTableNCTest.cpp
+++ b/unittests/VMRuntime/CardTableNCTest.cpp
@@ -22,7 +22,11 @@ using namespace hermes::vm;
 
 namespace {
 
-struct CardTableNCTest : public ::testing::Test {
+struct CardTableParam {
+  size_t segmentSize;
+};
+
+struct CardTableNCTest : public ::testing::TestWithParam<CardTableParam> {
   CardTableNCTest();
 
   /// Run a test scenario whereby we dirty [dirtyStart, dirtyEnd], and then test
@@ -38,7 +42,7 @@ struct CardTableNCTest : public ::testing::Test {
   std::unique_ptr<StorageProvider> provider{StorageProvider::mmapProvider()};
   AlignedHeapSegment seg{
       std::move(AlignedHeapSegment::create(provider.get()).get())};
-  CardTable *table{new (seg.lowLim()) CardTable()};
+  CardTable *table{nullptr};
 
   // Addresses in the aligned storage to interact with during the tests.
   std::vector<char *> addrs;
@@ -57,6 +61,9 @@ void CardTableNCTest::dirtyRangeTest(
 }
 
 CardTableNCTest::CardTableNCTest() {
+  auto &param = GetParam();
+  table = new (seg.lowLim()) CardTable(param.segmentSize);
+
   // For purposes of this test, we'll assume the first writeable byte of
   // the segment comes just after the memory region that can be mapped by
   // kFirstUsedIndex bytes.
@@ -77,7 +84,7 @@ CardTableNCTest::CardTableNCTest() {
   EXPECT_TRUE(std::is_sorted(addrs.begin(), addrs.end()));
 }
 
-TEST_F(CardTableNCTest, AddressToIndex) {
+TEST_P(CardTableNCTest, AddressToIndex) {
   // Expected indices in the card table corresponding to the probe
   // addresses into the storage.
   const size_t lastIx = table->getEndIndex() - 1;
@@ -100,7 +107,7 @@ TEST_F(CardTableNCTest, AddressToIndex) {
   }
 }
 
-TEST_F(CardTableNCTest, AddressToIndexBoundary) {
+TEST_P(CardTableNCTest, AddressToIndexBoundary) {
   // This test only works if the card table is laid out at the very beginning of
   // the storage.
   ASSERT_EQ(seg.lowLim(), reinterpret_cast<char *>(table));
@@ -110,7 +117,7 @@ TEST_F(CardTableNCTest, AddressToIndexBoundary) {
   EXPECT_EQ(hiLim, table->addressToIndex(seg.hiLim()));
 }
 
-TEST_F(CardTableNCTest, DirtyAddress) {
+TEST_P(CardTableNCTest, DirtyAddress) {
   const size_t lastIx = table->getEndIndex() - 1;
 
   for (char *addr : addrs) {
@@ -135,7 +142,7 @@ TEST_F(CardTableNCTest, DirtyAddress) {
 }
 
 /// Dirty an emtpy range.
-TEST_F(CardTableNCTest, DirtyAddressRangeEmpty) {
+TEST_P(CardTableNCTest, DirtyAddressRangeEmpty) {
   char *addr = addrs.at(0);
   table->dirtyCardsForAddressRange(addr, addr);
   EXPECT_FALSE(table->findNextDirtyCard(
       CardTable::kFirstUsedIndex, table->getEndIndex()));
 }
 
 /// Dirty an address range smaller than a single card.
-TEST_F(CardTableNCTest, DirtyAddressRangeSmall) { +TEST_P(CardTableNCTest, DirtyAddressRangeSmall) { char *addr = addrs.at(0); dirtyRangeTest( /* expectedStart */ addr, @@ -153,7 +160,7 @@ TEST_F(CardTableNCTest, DirtyAddressRangeSmall) { } /// Dirty an address range corresponding exactly to a card. -TEST_F(CardTableNCTest, DirtyAddressRangeCard) { +TEST_P(CardTableNCTest, DirtyAddressRangeCard) { char *addr = addrs.at(0); dirtyRangeTest( /* expectedStart */ addr, @@ -164,7 +171,7 @@ TEST_F(CardTableNCTest, DirtyAddressRangeCard) { /// Dirty an address range the width of a card but spread across a card /// boundary. -TEST_F(CardTableNCTest, DirtyAddressRangeCardOverlapping) { +TEST_P(CardTableNCTest, DirtyAddressRangeCardOverlapping) { char *addr = addrs.at(0); char *start = addr + CardTable::kCardSize / 2; dirtyRangeTest( @@ -176,7 +183,7 @@ TEST_F(CardTableNCTest, DirtyAddressRangeCardOverlapping) { /// Dirty an address range spanning multiple cards, with overhang on either /// side. -TEST_F(CardTableNCTest, DirtyAddressRangeLarge) { +TEST_P(CardTableNCTest, DirtyAddressRangeLarge) { char *addr = addrs.at(0); char *start = addr + CardTable::kCardSize / 2; dirtyRangeTest( @@ -186,13 +193,13 @@ TEST_F(CardTableNCTest, DirtyAddressRangeLarge) { /* expectedEnd */ addr + 4 * CardTable::kCardSize); } -TEST_F(CardTableNCTest, Initial) { +TEST_P(CardTableNCTest, Initial) { for (char *addr : addrs) { EXPECT_FALSE(table->isCardForAddressDirty(addr)); } } -TEST_F(CardTableNCTest, Clear) { +TEST_P(CardTableNCTest, Clear) { for (char *addr : addrs) { ASSERT_FALSE(table->isCardForAddressDirty(addr)); } @@ -211,7 +218,7 @@ TEST_F(CardTableNCTest, Clear) { } } -TEST_F(CardTableNCTest, NextDirtyCardImmediate) { +TEST_P(CardTableNCTest, NextDirtyCardImmediate) { char *addr = addrs.at(addrs.size() / 2); size_t ind = table->addressToIndex(addr); @@ -222,7 +229,7 @@ TEST_F(CardTableNCTest, NextDirtyCardImmediate) { EXPECT_EQ(ind, *dirty); } -TEST_F(CardTableNCTest, NextDirtyCard) { +TEST_P(CardTableNCTest, NextDirtyCard) { /// Empty case: No dirty cards EXPECT_FALSE(table->findNextDirtyCard( CardTable::kFirstUsedIndex, table->getEndIndex())); @@ -246,6 +253,14 @@ TEST_F(CardTableNCTest, NextDirtyCard) { } } +INSTANTIATE_TEST_CASE_P( + CardTableNCTests, + CardTableNCTest, + ::testing::Values( + CardTableParam{AlignedHeapSegmentBase::kSegmentUnitSize}, + CardTableParam{AlignedHeapSegmentBase::kSegmentUnitSize * 8}, + CardTableParam{AlignedHeapSegmentBase::kSegmentUnitSize * 128})); + } // namespace #endif // HERMESVM_GC_MALLOC
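For readers less familiar with value-parameterized gtest suites: each TEST_P above expands into one test per CardTableParam, named <instantiation>/<suite>.<test>/<index> (e.g. CardTableNCTests/CardTableNCTest.NextDirtyCard/0 runs against the unit-size segment, and .../2 against the 128x jumbo segment). Inside a body, GetParam() returns the active parameter, so a hypothetical extra test could check that the table reports the size it was constructed with:

    // Sketch only: this test does not exist in the patch; it illustrates how
    // GetParam() and the new getEndIndex() relate for every instantiated size.
    TEST_P(CardTableNCTest, SegmentSizeIsPreserved) {
      // getEndIndex() is segmentSize >> kLogCardSize, so shifting back
      // recovers the exact size (all sizes are multiples of the card size).
      EXPECT_EQ(
          GetParam().segmentSize,
          table->getEndIndex() << CardTable::kLogCardSize);
    }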