Skip to content

[scudo] Add EnableContiguousRegions mode #85149

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Apr 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion compiler-rt/lib/scudo/standalone/allocator_config.def
Original file line number Diff line number Diff line change
Expand Up @@ -87,9 +87,14 @@ PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
// PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
//
// Indicates support for offsetting the start of a region by a random number of
// pages. Only used with primary64.
// pages. This is only used if `EnableContiguousRegions` is enabled.
PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false)

// When `EnableContiguousRegions` is true, all regions will be arranged
// adjacent to one another. This reduces the fragmentation caused by region
// allocations but may require a huge amount of contiguous pages at
// initialization.
PRIMARY_OPTIONAL(const bool, EnableContiguousRegions, true)

// PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT)
//
// Use condition variable to shorten the waiting time of refillment of
Expand Down
117 changes: 76 additions & 41 deletions compiler-rt/lib/scudo/standalone/primary64.h
Original file line number Diff line number Diff line change
Expand Up @@ -117,40 +117,30 @@ template <typename Config> class SizeClassAllocator64 {
SmallerBlockReleasePageDelta =
PagesInGroup * (1 + MinSizeClass / 16U) / 100;

// Reserve the space required for the Primary.
CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
"scudo:primary_reserve"));
PrimaryBase = ReservedMemory.getBase();
DCHECK_NE(PrimaryBase, 0U);

u32 Seed;
const u64 Time = getMonotonicTimeFast();
if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
Seed = static_cast<u32>(Time ^ (reinterpret_cast<uptr>(&Seed) >> 12));

for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
for (uptr I = 0; I < NumClasses; I++)
getRegionInfo(I)->RandState = getRandomU32(&Seed);

// The actual start of a region is offset by a random number of pages
// when PrimaryEnableRandomOffset is set.
Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
(Config::getEnableRandomOffset()
? ((getRandomModN(&Seed, 16) + 1) * PageSize)
: 0);
Region->RandState = getRandomU32(&Seed);
// Releasing small blocks is expensive, set a higher threshold to avoid
// frequent page releases.
if (isSmallBlock(getSizeByClassId(I)))
Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
else
Region->TryReleaseThreshold = PageSize;
Region->ReleaseInfo.LastReleaseAtNs = Time;
if (Config::getEnableContiguousRegions()) {
ReservedMemoryT ReservedMemory = {};
// Reserve the space required for the Primary.
CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
"scudo:primary_reserve"));
const uptr PrimaryBase = ReservedMemory.getBase();

for (uptr I = 0; I < NumClasses; I++) {
MemMapT RegionMemMap = ReservedMemory.dispatch(
PrimaryBase + (I << RegionSizeLog), RegionSize);
RegionInfo *Region = getRegionInfo(I);

Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
PrimaryBase + (I << RegionSizeLog), RegionSize);
CHECK(Region->MemMapInfo.MemMap.isAllocated());
initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());
}
shuffle(RegionInfoArray, NumClasses, &Seed);
}
shuffle(RegionInfoArray, NumClasses, &Seed);

// The binding should be done after region shuffling so that it won't bind
// the FLLock from the wrong region.
Expand All @@ -160,14 +150,17 @@ template <typename Config> class SizeClassAllocator64 {
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}

void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
void unmapTestOnly() {
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
{
ScopedLock ML(Region->MMLock);
MemMapT MemMap = Region->MemMapInfo.MemMap;
if (MemMap.isAllocated())
MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}
*Region = {};
}
if (PrimaryBase)
ReservedMemory.release();
PrimaryBase = 0U;
}

// When all blocks are freed, it has to be the same size as `AllocatedUser`.
Expand Down Expand Up @@ -251,9 +244,10 @@ template <typename Config> class SizeClassAllocator64 {
}

const bool RegionIsExhausted = Region->Exhausted;
if (!RegionIsExhausted)
if (!RegionIsExhausted) {
PopCount = populateFreeListAndPopBlocks(C, ClassId, Region, ToArray,
MaxBlockCount);
}
ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
break;
}
Expand Down Expand Up @@ -513,7 +507,6 @@ template <typename Config> class SizeClassAllocator64 {
private:
static const uptr RegionSize = 1UL << RegionSizeLog;
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr PrimarySize = RegionSize * NumClasses;

static const uptr MapSizeIncrement = Config::getMapSizeIncrement();
// Fill at most this number of batches from the newly map'd memory.
Expand Down Expand Up @@ -569,9 +562,14 @@ template <typename Config> class SizeClassAllocator64 {
}

uptr getRegionBaseByClassId(uptr ClassId) {
return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
RegionSize) +
PrimaryBase;
RegionInfo *Region = getRegionInfo(ClassId);
Region->MMLock.assertHeld();

if (!Config::getEnableContiguousRegions() &&
!Region->MemMapInfo.MemMap.isAllocated()) {
return 0U;
}
return Region->MemMapInfo.MemMap.getBase();
}

static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
Expand Down Expand Up @@ -601,6 +599,30 @@ template <typename Config> class SizeClassAllocator64 {
return BlockSize > PageSize;
}

ALWAYS_INLINE void initRegion(RegionInfo *Region, uptr ClassId,
MemMapT MemMap, bool EnableRandomOffset)
REQUIRES(Region->MMLock) {
DCHECK(!Region->MemMapInfo.MemMap.isAllocated());
DCHECK(MemMap.isAllocated());

const uptr PageSize = getPageSizeCached();

Region->MemMapInfo.MemMap = MemMap;

Region->RegionBeg = MemMap.getBase();
if (EnableRandomOffset) {
Region->RegionBeg +=
(getRandomModN(&Region->RandState, 16) + 1) * PageSize;
}

// Releasing small blocks is expensive, set a higher threshold to avoid
// frequent page releases.
if (isSmallBlock(getSizeByClassId(ClassId)))
Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
else
Region->TryReleaseThreshold = PageSize;
}

void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
REQUIRES(Region->FLLock) {
DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
Expand Down Expand Up @@ -988,9 +1010,26 @@ template <typename Config> class SizeClassAllocator64 {
CompactPtrT *ToArray,
const u16 MaxBlockCount)
REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
if (!Config::getEnableContiguousRegions() &&
!Region->MemMapInfo.MemMap.isAllocated()) {
ReservedMemoryT ReservedMemory;
if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
"scudo:primary_reserve",
MAP_ALLOWNOMEM))) {
Printf("Can't reserve pages for size class %zu.\n",
getSizeByClassId(ClassId));
Region->Exhausted = true;
return 0U;
}
initRegion(Region, ClassId,
ReservedMemory.dispatch(ReservedMemory.getBase(),
ReservedMemory.getCapacity()),
/*EnableRandomOffset=*/false);
}

DCHECK(Region->MemMapInfo.MemMap.isAllocated());
const uptr Size = getSizeByClassId(ClassId);
const u16 MaxCount = CacheT::getMaxCached(Size);

const uptr RegionBeg = Region->RegionBeg;
const uptr MappedUser = Region->MemMapInfo.MappedUser;
const uptr TotalUserBytes =
Expand Down Expand Up @@ -1682,10 +1721,6 @@ template <typename Config> class SizeClassAllocator64 {
Region->FLLockCV.notifyAll(Region->FLLock);
}

// TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
// deprecated.
uptr PrimaryBase = 0;
ReservedMemoryT ReservedMemory = {};
// The minimum size of pushed blocks that we will try to release the pages in
// that size class.
uptr SmallerBlockReleasePageDelta = 0;
Expand Down
1 change: 1 addition & 0 deletions compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,7 @@ template <typename SizeClassMapT> struct TestConfig3 {
static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
typedef scudo::uptr CompactPtrT;
static const scudo::uptr CompactPtrScale = 0;
static const bool EnableContiguousRegions = false;
static const bool EnableRandomOffset = true;
static const scudo::uptr MapSizeIncrement = 1UL << 18;
};
Expand Down