@@ -180,6 +180,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
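Note: the EntryListT/ListInfo pair added above replaces the single LRUHead/LRUTail fields with one head/tail record per list, still using u16 array indices rather than pointers as links (the TODO refers to generalizing the intrusive list for exactly this). A minimal sketch of such an index-linked list, using simplified stand-in types (Entry, InvalidEntry) that are not the Scudo definitions:

    #include <cstdint>

    using u16 = std::uint16_t;
    constexpr u16 InvalidEntry = UINT16_MAX; // plays the role of a null link

    struct Entry {
      u16 Prev = InvalidEntry;
      u16 Next = InvalidEntry;
    };

    struct ListInfo {
      u16 Head = InvalidEntry;
      u16 Tail = InvalidEntry;
    };

    // Push index I onto the front of List; links are indices into Entries.
    void pushFront(ListInfo &List, Entry *Entries, u16 I) {
      if (List.Tail == InvalidEntry)
        List.Tail = I; // first element is also the tail
      else
        Entries[List.Head].Prev = I;
      Entries[I].Next = List.Head;
      Entries[I].Prev = InvalidEntry;
      List.Head = I;
    }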
@@ -197,13 +205,18 @@ template <typename Config> class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }
 
   // Ensure the default maximum specified fits the array.
@@ -227,8 +240,10 @@ template <typename Config> class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -310,15 +325,19 @@ template <typename Config> class MapAllocatorCache {
       // All excess entries are evicted from the cache
       while (needToEvict()) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        EntryListT EvictionListType;
+        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+          EvictionListType = COMMITTED;
+        else
+          EvictionListType = DECOMMITTED;
+        remove(EntryLists[EvictionListType].Tail, EvictionListType);
       }
 
-      insert(Entry);
+      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
 
       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0);
+    } while (0); // ScopedLock L(Mutex);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
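Note: the eviction change above prefers the DECOMMITTED list and only falls back to COMMITTED when no decommitted entry exists, so blocks whose pages were already returned to the OS are dropped first. A sketch of that selection on its own, written as a hypothetical member helper (pickEvictionList is not part of the patch):

    EntryListT pickEvictionList() REQUIRES(Mutex) {
      // Decommitted entries are the cheapest to give up, so evict them first;
      // otherwise fall back to the committed list.
      if (EntryLists[DECOMMITTED].Tail != CachedBlock::InvalidEntry)
        return DECOMMITTED;
      return COMMITTED;
    }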
@@ -335,56 +354,63 @@ template <typename Config> class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
     uptr EntryHeaderPos = 0;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return false;
-      u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        const uptr HeaderPos = AllocPos - HeadersSize;
-        if (HeaderPos > CommitBase + CommitSize)
-          continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
-          continue;
-        }
-        Found = true;
-        const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
-        const uptr MaxAllowedFragmentedBytes =
-            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-        if (Diff <= MaxAllowedFragmentedBytes) {
+      EntryListT OptimalFitListType = NONE;
+      auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+             I = Entries[I].Next) {
+          const uptr CommitBase = Entries[I].CommitBase;
+          const uptr CommitSize = Entries[I].CommitSize;
+          const uptr AllocPos =
+              roundDown(CommitBase + CommitSize - Size, Alignment);
+          const uptr HeaderPos = AllocPos - HeadersSize;
+          if (HeaderPos > CommitBase + CommitSize)
+            continue;
+          if (HeaderPos < CommitBase ||
+              AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+            continue;
+
+          const uptr Diff = HeaderPos - CommitBase;
+          // immediately use a cached block if it's size is close enough to
+          // the requested size.
+          const uptr MaxAllowedFragmentedBytes =
+              (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+          if (Diff <= MaxAllowedFragmentedBytes) {
+            OptimalFitIndex = I;
+            EntryHeaderPos = HeaderPos;
+            OptimalFitListType = ListType;
+            return true;
+          }
+
+          // keep track of the smallest cached block
+          // that is greater than (AllocSize + HeaderSize)
+          if (Diff > MinDiff)
+            continue;
           OptimalFitIndex = I;
+          MinDiff = Diff;
+          OptimalFitListType = ListType;
           EntryHeaderPos = HeaderPos;
-          break;
         }
-        // keep track of the smallest cached block
-        // that is greater than (AllocSize + HeaderSize)
-        if (Diff > MinDiff)
-          continue;
-        OptimalFitIndex = I;
-        MinDiff = Diff;
-        EntryHeaderPos = HeaderPos;
-      }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
-        SuccessfulRetrieves++;
-      }
-    }
-    if (!Found)
-      return false;
+        return (OptimalFitIndex != CachedBlock::InvalidEntry);
+      };
+
+      // Prioritize valid fit from COMMITTED entries over
+      // optimal fit from DECOMMITTED entries
+      if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
+        return false;
+
+      Entry = Entries[OptimalFitIndex];
+      remove(OptimalFitIndex, OptimalFitListType);
+      SuccessfulRetrieves++;
+    } // ScopedLock L(Mutex);
 
     *H = reinterpret_cast<LargeBlock::Header *>(
         LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
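Note: retrieval above now runs the same scan twice, first over COMMITTED entries and only then over DECOMMITTED ones, and a block is accepted immediately once the wasted bytes at its front are at most 1/FragmentedBytesDivisor (10%) of the span usable by the allocation. A self-contained sketch of just that acceptance test, with illustrative names:

    #include <cstdint>

    using uptr = std::uintptr_t; // stand-in for Scudo's uptr

    // Accept a cached block right away when the unused bytes in front of the
    // header are at most 10% of what remains usable for the allocation.
    bool isCloseEnoughFit(uptr CommitBase, uptr CommitSize, uptr HeaderPos) {
      constexpr uptr FragmentedBytesDivisor = 10;
      const uptr Diff = HeaderPos - CommitBase;
      const uptr MaxAllowedFragmentedBytes =
          (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
      return Diff <= MaxAllowedFragmentedBytes;
    }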
@@ -448,10 +474,15 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
     QuarantinePos = -1U;
   }
 
@@ -466,7 +497,7 @@ template <typename Config> class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
     // Cache should be populated with valid entries when not empty
@@ -475,66 +506,86 @@ template <typename Config> class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
 
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
     Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
+    pushFront(FreeIndex, ListType);
     EntriesCount++;
 
+    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+                Entries[Entries[EntryLists[ListType].Head].Next].Time);
+    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
+  // Joins the entries adjacent to Entries[I], effectively
+  // unlinking Entries[I] from the list
+  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (I == EntryLists[ListType].Head)
+      EntryLists[ListType].Head = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;
 
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
+    if (I == EntryLists[ListType].Tail)
+      EntryLists[ListType].Tail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
+  }
 
+  // Invalidates Entries[I], removes Entries[I] from list, and pushes
+  // Entries[I] onto the stack of available entries
+  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;
 
     // Cache should not have valid entries when not empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
     }
   }
 
+  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+      EntryLists[ListType].Tail = static_cast<u16>(I);
+    else
+      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+    Entries[I].Next = EntryLists[ListType].Head;
+    Entries[I].Prev = CachedBlock::InvalidEntry;
+    EntryLists[ListType].Head = static_cast<u16>(I);
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
+      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry;) {
+          uptr ToRemove = I;
+          I = Entries[I].Next;
+          MapInfo[N] = Entries[ToRemove].MemMap;
+          remove(ToRemove, ListType);
+          N++;
+        }
+      };
+      emptyList(COMMITTED);
+      emptyList(DECOMMITTED);
       EntriesCount = 0;
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+        DCHECK(!Entries[I].isValid());
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
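Note: splitting the old remove() into unlink() plus the available-stack push also makes it possible to move a still-valid entry between lists, which is exactly the unlink()/pushFront() pairing used by releaseOlderThan() in the next hunk. A rough usage sketch as a hypothetical member helper (transferEntry is not part of the patch):

    // Moves a valid entry from one list to the front of another without
    // invalidating it or touching the available-entry stack.
    void transferEntry(uptr I, EntryListT From, EntryListT To) REQUIRES(Mutex) {
      unlink(I, From);
      pushFront(I, To);
    }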
@@ -561,8 +612,13 @@ template <typename Config> class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+      if (Entries[I].isValid() && Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
       releaseIfOlderThan(Entries[I], Time);
+    }
   }
 
   HybridMutex Mutex;
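Note: in releaseOlderThan() above, an entry migrates from COMMITTED to DECOMMITTED only when it is valid, still has a non-zero timestamp (Time == 0 already marks a released entry, printed as "[R]" in the stats), and is old enough. A sketch of that per-entry predicate as a hypothetical helper (shouldDecommit is not part of the patch):

    // True when a committed entry is due to have its pages released and
    // should therefore move to the DECOMMITTED list.
    bool shouldDecommit(const CachedBlock &Entry, u64 Time) {
      return Entry.isValid() && Entry.Time != 0 && Entry.Time <= Time;
    }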
@@ -579,10 +635,12 @@ template <typename Config> class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries have memory chunks that have not been released to the OS
+  // DECOMMITTED entries have memory chunks that have been released to the OS
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };