@@ -180,6 +180,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
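+  // COMMITTED entries still have their pages mapped, DECOMMITTED entries
+  // have had their pages released back to the OS, and NONE is a sentinel
+  // value meaning "no list" (see EntryLists below).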
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
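+  // Head and Tail index into the Entries array; CachedBlock::InvalidEntry
+  // marks an empty list.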
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -197,13 +205,18 @@ template <typename Config> class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }
 
   // Ensure the default maximum specified fits the array.
@@ -227,8 +240,10 @@ template <typename Config> class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -310,15 +325,19 @@ template <typename Config> class MapAllocatorCache {
       // All excess entries are evicted from the cache
       while (needToEvict()) {
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
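+        // Prefer evicting DECOMMITTED entries, whose pages have already
+        // been released, before COMMITTED (immediately reusable) ones.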
+        EntryListT EvictionListType;
+        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+          EvictionListType = COMMITTED;
+        else
+          EvictionListType = DECOMMITTED;
+        EvictionMemMaps.push_back(
+            Entries[EntryLists[EvictionListType].Tail].MemMap);
+        remove(EntryLists[EvictionListType].Tail, EvictionListType);
       }
 
-      insert(Entry);
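+      // Entry.Time == 0 marks a block whose pages were already released
+      // (reported as "[R]" by getStats), so it goes on the DECOMMITTED list.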
+      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
 
       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0);
+    } while (0); // ScopedLock L(Mutex);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
@@ -335,56 +354,63 @@ template <typename Config> class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
     uptr EntryHeaderPos = 0;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return false;
-      u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        const uptr CommitBase = Entries[I].CommitBase;
-        const uptr CommitSize = Entries[I].CommitSize;
-        const uptr AllocPos =
-            roundDown(CommitBase + CommitSize - Size, Alignment);
-        const uptr HeaderPos = AllocPos - HeadersSize;
-        if (HeaderPos > CommitBase + CommitSize)
-          continue;
-        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
-          continue;
-        }
-        Found = true;
-        const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
-        const uptr MaxAllowedFragmentedBytes =
-            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-        if (Diff <= MaxAllowedFragmentedBytes) {
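+      // Records which list the chosen entry lives on; stays NONE until a
+      // candidate is found.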
+      EntryListT OptimalFitListType = NONE;
+      auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+             I = Entries[I].Next) {
+          const uptr CommitBase = Entries[I].CommitBase;
+          const uptr CommitSize = Entries[I].CommitSize;
+          const uptr AllocPos =
+              roundDown(CommitBase + CommitSize - Size, Alignment);
+          const uptr HeaderPos = AllocPos - HeadersSize;
+          if (HeaderPos > CommitBase + CommitSize)
+            continue;
+          if (HeaderPos < CommitBase ||
+              AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+            continue;
+
+          const uptr Diff = HeaderPos - CommitBase;
+          // Immediately use a cached block if its size is close enough to
+          // the requested size.
+          const uptr MaxAllowedFragmentedBytes =
+              (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+          if (Diff <= MaxAllowedFragmentedBytes) {
+            OptimalFitIndex = I;
+            EntryHeaderPos = HeaderPos;
+            OptimalFitListType = ListType;
+            return true;
+          }
+
+          // Keep track of the smallest cached block
+          // that is greater than (AllocSize + HeaderSize)
+          if (Diff > MinDiff)
+            continue;
           OptimalFitIndex = I;
+          MinDiff = Diff;
+          OptimalFitListType = ListType;
           EntryHeaderPos = HeaderPos;
-          break;
         }
-        // keep track of the smallest cached block
-        // that is greater than (AllocSize + HeaderSize)
-        if (Diff > MinDiff)
-          continue;
-        OptimalFitIndex = I;
-        MinDiff = Diff;
-        EntryHeaderPos = HeaderPos;
-      }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
-        SuccessfulRetrieves++;
-      }
-    }
-    if (!Found)
-      return false;
+        return (OptimalFitIndex != CachedBlock::InvalidEntry);
+      };
+
+      // Prioritize a valid fit from COMMITTED entries over
+      // an optimal fit from DECOMMITTED entries
+      if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
+        return false;
+
+      Entry = Entries[OptimalFitIndex];
+      remove(OptimalFitIndex, OptimalFitListType);
+      SuccessfulRetrieves++;
+    } // ScopedLock L(Mutex);
 
     *H = reinterpret_cast<LargeBlock::Header *>(
         LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
@@ -448,10 +474,15 @@ template <typename Config> class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
     QuarantinePos = -1U;
   }
@@ -466,7 +497,7 @@ template <typename Config> class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }
 
-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
     // Cache should be populated with valid entries when not empty
@@ -475,66 +506,86 @@ template <typename Config> class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;
 
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
     Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
+    pushFront(FreeIndex, ListType);
     EntriesCount++;
 
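+    // Check that the insertion preserved the most-recent-first Time
+    // ordering of the list.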
+    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+                Entries[Entries[EntryLists[ListType].Head].Next].Time);
+    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
+  // Joins the entries adjacent to Entries[I], effectively
+  // unlinking Entries[I] from the list
+  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (I == EntryLists[ListType].Head)
+      EntryLists[ListType].Head = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;
 
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
+    if (I == EntryLists[ListType].Tail)
+      EntryLists[ListType].Tail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
+  }
 
+  // Invalidates Entries[I], removes Entries[I] from its list, and pushes
+  // it onto the stack of available entries
+  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;
 
     // Cache should not have valid entries when empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
     }
   }
 
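+  // Links Entries[I] at the head (the most recently used end) of the
+  // specified list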
+  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+      EntryLists[ListType].Tail = static_cast<u16>(I);
+    else
+      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+    Entries[I].Next = EntryLists[ListType].Head;
+    Entries[I].Prev = CachedBlock::InvalidEntry;
+    EntryLists[ListType].Head = static_cast<u16>(I);
+  }
+
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
+      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry;) {
+          uptr ToRemove = I;
+          I = Entries[I].Next;
+          MapInfo[N] = Entries[ToRemove].MemMap;
+          remove(ToRemove, ListType);
+          N++;
+        }
+      };
+      emptyList(COMMITTED);
+      emptyList(DECOMMITTED);
       EntriesCount = 0;
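+      // The two list sweeps above should have invalidated every entry.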
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+        DCHECK(!Entries[I].isValid());
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -561,8 +612,13 @@ template <typename Config> class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
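+      // Entries whose pages are about to be released move from the
+      // COMMITTED list to the DECOMMITTED list.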
+      if (Entries[I].isValid() && Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
       releaseIfOlderThan(Entries[I], Time);
+    }
   }
 
   HybridMutex Mutex;
@@ -579,10 +635,12 @@ template <typename Config> class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries have memory chunks that have not been released to the
+  // OS; DECOMMITTED entries have memory chunks that have been released to
+  // the OS.
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };