@@ -42,6 +42,60 @@ struct SizeClassAllocator64FlagMasks { // Bit masks.
   };
 };

+template <typename Allocator>
+class MemoryMapper {
+ public:
+  typedef typename Allocator::CompactPtrT CompactPtrT;
+
+  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
+
+  ~MemoryMapper() {
+    if (buffer_)
+      UnmapOrDie(buffer_, buffer_size_);
+  }
+
+  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
+    ranges = released_ranges_count_;
+    released_ranges_count_ = 0;
+    bytes = released_bytes_;
+    released_bytes_ = 0;
+    return ranges != 0;
+  }
+
+  void *MapPackedCounterArrayBuffer(uptr buffer_size) {
+    // TODO(alekseyshl): The idea to explore is to check if we have enough
+    // space between num_freed_chunks*sizeof(CompactPtrT) and
+    // mapped_free_array to fit buffer_size bytes and use that space instead
+    // of mapping a temporary one.
+    if (buffer_size_ < buffer_size) {
+      if (buffer_)
+        UnmapOrDie(buffer_, buffer_size_);
+      buffer_ = MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
+      buffer_size_ = buffer_size;
+    } else {
+      internal_memset(buffer_, 0, buffer_size);
+    }
+    return buffer_;
+  }
+
+  // Releases [from, to) range of pages back to OS.
+  void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to, uptr class_id) {
+    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
+    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
+    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
+    ReleaseMemoryPagesToOS(from_page, to_page);
+    released_ranges_count_++;
+    released_bytes_ += to_page - from_page;
+  }
+
+ private:
+  const Allocator &allocator_;
+  uptr released_ranges_count_ = 0;
+  uptr released_bytes_ = 0;
+  void *buffer_ = nullptr;
+  uptr buffer_size_ = 0;
+};
+
 template <class Params>
 class SizeClassAllocator64 {
  public:
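A quick aside, not part of the patch: the point of hoisting MemoryMapper to namespace scope is that a single instance can now live across an entire release pass, so the packed-counter buffer is mapped at most once and only grows. The standalone sketch below mirrors just that buffer-reuse contract; std::calloc/std::free stand in for MmapOrDieOnFatalError/UnmapOrDie, and ReusableBuffer is an invented name, not sanitizer code.

// Standalone sketch, not sanitizer code: the buffer-reuse contract of
// MemoryMapper::MapPackedCounterArrayBuffer.
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

using uptr = std::uintptr_t;

class ReusableBuffer {
 public:
  ~ReusableBuffer() { std::free(buffer_); }  // one unmap at end of the pass

  void *Map(uptr size) {
    if (buffer_size_ < size) {
      std::free(buffer_);
      buffer_ = std::calloc(1, size);  // grow to the largest size seen so far
      buffer_size_ = size;
    } else {
      std::memset(buffer_, 0, size);   // reuse: later classes map nothing new
    }
    return buffer_;
  }

 private:
  void *buffer_ = nullptr;
  uptr buffer_size_ = 0;
};

int main() {
  ReusableBuffer counters;       // one instance per release pass
  void *a = counters.Map(1024);  // first size class: allocate
  void *b = counters.Map(512);   // smaller request: same storage, just zeroed
  assert(a == b);
  void *c = counters.Map(4096);  // larger request: reallocated once
  assert(c != nullptr);
  return 0;
}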
@@ -57,6 +111,7 @@ class SizeClassAllocator64 {

   typedef SizeClassAllocator64<Params> ThisT;
   typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+  typedef MemoryMapper<ThisT> MemoryMapper;

   // When we know the size class (the region base) we can represent a pointer
   // as a 4-byte integer (offset from the region start shifted right by 4).
@@ -120,9 +175,10 @@ class SizeClassAllocator64 {
   }

   void ForceReleaseToOS() {
+    MemoryMapper memory_mapper(*this);
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
-      MaybeReleaseToOS(class_id, true /*force*/);
+      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
     }
   }

@@ -131,7 +187,8 @@ class SizeClassAllocator64 {
            alignment <= SizeClassMap::kMaxSize;
   }

-  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+  NOINLINE void ReturnToAllocator(MemoryMapper *memory_mapper,
+                                  AllocatorStats *stat, uptr class_id,
                                   const CompactPtrT *chunks, uptr n_chunks) {
     RegionInfo *region = GetRegionInfo(class_id);
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -154,7 +211,7 @@ class SizeClassAllocator64 {
     region->num_freed_chunks = new_num_freed_chunks;
     region->stats.n_freed += n_chunks;

-    MaybeReleaseToOS(class_id, false /*force*/);
+    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
   }

   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -362,10 +419,10 @@ class SizeClassAllocator64 {
   // For the performance sake, none of the accessors check the validity of the
   // arguments, it is assumed that index is always in [0, n) range and the value
   // is not incremented past max_value.
-  template <class MemoryMapperT>
+  template <typename MemoryMapper>
   class PackedCounterArray {
    public:
-    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
+    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
         : n(num_counters), memory_mapper(mapper) {
       CHECK_GT(num_counters, 0);
       CHECK_GT(max_value, 0);
@@ -389,11 +446,6 @@ class SizeClassAllocator64 {
       buffer = reinterpret_cast<u64*>(
           memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
     }
-    ~PackedCounterArray() {
-      if (buffer) {
-        memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
-      }
-    }

     bool IsAllocated() const {
       return !!buffer;
@@ -430,18 +482,21 @@ class SizeClassAllocator64 {
     u64 packing_ratio_log;
     u64 bit_offset_mask;

-    MemoryMapperT* const memory_mapper;
+    MemoryMapper *const memory_mapper;
     u64 buffer_size;
     u64* buffer;
   };

-  template <class MemoryMapperT>
+  template <class MemoryMapperT>
   class FreePagesRangeTracker {
    public:
-    explicit FreePagesRangeTracker(MemoryMapperT* mapper)
+    explicit FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
         : memory_mapper(mapper),
+          class_id(class_id),
           page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
-          in_the_range(false), current_page(0), current_range_start_page(0) {}
+          in_the_range(false),
+          current_page(0),
+          current_range_start_page(0) {}

     void NextPage(bool freed) {
       if (freed) {
@@ -463,13 +518,14 @@ class SizeClassAllocator64 {
     void CloseOpenedRange() {
       if (in_the_range) {
         memory_mapper->ReleasePageRangeToOS(
-            current_range_start_page << page_size_scaled_log,
+            class_id, current_range_start_page << page_size_scaled_log,
             current_page << page_size_scaled_log);
         in_the_range = false;
       }
     }

-    MemoryMapperT* const memory_mapper;
+    MemoryMapperT *const memory_mapper;
+    const uptr class_id;
     const uptr page_size_scaled_log;
     bool in_the_range;
     uptr current_page;
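Not from the patch, just context: ReleasePageRangeToOS now receives class_id because the mapper no longer caches a per-class region_base; the range tracker still hands it page boundaries in compact units, i.e. page_index << page_size_scaled_log, which CompactPtrToPointer scales back up by kCompactPtrScale. A small standalone arithmetic check of that round trip, assuming 4 KiB pages and the "shifted right by 4" compact encoding noted earlier in this header:

// Standalone arithmetic check, not sanitizer code. Assumes 4 KiB pages and
// kCompactPtrScale == 4, so page_size_scaled_log == Log2(4096 >> 4) == 8.
#include <cassert>
#include <cstdint>

int main() {
  const std::uint64_t kPageSize = 4096;
  const std::uint64_t kCompactPtrScale = 4;
  const std::uint64_t page_size_scaled_log = 8;

  const std::uint64_t region_base = 0x600000000000ULL;  // illustrative value
  const std::uint64_t page_index = 3;

  // The tracker reports page boundaries in compact units...
  const std::uint64_t compact = page_index << page_size_scaled_log;
  // ...and CompactPtrToPointer scales them back into byte addresses.
  const std::uint64_t addr = region_base + (compact << kCompactPtrScale);

  assert(addr == region_base + page_index * kPageSize);
  return 0;
}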
@@ -480,11 +536,12 @@ class SizeClassAllocator64 {
   // chunks only and returns these pages back to OS.
   // allocated_pages_count is the total number of pages allocated for the
   // current bucket.
-  template <class MemoryMapperT>
+  template <class MemoryMapper>
   static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                     uptr free_array_count, uptr chunk_size,
                                     uptr allocated_pages_count,
-                                    MemoryMapperT *memory_mapper) {
+                                    MemoryMapper *memory_mapper,
+                                    uptr class_id) {
     const uptr page_size = GetPageSizeCached();

     // Figure out the number of chunks per page and whether we can take a fast
@@ -520,9 +577,8 @@ class SizeClassAllocator64 {
       UNREACHABLE("All chunk_size/page_size ratios must be handled.");
     }

-    PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
-                                               full_pages_chunk_count_max,
-                                               memory_mapper);
+    PackedCounterArray<MemoryMapper> counters(
+        allocated_pages_count, full_pages_chunk_count_max, memory_mapper);
     if (!counters.IsAllocated())
       return;

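As a side note (illustrative only), the pipeline these hunks parameterize is easy to state in miniature: count free chunks per page, then sweep the pages and coalesce maximal runs of fully-free pages into single [from, to) release calls. The standalone model below uses a plain vector instead of PackedCounterArray and a lambda instead of MemoryMapper, and assumes the fast path where every page holds exactly kChunksPerPage chunks.

// Simplified standalone model, not sanitizer code: per-page free-chunk
// counters feeding a run-length range tracker.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const int kChunksPerPage = 4;
  // One counter per page, as PackedCounterArray would hold after counting the
  // sorted free array. Pages 1-3 and 6 are fully free in this example.
  const std::vector<int> counters = {2, 4, 4, 4, 0, 3, 4};

  bool in_range = false;
  std::size_t range_start = 0;
  auto flush = [&](std::size_t end) {  // stands in for ReleasePageRangeToOS
    std::printf("release pages [%zu, %zu)\n", range_start, end);
  };

  for (std::size_t page = 0; page < counters.size(); page++) {
    const bool freed = counters[page] == kChunksPerPage;
    if (freed && !in_range) {
      in_range = true;        // open a run of fully-free pages
      range_start = page;
    } else if (!freed && in_range) {
      in_range = false;
      flush(page);            // close the run: one OS call per run
    }
  }
  if (in_range)
    flush(counters.size());   // Done(): close a run reaching the last page
  return 0;
}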
@@ -547,7 +603,7 @@ class SizeClassAllocator64 {

     // Iterate over pages detecting ranges of pages with chunk counters equal
     // to the expected number of chunks for the particular page.
-    FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
+    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
     if (same_chunk_count_per_page) {
       // Fast path, every page has the same number of chunks affecting it.
       for (uptr i = 0; i < counters.GetCount(); i++)
@@ -586,7 +642,7 @@ class SizeClassAllocator64 {
   }

  private:
-  friend class MemoryMapper;
+  friend class __sanitizer::MemoryMapper<ThisT>;

   ReservedAddressRange address_range;

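An aside on the friend hunk above, illustrative only: with MemoryMapper now a namespace-scope template, the allocator befriends the specific specialization __sanitizer::MemoryMapper<ThisT> rather than a nested class. A minimal standalone example of that C++ pattern, with invented names (Mapper, Allocator):

// Minimal standalone example, not sanitizer code: befriending one
// specialization of a namespace-scope class template.
template <typename A>
class Mapper;  // forward declaration so the friend declaration can name it

class Allocator {
 public:
  typedef unsigned CompactPtrT;

 private:
  friend class Mapper<Allocator>;  // grant access to this specialization only
  unsigned secret_ = 42;
};

template <typename A>
class Mapper {
 public:
  explicit Mapper(const A &a) : a_(a) {}
  unsigned Peek() const { return a_.secret_; }  // allowed: we are a friend

 private:
  const A &a_;
};

int main() {
  Allocator alloc;
  return Mapper<Allocator>(alloc).Peek() == 42 ? 0 : 1;
}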
@@ -820,57 +876,13 @@ class SizeClassAllocator64 {
     return true;
   }

-  class MemoryMapper {
-   public:
-    MemoryMapper(const ThisT& base_allocator, uptr class_id)
-        : allocator(base_allocator),
-          region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
-          released_ranges_count(0),
-          released_bytes(0) {
-    }
-
-    uptr GetReleasedRangesCount() const {
-      return released_ranges_count;
-    }
-
-    uptr GetReleasedBytes() const {
-      return released_bytes;
-    }
-
-    void *MapPackedCounterArrayBuffer(uptr buffer_size) {
-      // TODO(alekseyshl): The idea to explore is to check if we have enough
-      // space between num_freed_chunks*sizeof(CompactPtrT) and
-      // mapped_free_array to fit buffer_size bytes and use that space instead
-      // of mapping a temporary one.
-      return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
-    }
-
-    void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
-      UnmapOrDie(buffer, buffer_size);
-    }
-
-    // Releases [from, to) range of pages back to OS.
-    void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
-      const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
-      const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
-      ReleaseMemoryPagesToOS(from_page, to_page);
-      released_ranges_count++;
-      released_bytes += to_page - from_page;
-    }
-
-   private:
-    const ThisT& allocator;
-    const uptr region_base;
-    uptr released_ranges_count;
-    uptr released_bytes;
-  };
-
-
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
   //
   // TODO(morehouse): Support a callback on memory release so HWASan can release
   // aliases as well.
-  void MaybeReleaseToOS(uptr class_id, bool force) {
+  void MaybeReleaseToOS(MemoryMapper *memory_mapper, uptr class_id,
+                        bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
     const uptr page_size = GetPageSizeCached();
@@ -894,17 +906,16 @@ class SizeClassAllocator64 {
       }
     }

-    MemoryMapper memory_mapper(*this, class_id);
-
-    ReleaseFreeMemoryToOS<MemoryMapper>(
+    ReleaseFreeMemoryToOS(
         GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
-        RoundUpTo(region->allocated_user, page_size) / page_size,
-        &memory_mapper);
+        RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
+        class_id);

-    if (memory_mapper.GetReleasedRangesCount() > 0) {
+    uptr ranges, bytes;
+    if (memory_mapper->GetAndResetStats(ranges, bytes)) {
       region->rtoi.n_freed_at_last_release = region->stats.n_freed;
-      region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
-      region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+      region->rtoi.num_releases += ranges;
+      region->rtoi.last_released_bytes = bytes;
     }
     region->rtoi.last_release_at_ns = MonotonicNanoTime();
   }
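One more illustrative aside, not from the patch: GetAndResetStats drains rather than accumulates because the same mapper is now shared across class IDs (see ForceReleaseToOS above); cumulative getters would bleed one class's releases into the next region's rtoi counters. A toy standalone check of that drain-and-reset semantics:

// Toy standalone check, not sanitizer code: drain-and-reset stats keep each
// class's numbers separate even though one mapper serves several classes.
#include <cassert>
#include <cstdint>

struct ReleaseStats {
  std::uint64_t ranges = 0, bytes = 0;

  void Record(std::uint64_t released_bytes) {  // one released [from, to) range
    ranges++;
    bytes += released_bytes;
  }

  bool GetAndResetStats(std::uint64_t &r, std::uint64_t &b) {
    r = ranges; ranges = 0;
    b = bytes;  bytes = 0;
    return r != 0;
  }
};

int main() {
  ReleaseStats mapper;  // shared across "class 1" and "class 2"
  std::uint64_t r, b;

  mapper.Record(4096);  // releases attributed to class 1
  assert(mapper.GetAndResetStats(r, b) && r == 1 && b == 4096);

  mapper.Record(8192);  // releases attributed to class 2
  mapper.Record(4096);
  assert(mapper.GetAndResetStats(r, b) && r == 2 && b == 12288);

  assert(!mapper.GetAndResetStats(r, b));  // nothing new: caller skips rtoi
  return 0;
}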