@@ -35,6 +35,10 @@ namespace dart {
 #undef MAP_FAILED
 #define MAP_FAILED reinterpret_cast<void*>(-1)

+#if defined(DART_HOST_OS_IOS)
+#define LARGE_RESERVATIONS_MAY_FAIL
+#endif
+
 DECLARE_FLAG(bool, dual_map_code);
 DECLARE_FLAG(bool, write_protect_code);

@@ -76,20 +80,65 @@ intptr_t VirtualMemory::CalculatePageSize() {
   return page_size;
 }

+#if defined(DART_COMPRESSED_POINTERS) && defined(LARGE_RESERVATIONS_MAY_FAIL)
+// Truncate to the largest subregion in [region] that doesn't cross an
+// [alignment] boundary.
+static MemoryRegion ClipToAlignedRegion(MemoryRegion region,
+                                        size_t alignment) {
+  uword base = region.start();
+  uword aligned_base = Utils::RoundUp(base, alignment);
+  uword size_below =
+      region.end() >= aligned_base ? aligned_base - base : region.size();
+  uword size_above =
+      region.end() >= aligned_base ? region.end() - aligned_base : 0;
+  ASSERT(size_below + size_above == region.size());
+  if (size_below >= size_above) {
+    unmap(aligned_base, aligned_base + size_above);
+    return MemoryRegion(reinterpret_cast<void*>(base), size_below);
+  }
+  unmap(base, base + size_below);
+  if (size_above > alignment) {
+    unmap(aligned_base + alignment, aligned_base + size_above);
+    size_above = alignment;
+  }
+  return MemoryRegion(reinterpret_cast<void*>(aligned_base), size_above);
+}
+#endif  // LARGE_RESERVATIONS_MAY_FAIL
+
 void VirtualMemory::Init() {
   page_size_ = CalculatePageSize();

 #if defined(DART_COMPRESSED_POINTERS)
   ASSERT(compressed_heap_ == nullptr);
+#if defined(LARGE_RESERVATIONS_MAY_FAIL)
+  // Try to reserve a region for the compressed heap by requesting decreasing
+  // powers-of-two until one succeeds, and use the largest subregion that does
+  // not cross a 4GB boundary. The subregion itself is not necessarily
+  // 4GB-aligned.
+  for (size_t allocated_size = kCompressedHeapSize + kCompressedHeapAlignment;
+       allocated_size >= kCompressedHeapPageSize; allocated_size >>= 1) {
+    void* address = GenericMapAligned(
+        nullptr, PROT_NONE, allocated_size, kCompressedHeapPageSize,
+        allocated_size + kCompressedHeapPageSize,
+        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE);
+    if (address == MAP_FAILED) continue;
+
+    MemoryRegion region(address, allocated_size);
+    region = ClipToAlignedRegion(region, kCompressedHeapAlignment);
+    compressed_heap_ = new VirtualMemory(region, region);
+    break;
+  }
+#else
   compressed_heap_ = Reserve(kCompressedHeapSize, kCompressedHeapAlignment);
+#endif
   if (compressed_heap_ == nullptr) {
     int error = errno;
     const int kBufferSize = 1024;
     char error_buf[kBufferSize];
     FATAL("Failed to reserve region for compressed heap: %d (%s)", error,
           Utils::StrError(error, error_buf, kBufferSize));
   }
-  VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
+  VirtualMemoryCompressedHeap::Init(compressed_heap_->address(),
+                                    compressed_heap_->size());
 #endif  // defined(DART_COMPRESSED_POINTERS)

 #if defined(DUAL_MAPPING_SUPPORTED)
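
To make the clipping arithmetic concrete: ClipToAlignedRegion splits the oversized reservation at the first 4GB boundary it straddles and keeps the larger side, capped at one aligned window. Below is a minimal standalone sketch of the same logic, using plain integers in place of Dart's MemoryRegion/uword types (Span, RoundUp, and ClipToAligned are hypothetical stand-ins; the real code also munmap()s the discarded piece).

#include <cassert>
#include <cstdint>
#include <cstdio>

struct Span {
  uint64_t start;
  uint64_t size;
  uint64_t end() const { return start + size; }
};

static uint64_t RoundUp(uint64_t x, uint64_t alignment) {  // power-of-two only
  return (x + alignment - 1) & ~(alignment - 1);
}

// Keep the larger piece on either side of the first [alignment] boundary
// inside [span], capped at a single aligned window.
static Span ClipToAligned(Span span, uint64_t alignment) {
  uint64_t aligned = RoundUp(span.start, alignment);
  uint64_t below = span.end() >= aligned ? aligned - span.start : span.size;
  uint64_t above = span.end() >= aligned ? span.end() - aligned : 0;
  assert(below + above == span.size);
  if (below >= above) return {span.start, below};
  if (above > alignment) above = alignment;
  return {aligned, above};
}

int main() {
  const uint64_t k4GB = uint64_t{4} << 30;
  // A 6GB reservation starting 1GB below a 4GB boundary: 1GB lies below the
  // boundary and 5GB above, so the piece above wins, capped to 4GB.
  Span s = ClipToAligned({uint64_t{3} << 30, uint64_t{6} << 30}, k4GB);
  printf("start=%llx size=%llx\n", (unsigned long long)s.start,
         (unsigned long long)s.size);  // start=100000000 size=100000000
  return 0;
}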
@@ -248,6 +297,25 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
   MemoryRegion region =
       VirtualMemoryCompressedHeap::Allocate(size, alignment);
   if (region.pointer() == nullptr) {
+#if defined(LARGE_RESERVATIONS_MAY_FAIL)
+    // Try a fresh allocation and hope it ends up in the right region. On
+    // macOS/iOS, this works surprisingly often.
+    void* address =
+        GenericMapAligned(nullptr, PROT_READ | PROT_WRITE, size, alignment,
+                          size + alignment, MAP_PRIVATE | MAP_ANONYMOUS);
+    if (address != nullptr) {
+      uword ok_start = Utils::RoundDown(compressed_heap_->start(),
+                                        kCompressedHeapAlignment);
+      uword ok_end = ok_start + kCompressedHeapSize;
+      uword start = reinterpret_cast<uword>(address);
+      uword end = start + size;
+      if ((start >= ok_start) && (end <= ok_end)) {
+        MemoryRegion region(address, size);
+        return new VirtualMemory(region, region);
+      }
+      munmap(address, size);
+    }
+#endif
     return nullptr;
   }
   Commit(region.pointer(), region.size());
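
The fallback in this hunk relies on new mappings often landing near existing ones: it maps fresh pages anywhere, then keeps them only if they fall inside the 4GB-aligned window that contains the reserved compressed heap. Here is a minimal POSIX sketch of that acceptance check, assuming a single 4GB constant standing in for both kCompressedHeapSize and kCompressedHeapAlignment, with a hypothetical TryAllocateInWindow helper.

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

static const uintptr_t kWindow = uintptr_t{4} << 30;  // 4GB, assumed

// heap_start: base of the (possibly unaligned) compressed-heap reservation.
void* TryAllocateInWindow(uintptr_t heap_start, size_t size) {
  void* address = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (address == MAP_FAILED) return nullptr;
  uintptr_t ok_start = heap_start & ~(kWindow - 1);  // round down to 4GB
  uintptr_t start = reinterpret_cast<uintptr_t>(address);
  if (start >= ok_start && start + size <= ok_start + kWindow) {
    return address;  // inside the window: the address is compressible
  }
  munmap(address, size);  // missed the window; release and give up
  return nullptr;
}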
@@ -372,7 +440,10 @@ void VirtualMemory::Commit(void* address, intptr_t size) {
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
   if (result == MAP_FAILED) {
     int error = errno;
-    FATAL("Failed to commit: %d\n", error);
+    const int kBufferSize = 1024;
+    char error_buf[kBufferSize];
+    FATAL("Failed to commit: %d (%s)", error,
+          Utils::StrError(error, error_buf, kBufferSize));
   }
 }

@@ -384,7 +455,10 @@ void VirtualMemory::Decommit(void* address, intptr_t size) {
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE |
                                 MAP_FIXED,
                             -1, 0);
   if (result == MAP_FAILED) {
     int error = errno;
-    FATAL("Failed to decommit: %d\n", error);
+    const int kBufferSize = 1024;
+    char error_buf[kBufferSize];
+    FATAL("Failed to decommit: %d (%s)", error,
+          Utils::StrError(error, error_buf, kBufferSize));
   }
 }

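
The Commit/Decommit changes above keep the existing idiom while improving the error message: committing re-mmap()s the reserved range with MAP_FIXED and real protections, and decommitting maps PROT_NONE, MAP_NORESERVE pages back over it. A minimal sketch of that reserve/commit/decommit cycle (POSIX; error handling reduced to exit codes):

#include <sys/mman.h>
#include <cstddef>

int main() {
  const size_t kSize = 1 << 20;
  // Reserve: address space only, inaccessible, no swap accounting.
  void* p = mmap(nullptr, kSize, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p == MAP_FAILED) return 1;
  // Commit: MAP_FIXED atomically replaces the reservation with usable pages.
  if (mmap(p, kSize, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) {
    return 1;
  }
  static_cast<char*>(p)[0] = 42;  // the committed page is now writable
  // Decommit: map inaccessible, unaccounted pages back over the range.
  if (mmap(p, kSize, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1,
           0) == MAP_FAILED) {
    return 1;
  }
  munmap(p, kSize);
  return 0;
}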