Commit 8f9113d

rmacnak-google authored and commit-bot@chromium.org committed
[vm, gc] Try to handle virtual memory limitations on iOS.
iOS appears not to allow any single virtual memory allocation to exceed ~700MB. iOS also appears to limit the total amount of allocated virtual memory to the amount of physical memory available, even for virtual memory that is reserved but not committed. So instead of trying to allocate the full 4GB region for compressed pointers, allocate the largest power-of-two that succeeds, and speculate that further allocations will land in the same 4GB region.

TEST=none
Change-Id: Ib45f7ece59e1adb96d175ae861b984c0c6737549
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/210640
Reviewed-by: Siva Annamalai <[email protected]>
Commit-Queue: Ryan Macnak <[email protected]>
1 parent 6433038 commit 8f9113d
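For background: compressed pointers work by storing only the low 32 bits of an object address and recovering the upper 32 bits from a per-heap base, which is why every heap address must fall within a single 4GB window. A minimal sketch of the idea, not the VM's actual encoding (heap_base, Compress, and Decompress are hypothetical names):

#include <cassert>
#include <cstdint>

// Hypothetical: every heap address shares its upper 32 bits with heap_base,
// so the low 32 bits alone identify an object.
static uint64_t heap_base;  // set once, when the heap region is reserved

uint32_t Compress(uint64_t address) {
  assert((address >> 32) == (heap_base >> 32));  // same 4GB window
  return static_cast<uint32_t>(address);         // keep only the low 32 bits
}

uint64_t Decompress(uint32_t compressed) {
  // Reattach the upper 32 bits shared by the whole heap.
  return (heap_base & 0xFFFFFFFF00000000ull) | compressed;
}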

File tree

runtime/vm/virtual_memory_compressed.cc
runtime/vm/virtual_memory_compressed.h
runtime/vm/virtual_memory_posix.cc
runtime/vm/virtual_memory_win.cc

4 files changed: +103 -9 lines changed

runtime/vm/virtual_memory_compressed.cc

Lines changed: 22 additions & 4 deletions
@@ -11,6 +11,7 @@
 namespace dart {
 
 uword VirtualMemoryCompressedHeap::base_ = 0;
+uword VirtualMemoryCompressedHeap::size_ = 0;
 uint8_t* VirtualMemoryCompressedHeap::pages_ = nullptr;
 uword VirtualMemoryCompressedHeap::minimum_free_page_id_ = 0;
 Mutex* VirtualMemoryCompressedHeap::mutex_ = nullptr;
@@ -34,19 +35,37 @@ void VirtualMemoryCompressedHeap::ClearPageUsed(uword page_id) {
   pages_[page_id / 8] &= ~PageMask(page_id);
 }
 
-void VirtualMemoryCompressedHeap::Init(void* compressed_heap_region) {
+void VirtualMemoryCompressedHeap::Init(void* compressed_heap_region,
+                                       size_t size) {
   pages_ = new uint8_t[kCompressedHeapBitmapSize];
   memset(pages_, 0, kCompressedHeapBitmapSize);
+  ASSERT(size > 0);
+  ASSERT(size <= kCompressedHeapSize);
+  for (intptr_t page_id = size / kCompressedHeapPageSize;
+       page_id < kCompressedHeapNumPages; page_id++) {
+    SetPageUsed(page_id);
+  }
   base_ = reinterpret_cast<uword>(compressed_heap_region);
+  size_ = size;
   ASSERT(base_ != 0);
-  ASSERT(Utils::IsAligned(base_, kCompressedHeapSize));
+  ASSERT(size_ != 0);
+  ASSERT(size_ <= kCompressedHeapSize);
+  ASSERT(Utils::IsAligned(base_, kCompressedHeapPageSize));
+  ASSERT(Utils::IsAligned(size_, kCompressedHeapPageSize));
+  // base_ is not necessarily 4GB-aligned, because on some systems we can't
+  // make a large enough reservation to guarantee it. Instead, we have only the
+  // weaker property that all addresses in [base_, base_ + size_) have the
+  // same upper 32 bits, which is what we really need for compressed pointers.
+  intptr_t mask = ~(kCompressedHeapAlignment - 1);
+  ASSERT((base_ & mask) == ((base_ + size_ - 1) & mask));
   mutex_ = new Mutex(NOT_IN_PRODUCT("compressed_heap_mutex"));
 }
 
 void VirtualMemoryCompressedHeap::Cleanup() {
   delete[] pages_;
   delete mutex_;
   base_ = 0;
+  size_ = 0;
   pages_ = nullptr;
   minimum_free_page_id_ = 0;
   mutex_ = nullptr;
@@ -123,8 +142,7 @@ void VirtualMemoryCompressedHeap::Free(void* address, intptr_t size) {
 }
 
 bool VirtualMemoryCompressedHeap::Contains(void* address) {
-  return reinterpret_cast<uword>(address) >= base_ &&
-         reinterpret_cast<uword>(address) < base_ + kCompressedHeapSize;
+  return (reinterpret_cast<uword>(address) - base_) < size_;
 }
 
 }  // namespace dart
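The relaxed alignment check at the end of Init can be exercised with concrete numbers; kAlignment stands in for kCompressedHeapAlignment (4GB), and the base and size below are made up:

#include <cstdint>

constexpr uint64_t kAlignment = 0x100000000;  // 4GB
constexpr uint64_t kMask = ~(kAlignment - 1);
constexpr uint64_t kBase = 0x180000000;  // 6GB: not 4GB-aligned
constexpr uint64_t kSize = 0x80000000;   // 2GB

// [6GB, 8GB) stays inside the window [4GB, 8GB): the first and last bytes
// agree in their upper 32 bits, so the ASSERT in Init would pass.
static_assert((kBase & kMask) == ((kBase + kSize - 1) & kMask), "no crossing");

// A full 4GB region at the same unaligned base would end at 10GB, crossing
// the 8GB boundary, so the same check would fail.
static_assert((kBase & kMask) != ((kBase + kAlignment - 1) & kMask), "crosses");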

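The rewritten Contains folds two bounds checks into one by exploiting unsigned wraparound: if the address is below base_, the subtraction wraps to a value near 2^64, which cannot be less than any valid size_. The same idiom as a standalone function (Contains here is a free function for illustration only):

#include <cstdint>

bool Contains(uint64_t address, uint64_t base, uint64_t size) {
  // For address < base the subtraction wraps to at least 2^64 - base,
  // which exceeds any valid size, so one comparison checks both bounds.
  return (address - base) < size;
}

// Contains(0x0FF, 0x100, 0x10) == false  (below base; wraps to a huge value)
// Contains(0x105, 0x100, 0x10) == true   (offset 5 is within 16 bytes)
// Contains(0x110, 0x100, 0x10) == false  (offset 16 is one past the end)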
runtime/vm/virtual_memory_compressed.h

Lines changed: 2 additions & 1 deletion
@@ -33,7 +33,7 @@ class VirtualMemoryCompressedHeap : public AllStatic {
  public:
   // Initializes the compressed heap. The callee must allocate a region of
   // kCompressedHeapSize bytes, aligned to kCompressedHeapSize.
-  static void Init(void* compressed_heap_region);
+  static void Init(void* compressed_heap_region, size_t size);
 
   // Cleans up the compressed heap. The callee is responsible for freeing the
   // region's memory.
@@ -58,6 +58,7 @@
   static void ClearPageUsed(uword page_id);
 
   static uword base_;
+  static uword size_;
   static uint8_t* pages_;
   static uword minimum_free_page_id_;
   static Mutex* mutex_;

runtime/vm/virtual_memory_posix.cc

Lines changed: 77 additions & 3 deletions
@@ -35,6 +35,10 @@ namespace dart {
 #undef MAP_FAILED
 #define MAP_FAILED reinterpret_cast<void*>(-1)
 
+#if defined(DART_HOST_OS_IOS)
+#define LARGE_RESERVATIONS_MAY_FAIL
+#endif
+
 DECLARE_FLAG(bool, dual_map_code);
 DECLARE_FLAG(bool, write_protect_code);
 
@@ -76,20 +80,65 @@ intptr_t VirtualMemory::CalculatePageSize() {
   return page_size;
 }
 
+#if defined(DART_COMPRESSED_POINTERS) && defined(LARGE_RESERVATIONS_MAY_FAIL)
+// Truncate to the largest subregion in [region] that doesn't cross an
+// [alignment] boundary.
+static MemoryRegion ClipToAlignedRegion(MemoryRegion region, size_t alignment) {
+  uword base = region.start();
+  uword aligned_base = Utils::RoundUp(base, alignment);
+  uword size_below =
+      region.end() >= aligned_base ? aligned_base - base : region.size();
+  uword size_above =
+      region.end() >= aligned_base ? region.end() - aligned_base : 0;
+  ASSERT(size_below + size_above == region.size());
+  if (size_below >= size_above) {
+    unmap(aligned_base, aligned_base + size_above);
+    return MemoryRegion(reinterpret_cast<void*>(base), size_below);
+  }
+  unmap(base, base + size_below);
+  if (size_above > alignment) {
+    unmap(aligned_base + alignment, aligned_base + size_above);
+    size_above = alignment;
+  }
+  return MemoryRegion(reinterpret_cast<void*>(aligned_base), size_above);
+}
+#endif  // LARGE_RESERVATIONS_MAY_FAIL
+
 void VirtualMemory::Init() {
   page_size_ = CalculatePageSize();
 
 #if defined(DART_COMPRESSED_POINTERS)
   ASSERT(compressed_heap_ == nullptr);
+#if defined(LARGE_RESERVATIONS_MAY_FAIL)
+  // Try to reserve a region for the compressed heap by requesting decreasing
+  // powers-of-two until one succeeds, and use the largest subregion that does
+  // not cross a 4GB boundary. The subregion itself is not necessarily
+  // 4GB-aligned.
+  for (size_t allocated_size = kCompressedHeapSize + kCompressedHeapAlignment;
+       allocated_size >= kCompressedHeapPageSize; allocated_size >>= 1) {
+    void* address = GenericMapAligned(
+        nullptr, PROT_NONE, allocated_size, kCompressedHeapPageSize,
+        allocated_size + kCompressedHeapPageSize,
+        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE);
+    if (address == MAP_FAILED) continue;
+
+    MemoryRegion region(address, allocated_size);
+    region = ClipToAlignedRegion(region, kCompressedHeapAlignment);
+    compressed_heap_ = new VirtualMemory(region, region);
+    break;
+  }
+#else
   compressed_heap_ = Reserve(kCompressedHeapSize, kCompressedHeapAlignment);
+#endif
   if (compressed_heap_ == nullptr) {
     int error = errno;
     const int kBufferSize = 1024;
     char error_buf[kBufferSize];
     FATAL("Failed to reserve region for compressed heap: %d (%s)", error,
           Utils::StrError(error, error_buf, kBufferSize));
   }
-  VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
+  VirtualMemoryCompressedHeap::Init(compressed_heap_->address(),
+                                    compressed_heap_->size());
 #endif  // defined(DART_COMPRESSED_POINTERS)
 
 #if defined(DUAL_MAPPING_SUPPORTED)
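To see what ClipToAlignedRegion keeps, here is its arithmetic run on made-up numbers, with the unmap calls elided; the values are chosen so the reservation straddles exactly one 4GB boundary:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t alignment = 0x100000000;  // 4GB
  const uint64_t base = 0xC0000000;        // 3GB: a 5GB reservation...
  const uint64_t size = 0x140000000;       // ...ending at 8GB
  const uint64_t end = base + size;

  const uint64_t aligned_base = (base + alignment - 1) & ~(alignment - 1);
  const uint64_t size_below = end >= aligned_base ? aligned_base - base : size;
  const uint64_t size_above = end >= aligned_base ? end - aligned_base : 0;
  assert(size_below + size_above == size);  // 1GB below + 4GB above

  // size_above (4GB) >= size_below (1GB), so the span above the boundary
  // wins: [4GB, 8GB), a boundary-free 4GB. Had size_above exceeded one
  // alignment unit, it would be trimmed back to exactly 4GB.
  assert(size_above >= size_below);
  assert(aligned_base == 0x100000000 && size_above == alignment);
  return 0;
}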
@@ -248,6 +297,25 @@ VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
   MemoryRegion region =
       VirtualMemoryCompressedHeap::Allocate(size, alignment);
   if (region.pointer() == nullptr) {
+#if defined(LARGE_RESERVATIONS_MAY_FAIL)
+    // Try a fresh allocation and hope it ends up in the right region. On
+    // macOS/iOS, this works surprisingly often.
+    void* address =
+        GenericMapAligned(nullptr, PROT_READ | PROT_WRITE, size, alignment,
+                          size + alignment, MAP_PRIVATE | MAP_ANONYMOUS);
+    if (address != nullptr) {
+      uword ok_start = Utils::RoundDown(compressed_heap_->start(),
+                                        kCompressedHeapAlignment);
+      uword ok_end = ok_start + kCompressedHeapSize;
+      uword start = reinterpret_cast<uword>(address);
+      uword end = start + size;
+      if ((start >= ok_start) && (end <= ok_end)) {
+        MemoryRegion region(address, size);
+        return new VirtualMemory(region, region);
+      }
+      munmap(address, size);
+    }
+#endif
     return nullptr;
   }
   Commit(region.pointer(), region.size());
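The fallback above accepts a stray allocation only when it lands inside the same 4GB window as the original reservation, which is exactly the condition that keeps the shared-upper-32-bits invariant intact. Restated as a hypothetical standalone helper:

#include <cstdint>

// Hypothetical restatement of the acceptance test: the window is the 4GB
// span found by rounding the reservation start down to a 4GB boundary.
bool InCompressedWindow(uint64_t reservation_start,
                        uint64_t alloc_start,
                        uint64_t alloc_size) {
  const uint64_t kWindow = 0x100000000;  // kCompressedHeapSize: 4GB
  const uint64_t ok_start = reservation_start & ~(kWindow - 1);  // RoundDown
  const uint64_t ok_end = ok_start + kWindow;
  return alloc_start >= ok_start && (alloc_start + alloc_size) <= ok_end;
}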
@@ -372,7 +440,10 @@ void VirtualMemory::Commit(void* address, intptr_t size) {
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
   if (result == MAP_FAILED) {
     int error = errno;
-    FATAL("Failed to commit: %d\n", error);
+    const int kBufferSize = 1024;
+    char error_buf[kBufferSize];
+    FATAL("Failed to commit: %d (%s)", error,
+          Utils::StrError(error, error_buf, kBufferSize));
   }
 }
 
@@ -384,7 +455,10 @@ void VirtualMemory::Decommit(void* address, intptr_t size) {
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1, 0);
   if (result == MAP_FAILED) {
     int error = errno;
-    FATAL("Failed to decommit: %d\n", error);
+    const int kBufferSize = 1024;
+    char error_buf[kBufferSize];
+    FATAL("Failed to decommit: %d (%s)", error,
+          Utils::StrError(error, error_buf, kBufferSize));
   }
 }
 

runtime/vm/virtual_memory_win.cc

Lines changed: 2 additions & 1 deletion
@@ -62,7 +62,8 @@ void VirtualMemory::Init() {
     int error = GetLastError();
     FATAL("Failed to reserve region for compressed heap: %d", error);
   }
-  VirtualMemoryCompressedHeap::Init(compressed_heap_->address());
+  VirtualMemoryCompressedHeap::Init(compressed_heap_->address(),
+                                    compressed_heap_->size());
 #endif  // defined(DART_COMPRESSED_POINTERS)
 }
 
