
Commit 8b78a6f

Backport of LLVM code to fix ARM relocation bug.

Supply a new memory manager for RuntimeDyld that avoids putting ARM code
too far apart. This is the code from llvm/llvm-project#71968, copied into
our tree and moved into a new namespace llvm::backport, and adjusted to
work on older LLVM versions. This should fix the spate of crashes we've
been receiving lately from users on ARM systems.

XXX Ideally the LLVM project will commit this, and then we can resync with
the code in the LLVM 19.x stable branch, instead of using the code from
their PR, before we ship it!

Reported-by: Anthonin Bonnefoy <[email protected]>
Reviewed-by: Anthonin Bonnefoy <[email protected]>
Discussion: https://postgr.es/m/CAO6_Xqr63qj%3DSx7HY6ZiiQ6R_JbX%2B-p6sTPwDYwTWZjUmjsYBg%40mail.gmail.com

1 parent 2e6a804 commit 8b78a6f
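For context: the "too far apart" problem is an architectural limit. On AArch64, a direct B/BL branch encodes a 26-bit signed word offset, so its target must lie within ±128 MiB of the branch itself; when RuntimeDyld places interdependent JIT sections farther apart than that, the relocation cannot be encoded correctly. A minimal sketch of the constraint (illustrative only, not part of this patch; the helper name is ours):

    // AArch64 B/BL: 26-bit signed immediate, scaled by 4 bytes,
    // giving a reach of +/- 2^27 bytes (128 MiB) from the call site.
    static bool inDirectBranchRange(uintptr_t CallSite, uintptr_t Callee) {
      intptr_t Delta = (intptr_t)Callee - (intptr_t)CallSite;
      return Delta >= -(intptr_t)(1ULL << 27) && Delta < (intptr_t)(1ULL << 27);
    }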

File tree

7 files changed: +641 −1 lines changed

src/backend/jit/llvm/Makefile

Lines changed: 2 additions & 1 deletion
@@ -47,7 +47,8 @@ OBJS += \
 	llvmjit.o \
 	llvmjit_error.o \
 	llvmjit_inline.o \
-	llvmjit_wrap.o
+	llvmjit_wrap.o \
+	SectionMemoryManager.o
 
 # Code generation
 OBJS += \
src/backend/jit/llvm/SectionMemoryManager.cpp (new file)

Lines changed: 388 additions & 0 deletions
@@ -0,0 +1,388 @@
/*
 * This file taken from https://github.com/llvm/llvm-project/pull/71968, with
 * the name changed to llvm::backport::SectionMemoryManager, so we can support
 * the ARM memory model on broken LLVM versions.
 */

//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld
//
//===----------------------------------------------------------------------===//

#include "jit/SectionMemoryManager.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"

namespace llvm {
namespace backport {

bool SectionMemoryManager::hasSpace(const MemoryGroup &MemGroup,
                                    uintptr_t Size) const {
  for (const FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= Size)
      return true;
  }
  return false;
}

#if LLVM_VERSION_MAJOR < 16
void SectionMemoryManager::reserveAllocationSpace(uintptr_t CodeSize,
                                                  uint32_t CodeAlign_i,
                                                  uintptr_t RODataSize,
                                                  uint32_t RODataAlign_i,
                                                  uintptr_t RWDataSize,
                                                  uint32_t RWDataAlign_i) {
  Align CodeAlign(CodeAlign_i);
  Align RODataAlign(RODataAlign_i);
  Align RWDataAlign(RWDataAlign_i);
#else
void SectionMemoryManager::reserveAllocationSpace(
    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
#endif
  if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0)
    return;

  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  // Code alignment needs to be at least the stub alignment - however, we
  // don't have an easy way to get that here so as a workaround, we assume
  // it's 8, which is the largest value I observed across all platforms.
  constexpr uint64_t StubAlign = 8;
  CodeAlign = Align(std::max(CodeAlign.value(), StubAlign));
  RODataAlign = Align(std::max(RODataAlign.value(), StubAlign));
  RWDataAlign = Align(std::max(RWDataAlign.value(), StubAlign));

  // Get space required for each section. Use the same calculation as
  // allocateSection because we need to be able to satisfy it.
  uint64_t RequiredCodeSize = alignTo(CodeSize, CodeAlign) + CodeAlign.value();
  uint64_t RequiredRODataSize =
      alignTo(RODataSize, RODataAlign) + RODataAlign.value();
  uint64_t RequiredRWDataSize =
      alignTo(RWDataSize, RWDataAlign) + RWDataAlign.value();

  if (hasSpace(CodeMem, RequiredCodeSize) &&
      hasSpace(RODataMem, RequiredRODataSize) &&
      hasSpace(RWDataMem, RequiredRWDataSize)) {
    // Sufficient space in contiguous block already available.
    return;
  }

  // MemoryManager does not have functions for releasing memory after it's
  // allocated. Normally it tries to use any excess blocks that were allocated
  // due to page alignment, but if we have insufficient free memory for the
  // request this can lead to allocating disparate memory that can violate the
  // ARM ABI. Clear free memory so only the new allocations are used, but do
  // not release allocated memory as it may still be in-use.
  CodeMem.FreeMem.clear();
  RODataMem.FreeMem.clear();
  RWDataMem.FreeMem.clear();

  // Round up to the nearest page size. Blocks must be page-aligned.
  RequiredCodeSize = alignTo(RequiredCodeSize, PageSize);
  RequiredRODataSize = alignTo(RequiredRODataSize, PageSize);
  RequiredRWDataSize = alignTo(RequiredRWDataSize, PageSize);
  uint64_t RequiredSize =
      RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;

  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      AllocationPurpose::RWData, RequiredSize, nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    return;
  }
  // CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
  CodeMem.AllocatedMem.push_back(MB);
  uintptr_t Addr = (uintptr_t)MB.base();
  FreeMemBlock FreeMB;
  FreeMB.PendingPrefixIndex = (unsigned)-1;

  if (CodeSize > 0) {
    assert(isAddrAligned(CodeAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredCodeSize);
    CodeMem.FreeMem.push_back(FreeMB);
    Addr += RequiredCodeSize;
  }

  if (RODataSize > 0) {
    assert(isAddrAligned(RODataAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRODataSize);
    RODataMem.FreeMem.push_back(FreeMB);
    Addr += RequiredRODataSize;
  }

  if (RWDataSize > 0) {
    assert(isAddrAligned(RWDataAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRWDataSize);
    RWDataMem.FreeMem.push_back(FreeMB);
  }
}
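A worked example of the reservation above (hypothetical numbers, 4 KiB pages): with CodeSize = 0x4100, RODataSize = 0x1800, RWDataSize = 0x200 and all alignments promoted to 8, the page-rounded requirements become 0x5000, 0x2000 and 0x1000 bytes, so a single 0x8000-byte mapping is requested and carved into three adjacent free blocks: [base, base+0x5000) for code, [base+0x5000, base+0x7000) for read-only data, and [base+0x7000, base+0x8000) for read-write data. Because all three come from one mapping, the code and the data it references stay within branch reach of each other.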

uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
                           Size, Alignment);
  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Copy the address to all the other groups, if they have not
  // been initialized.
  if (CodeMem.Near.base() == nullptr)
    CodeMem.Near = MB;
  if (RODataMem.Near.base() == nullptr)
    RODataMem.Near = MB;
  if (RWDataMem.Near.base() == nullptr)
    RWDataMem.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // The allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}
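The over-allocation formula above guarantees an aligned pointer can always be carved out: for example (hypothetical numbers), a request of Size = 100 with Alignment = 16 computes RequiredSize = 16 * ((100 + 15) / 16 + 1) = 16 * 8 = 128 bytes, leaving 28 spare bytes so that the returned address can be rounded up to a 16-byte boundary anywhere inside the block and still have 100 usable bytes after it.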

bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions

  // Some platforms with separate data cache and instruction cache require
  // explicit cache flush, otherwise JIT code manipulations (like resolved
  // relocations) will get to the data cache but not to the instruction cache.
  invalidateInstructionCache();

  return false;
}

static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

  size_t TrimmedSize = M.allocatedSize();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.allocatedSize() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() &&
         Trimmed.allocatedSize() <= M.allocatedSize());

  return Trimmed;
}
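To illustrate the trimming (hypothetical addresses, 4 KiB pages): a free block at 0x1030 of size 0x2FD0 has StartOverlap = (0x1000 - 0x30) % 0x1000 = 0xFD0, so the trimmed block starts at the next page boundary, 0x2000, and its size shrinks to 0x2000 after dropping both the leading 0xFD0 bytes and the trailing sub-page remainder. The partial pages are abandoned rather than reused because they may share a page with pending memory whose permissions are about to change.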

std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = MMapper->protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these pointers are now invalid
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty
  erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
    return FreeMB.Free.allocatedSize() == 0;
  });

  return std::error_code();
}

void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(),
                                            Block.allocatedSize());
}

SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper->releaseMappedMemory(Block);
  }
}

SectionMemoryManager::MemoryMapper::~MemoryMapper() = default;

void SectionMemoryManager::anchor() {}

namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};
} // namespace

SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM,
                                           bool ReserveAlloc)
    : MMapper(UnownedMM), OwnedMMapper(nullptr),
      ReserveAllocation(ReserveAlloc) {
  if (!MMapper) {
    OwnedMMapper = std::make_unique<DefaultMMapper>();
    MMapper = OwnedMMapper.get();
  }
}

} // namespace backport
} // namespace llvm
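A minimal sketch of how a RuntimeDyld-based client could adopt this manager, assuming the backported class keeps upstream SectionMemoryManager's interface (i.e. it is usable as a RuntimeDyld::MemoryManager; the surrounding setup is illustrative and not taken from this commit):

    #include "jit/SectionMemoryManager.h"
    #include "llvm/ExecutionEngine/RuntimeDyld.h"

    // Resolver is some llvm::JITSymbolResolver implementation owned by the
    // caller; passing true asks the manager to reserve one contiguous region
    // up front via reserveAllocationSpace().
    llvm::backport::SectionMemoryManager MemMgr(nullptr, /*ReserveAlloc=*/true);
    llvm::RuntimeDyld Dyld(MemMgr, Resolver);

    // ... Dyld.loadObject(Obj); Dyld.resolveRelocations(); ...

    // Flip code to R-X and read-only data to R--, then flush the icache.
    std::string Err;
    if (MemMgr.finalizeMemory(&Err))
      ; // handle the error reported in Err

Because every section of a module is carved out of the single reservation made by reserveAllocationSpace, ARM branches between those sections stay within encodable range.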
