This repository was archived by the owner on Jan 23, 2023. It is now read-only.

Commit 3fdc085

Prepare to remove VIRTUAL_PAGE_* from map.cpp
Also simplify previous section code
1 parent f83bc0d commit 3fdc085

1 file changed: +26 −23 lines changed

src/pal/src/map/map.cpp

Lines changed: 26 additions & 23 deletions
@@ -44,6 +44,13 @@ using namespace CorUnix;
 
 SET_DEFAULT_DEBUG_CHANNEL(VIRTUAL);
 
+#include "pal/utils.h"
+
+// This is temporary until #10981 merges.
+// There will be an equivalent but opposite temporary fix in #10981 which
+// will trigger a merge conflict to be sure both of these workarounds are removed
+#define VirtualPageSize() VIRTUAL_PAGE_SIZE
+
 //
 // The mapping critical section guards access to the list
 // of currently mapped views. If a thread needs to access
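For context, the page-size arithmetic in the hunks below now goes through ALIGN_UP/ALIGN_DOWN from pal/utils.h plus the temporary VirtualPageSize() shim above. A minimal sketch of what such helpers conventionally look like; the names and exact form here are illustrative assumptions, not the definitions in pal/utils.h:

#include <cstddef>
#include <cstdint>

// Illustrative stand-ins for ALIGN_UP/ALIGN_DOWN; both assume 'alignment'
// is a power of two, which holds for page sizes.
static inline size_t AlignUpSketch(size_t value, size_t alignment)
{
    return (value + alignment - 1) & ~(alignment - 1); // round up to the next multiple
}

static inline uintptr_t AlignDownSketch(uintptr_t value, size_t alignment)
{
    return value & ~(uintptr_t)(alignment - 1);        // round down to the previous multiple
}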
@@ -2012,14 +2019,14 @@ BOOL MAPGetRegionInfo(LPVOID lpAddress,
         real_map_sz = pView->NumberOfBytesToMap;
 #endif
 
-        MappedSize = ((real_map_sz-1) & ~VIRTUAL_PAGE_MASK) + VIRTUAL_PAGE_SIZE;
+        MappedSize = ALIGN_UP(real_map_sz, VirtualPageSize());
         if ( real_map_addr <= lpAddress &&
              (VOID *)((UINT_PTR)real_map_addr+MappedSize) > lpAddress )
         {
             if (lpBuffer)
             {
-                SIZE_T regionSize = MappedSize + (UINT_PTR) real_map_addr -
-                                    ((UINT_PTR) lpAddress & ~VIRTUAL_PAGE_MASK);
+                SIZE_T regionSize = MappedSize + (UINT_PTR) real_map_addr -
+                                    ALIGN_DOWN((UINT_PTR)lpAddress, VirtualPageSize());
 
                 lpBuffer->BaseAddress = lpAddress;
                 lpBuffer->AllocationProtect = 0;
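Assuming VIRTUAL_PAGE_MASK is VIRTUAL_PAGE_SIZE - 1 (the usual convention for these macros), the rewritten round-up computes the same value as the old masked expression. A quick, purely illustrative check with a hypothetical 4 KB page:

#include <cassert>
#include <cstddef>

int main()
{
    const size_t pageSize = 4096;
    const size_t pageMask = pageSize - 1;      // assumed relationship to VIRTUAL_PAGE_MASK
    const size_t real_map_sz = 10000;          // arbitrary sample size

    size_t oldMappedSize = ((real_map_sz - 1) & ~pageMask) + pageSize;
    size_t newMappedSize = (real_map_sz + pageSize - 1) & ~pageMask; // ALIGN_UP equivalent

    assert(oldMappedSize == newMappedSize);    // both yield 12288
    return 0;
}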
@@ -2241,7 +2248,7 @@ MAPmmapAndRecord(
     PAL_ERROR palError = NO_ERROR;
     LPVOID pvBaseAddress = NULL;
 
-    off_t adjust = offset & (VIRTUAL_PAGE_MASK);
+    off_t adjust = offset & (VirtualPageSize() - 1);
 
     pvBaseAddress = mmap(static_cast<char *>(addr) - adjust, len + adjust, prot, flags, fd, offset - adjust);
     if (MAP_FAILED == pvBaseAddress)
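The adjust computation keeps the mmap file offset page-aligned: mmap requires the offset to be a multiple of the page size, so the low bits are peeled off and compensated for in the address and length. A small worked example with illustrative values (assumes a 4 KB page, not part of the commit):

#include <cstdio>
#include <sys/types.h>

int main()
{
    const off_t pageSize = 4096;             // assumed page size for illustration
    off_t offset = 0x1234;                   // requested file offset (not page-aligned)
    off_t adjust = offset & (pageSize - 1);  // 0x234: bytes past the page boundary
    // mmap would then be given offset - adjust (0x1000), addr - adjust, and len + adjust,
    // so the caller still sees the requested bytes at the requested address.
    std::printf("adjust=%#llx aligned offset=%#llx\n",
                (unsigned long long)adjust, (unsigned long long)(offset - adjust));
    return 0;
}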
@@ -2410,7 +2417,7 @@ void * MAPMapPEFile(HANDLE hFile)
     {
         //if we're forcing relocs, create an anonymous mapping at the preferred base. Only create the
         //mapping if we can create it at the specified address.
-        pForceRelocBase = mmap( (void*)preferredBase, VIRTUAL_PAGE_SIZE, PROT_NONE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0 );
+        pForceRelocBase = mmap( (void*)preferredBase, VirtualPageSize(), PROT_NONE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0 );
         if (pForceRelocBase == MAP_FAILED)
         {
             TRACE_(LOADER)("Attempt to take preferred base of %p to force relocation failed\n", (void*)preferredBase);
@@ -2439,7 +2446,7 @@ void * MAPMapPEFile(HANDLE hFile)
     // First try to reserve virtual memory using ExecutableAllcator. This allows all PE images to be
     // near each other and close to the coreclr library which also allows the runtime to generate
     // more efficient code (by avoiding usage of jump stubs).
-    loadedBase = ReserveMemoryFromExecutableAllocator(pThread, ((virtualSize-1) & ~VIRTUAL_PAGE_MASK) + VIRTUAL_PAGE_SIZE);
+    loadedBase = ReserveMemoryFromExecutableAllocator(pThread, ALIGN_UP(virtualSize, VirtualPageSize()));
     if (loadedBase == NULL)
     {
         // MAC64 requires we pass MAP_SHARED (or MAP_PRIVATE) flags - otherwise, the call is failed.
@@ -2462,7 +2469,7 @@ void * MAPMapPEFile(HANDLE hFile)
     if (forceRelocs)
     {
         _ASSERTE(((SIZE_T)loadedBase) != preferredBase);
-        munmap(pForceRelocBase, VIRTUAL_PAGE_SIZE); // now that we've forced relocation, let the original address mapping go
+        munmap(pForceRelocBase, VirtualPageSize()); // now that we've forced relocation, let the original address mapping go
     }
     if (((SIZE_T)loadedBase) != preferredBase)
     {
@@ -2478,7 +2485,7 @@ void * MAPMapPEFile(HANDLE hFile)
     //separately.
 
     size_t headerSize;
-    headerSize = VIRTUAL_PAGE_SIZE; // if there are lots of sections, this could be wrong
+    headerSize = VirtualPageSize(); // if there are lots of sections, this could be wrong
 
     //first, map the PE header to the first page in the image. Get pointers to the section headers
     palError = MAPmmapAndRecord(pFileObject, loadedBase,
@@ -2513,10 +2520,8 @@ void * MAPMapPEFile(HANDLE hFile)
         goto doneReleaseMappingCriticalSection;
     }
 
-    void* prevSectionBase;
-    prevSectionBase = loadedBase; // the first "section" for our purposes is the header
-    size_t prevSectionSizeInMemory;
-    prevSectionSizeInMemory = headerSize;
+    void* prevSectionEnd;
+    prevSectionEnd = (char*)loadedBase + headerSize; // the first "section" for our purposes is the header
     for (unsigned i = 0; i < numSections; ++i)
     {
         //for each section, map the section of the file to the correct virtual offset. Gather the
@@ -2526,12 +2531,13 @@ void * MAPMapPEFile(HANDLE hFile)
         IMAGE_SECTION_HEADER &currentHeader = firstSection[i];
 
         void* sectionBase = (char*)loadedBase + currentHeader.VirtualAddress;
+        void* sectionBaseAligned = ALIGN_DOWN(sectionBase, VirtualPageSize());
 
         // Validate the section header
         if ( (sectionBase < loadedBase) // Did computing the section base overflow?
             || ((char*)sectionBase + currentHeader.SizeOfRawData < (char*)sectionBase) // Does the section overflow?
             || ((char*)sectionBase + currentHeader.SizeOfRawData > (char*)loadedBase + virtualSize) // Does the section extend past the end of the image as the header stated?
-            || ((char*)prevSectionBase + prevSectionSizeInMemory > sectionBase) // Does this section overlap the previous one?
+            || (prevSectionEnd > sectionBase) // Does this section overlap the previous one?
            )
         {
             ERROR_(LOADER)( "section %d is corrupt\n", i );
@@ -2546,13 +2552,12 @@ void * MAPMapPEFile(HANDLE hFile)
         }
 
         // Is there space between the previous section and this one? If so, add a PROT_NONE mapping to cover it.
-        if ((char*)prevSectionBase + prevSectionSizeInMemory < sectionBase)
+        if (prevSectionEnd < sectionBaseAligned)
         {
-            char* gapBase = (char*)prevSectionBase + prevSectionSizeInMemory;
             palError = MAPRecordMapping(pFileObject,
                         loadedBase,
-                        (void*)gapBase,
-                        (char*)sectionBase - gapBase,
+                        prevSectionEnd,
+                        (char*)sectionBaseAligned - (char*)prevSectionEnd,
                         PROT_NONE);
             if (NO_ERROR != palError)
             {
@@ -2596,20 +2601,18 @@ void * MAPMapPEFile(HANDLE hFile)
         }
 #endif // _DEBUG
 
-        prevSectionBase = sectionBase;
-        prevSectionSizeInMemory = (currentHeader.SizeOfRawData + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK; // round up to page boundary
+        prevSectionEnd = ALIGN_UP((char*)sectionBase + currentHeader.SizeOfRawData, VirtualPageSize()); // round up to page boundary
     }
 
     // Is there space after the last section and before the end of the mapped image? If so, add a PROT_NONE mapping to cover it.
     char* imageEnd;
     imageEnd = (char*)loadedBase + virtualSize; // actually, points just after the mapped end
-    if ((char*)prevSectionBase + prevSectionSizeInMemory < imageEnd)
+    if (prevSectionEnd < imageEnd)
     {
-        char* gapBase = (char*)prevSectionBase + prevSectionSizeInMemory;
         palError = MAPRecordMapping(pFileObject,
                     loadedBase,
-                    (void*)gapBase,
-                    imageEnd - gapBase,
+                    prevSectionEnd,
+                    (char*)imageEnd - (char*)prevSectionEnd,
                     PROT_NONE);
         if (NO_ERROR != palError)
         {
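The section-gap bookkeeping in the hunks above now tracks a single page-aligned prevSectionEnd pointer instead of a base plus a rounded size, so the PROT_NONE gap mappings fall straight out of pointer comparisons. A standalone sketch of the same idea over dummy section records (the names and types here are illustrative stand-ins, not the PE structures used by map.cpp):

#include <cstdio>
#include <cstddef>
#include <vector>

// Illustrative section record; not the IMAGE_SECTION_HEADER used by map.cpp.
struct FakeSection { size_t virtualAddress; size_t sizeOfRawData; };

static size_t AlignUp(size_t v, size_t a)   { return (v + a - 1) & ~(a - 1); }
static size_t AlignDown(size_t v, size_t a) { return v & ~(a - 1); }

int main()
{
    const size_t pageSize = 4096;                 // assumed page size
    const size_t headerSize = pageSize;           // header occupies the first page
    const size_t imageSize = 10 * pageSize;
    std::vector<FakeSection> sections = { {0x2000, 0x500}, {0x6000, 0x1200} };

    size_t prevSectionEnd = headerSize;           // end of the "header section"
    for (const FakeSection& s : sections)
    {
        size_t sectionBaseAligned = AlignDown(s.virtualAddress, pageSize);
        if (prevSectionEnd < sectionBaseAligned)  // gap -> would get a PROT_NONE mapping
            std::printf("gap: [%#zx, %#zx)\n", prevSectionEnd, sectionBaseAligned);
        prevSectionEnd = AlignUp(s.virtualAddress + s.sizeOfRawData, pageSize);
    }
    if (prevSectionEnd < imageSize)               // trailing gap after the last section
        std::printf("gap: [%#zx, %#zx)\n", prevSectionEnd, imageSize);
    return 0;
}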
