Commit 22664f3

runtime: reserve less memory for aligned reservation on sbrk systems
Sometimes the runtime needs to reserve memory with a large alignment, which the OS usually won't satisfy directly. So it asks for size+align bytes instead and frees the unaligned portions. On sbrk systems this doesn't work well, as freeing the tail portion doesn't actually return the memory to the OS. Instead, we can simply round the current break up, then reserve the given size, without wasting the tail portion.

Also, don't create heap arena hints on sbrk systems. We can only grow the break sequentially, so reserving specific addresses would not succeed anyway.

For #69018.

Change-Id: Iadc2c54d62b00ad7befa5bbf71146523483a8c47
Reviewed-on: https://go-review.googlesource.com/c/go/+/621715
Reviewed-by: Michael Knyszek <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
Reviewed-by: Michael Pratt <[email protected]>
1 parent 0addb2a commit 22664f3
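
As a quick illustration of the arithmetic behind the new scheme (a standalone sketch, not runtime code; `alignUp` is redefined here and the break value and sizes are invented for the example):

```go
package main

import "fmt"

// alignUp rounds p up to a multiple of align (a power of two), mirroring the
// runtime helper of the same name.
func alignUp(p, align uintptr) uintptr { return (p + align - 1) &^ (align - 1) }

func main() {
	// Simulated program break and an arena-sized request; all values are
	// invented for the example.
	bloc := uintptr(0x00a12340) // pretend current break
	size := uintptr(4 << 20)    // 4 MiB to reserve
	align := uintptr(4 << 20)   // 4 MiB alignment

	p := alignUp(bloc, align) // first aligned address at or above the break
	grow := p + size - bloc   // grow the break by exactly this much
	gap := p - bloc           // head gap below the aligned address

	fmt.Printf("break %#x -> reserve [%#x, %#x), sbrk(%#x), head gap %#x\n",
		bloc, p, p+size, grow, gap)
}
```

The head gap below the aligned address is the only overhead, and as the mem_sbrk.go diff below shows, it goes onto the runtime's free list rather than being lost.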

File tree: 3 files changed (+66, -2 lines)

src/runtime/malloc.go

Lines changed: 10 additions & 1 deletion

@@ -470,7 +470,10 @@ func mallocinit() {
 	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
 
 	// Create initial arena growth hints.
-	if goarch.PtrSize == 8 {
+	if isSbrkPlatform {
+		// Don't generate hints on sbrk platforms. We can
+		// only grow the break sequentially.
+	} else if goarch.PtrSize == 8 {
 		// On a 64-bit machine, we pick the following hints
 		// because:
 		//
@@ -828,6 +831,12 @@ mapped:
 // aligned to align bytes. It may reserve either n or n+align bytes,
 // so it returns the size that was reserved.
 func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
+	if isSbrkPlatform {
+		if v != nil {
+			throw("unexpected heap arena hint on sbrk platform")
+		}
+		return sysReserveAlignedSbrk(size, align)
+	}
 	// Since the alignment is rather large in uses of this
 	// function, we're not likely to get it by chance, so we ask
 	// for a larger region and remove the parts we don't need.
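
For contrast, the generic path that the trailing comment describes over-reserves by `align` and trims both ends. Here is a minimal sketch of that strategy, assuming an OS that can really release the trimmed ranges (`reserve` and `release` are placeholders, not the runtime's actual functions):

```go
package main

import "fmt"

func alignUp(p, align uintptr) uintptr { return (p + align - 1) &^ (align - 1) }

// reserveAlignedGeneric asks for size+align bytes so an aligned block of size
// bytes must exist inside the region, then gives back the unaligned head and
// tail. reserve and release stand in for OS-level mapping calls.
func reserveAlignedGeneric(reserve func(n uintptr) uintptr, release func(p, n uintptr), size, align uintptr) uintptr {
	p := reserve(size + align)
	aligned := alignUp(p, align)
	if head := aligned - p; head > 0 {
		release(p, head)
	}
	if tail := (p + size + align) - (aligned + size); tail > 0 {
		release(aligned+size, tail)
	}
	return aligned
}

func main() {
	next := uintptr(0x10123000) // pretend reservations come out of a bump region
	reserve := func(n uintptr) uintptr { p := next; next += n; return p }
	release := func(p, n uintptr) { fmt.Printf("release %#x bytes at %#x\n", n, p) }

	a := reserveAlignedGeneric(reserve, release, 1<<20, 1<<20)
	fmt.Printf("aligned block at %#x\n", a)
}
```

On an sbrk system, releasing the tail this way does not return it to the OS; it only lands on the runtime's free list, which is the waste the new `isSbrkPlatform` branch avoids.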

src/runtime/mem_nonsbrk.go

Lines changed: 15 additions & 0 deletions

@@ -0,0 +1,15 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9 && !wasm
+
+package runtime
+
+import "unsafe"
+
+const isSbrkPlatform = false
+
+func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
+	panic("unreachable")
+}
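
The stub keeps every platform compiling: call sites can reference `sysReserveAlignedSbrk` unconditionally, while the constant `isSbrkPlatform` guard in malloc.go ensures the panic is unreachable on non-sbrk builds. A single-file sketch of this constant-plus-stub pattern (the names here are invented; the real code splits the constant across the two build-tagged files):

```go
package main

import "fmt"

// In the runtime this constant comes from one of two build-tagged files
// (true in mem_sbrk.go, false in mem_nonsbrk.go). Here it is a plain
// constant so the sketch stays in one file.
const hasSpecialPath = false

// specialPath plays the role of the per-platform stub: it must exist so the
// call site compiles everywhere, but it is only reachable when the constant
// selecting it is true.
func specialPath() int {
	panic("unreachable")
}

func genericPath() int { return 42 }

func doWork() int {
	if hasSpecialPath {
		// With hasSpecialPath a false constant, the compiler can fold this
		// branch away, so the stub's panic can never fire.
		return specialPath()
	}
	return genericPath()
}

func main() {
	fmt.Println(doWork())
}
```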

src/runtime/mem_sbrk.go

Lines changed: 41 additions & 1 deletion

@@ -8,6 +8,8 @@ package runtime
 
 import "unsafe"
 
+const isSbrkPlatform = true
+
 const memDebug = false
 
 // Memory management on sbrk systems (including the linear memory
@@ -47,6 +49,13 @@ func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) }
 func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
 
 func memAlloc(n uintptr) unsafe.Pointer {
+	if p := memAllocNoGrow(n); p != nil {
+		return p
+	}
+	return sbrk(n)
+}
+
+func memAllocNoGrow(n uintptr) unsafe.Pointer {
 	n = memRound(n)
 	var prevp *memHdr
 	for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
@@ -66,7 +75,7 @@ func memAlloc(n uintptr) unsafe.Pointer {
 		}
 		prevp = p
 	}
-	return sbrk(n)
+	return nil
 }
 
 func memFree(ap unsafe.Pointer, n uintptr) {
@@ -207,3 +216,34 @@ func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	unlock(&memlock)
 	return p
 }
+
+func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
+	lock(&memlock)
+	if p := memAllocNoGrow(size + align); p != nil {
+		// We can satisfy the reservation from the free list.
+		// Trim off the unaligned parts.
+		pAligned := alignUp(uintptr(p), align)
+		if startLen := pAligned - uintptr(p); startLen > 0 {
+			memFree(p, startLen)
+		}
+		end := pAligned + size
+		if endLen := (uintptr(p) + size + align) - end; endLen > 0 {
+			memFree(unsafe.Pointer(end), endLen)
+		}
+		memCheck()
+		return unsafe.Pointer(pAligned), size
+	}
+
+	// Round up bloc to align, then allocate size.
+	p := alignUp(bloc, align)
+	r := sbrk(p + size - bloc)
+	if r == nil {
+		p, size = 0, 0
+	} else if l := p - uintptr(r); l > 0 {
+		// Free the area we skipped over for alignment.
+		memFree(r, l)
+		memCheck()
+	}
+	unlock(&memlock)
+	return unsafe.Pointer(p), size
+}
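
A small standalone check of the trimming arithmetic in the free-list branch above (`alignUp` is redefined and the addresses are arbitrary): the head and tail handed back to the free list always sum to exactly `align`, so the `size+align` bytes taken from the free list are fully accounted for.

```go
package main

import "fmt"

func alignUp(p, align uintptr) uintptr { return (p + align - 1) &^ (align - 1) }

func main() {
	const (
		size  uintptr = 1 << 20 // example reservation size
		align uintptr = 1 << 20 // example alignment (power of two)
	)
	// Arbitrary block starts, as if memAllocNoGrow had returned them.
	for _, p := range []uintptr{0x001234, 0x0fffff, 0x100000, 0x1abcd8} {
		aligned := alignUp(p, align)
		head := aligned - p                           // freed before the aligned block
		tail := (p + size + align) - (aligned + size) // freed after it
		fmt.Printf("p=%#x aligned=%#x head=%#x tail=%#x head+tail=%#x\n",
			p, aligned, head, tail, head+tail)
	}
	// head+tail is always exactly align: whatever is trimmed from the front
	// is no longer needed at the back, so nothing is lost.
}
```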
