/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package value

// Allocator provides a value object allocation strategy.
// Value objects can be allocated by passing an allocator to the "Using"
// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...).
// Value objects returned from "Using" functions should be given back to the allocator
// once no longer needed by calling Allocator.Free(Value).
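//
// A minimal usage sketch (illustrative; it assumes a Value named v and the
// AsMapUsing receiver function defined elsewhere in this package):
//
//	a := NewFreelistAllocator()
//	m := v.AsMapUsing(a)
//	// ... use m ...
//	a.Free(m)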
type Allocator interface {
	// Free gives the allocator back any value objects returned by the "Using"
	// receiver functions on the value interfaces.
	// interface{} may be any of: Value, Map, List or Range.
	Free(interface{})

	// The unexported functions are for "Using" receiver functions of the value types
	// to request what they need from the allocator.
	allocValueUnstructured() *valueUnstructured
	allocListUnstructuredRange() *listUnstructuredRange
	allocValueReflect() *valueReflect
	allocMapReflect() *mapReflect
	allocStructReflect() *structReflect
	allocListReflect() *listReflect
	allocListReflectRange() *listReflectRange
}

// HeapAllocator simply allocates objects to the heap. It is the default
// allocator used by receiver functions on the value interfaces that do not
// accept an allocator, and it should be used whenever allocating objects that
// will not be given back to an allocator by calling Allocator.Free(Value).
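//
// For example (a sketch; AsMapUsing is assumed, as on the Value interface elsewhere
// in this package):
//
//	m := v.AsMapUsing(HeapAllocator) // no Free call needed; m is left to the garbage collector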
var HeapAllocator = &heapAllocator{}

type heapAllocator struct{}

func (p *heapAllocator) allocValueUnstructured() *valueUnstructured {
	return &valueUnstructured{}
}

func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange {
	return &listUnstructuredRange{vv: &valueUnstructured{}}
}

func (p *heapAllocator) allocValueReflect() *valueReflect {
	return &valueReflect{}
}

func (p *heapAllocator) allocStructReflect() *structReflect {
	return &structReflect{}
}

func (p *heapAllocator) allocMapReflect() *mapReflect {
	return &mapReflect{}
}

func (p *heapAllocator) allocListReflect() *listReflect {
	return &listReflect{}
}

func (p *heapAllocator) allocListReflectRange() *listReflectRange {
	return &listReflectRange{vr: &valueReflect{}}
}

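// Free is a no-op for the heap allocator: freed objects are simply left to the
// Go garbage collector.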
func (p *heapAllocator) Free(_ interface{}) {}

// NewFreelistAllocator creates a freelist-based allocator.
// This allocator provides fast allocation and freeing of short-lived value objects.
//
// The freelists are bounded in size by freelistMaxSize. If more value objects than this
// are allocated at once, the excess is left for the garbage collector when freed.
//
// This allocator is not safe for concurrent use and must not be shared between goroutines.
//
// This allocator works well for traversal of value data trees. Typical usage is to acquire
// a freelist allocator at the beginning of the traversal and use it throughout
// for all temporary value access.
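//
// A traversal sketch (illustrative; it assumes a List named list and the
// RangeUsing/ListRange API defined elsewhere in this package):
//
//	a := NewFreelistAllocator()
//	r := list.RangeUsing(a)
//	for r.Next() {
//		_, item := r.Item()
//		// ... use item within this iteration only ...
//	}
//	a.Free(r)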
func NewFreelistAllocator() Allocator {
	return &freelist{}
}

// freelistMaxSize bounds the memory usage of the freelists. This prevents the processing of
// very large lists from leaking memory. The limit is large enough for Endpoints objects
// containing 1000 IP address entries. Freed objects that don't fit into a freelist are
// orphaned on the heap to be garbage collected.
const freelistMaxSize = 1000

type freelist struct {
	spareUnstructured          []*valueUnstructured
	spareListUnstructuredRange []*listUnstructuredRange
	spareReflect               []*valueReflect
	spareMapReflect            []*mapReflect
	spareStructReflect         []*structReflect
	spareListReflect           []*listReflect
	spareListReflectRange      []*listReflectRange
}

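// Free recycles the value object onto the matching freelist, first clearing any
// references it holds so the freelist does not pin user data. If the freelist is
// already at freelistMaxSize, the object is dropped and left to the garbage collector.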
func (w *freelist) Free(value interface{}) {
	switch v := value.(type) {
	case *valueUnstructured:
		v.Value = nil // don't hold references to unstructured objects
		if len(w.spareUnstructured) < freelistMaxSize {
			w.spareUnstructured = append(w.spareUnstructured, v)
		}
	case *listUnstructuredRange:
		v.vv.Value = nil // don't hold references to unstructured objects
		if len(w.spareListUnstructuredRange) < freelistMaxSize {
			w.spareListUnstructuredRange = append(w.spareListUnstructuredRange, v)
		}
	case *valueReflect:
		v.ParentMapKey = nil
		v.ParentMap = nil
		if len(w.spareReflect) < freelistMaxSize {
			w.spareReflect = append(w.spareReflect, v)
		}
	case *mapReflect:
		if len(w.spareMapReflect) < freelistMaxSize {
			w.spareMapReflect = append(w.spareMapReflect, v)
		}
	case *structReflect:
		if len(w.spareStructReflect) < freelistMaxSize {
			w.spareStructReflect = append(w.spareStructReflect, v)
		}
	case *listReflect:
		if len(w.spareListReflect) < freelistMaxSize {
			w.spareListReflect = append(w.spareListReflect, v)
		}
	case *listReflectRange:
		v.vr.ParentMapKey = nil
		v.vr.ParentMap = nil
		if len(w.spareListReflectRange) < freelistMaxSize {
			w.spareListReflectRange = append(w.spareListReflectRange, v)
		}
	}
}

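// The alloc* functions below pop a spare object from the corresponding freelist when
// one is available and fall back to a fresh heap allocation otherwise.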
func (w *freelist) allocValueUnstructured() *valueUnstructured {
	var w2 *valueUnstructured
	if n := len(w.spareUnstructured); n > 0 {
		w2, w.spareUnstructured = w.spareUnstructured[n-1], w.spareUnstructured[:n-1]
	} else {
		w2 = &valueUnstructured{}
	}
	return w2
}

func (w *freelist) allocListUnstructuredRange() *listUnstructuredRange {
	var w2 *listUnstructuredRange
	if n := len(w.spareListUnstructuredRange); n > 0 {
		w2, w.spareListUnstructuredRange = w.spareListUnstructuredRange[n-1], w.spareListUnstructuredRange[:n-1]
	} else {
		w2 = &listUnstructuredRange{vv: &valueUnstructured{}}
	}
	return w2
}

func (w *freelist) allocValueReflect() *valueReflect {
	var w2 *valueReflect
	if n := len(w.spareReflect); n > 0 {
		w2, w.spareReflect = w.spareReflect[n-1], w.spareReflect[:n-1]
	} else {
		w2 = &valueReflect{}
	}
	return w2
}

func (w *freelist) allocStructReflect() *structReflect {
	var w2 *structReflect
	if n := len(w.spareStructReflect); n > 0 {
		w2, w.spareStructReflect = w.spareStructReflect[n-1], w.spareStructReflect[:n-1]
	} else {
		w2 = &structReflect{}
	}
	return w2
}

func (w *freelist) allocMapReflect() *mapReflect {
	var w2 *mapReflect
	if n := len(w.spareMapReflect); n > 0 {
		w2, w.spareMapReflect = w.spareMapReflect[n-1], w.spareMapReflect[:n-1]
	} else {
		w2 = &mapReflect{}
	}
	return w2
}

func (w *freelist) allocListReflect() *listReflect {
	var w2 *listReflect
	if n := len(w.spareListReflect); n > 0 {
		w2, w.spareListReflect = w.spareListReflect[n-1], w.spareListReflect[:n-1]
	} else {
		w2 = &listReflect{}
	}
	return w2
}

func (w *freelist) allocListReflectRange() *listReflectRange {
	var w2 *listReflectRange
	if n := len(w.spareListReflectRange); n > 0 {
		w2, w.spareListReflectRange = w.spareListReflectRange[n-1], w.spareListReflectRange[:n-1]
	} else {
		w2 = &listReflectRange{vr: &valueReflect{}}
	}
	return w2
}