Skip to content

Commit 9ab92ee

Browse files
committed
minor changes in FakePodInfo
Signed-off-by: Nir Rozenbaum <[email protected]>
1 parent cfec2d8 commit 9ab92ee

File tree

3 files changed

+57
-72
lines changed

3 files changed

+57
-72
lines changed

pkg/epp/backend/metrics/fake_metrics_scraper.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ func (s *FakeMetricsScraper) Scrape(ctx context.Context, pod *backend.Pod, port
6363
return podinfo.CloneScrapedData(res), nil
6464
}
6565

66-
func (s *FakeMetricsScraper) ProcessResult(ctx context.Context, podinfo podinfo.ScrapedData) {} // noop
66+
func (s *FakeMetricsScraper) ProcessResult(ctx context.Context, data podinfo.ScrapedData) {} // noop
6767

6868
func (s *FakeMetricsScraper) SetRes(new map[types.NamespacedName]*Metrics) {
6969
s.resMu.Lock()

pkg/epp/backend/pod-info/fake_pod_info.go

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -18,47 +18,54 @@ package podinfo
1818

1919
import (
2020
"fmt"
21-
"sync"
2221

2322
corev1 "k8s.io/api/core/v1"
23+
"k8s.io/apimachinery/pkg/types"
2424
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/backend"
2525
)
2626

2727
var _ PodInfo = &FakePodInfo{}
2828

29+
func NewFakePodInfo(namespacedName types.NamespacedName) *FakePodInfo {
30+
return &FakePodInfo{
31+
pod: &backend.Pod{NamespacedName: namespacedName},
32+
data: map[string]ScrapedData{},
33+
}
34+
}
35+
2936
// FakePodInfo is an implementation of PodInfo that doesn't run the async scrape loop.
3037
type FakePodInfo struct {
31-
Pod *backend.Pod
32-
Data map[string]ScrapedData
33-
Lock sync.RWMutex // dataLock is used to synchronize RW access to data map.
38+
pod *backend.Pod
39+
data map[string]ScrapedData
40+
}
41+
42+
func (fpi *FakePodInfo) WithData(data map[string]ScrapedData) *FakePodInfo {
43+
fpi.data = data
44+
return fpi
3445
}
3546

3647
func (fpi *FakePodInfo) GetPod() *backend.Pod {
37-
return fpi.Pod
48+
return fpi.pod
3849
}
3950
func (fpi *FakePodInfo) GetData(key string) (ScrapedData, bool) {
40-
fpi.Lock.RLock()
41-
defer fpi.Lock.RUnlock()
42-
data, ok := fpi.Data[key]
51+
data, ok := fpi.data[key]
4352
return data, ok
4453
}
4554

4655
func (fpi *FakePodInfo) GetDataKeys() []string {
47-
fpi.Lock.RLock()
48-
defer fpi.Lock.RUnlock()
4956
result := []string{}
50-
for key := range fpi.Data {
57+
for key := range fpi.data {
5158
result = append(result, key)
5259
}
5360
return result
5461
}
5562

5663
func (fpi *FakePodInfo) UpdatePod(pod *corev1.Pod) {
57-
fpi.Pod = toInternalPod(pod)
64+
fpi.pod = toInternalPod(pod)
5865
}
5966

6067
func (fpi *FakePodInfo) Stop() {} // noop
6168

6269
func (fpi *FakePodInfo) String() string {
63-
return fmt.Sprintf("Pod: %v; Data: %v", fpi.GetPod(), fpi.Data)
70+
return fmt.Sprintf("Pod: %v; Data: %v", fpi.GetPod(), fpi.data)
6471
}

pkg/epp/scheduling/scheduler_test.go

Lines changed: 36 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@ package scheduling
1818

1919
import (
2020
"context"
21-
"sync"
2221
"testing"
2322

2423
"github.com/google/go-cmp/cmp"
@@ -60,9 +59,8 @@ func TestSchedule(t *testing.T) {
6059
// pod2 will be picked because it has relatively low queue size, with the requested
6160
// model being active, and has low KV cache.
6261
input: []*podinfo.FakePodInfo{
63-
{
64-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod1"}},
65-
Data: map[string]podinfo.ScrapedData{
62+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod1"}).
63+
WithData(map[string]podinfo.ScrapedData{
6664
metrics.MetricsDataKey: &backendmetrics.Metrics{
6765
WaitingQueueSize: 0,
6866
KVCacheUsagePercent: 0.2,
@@ -72,12 +70,9 @@ func TestSchedule(t *testing.T) {
7270
"bar": 1,
7371
},
7472
},
75-
},
76-
Lock: sync.RWMutex{},
77-
},
78-
{
79-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod2"}},
80-
Data: map[string]podinfo.ScrapedData{
73+
}),
74+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod2"}).
75+
WithData(map[string]podinfo.ScrapedData{
8176
metrics.MetricsDataKey: &backendmetrics.Metrics{
8277
WaitingQueueSize: 3,
8378
KVCacheUsagePercent: 0.1,
@@ -87,11 +82,9 @@ func TestSchedule(t *testing.T) {
8782
"critical": 1,
8883
},
8984
},
90-
},
91-
},
92-
{
93-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod3"}},
94-
Data: map[string]podinfo.ScrapedData{
85+
}),
86+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod3"}).
87+
WithData(map[string]podinfo.ScrapedData{
9588
metrics.MetricsDataKey: &backendmetrics.Metrics{
9689
WaitingQueueSize: 10,
9790
KVCacheUsagePercent: 0.2,
@@ -100,8 +93,7 @@ func TestSchedule(t *testing.T) {
10093
"foo": 1,
10194
},
10295
},
103-
},
104-
},
96+
}),
10597
},
10698
wantRes: &types.Result{
10799
TargetPod: &types.ScoredPod{
@@ -132,9 +124,8 @@ func TestSchedule(t *testing.T) {
132124
},
133125
// pod1 will be picked because it has capacity for the sheddable request.
134126
input: []*podinfo.FakePodInfo{
135-
{
136-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod1"}},
137-
Data: map[string]podinfo.ScrapedData{
127+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod1"}).
128+
WithData(map[string]podinfo.ScrapedData{
138129
metrics.MetricsDataKey: &backendmetrics.Metrics{
139130
WaitingQueueSize: 0,
140131
KVCacheUsagePercent: 0.2,
@@ -144,12 +135,9 @@ func TestSchedule(t *testing.T) {
144135
"bar": 1,
145136
},
146137
},
147-
},
148-
Lock: sync.RWMutex{},
149-
},
150-
{
151-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod2"}},
152-
Data: map[string]podinfo.ScrapedData{
138+
}),
139+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod2"}).
140+
WithData(map[string]podinfo.ScrapedData{
153141
metrics.MetricsDataKey: &backendmetrics.Metrics{
154142
WaitingQueueSize: 3,
155143
KVCacheUsagePercent: 0.1,
@@ -159,11 +147,9 @@ func TestSchedule(t *testing.T) {
159147
"critical": 1,
160148
},
161149
},
162-
},
163-
},
164-
{
165-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod3"}},
166-
Data: map[string]podinfo.ScrapedData{
150+
}),
151+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod3"}).
152+
WithData(map[string]podinfo.ScrapedData{
167153
metrics.MetricsDataKey: &backendmetrics.Metrics{
168154
WaitingQueueSize: 10,
169155
KVCacheUsagePercent: 0.2,
@@ -172,8 +158,7 @@ func TestSchedule(t *testing.T) {
172158
"foo": 1,
173159
},
174160
},
175-
},
176-
},
161+
}),
177162
},
178163
wantRes: &types.Result{
179164
TargetPod: &types.ScoredPod{
@@ -205,9 +190,8 @@ func TestSchedule(t *testing.T) {
205190
// All pods have higher KV cache than the threshold, so the sheddable request will be
206191
// dropped.
207192
input: []*podinfo.FakePodInfo{
208-
{
209-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod1"}},
210-
Data: map[string]podinfo.ScrapedData{
193+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod1"}).
194+
WithData(map[string]podinfo.ScrapedData{
211195
metrics.MetricsDataKey: &backendmetrics.Metrics{
212196
WaitingQueueSize: 10,
213197
KVCacheUsagePercent: 0.9,
@@ -217,12 +201,9 @@ func TestSchedule(t *testing.T) {
217201
"bar": 1,
218202
},
219203
},
220-
},
221-
Lock: sync.RWMutex{},
222-
},
223-
{
224-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod2"}},
225-
Data: map[string]podinfo.ScrapedData{
204+
}),
205+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod2"}).
206+
WithData(map[string]podinfo.ScrapedData{
226207
metrics.MetricsDataKey: &backendmetrics.Metrics{
227208
WaitingQueueSize: 3,
228209
KVCacheUsagePercent: 0.85,
@@ -232,11 +213,9 @@ func TestSchedule(t *testing.T) {
232213
"critical": 1,
233214
},
234215
},
235-
},
236-
},
237-
{
238-
Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod3"}},
239-
Data: map[string]podinfo.ScrapedData{
216+
}),
217+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod3"}).
218+
WithData(map[string]podinfo.ScrapedData{
240219
metrics.MetricsDataKey: &backendmetrics.Metrics{
241220
WaitingQueueSize: 10,
242221
KVCacheUsagePercent: 0.85,
@@ -245,8 +224,7 @@ func TestSchedule(t *testing.T) {
245224
"foo": 1,
246225
},
247226
},
248-
},
249-
},
227+
}),
250228
},
251229
wantRes: nil,
252230
err: true,
@@ -311,9 +289,9 @@ func TestSchedulePlugins(t *testing.T) {
311289
postSchedulePlugins: []plugins.PostSchedule{tp1, tp2},
312290
},
313291
input: []*podinfo.FakePodInfo{
314-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod1"}}, Lock: sync.RWMutex{}},
315-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod2"}}, Lock: sync.RWMutex{}},
316-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod3"}}, Lock: sync.RWMutex{}},
292+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod1"}),
293+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod2"}),
294+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod3"}),
317295
},
318296
wantTargetPod: k8stypes.NamespacedName{Name: "pod1"},
319297
targetPodScore: 1.1,
@@ -333,9 +311,9 @@ func TestSchedulePlugins(t *testing.T) {
333311
postSchedulePlugins: []plugins.PostSchedule{tp1, tp2},
334312
},
335313
input: []*podinfo.FakePodInfo{
336-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod1"}}, Lock: sync.RWMutex{}},
337-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod2"}}, Lock: sync.RWMutex{}},
338-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod3"}}, Lock: sync.RWMutex{}},
314+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod1"}),
315+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod2"}),
316+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod3"}),
339317
},
340318
wantTargetPod: k8stypes.NamespacedName{Name: "pod1"},
341319
targetPodScore: 50,
@@ -355,9 +333,9 @@ func TestSchedulePlugins(t *testing.T) {
355333
postSchedulePlugins: []plugins.PostSchedule{tp1, tp2},
356334
},
357335
input: []*podinfo.FakePodInfo{
358-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod1"}}, Lock: sync.RWMutex{}},
359-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod2"}}, Lock: sync.RWMutex{}},
360-
{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod3"}}, Lock: sync.RWMutex{}},
336+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod1"}),
337+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod2"}),
338+
podinfo.NewFakePodInfo(k8stypes.NamespacedName{Name: "pod3"}),
361339
},
362340
numPodsToScore: 0,
363341
err: true, // no available pods to serve after filtering all

0 commit comments

Comments
 (0)