convert subset filter from a plugin to logic in director #1088
Changes from 3 commits
@@ -24,12 +24,14 @@ import (
 	"math/rand"
 	"net"
 	"strconv"
+	"strings"
 	"time"

 	"github.com/go-logr/logr"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/gateway-api-inference-extension/api/v1alpha2"
 	"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/backend"
+	backendmetrics "sigs.k8s.io/gateway-api-inference-extension/pkg/epp/backend/metrics"
 	"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/datastore"
 	"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/handlers"
 	"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/metrics"
@@ -39,6 +41,11 @@ import (
 	requtil "sigs.k8s.io/gateway-api-inference-extension/pkg/epp/util/request"
 )

+const (
+	subsetHintNamespace = "envoy.lb.subset_hint"
+	subsetHintKey       = "x-gateway-destination-endpoint-subset"
+)
+
 // Scheduler defines the interface required by the Director for scheduling.
 type Scheduler interface {
 	Schedule(ctx context.Context, request *schedulingtypes.LLMRequest, candidatePods []schedulingtypes.Pod) (result *schedulingtypes.SchedulingResult, err error)
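For context, the two constants added above are the EPP protocol's metadata namespace and key for subset hints. Decoded into Go, request metadata carrying such a hint would look roughly like the sketch below; the addresses are made-up examples, not values from this PR.

```go
// Illustrative only: the decoded shape of request metadata that carries a
// subset hint, as the new director logic expects it. Addresses are examples.
var exampleSubsetHintMetadata = map[string]any{
	subsetHintNamespace: map[string]any{ // "envoy.lb.subset_hint"
		subsetHintKey: []any{ // "x-gateway-destination-endpoint-subset"
			"10.0.1.0:8080", // "<address>:<port>"
			"10.0.1.1:8080",
		},
	},
}
```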
@@ -118,12 +125,12 @@ func (d *Director) HandleRequest(ctx context.Context, reqCtx *handlers.RequestCo
 	}

 	// Prepare LLMRequest (needed for both saturation detection and Scheduler)
-	reqCtx.SchedulingRequest = schedulingtypes.NewLLMRequest(
-		reqCtx.Request.Headers[requtil.RequestIdHeaderKey],
-		reqCtx.ResolvedTargetModel,
-		prompt,
-		reqCtx.Request.Headers,
-		reqCtx.Request.Metadata)
+	reqCtx.SchedulingRequest = &schedulingtypes.LLMRequest{
+		RequestId:   reqCtx.Request.Headers[requtil.RequestIdHeaderKey],
+		TargetModel: reqCtx.ResolvedTargetModel,
+		Prompt:      prompt,
+		Headers:     reqCtx.Request.Headers,
+	}

 	logger = logger.WithValues("model", reqCtx.Model, "resolvedTargetModel", reqCtx.ResolvedTargetModel, "criticality", requestCriticality)

@@ -135,11 +142,11 @@ func (d *Director) HandleRequest(ctx context.Context, reqCtx *handlers.RequestCo
 		return reqCtx, err
 	}

-	// --- 3. Call Scheduler ---
-	// Snapshot pod metrics from the datastore to:
-	// 1. Reduce concurrent access to the datastore.
-	// 2. Ensure consistent data during the scheduling operation of a request between all scheduling cycles.
-	candidatePods := schedulingtypes.ToSchedulerPodMetrics(d.datastore.PodGetAll())
+	// --- 3. Call Scheduler (with the relevant candidate pods) ---
+	candidatePods, err := d.getCandidatePodsForScheduling(reqCtx.Request.Metadata)
+	if err != nil {
+		return reqCtx, errutil.Error{Code: errutil.BadRequest, Msg: fmt.Errorf("failed to find candidate pods: %w", err).Error()}
+	}
 	results, err := d.scheduler.Schedule(ctx, reqCtx.SchedulingRequest, candidatePods)
 	if err != nil {
 		return reqCtx, errutil.Error{Code: errutil.InferencePoolResourceExhausted, Msg: fmt.Errorf("failed to find target pod: %w", err).Error()}
@@ -177,6 +184,45 @@ func (d *Director) admitRequest(ctx context.Context, requestCriticality v1alpha2
 	return nil
 }

+// getCandidatePodsForScheduling gets the list of relevant endpoints for the scheduling cycle from the datastore.
+// According to the EPP protocol, if "x-gateway-destination-endpoint-subset" is set in the request metadata and specifies
+// a subset of endpoints, only those endpoints are considered as candidates for the scheduler.
+// Snapshot pod metrics from the datastore to:
+// 1. Reduce concurrent access to the datastore.
+// 2. Ensure consistent data during the scheduling operation of a request between all scheduling cycles.
+func (d *Director) getCandidatePodsForScheduling(requestMetadata map[string]any) ([]schedulingtypes.Pod, error) {
+	subsetMap, found := requestMetadata[subsetHintNamespace].(map[string]any)
+	if !found {
+		return schedulingtypes.ToSchedulerPodMetrics(d.datastore.PodGetAll()), nil
+	}
+
+	// Check if the endpoint key is present in the subset map and ensure there is at least one value
+	endpointSubsetList, found := subsetMap[subsetHintKey].([]any)
+	if !found {
+		return schedulingtypes.ToSchedulerPodMetrics(d.datastore.PodGetAll()), nil
+	} else if len(endpointSubsetList) == 0 {
+		return nil, fmt.Errorf("'%s' metadata cannot be empty", subsetHintKey)
+	}
+
+	// Create a map of endpoint addresses for easy lookup
+	endpoints := make(map[string]bool)
+	for _, endpoint := range endpointSubsetList {
+		// Extract the address from the endpoint.
+		// The endpoint is formatted as "<address>:<port>" (e.g. "10.0.1.0:8080").
+		epStr := strings.Split(endpoint.(string), ":")[0]
+		endpoints[epStr] = true
+	}
+
+	podFilteredList := d.datastore.PodList(func(pm backendmetrics.PodMetrics) bool {
+		if _, found := endpoints[pm.GetPod().Address]; found {
+			return true
+		}
+		return false
+	})
+
+	return schedulingtypes.ToSchedulerPodMetrics(podFilteredList), nil
+}
+
 // prepareRequest populates the RequestContext and calls the registered PreRequest plugins
 // for allowing plugging customized logic based on the scheduling results.
 func (d *Director) prepareRequest(ctx context.Context, reqCtx *handlers.RequestContext, result *schedulingtypes.SchedulingResult) (*handlers.RequestContext, error) {

Review comment: Can we please add a trace log line indicating whether the subset key is set, and the number of endpoints it included vs. what the datastore has?

Reply: done
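The "done" above refers to a follow-up change that is not visible in this three-commit view. As a rough idea of the kind of trace line being requested, here is a sketch; the helper name, verbosity level, and key names are assumptions rather than the PR's actual code, with only logr's V(...).Info(...) call taken as given.

```go
package example

import "github.com/go-logr/logr"

// logSubsetFilterTrace is a hypothetical helper sketching the requested trace
// line: whether the subset key was set, how many endpoints the hint listed,
// how many the datastore holds, and how many matched after filtering.
func logSubsetFilterTrace(logger logr.Logger, subsetKeySet bool, hinted, inDatastore, matched int) {
	logger.V(2).Info("subset hint filtering",
		"subsetKeySet", subsetKeySet,
		"hintedEndpoints", hinted,
		"datastoreEndpoints", inDatastore,
		"matchedEndpoints", matched)
}
```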
Reply: this comment was moved to the helper function godoc
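To see the subset-hint behavior in isolation, the standalone sketch below mirrors the address extraction and lookup that getCandidatePodsForScheduling performs, but over plain address strings instead of the project's datastore and Pod types; it is an illustration, not the implementation.

```go
package main

import (
	"fmt"
	"strings"
)

const (
	subsetHintNamespace = "envoy.lb.subset_hint"
	subsetHintKey       = "x-gateway-destination-endpoint-subset"
)

// filterBySubsetHint mirrors the director's subset filtering on plain address
// strings: hinted endpoints arrive as "<address>:<port>", pods are matched by
// address only. No hint (or a missing key) means every pod stays a candidate.
func filterBySubsetHint(requestMetadata map[string]any, podAddresses []string) ([]string, error) {
	subsetMap, found := requestMetadata[subsetHintNamespace].(map[string]any)
	if !found {
		return podAddresses, nil
	}
	endpointSubsetList, found := subsetMap[subsetHintKey].([]any)
	if !found {
		return podAddresses, nil
	}
	if len(endpointSubsetList) == 0 {
		return nil, fmt.Errorf("'%s' metadata cannot be empty", subsetHintKey)
	}
	// Index hinted addresses (port stripped) for constant-time lookup.
	endpoints := make(map[string]bool)
	for _, endpoint := range endpointSubsetList {
		endpoints[strings.Split(endpoint.(string), ":")[0]] = true
	}
	var filtered []string
	for _, addr := range podAddresses {
		if endpoints[addr] {
			filtered = append(filtered, addr)
		}
	}
	return filtered, nil
}

func main() {
	metadata := map[string]any{
		subsetHintNamespace: map[string]any{
			subsetHintKey: []any{"10.0.1.0:8080"},
		},
	}
	pods := []string{"10.0.1.0", "10.0.1.1"}
	filtered, err := filterBySubsetHint(metadata, pods)
	fmt.Println(filtered, err) // [10.0.1.0] <nil>
}
```

Running this prints [10.0.1.0] <nil>: only the hinted address that also exists in the pod list survives the filter.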