Skip to content
This repository was archived by the owner on Jun 5, 2025. It is now read-only.

Commit ff72833

Browse files
committed
Fix duplication
Signed-off-by: Radoslav Dimitrov <[email protected]>
1 parent 49cc214 commit ff72833

File tree

2 files changed

+13
-21
lines changed

src/codegate/providers/vllm/adapter.py

Lines changed: 5 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -105,41 +105,28 @@ def _has_chat_ml_format(data: Dict) -> bool:
105105
return False
106106

107107
def normalize(self, data: Dict) -> ChatCompletionRequest:
108-
"""
109-
Normalize the input data to the format expected by LiteLLM.
110-
Ensures the model name has the hosted_vllm prefix and constructs the complete endpoint URL.
111-
"""
112-
# Make a copy of the data to avoid modifying the original and normalize the message content
113108
normalized_data = self._normalize_content_messages(data)
114109

115-
# Format the model name to include the provider
110+
# Format the model name
116111
if "model" in normalized_data:
117112
model_name = normalized_data["model"]
118113
if not model_name.startswith("hosted_vllm/"):
119114
normalized_data["model"] = f"hosted_vllm/{model_name}"
120115

121-
# Construct the complete endpoint URL
116+
# Construct the complete endpoint URL without duplicating paths
122117
if "base_url" in normalized_data:
123118
base_url = normalized_data["base_url"].rstrip("/")
124-
original_endpoint = normalized_data.pop("original_endpoint", "")
125-
126-
# Ensure we have /v1 in the path
127-
if not base_url.endswith("/v1"):
128-
base_url = f"{base_url}/v1"
129-
130-
# Add the original endpoint if it exists
131-
if original_endpoint:
132-
normalized_data["base_url"] = f"{base_url}/{original_endpoint}"
133-
else:
134-
normalized_data["base_url"] = base_url
119+
normalized_data["base_url"] = base_url
135120

136121
ret_data = normalized_data
137122
if self._has_chat_ml_format(normalized_data):
138123
ret_data = self._chat_ml_normalizer.normalize(normalized_data)
139124
else:
140125
ret_data = ChatCompletionRequest(**normalized_data)
126+
141127
if ret_data.get("stream", False):
142128
ret_data["stream_options"] = {"include_usage": True}
129+
143130
return ret_data
144131

145132
def denormalize(self, data: ChatCompletionRequest) -> Dict:

src/codegate/providers/vllm/provider.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,12 @@ def _get_base_url(self) -> str:
3838
"""
3939
config = Config.get_config()
4040
base_url = config.provider_urls.get("vllm") if config else ""
41-
return base_url.rstrip("/") if base_url else ""
41+
if base_url:
42+
base_url = base_url.rstrip("/")
43+
# Add /v1 if not present
44+
if not base_url.endswith("/v1"):
45+
base_url = f"{base_url}/v1"
46+
return base_url
4247

4348
def _get_endpoint_from_request(self, request: Request) -> str:
4449
"""
@@ -48,8 +53,8 @@ def _get_endpoint_from_request(self, request: Request) -> str:
4853
# Find the index of 'vllm' in the path
4954
try:
5055
vllm_index = path_parts.index(self.provider_route_name)
51-
# Get everything after 'vllm'
52-
endpoint = "/".join(path_parts[vllm_index + 1 :])
56+
# Get everything after 'vllm' but before any duplicates
57+
endpoint = path_parts[vllm_index + 1].split("/")[0] # Take just the first part
5358
return endpoint
5459
except ValueError:
5560
return ""

0 commit comments

Comments (0)