Skip to content

Commit 5e2bc4e

Browse files
authored
Merge pull request #546 from radofuchs/LCORE_491_Info_E2E_tests
LCORE-491: E2E tests for Info, Models and Metrics endpoints
2 parents 14dbeb7 + caf23d9 commit 5e2bc4e

File tree

3 files changed

+107
-63
lines changed

3 files changed

+107
-63
lines changed

.github/workflows/e2e_tests.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ jobs:
6565
isAbsolutePath: false
6666
file: 'lightspeed-stack.yaml'
6767
content: |
68-
name: foo bar baz
68+
name: Lightspeed Core Service (LCS)
6969
service:
7070
host: 0.0.0.0
7171
port: 8080

tests/e2e/features/info.feature

Lines changed: 55 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -1,46 +1,55 @@
1-
# Feature: Info endpoint API tests
2-
#TODO: fix test
3-
4-
# Background:
5-
# Given The service is started locally
6-
# And REST API service hostname is localhost
7-
# And REST API service port is 8080
8-
# And REST API service prefix is /v1
9-
10-
# Scenario: Check if the OpenAPI endpoint works as expected
11-
# Given The system is in default state
12-
# When I access endpoint "openapi.json" using HTTP GET method
13-
# Then The status code of the response is 200
14-
# And The body of the response contains OpenAPI
15-
16-
# Scenario: Check if info endpoint is working
17-
# Given The system is in default state
18-
# When I access REST API endpoint "info" using HTTP GET method
19-
# Then The status code of the response is 200
20-
# And The body of the response has proper name "lightspeed_stack" and version "0.2.0"
21-
22-
# Scenario: Check if models endpoint is working
23-
# Given The system is in default state
24-
# When I access REST API endpoint "models" using HTTP GET method
25-
# Then The status code of the response is 200
26-
# And The body of the response contains gpt
27-
28-
29-
# Scenario: Check if models endpoint is working
30-
# Given The system is in default state
31-
# And The llama-stack connection is disrupted
32-
# When I access REST API endpoint "models" using HTTP GET method
33-
# Then The status code of the response is 503
34-
35-
# Scenario: Check if metrics endpoint is working
36-
# Given The system is in default state
37-
# When I access REST API endpoint "metrics" using HTTP GET method
38-
# Then The status code of the response is 200
39-
# And The body of the response has proper metrics
40-
41-
# Scenario: Check if metrics endpoint is working
42-
# Given The system is in default state
43-
# And The llama-stack connection is disrupted
44-
# When I access REST API endpoint "metrics" using HTTP GET method
45-
# Then The status code of the response is 500
46-
1+
Feature: Info, Models and Metrics endpoint API tests
2+
3+
4+
Background:
5+
Given The service is started locally
6+
And REST API service hostname is localhost
7+
And REST API service port is 8080
8+
And REST API service prefix is /v1
9+
10+
Scenario: Check if the OpenAPI endpoint works as expected
11+
Given The system is in default state
12+
When I access endpoint "openapi.json" using HTTP GET method
13+
Then The status code of the response is 200
14+
And The body of the response contains OpenAPI
15+
16+
Scenario: Check if info endpoint is working
17+
Given The system is in default state
18+
When I access REST API endpoint "info" using HTTP GET method
19+
Then The status code of the response is 200
20+
And The body of the response has proper name Lightspeed Core Service (LCS) and version 0.2.0
21+
And The body of the response has llama-stack version 0.2.19
22+
23+
Scenario: Check if info endpoint reports error when llama-stack connection is not working
24+
Given The system is in default state
25+
And The llama-stack connection is disrupted
26+
When I access REST API endpoint "info" using HTTP GET method
27+
Then The status code of the response is 500
28+
And The body of the response is the following
29+
"""
30+
{"detail": {"response": "Unable to connect to Llama Stack", "cause": "Connection error."}}
31+
"""
32+
33+
Scenario: Check if models endpoint is working
34+
Given The system is in default state
35+
When I access REST API endpoint "models" using HTTP GET method
36+
Then The status code of the response is 200
37+
And The body of the response for model gpt-4o-mini has proper structure
38+
39+
40+
Scenario: Check if models endpoint reports error when llama-stack connection is not working
41+
Given The system is in default state
42+
And The llama-stack connection is disrupted
43+
When I access REST API endpoint "models" using HTTP GET method
44+
Then The status code of the response is 500
45+
And The body of the response is the following
46+
"""
47+
{"detail": {"response": "Unable to connect to Llama Stack", "cause": "Connection error."}}
48+
"""
49+
50+
51+
Scenario: Check if metrics endpoint is working
52+
Given The system is in default state
53+
When I access endpoint "metrics" using HTTP GET method
54+
Then The status code of the response is 200
55+
And The body of the response contains ls_provider_model_configuration

tests/e2e/features/steps/info.py

Lines changed: 51 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -4,19 +4,54 @@
44
from behave.runner import Context
55

66

7-
@then(
8-
"The body of the response has proper name {system_prompt:w} and version {version:w}"
9-
)
10-
def check_name_version(context: Context, system_prompt: str, version: str) -> None:
11-
"""Check proper name and version number."""
12-
context.system_prompt = system_prompt
13-
context.version = version
14-
# TODO: add step implementation
15-
assert context is not None
16-
17-
18-
@then("The body of the response has proper metrics")
19-
def check_metrics(context: Context) -> None:
20-
"""Check proper metrics."""
21-
# TODO: add step implementation
22-
assert context is not None
7+
@then("The body of the response has proper name {service_name} and version {version}")
def check_name_version(context: Context, service_name: str, version: str) -> None:
    """Check that the info response carries the expected service name and version.

    Reads the JSON body of the HTTP response stored on the behave context by a
    previous "When I access ..." step and compares the ``name`` and
    ``service_version`` fields against the step parameters.
    """
    response_json = context.response.json()
    assert response_json is not None, "Response is not valid JSON"

    # Use single quotes for keys inside the f-strings: double quotes nested in
    # a double-quoted f-string are a SyntaxError before Python 3.12 (PEP 701).
    assert (
        response_json["name"] == service_name
    ), f"name is {response_json['name']}"
    assert (
        response_json["service_version"] == version
    ), f"version is {response_json['service_version']}"
17+
18+
19+
@then("The body of the response has llama-stack version {llama_version}")
def check_llama_version(context: Context, llama_version: str) -> None:
    """Check that the info response reports the expected llama-stack version.

    Compares the ``llama_stack_version`` field of the JSON response body
    against the step parameter.
    """
    response_json = context.response.json()
    assert response_json is not None, "Response is not valid JSON"

    # Single-quoted key inside the f-string keeps this valid on Python < 3.12;
    # nesting double quotes here is only legal from 3.12 on (PEP 701).
    assert (
        response_json["llama_stack_version"] == llama_version
    ), f"llama-stack version is {response_json['llama_stack_version']}"
28+
29+
30+
@then("The body of the response for model {model} has proper structure")
def check_model_structure(context: Context, model: str) -> None:
    """Check that the given model appears in the models list with proper structure.

    The step is parameterized with ``{model}``, so the search and the assertion
    messages use that parameter rather than a hard-coded "gpt-4o-mini" — the
    step now works for any OpenAI-provided model identifier.
    """
    response_json = context.response.json()
    assert response_json is not None, "Response is not valid JSON"

    assert "models" in response_json, "Response missing 'models' field"
    models = response_json["models"]
    assert len(models) > 0, "Models list should not be empty"

    # Find the first entry whose identifier mentions the requested model.
    found_model = next(
        (entry for entry in models if model in entry.get("identifier", "")),
        None,
    )
    assert found_model is not None, f"Model '{model}' not found in models list"

    assert found_model["type"] == "model", "type should be 'model'"
    assert found_model["api_model_type"] == "llm", "api_model_type should be 'llm'"
    assert found_model["model_type"] == "llm", "model_type should be 'llm'"
    assert found_model["provider_id"] == "openai", "provider_id should be 'openai'"
    assert (
        found_model["provider_resource_id"] == model
    ), f"provider_resource_id should be '{model}'"
    assert (
        found_model["identifier"] == f"openai/{model}"
    ), f"identifier should be 'openai/{model}'"

0 commit comments

Comments
 (0)