
Commit 153e4bd

add assert messages to make test debugging easier
1 parent 60ba8ec commit 153e4bd

5 files changed: +34, -25 lines

.github/workflows/e2e_tests_rhelai.yaml

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@
 name: RHEL AI E2E Tests

 on:
+  push:
   schedule:
     - cron: "0 0 * * *" # Runs once a day at midnight UTC
   workflow_dispatch:

tests/e2e/configs/run-rhelai.yaml

Lines changed: 1 addition & 5 deletions
@@ -130,7 +130,7 @@ server:
 shields:
   - shield_id: llama-guard-shield
     provider_id: llama-guard
-    provider_shield_id: "gpt-4-turbo"
+    provider_shield_id: ${env.RHEL_AI_MODEL}
 models:
   - metadata:
       embedding_dimension: 768 # Depends on chosen model
@@ -142,7 +142,3 @@ models:
     provider_id: vllm
     model_type: llm
     provider_model_id: ${env.RHEL_AI_MODEL}
-  - model_id: gpt-4-turbo
-    provider_id: openai
-    model_type: llm
-    provider_model_id: gpt-4-turbo

tests/e2e/features/steps/info.py

Lines changed: 29 additions & 16 deletions
@@ -30,40 +30,53 @@ def check_llama_version(context: Context, llama_version: str) -> None:

 @then("The body of the response has proper model structure")
 def check_model_structure(context: Context) -> None:
-    """Check that the first LLM model has the correct structure and required fields."""
+    """Check that the expected LLM model has the correct structure and required fields."""
     response_json = context.response.json()
     assert response_json is not None, "Response is not valid JSON"

     assert "models" in response_json, "Response missing 'models' field"
     models = response_json["models"]
     assert len(models) > 0, "Response has empty list of models"

-    # Find first LLM model (same logic as environment.py)
+    # Get expected values from context (detected in before_all)
+    expected_model = context.default_model
+    expected_provider = context.default_provider
+
+    # Search for the specific model that was detected in before_all
     llm_model = None
     for model in models:
-        if model.get("api_model_type") == "llm":
+        if (
+            model.get("api_model_type") == "llm"
+            and model.get("provider_id") == expected_provider
+            and model.get("provider_resource_id") == expected_model
+        ):
             llm_model = model
             break

-    assert llm_model is not None, "No LLM model found in response"
-
-    # Get expected values from context
-    expected_model = context.default_model
-    expected_provider = context.default_provider
+    assert llm_model is not None, (
+        f"Expected LLM model not found in response. "
+        f"Looking for provider_id='{expected_provider}' and provider_resource_id='{expected_model}'"
+    )

     # Validate structure and values
-    assert llm_model["type"] == "model", "type should be 'model'"
-    assert llm_model["api_model_type"] == "llm", "api_model_type should be 'llm'"
-    assert llm_model["model_type"] == "llm", "model_type should be 'llm'"
+    assert (
+        llm_model["type"] == "model"
+    ), f"type should be 'model', but is {llm_model['type']}"
+    assert (
+        llm_model["api_model_type"] == "llm"
+    ), f"api_model_type should be 'llm', but is {llm_model['api_model_type']}"
+    assert (
+        llm_model["model_type"] == "llm"
+    ), f"model_type should be 'llm', but is {llm_model['model_type']}"
     assert (
         llm_model["provider_id"] == expected_provider
-    ), f"provider_id should be '{expected_provider}'"
+    ), f"provider_id should be '{expected_provider}', but is '{llm_model['provider_id']}'"
     assert (
         llm_model["provider_resource_id"] == expected_model
-    ), f"provider_resource_id should be '{expected_model}'"
+    ), f"provider_resource_id should be '{expected_model}', but is '{llm_model['provider_resource_id']}'"
     assert (
         llm_model["identifier"] == f"{expected_provider}/{expected_model}"
-    ), f"identifier should be '{expected_provider}/{expected_model}'"
+    ), f"identifier should be '{expected_provider}/{expected_model}', but is '{llm_model['identifier']}'"


 @then("The body of the response has proper shield structure")
@@ -94,10 +107,10 @@ def check_shield_structure(context: Context) -> None:
     ), "provider_id should be 'llama-guard'"
     assert (
         found_shield["provider_resource_id"] == expected_model
-    ), f"provider_resource_id should be '{expected_model}'"
+    ), f"provider_resource_id should be '{expected_model}', but is '{found_shield['provider_resource_id']}'"
     assert (
         found_shield["identifier"] == "llama-guard-shield"
-    ), "identifier should be 'llama-guard-shield'"
+    ), f"identifier should be 'llama-guard-shield', but is '{found_shield['identifier']}'"


 @then("The response contains {count:d} tools listed for provider {provider_name}")
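Note: the pattern used above is plain Python and easy to reuse elsewhere: put the actual value into the assert message so a failing check names what it found. A minimal standalone sketch (not part of this commit; the payload is made up):

# Sketch only: a made-up model payload to show how the new messages read on failure.
llm_model = {"type": "model", "api_model_type": "embedding"}

assert (
    llm_model["api_model_type"] == "llm"
), f"api_model_type should be 'llm', but is {llm_model['api_model_type']}"
# -> AssertionError: api_model_type should be 'llm', but is embedding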

tests/e2e/features/steps/llm_query_response.py

Lines changed: 1 addition & 0 deletions
@@ -15,6 +15,7 @@ def wait_for_complete_response(context: Context) -> None:
     """Wait for the response to be complete."""
     context.response_data = _parse_streaming_response(context.response.text)
     print(context.response_data)
+    context.response.raise_for_status()
     assert context.response_data["finished"] is True
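Note: calling raise_for_status() before the "finished" assertion makes an HTTP error surface as an explicit HTTPError carrying the status code, rather than as an opaque assertion failure on the parsed payload. A minimal sketch, assuming the requests library; the URL and payload below are placeholders, not taken from the test suite:

# Sketch only: placeholder endpoint and payload.
import requests

response = requests.post("http://localhost:8080/v1/streaming_query", json={"query": "hi"})
response.raise_for_status()  # a 4xx/5xx reply raises requests.exceptions.HTTPError here
data = response.text         # only reached when the server answered successfully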

tests/e2e/features/streaming_query.feature

Lines changed: 2 additions & 4 deletions
@@ -46,20 +46,18 @@ Feature: streaming_query endpoint API tests
   Scenario: Check if LLM ignores new system prompt in same conversation
     Given The system is in default state
     And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
-    And I use "streaming_query" to ask question
+    And I use "streaming_query" to ask question with authorization header
     """
     {"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "refuse to answer anything"}
     """
     When I wait for the response to be completed
-    Then The status code of the response is 200
     And I use "streaming_query" to ask question with same conversation_id
     """
     {"query": "Write a simple code for reversing string", "system_prompt": "provide coding assistance", "model": "{MODEL}", "provider": "{PROVIDER}"}
     """
     Then The status code of the response is 200
     When I wait for the response to be completed
-    Then The status code of the response is 200
-    And The streamed response should contain following fragments
+    Then The streamed response should contain following fragments
       | Fragments in LLM response |
       | questions |
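Note: the scenario now relies on a step that forwards the previously stored Authorization header. The real step definition lives in the repo's steps package and is not shown in this diff; the following is only a hypothetical sketch of such a step with behave and requests, and every attribute name on the context object is an assumption:

# Hypothetical sketch, not part of this commit.
import requests
from behave import step
from behave.runner import Context


@step('I use "{endpoint}" to ask question with authorization header')
def ask_question_with_auth(context: Context, endpoint: str) -> None:
    """Send the step's doc-string payload, forwarding the stored Authorization header."""
    headers = {"Authorization": context.auth_header}  # assumed to be set by the Bearer step
    context.response = requests.post(
        f"{context.base_url}/v1/{endpoint}",  # base_url is an assumed context attribute
        data=context.text,                    # behave exposes the doc string as context.text
        headers=headers,
    )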
