@@ -6,6 +6,9 @@ on: [push, pull_request_target]
66jobs :
77 e2e_tests :
88 runs-on : ubuntu-latest
9+ strategy :
10+ matrix :
11+ environment : ["ci"]
912 env :
1013 OPENAI_API_KEY : ${{ secrets.OPENAI_API_KEY }}
1114
2326 # Don’t keep credentials when running untrusted PR code under PR_TARGET.
2427 persist-credentials : ${{ github.event_name != 'pull_request_target' }}
2528
26- - name : Debug checkout for umago/lightspeed-stack setup-metrics branch
27- run : |
28- echo "=== GitHub Event Information ==="
29- echo "Event name: ${{ github.event_name }}"
30- echo "Base repo: ${{ github.repository }}"
31- echo "Base SHA: ${{ github.sha }}"
32- echo ""
33- echo "=== PR Information ==="
34- echo "PR head repo: '${{ github.event.pull_request.head.repo.full_name }}'"
35- echo "PR head ref: '${{ github.event.pull_request.head.ref }}'"
36- echo "PR head SHA: '${{ github.event.pull_request.head.sha }}'"
37- echo "PR number: ${{ github.event.pull_request.number }}"
38- echo ""
39- echo "=== Resolved Checkout Values ==="
40- echo "Repository used: ${{ github.event.pull_request.head.repo.full_name || github.repository }}"
41- echo "Ref used: ${{ github.event.pull_request.head.ref || github.sha }}"
42- echo ""
43- echo "=== Expected for umago/lightspeed-stack:setup-metrics ==="
44- echo "Should be repo: umago/lightspeed-stack"
45- echo "Should be ref: setup-metrics"
46-
4729 - name : Verify actual git checkout result
4830 run : |
4931 echo "=== Git Status After Checkout ==="
@@ -91,161 +73,60 @@ jobs:
9173 authentication:
9274 module: "noop"
9375
94- 95- env :
96- OPENAI_API_KEY : ${{ secrets.OPENAI_API_KEY }}
97- with :
98- path : ' .'
99- isAbsolutePath : false
100- file : ' run.yaml'
101- content : |
102- version: '2'
103- image_name: simplest-llamastack-app
104- apis:
105- - agents
106- - datasetio
107- - eval
108- - files
109- - inference
110- - post_training
111- - safety
112- - scoring
113- - telemetry
114- - tool_runtime
115- - vector_io
116- benchmarks: []
117- container_image: null
118- datasets: []
119- external_providers_dir: null
120- inference_store:
121- db_path: /app-root/.llama/distributions/ollama/inference_store.db
122- type: sqlite
123- logging: null
124- metadata_store:
125- db_path: /app-root/.llama/distributions/ollama/registry.db
126- namespace: null
127- type: sqlite
128- providers:
129- files:
130- - config:
131- storage_dir: /tmp/llama-stack-files
132- metadata_store:
133- type: sqlite
134- db_path: /app-root/.llama/distributions/ollama/files_metadata.db
135- provider_id: localfs
136- provider_type: inline::localfs
137- agents:
138- - config:
139- persistence_store:
140- db_path: /app-root/.llama/distributions/ollama/agents_store.db
141- namespace: null
142- type: sqlite
143- responses_store:
144- db_path: /app-root/.llama/distributions/ollama/responses_store.db
145- type: sqlite
146- provider_id: meta-reference
147- provider_type: inline::meta-reference
148- datasetio:
149- - config:
150- kvstore:
151- db_path: /app-root/.llama/distributions/ollama/huggingface_datasetio.db
152- namespace: null
153- type: sqlite
154- provider_id: huggingface
155- provider_type: remote::huggingface
156- - config:
157- kvstore:
158- db_path: /app-root/.llama/distributions/ollama/localfs_datasetio.db
159- namespace: null
160- type: sqlite
161- provider_id: localfs
162- provider_type: inline::localfs
163- eval:
164- - config:
165- kvstore:
166- db_path: /app-root/.llama/distributions/ollama/meta_reference_eval.db
167- namespace: null
168- type: sqlite
169- provider_id: meta-reference
170- provider_type: inline::meta-reference
171- inference:
172- - provider_id: openai
173- provider_type: remote::openai
174- config:
175- api_key: ${{ env.OPENAI_API_KEY }}
176- post_training:
177- - config:
178- checkpoint_format: huggingface
179- device: cpu
180- distributed_backend: null
181- dpo_output_dir: '.'
182- provider_id: huggingface
183- provider_type: inline::huggingface-gpu
184- safety:
185- - config:
186- excluded_categories: []
187- provider_id: llama-guard
188- provider_type: inline::llama-guard
189- scoring:
190- - config: {}
191- provider_id: basic
192- provider_type: inline::basic
193- - config: {}
194- provider_id: llm-as-judge
195- provider_type: inline::llm-as-judge
196- - config:
197- openai_api_key: '******'
198- provider_id: braintrust
199- provider_type: inline::braintrust
200- telemetry:
201- - config:
202- service_name: 'lightspeed-stack'
203- sinks: sqlite
204- sqlite_db_path: /app-root/.llama/distributions/ollama/trace_store.db
205- provider_id: meta-reference
206- provider_type: inline::meta-reference
207- tool_runtime:
208- - provider_id: model-context-protocol
209- provider_type: remote::model-context-protocol
210- config: {}
211- - provider_id: rag-runtime
212- provider_type: inline::rag-runtime
213- config: {}
214- vector_io:
215- - config:
216- kvstore:
217- db_path: /app-root/.llama/distributions/ollama/faiss_store.db
218- namespace: null
219- type: sqlite
220- provider_id: faiss
221- provider_type: inline::faiss
222- scoring_fns: []
223- server:
224- auth: null
225- host: null
226- port: 8321
227- quota: null
228- tls_cafile: null
229- tls_certfile: null
230- tls_keyfile: null
231- shields: []
232- vector_dbs: []
233-
234- models:
235- - model_id: gpt-4o-mini
236- provider_id: openai
237- model_type: llm
238- provider_model_id: gpt-4o-mini
239-
240- tool_groups:
241- - toolgroup_id: builtin::rag
242- provider_id: rag-runtime
76+ - name : Select and configure run.yaml
77+ env :
78+ CONFIG_ENVIRONMENT : ${{ matrix.environment || 'ci' }}
79+ run : |
80+ CONFIGS_DIR="tests/e2e/configs"
81+ ENVIRONMENT="$CONFIG_ENVIRONMENT"
82+
83+ echo "Looking for configurations in $CONFIGS_DIR/"
84+
85+ # List available configurations
86+ if [ -d "$CONFIGS_DIR" ]; then
87+ echo "Available configurations:"
88+ ls -la "$CONFIGS_DIR"/*.yaml 2>/dev/null || echo "No YAML files found in $CONFIGS_DIR/"
89+ else
90+ echo "Configs directory '$CONFIGS_DIR' not found!"
91+ exit 1
92+ fi
93+
94+ # Determine which config file to use
95+ CONFIG_FILE="$CONFIGS_DIR/run-$ENVIRONMENT.yaml"
96+
97+ echo "Looking for: $CONFIG_FILE"
98+
99+ if [ -f "$CONFIG_FILE" ]; then
100+ echo "Found config for environment: $ENVIRONMENT"
101+ cp "$CONFIG_FILE" run-ci.yaml
102+ else
103+ echo "Configuration file not found: $CONFIG_FILE"
104+ echo "Available files in $CONFIGS_DIR:"
105+ ls -la "$CONFIGS_DIR/"
106+ exit 1
107+ fi
108+
109+ # Update paths for container environment (relative -> absolute)
110+ sed -i 's|db_path: \.llama/distributions|db_path: /app-root/.llama/distributions|g' run-ci.yaml
111+ sed -i 's|db_path: tmp/|db_path: /app-root/.llama/distributions/|g' run-ci.yaml
112+
113+ # Set OpenAI API key for container
114+ sed -i "s|api_key: \${env\.OPENAI_API_KEY}|api_key: $OPENAI_API_KEY|g" run-ci.yaml
115+
116+ echo "Successfully configured for environment: $ENVIRONMENT"
117+ echo "Using configuration: $(basename "$CONFIG_FILE")"
243118
244- - name : list files
119+ - name : Show final configuration
245120 run : |
246- ls
247- cat lightspeed-stack.yaml
248- cat run.yaml
121+ echo "=== Configuration Summary ==="
122+ echo "Source config: tests/e2e/configs/run-${{ matrix.environment || 'ci' }}.yaml"
123+ echo "Final file: run-ci.yaml"
124+ echo "Container mount: /app-root/run.yaml"
125+ echo ""
126+ echo "=== Final Configuration Preview ==="
127+ echo "Providers: $(grep -c "provider_id:" run-ci.yaml)"
128+ echo "Models: $(grep -c "model_id:" run-ci.yaml)"
129+ echo ""
249130
250131 - name : Run service manually
251132 env :