Skip to content

Commit f924e3e

Browse files
committed
Applying fixes for all multi-agent notebooks
1 parent a796ea6 commit f924e3e

File tree

3 files changed

+16
-16
lines changed

3 files changed

+16
-16
lines changed

ai/ai-starter-kit/helm-chart/ai-starter-kit/files/multi-agent-ollama.ipynb

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@
224224
" return JSONResponse(status_code=500, content={\"error\": str(e)})\n",
225225
"\n",
226226
"if __name__ == \"__main__\":\n",
227-
" uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n",
227+
" uvicorn.run(app, host=\"0.0.0.0\", port=8001)\n",
228228
"'''\n",
229229
"\n",
230230
"with open('/tmp/ollama_wrapper.py', 'w') as f:\n",
@@ -292,12 +292,12 @@
292292
"for i in range(30):\n",
293293
" time.sleep(1)\n",
294294
" try:\n",
295-
" r = requests.get(\"http://localhost:8000/v1/healthz\", timeout=1)\n",
295+
" r = requests.get(\"http://localhost:8001/v1/healthz\", timeout=1)\n",
296296
" if r.status_code == 200:\n",
297297
" print(f\"\\n API is ready! Response: {r.json()}\")\n",
298-
" print(f\"\\nOpenAI-compatible API running at: http://localhost:8000/v1\")\n",
299-
" print(f\"Health: http://localhost:8000/v1/healthz\")\n",
300-
" print(f\"Chat: http://localhost:8000/v1/chat/completions\")\n",
298+
" print(f\"\\nOpenAI-compatible API running at: http://localhost:8001/v1\")\n",
299+
" print(f\"Health: http://localhost:8001/v1/healthz\")\n",
300+
" print(f\"Chat: http://localhost:8001/v1/chat/completions\")\n",
301301
" api_ready = True\n",
302302
" break\n",
303303
" except requests.exceptions.ConnectionError:\n",
@@ -317,8 +317,8 @@
317317
" print(\"\\nLast 50 lines of logs:\")\n",
318318
" !tail -50 /tmp/wrapper.log\n",
319319
" \n",
320-
" print(\"\\nChecking if port 8000 is in use:\")\n",
321-
" !netstat -tlnp 2>/dev/null | grep 8000 || echo \"No process on port 8000\"\n",
320+
" print(\"\\nChecking if port 8001 is in use:\")\n",
321+
" !netstat -tlnp 2>/dev/null | grep 8001 || echo \"No process on port 8001\"\n",
322322
" \n",
323323
" print(\"\\nChecking Python processes:\")\n",
324324
" !ps aux | grep python | grep -v grep"
@@ -343,7 +343,7 @@
343343
"source": [
344344
"import os, time, requests, json\n",
345345
"\n",
346-
"BASE_URL = \"http://localhost:8000/v1\"\n",
346+
"BASE_URL = \"http://localhost:8001/v1\"\n",
347347
"OLLAMA_DIRECT = os.getenv(\"OLLAMA_HOST\", \"http://ai-starter-kit-ollama:11434\")\n",
348348
"\n",
349349
"try:\n",
@@ -430,7 +430,7 @@
430430
"source": [
431431
"import os, requests, json, time\n",
432432
"\n",
433-
"BASE_URL = \"http://localhost:8000/v1\" \n",
433+
"BASE_URL = \"http://localhost:8001/v1\" \n",
434434
"OLLAMA_DIRECT = os.getenv(\"OLLAMA_HOST\", \"http://ai-starter-kit-ollama:11434\")\n",
435435
"\n",
436436
"def call_llm(role_prompt, user_message, temperature=0.4, max_tokens=150, use_wrapper=True):\n",

ai/ai-starter-kit/helm-chart/ai-starter-kit/files/multi-agent-ramalama.ipynb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -225,7 +225,7 @@
225225
" return JSONResponse(status_code=500, content={\"error\": str(e)})\n",
226226
"\n",
227227
"if __name__ == \"__main__\":\n",
228-
" uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n",
228+
" uvicorn.run(app, host=\"0.0.0.0\", port=8002)\n",
229229
"'''\n",
230230
"\n",
231231
"with open('/tmp/ramalama_wrapper.py', 'w') as f:\n",
@@ -284,7 +284,7 @@
284284
"print(\"Process is running\")\n",
285285
"\n",
286286
"print(\"\\nWaiting for API to respond...\")\n",
287-
"API_URL = \"http://localhost:8000\"\n",
287+
"API_URL = \"http://localhost:8002\"\n",
288288
"api_ready = False\n",
289289
"\n",
290290
"for i in range(30):\n",
@@ -316,8 +316,8 @@
316316
" print(\"\\nLast 50 lines of logs:\")\n",
317317
" !tail -50 /tmp/ramalama_wrapper.log\n",
318318
" \n",
319-
" print(\"\\nChecking if port 8000 is in use:\")\n",
320-
" !netstat -tlnp 2>/dev/null | grep 8000 || echo \"No process on port 8000\"\n",
319+
" print(\"\\nChecking if port 8002 is in use:\")\n",
320+
" !netstat -tlnp 2>/dev/null | grep 8002 || echo \"No process on port 8002\"\n",
321321
" \n",
322322
" print(\"\\nNote: You can re-run this cell - the API might just need more time to start\")"
323323
]
@@ -342,7 +342,7 @@
342342
"import os, time, requests, json\n",
343343
"\n",
344344
"USE_WRAPPER = True\n",
345-
"BASE_URL = \"http://localhost:8000/v1\" if USE_WRAPPER else os.getenv(\"RAMALAMA_HOST\", \"http://127.0.0.1:8080\")\n",
345+
"BASE_URL = \"http://localhost:8002/v1\" if USE_WRAPPER else os.getenv(\"RAMALAMA_HOST\", \"http://127.0.0.1:8080\")\n",
346346
"\n",
347347
"def health():\n",
348348
" if USE_WRAPPER:\n",
@@ -401,7 +401,7 @@
401401
"source": [
402402
"import os, requests, json, time\n",
403403
"\n",
404-
"BASE_URL = \"http://localhost:8000/v1\" \n",
404+
"BASE_URL = \"http://localhost:8002/v1\" \n",
405405
"RAMALAMA_DIRECT = os.getenv(\"RAMALAMA_HOST\", \"http://127.0.0.1:8080\")\n",
406406
"\n",
407407
"def call_llm(role_prompt, user_message, temperature=0.4, max_tokens=150, use_wrapper=True):\n",

ai/ai-starter-kit/helm-chart/ai-starter-kit/files/multi-agent.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -584,7 +584,7 @@
584584
"mlflow.set_tracking_uri(tracking_uri)\n",
585585
"print(f\"MLflow Tracking URI: {tracking_uri}\")\n",
586586
"\n",
587-
"exp_name = os.getenv(\"MLFLOW_EXPERIMENT_NAME\", \"ray-llama-cpp\")\n",
587+
"exp_name = os.getenv(\"MLFLOW_EXPERIMENT_NAME\", \"ray-transformers\")\n",
588588
"exp = mlflow.set_experiment(exp_name)\n",
589589
"print(f\"Experiment: {exp.name} (ID: {exp.experiment_id})\")\n",
590590
"print(\"-\" * 60)\n",

0 commit comments

Comments
 (0)