@@ -118,17 +118,37 @@ services:
118118 openwebui :
119119 image : ghcr.io/open-webui/open-webui:main
120120 container_name : openwebui
121- # Expose externally only if you want to access Open WebUI directly
122121 ports :
123122 - " 3001:8080"
124123 environment :
125124 # Add any Open WebUI env here if needed (auth, providers, etc.)
126125 - WEBUI_NAME=Open WebUI
126+ # Route Open WebUI's OpenAI-compatible calls through Pipelines by default
127+ - OPENAI_API_BASE_URL=http://pipelines:9099
128+ - OPENAI_API_KEY=0p3n-w3bu!
127129 volumes :
128130 - openwebui-data:/app/backend/data
129131 networks :
130132 - semantic-network
131133
134+ # Open WebUI Pipelines server (executes Python pipelines)
135+ pipelines :
136+ image : ghcr.io/open-webui/pipelines:main
137+ container_name : pipelines
138+ # Optional: expose on host for debugging
139+ # ports:
140+ # - "9099:9099"
141+ environment :
142+ # Disable Python stdout/stderr buffering so pipeline logs appear immediately
143+ - PYTHONUNBUFFERED=1
144+ volumes :
145+ # Persistent pipelines storage (auto-loaded on start)
146+ - openwebui-pipelines:/app/pipelines
147+ # Mount our vLLM Semantic Router pipeline
148+ - ../../tools/openwebui-pipe/vllm_semantic_router_pipe.py:/app/pipelines/vllm_semantic_router_pipe.py:ro
149+ networks :
150+ - semantic-network
151+
132152 # LLM Katan service for testing
133153 llm-katan :
134154 build :
@@ -172,6 +192,8 @@ services:
172192 condition : service_started
173193 openwebui :
174194 condition : service_started
195+ pipelines :
196+ condition : service_started
175197 healthcheck :
176198 test : ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8700/healthz"]
177199 interval : 10s
@@ -192,3 +214,4 @@ volumes:
192214 prometheus-data :
193215 grafana-data :
194216 openwebui-data :
217+ openwebui-pipelines :
0 commit comments