Skip to content

Commit 67699ac

Browse files
authored
Merge pull request #489 from Eisaichen/main
Update google_manifold_pipeline.py
2 parents 5aeb5a8 + eb9a3a2 commit 67699ac

File tree

1 file changed

+83
-49
lines changed

1 file changed

+83
-49
lines changed

examples/pipelines/providers/google_manifold_pipeline.py

+83-49
Original file line number | Diff line number | Diff line change
@@ -5,7 +5,7 @@
55
version: 1.3
66
license: MIT
77
description: A pipeline for generating text using Google's GenAI models in Open-WebUI.
8-
requirements: google-generativeai
8+
requirements: google-genai
99
environment_variables: GOOGLE_API_KEY
1010
"""
1111

@@ -14,8 +14,11 @@
1414

1515
from pydantic import BaseModel, Field
1616

17-
import google.generativeai as genai
18-
from google.generativeai.types import GenerationConfig
17+
from google import genai
18+
from google.genai import types
19+
from PIL import Image
20+
from io import BytesIO
21+
import base64
1922

2023

2124
class Pipeline:
@@ -24,8 +27,9 @@ class Pipeline:
2427
class Valves(BaseModel):
2528
"""Options to change from the WebUI"""
2629

27-
GOOGLE_API_KEY: str = ""
28-
USE_PERMISSIVE_SAFETY: bool = Field(default=False)
30+
GOOGLE_API_KEY: str = Field(default="",description="Google Generative AI API key")
31+
USE_PERMISSIVE_SAFETY: bool = Field(default=False,description="Use permissive safety settings")
32+
GENERATE_IMAGE: bool = Field(default=False,description="Allow image generation")
2933

3034
def __init__(self):
3135
self.type = "manifold"
@@ -34,19 +38,20 @@ def __init__(self):
3438

3539
self.valves = self.Valves(**{
3640
"GOOGLE_API_KEY": os.getenv("GOOGLE_API_KEY", ""),
37-
"USE_PERMISSIVE_SAFETY": False
41+
"USE_PERMISSIVE_SAFETY": False,
42+
"GENERATE_IMAGE": False
3843
})
3944
self.pipelines = []
4045

41-
genai.configure(api_key=self.valves.GOOGLE_API_KEY)
42-
self.update_pipelines()
46+
if self.valves.GOOGLE_API_KEY:
47+
self.update_pipelines()
4348

4449
async def on_startup(self) -> None:
4550
"""This function is called when the server is started."""
4651

4752
print(f"on_startup:{__name__}")
48-
genai.configure(api_key=self.valves.GOOGLE_API_KEY)
49-
self.update_pipelines()
53+
if self.valves.GOOGLE_API_KEY:
54+
self.update_pipelines()
5055

5156
async def on_shutdown(self) -> None:
5257
"""This function is called when the server is stopped."""
@@ -57,22 +62,23 @@ async def on_valves_updated(self) -> None:
5762
"""This function is called when the valves are updated."""
5863

5964
print(f"on_valves_updated:{__name__}")
60-
genai.configure(api_key=self.valves.GOOGLE_API_KEY)
61-
self.update_pipelines()
65+
if self.valves.GOOGLE_API_KEY:
66+
self.update_pipelines()
6267

6368
def update_pipelines(self) -> None:
6469
"""Update the available models from Google GenAI"""
6570

6671
if self.valves.GOOGLE_API_KEY:
72+
client = genai.Client(api_key=self.valves.GOOGLE_API_KEY)
6773
try:
68-
models = genai.list_models()
74+
models = client.models.list()
6975
self.pipelines = [
7076
{
7177
"id": model.name[7:], # the "models/" part messes up the URL
7278
"name": model.display_name,
7379
}
7480
for model in models
75-
if "generateContent" in model.supported_generation_methods
81+
if "generateContent" in model.supported_actions
7682
if model.name[:7] == "models/"
7783
]
7884
except Exception:
@@ -92,13 +98,13 @@ def pipe(
9298
return "Error: GOOGLE_API_KEY is not set"
9399

94100
try:
95-
genai.configure(api_key=self.valves.GOOGLE_API_KEY)
101+
client = genai.Client(api_key=self.valves.GOOGLE_API_KEY)
96102

97103
if model_id.startswith("google_genai."):
98104
model_id = model_id[12:]
99105
model_id = model_id.lstrip(".")
100106

101-
if not model_id.startswith("gemini-"):
107+
if not (model_id.startswith("gemini-") or model_id.startswith("learnlm-") or model_id.startswith("gemma-")):
102108
return f"Error: Invalid model name format: {model_id}"
103109

104110
print(f"Pipe function called for model: {model_id}")
@@ -127,50 +133,78 @@ def pipe(
127133
"role": "user" if message["role"] == "user" else "model",
128134
"parts": [{"text": message["content"]}]
129135
})
130-
131-
if "gemini-1.5" in model_id:
132-
model = genai.GenerativeModel(model_name=model_id, system_instruction=system_message)
133-
else:
134-
if system_message:
135-
contents.insert(0, {"role": "user", "parts": [{"text": f"System: {system_message}"}]})
136-
137-
model = genai.GenerativeModel(model_name=model_id)
138-
139-
generation_config = GenerationConfig(
140-
temperature=body.get("temperature", 0.7),
141-
top_p=body.get("top_p", 0.9),
142-
top_k=body.get("top_k", 40),
143-
max_output_tokens=body.get("max_tokens", 8192),
144-
stop_sequences=body.get("stop", []),
145-
)
136+
print(f"{contents}")
137+
138+
generation_config = {
139+
"temperature": body.get("temperature", 0.7),
140+
"top_p": body.get("top_p", 0.9),
141+
"top_k": body.get("top_k", 40),
142+
"max_output_tokens": body.get("max_tokens", 8192),
143+
"stop_sequences": body.get("stop", []),
144+
"response_modalities": ['Text']
145+
}
146+
147+
if self.valves.GENERATE_IMAGE and model_id.startswith("gemini-2.0-flash-exp"):
148+
generation_config["response_modalities"].append("Image")
146149

147150
if self.valves.USE_PERMISSIVE_SAFETY:
148-
safety_settings = {
149-
genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
150-
genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
151-
genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
152-
genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
153-
}
151+
safety_settings = [
152+
types.SafetySetting(category='HARM_CATEGORY_HARASSMENT', threshold='OFF'),
153+
types.SafetySetting(category='HARM_CATEGORY_HATE_SPEECH', threshold='OFF'),
154+
types.SafetySetting(category='HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold='OFF'),
155+
types.SafetySetting(category='HARM_CATEGORY_DANGEROUS_CONTENT', threshold='OFF'),
156+
types.SafetySetting(category='HARM_CATEGORY_CIVIC_INTEGRITY', threshold='OFF')
157+
]
158+
generation_config = types.GenerateContentConfig(**generation_config, safety_settings=safety_settings)
154159
else:
155-
safety_settings = body.get("safety_settings")
160+
generation_config = types.GenerateContentConfig(**generation_config)
156161

157-
response = model.generate_content(
158-
contents,
159-
generation_config=generation_config,
160-
safety_settings=safety_settings,
161-
stream=body.get("stream", False),
162-
)
162+
if system_message:
163+
contents.insert(0, {"role": "user", "parts": [{"text": f"System: {system_message}"}]})
163164

164165
if body.get("stream", False):
166+
response = client.models.generate_content_stream(
167+
model = model_id,
168+
contents = contents,
169+
config = generation_config,
170+
)
165171
return self.stream_response(response)
166172
else:
167-
return response.text
173+
response = client.models.generate_content(
174+
model = model_id,
175+
contents = contents,
176+
config = generation_config,
177+
)
178+
for part in response.candidates[0].content.parts:
179+
if part.text is not None:
180+
return part.text
181+
elif part.inline_data is not None:
182+
try:
183+
image_data = base64.b64decode(part.inline_data.data)
184+
image = Image.open(BytesIO((image_data)))
185+
content_type = part.inline_data.mime_type
186+
return "Image not supported yet."
187+
except Exception as e:
188+
print(f"Error processing image: {e}")
189+
return "Error processing image."
168190

169191
except Exception as e:
170192
print(f"Error generating content: {e}")
171-
return f"An error occurred: {str(e)}"
193+
return f"{e}"
172194

173195
def stream_response(self, response):
174196
for chunk in response:
175-
if chunk.text:
176-
yield chunk.text
197+
for candidate in chunk.candidates:
198+
if candidate.content.parts is not None:
199+
for part in candidate.content.parts:
200+
if part.text is not None:
201+
yield chunk.text
202+
elif part.inline_data is not None:
203+
try:
204+
image_data = base64.b64decode(part.inline_data.data)
205+
image = Image.open(BytesIO(image_data))
206+
content_type = part.inline_data.mime_type
207+
yield "Image not supported yet."
208+
except Exception as e:
209+
print(f"Error processing image: {e}")
210+
yield "Error processing image."

0 commit comments

Comments (0)