 )
 
 # Import specific items still needed for special cases and safety checker
-from diffusers import DiffusionPipeline, AutoPipelineForText2Image, ControlNetModel
+from diffusers import DiffusionPipeline, ControlNetModel
 from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKLWan
 from diffusers.pipelines.stable_diffusion import safety_checker
 from diffusers.utils import load_image, export_to_video
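The explicit AutoPipelineForText2Image import can be dropped because the new code path resolves pipeline classes by name through load_diffusers_pipeline. That helper (and get_available_pipelines) is called later in this diff but not defined in it; the following is only a minimal sketch of how such a getattr-based loader could work, with the function bodies being assumptions rather than the PR's actual implementation.

import diffusers

def load_diffusers_pipeline(class_name, model_id, from_single_file=False, **kwargs):
    # Sketch: resolve the pipeline class from the diffusers namespace by name,
    # so individual pipeline classes no longer need explicit imports.
    pipeline_cls = getattr(diffusers, class_name, None)
    if pipeline_cls is None:
        raise ValueError(f"Unknown diffusers pipeline class: {class_name}")
    loader = pipeline_cls.from_single_file if from_single_file else pipeline_cls.from_pretrained
    return loader(model_id, **kwargs)

def get_available_pipelines():
    # Sketch: list exported diffusers names that look like pipeline classes.
    return sorted(name for name in dir(diffusers) if "Pipeline" in name)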
@@ -297,38 +297,33 @@ def _load_pipeline(self, request, modelFile, fromSingleFile, torchType, variant)
 
         # Build kwargs for dynamic loading
         load_kwargs = {"torch_dtype": torchType}
-
+
         # Add variant if not loading from single file
         if not fromSingleFile and variant:
             load_kwargs["variant"] = variant
-
+
         # Add use_safetensors for from_pretrained
         if not fromSingleFile:
             load_kwargs["use_safetensors"] = SAFETENSORS
 
-        # Determine pipeline class name or use default
-        if pipeline_type == "" or pipeline_type == "AutoPipelineForText2Image":
-            # Default to AutoPipelineForText2Image for empty pipeline type
-            pipe = AutoPipelineForText2Image.from_pretrained(
-                request.Model,
+        # Determine pipeline class name - default to AutoPipelineForText2Image
+        effective_pipeline_type = pipeline_type if pipeline_type else "AutoPipelineForText2Image"
+
+        # Use dynamic loader for all pipelines
+        try:
+            pipe = load_diffusers_pipeline(
+                class_name=effective_pipeline_type,
+                model_id=modelFile if fromSingleFile else request.Model,
+                from_single_file=fromSingleFile,
                 **load_kwargs
             )
-        else:
-            # Use dynamic loader for all other pipelines
-            try:
-                pipe = load_diffusers_pipeline(
-                    class_name=pipeline_type,
-                    model_id=modelFile if fromSingleFile else request.Model,
-                    from_single_file=fromSingleFile,
-                    **load_kwargs
-                )
-            except Exception as e:
-                # Provide helpful error with available pipelines
-                available = get_available_pipelines()
-                raise ValueError(
-                    f"Failed to load pipeline '{pipeline_type}': {e}\n"
-                    f"Available pipelines: {', '.join(available[:30])}..."
-                ) from e
+        except Exception as e:
+            # Provide helpful error with available pipelines
+            available = get_available_pipelines()
+            raise ValueError(
+                f"Failed to load pipeline '{effective_pipeline_type}': {e}\n"
+                f"Available pipelines: {', '.join(available[:30])}..."
+            ) from e
 
         # Apply LowVRAM optimization if supported and requested
         if request.LowVRAM and hasattr(pipe, 'enable_model_cpu_offload'):
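The hunk ends inside the LowVRAM branch, so its body is not shown. In the diffusers API, enable_model_cpu_offload() is the standard call for this kind of optimization; the following is only a sketch of how the branch is typically completed, and the else path and the device variable are assumptions, not part of this diff.

# Sketch only: the body of this branch lies outside the hunk shown above.
if request.LowVRAM and hasattr(pipe, 'enable_model_cpu_offload'):
    # Keep submodules on CPU and move each to the GPU only while it runs,
    # trading throughput for a much smaller peak VRAM footprint.
    pipe.enable_model_cpu_offload()
else:
    # Assumed fallback: place the whole pipeline on the target device.
    pipe.to(device)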