diff --git a/tests/test_models_vae.py b/tests/test_models_vae.py
index 9fb7e8ea3bb7..49610f848364 100644
--- a/tests/test_models_vae.py
+++ b/tests/test_models_vae.py
@@ -106,7 +106,21 @@ def test_output_pretrained(self):
 
         # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
         # the expected output slices are not the same for CPU and GPU.
-        if torch_device in ("mps", "cpu"):
+        if torch_device == "mps":
+            expected_output_slice = torch.tensor(
+                [
+                    -4.0078e-01,
+                    -3.8323e-04,
+                    -1.2681e-01,
+                    -1.1462e-01,
+                    2.0095e-01,
+                    1.0893e-01,
+                    -8.8247e-02,
+                    -3.0361e-01,
+                    -9.8644e-03,
+                ]
+            )
+        elif torch_device == "cpu":
             expected_output_slice = torch.tensor(
                 [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
             )
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index bbda797bf694..6123ab5f6712 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -1418,7 +1418,13 @@ def test_components(self):
         text2img = StableDiffusionPipeline(**inpaint.components).to(torch_device)
 
         prompt = "A painting of a squirrel eating a burger"
-        generator = torch.Generator(device=torch_device).manual_seed(0)
+
+        # Device type MPS is not supported for torch.Generator() api.
+        if torch_device == "mps":
+            generator = torch.manual_seed(0)
+        else:
+            generator = torch.Generator(device=torch_device).manual_seed(0)
+
        image_inpaint = inpaint(
            [prompt],
            generator=generator,
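
The `test_components` hunk works around the fact that `torch.Generator(device="mps")` was not supported, so on MPS the test falls back to `torch.manual_seed`, which seeds and returns the default CPU generator. A minimal sketch of that pattern as a reusable helper (the `seeded_generator` name is hypothetical and not part of the diff):

```python
import torch


def seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    # Hypothetical helper mirroring the workaround above: torch.Generator()
    # cannot be constructed for the "mps" device, so seed the default CPU
    # generator instead; torch.manual_seed() returns that generator.
    if device == "mps":
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


# Usage mirrors the test: pass the seeded generator into the sampling call.
generator = seeded_generator("cpu", seed=0)
noise = torch.randn(1, 4, 64, 64, generator=generator)
```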