@@ -1858,14 +1858,14 @@ def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
                 expected_slice = np.array(
                     [1.1078, 1.5803, 0.2773, -0.0589, -1.7928, -0.3665, -0.4695, -1.0727, -1.1601]
                 )
-                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
+                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-2

         test_callback_fn.has_been_called = False

         pipe = StableDiffusionPipeline.from_pretrained(
             "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
         )
-        pipe.to(torch_device)
+        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()

@@ -1904,7 +1904,7 @@ def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
                 assert latents.shape == (1, 4, 64, 96)
                 latents_slice = latents[0, -3:, -3:, -1]
                 expected_slice = np.array([0.7071, 0.7831, 0.8300, 1.8140, 1.7840, 1.9402, 1.3651, 1.6590, 1.2828])
-                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
+                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-2

         test_callback_fn.has_been_called = False