[Tests] Speed up some fast pipeline tests #7477
Changes from all commits
```diff
@@ -131,6 +131,42 @@ def test_motion_unet_loading(self):
     def test_attention_slicing_forward_pass(self):
         pass
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
```
Comment on lines +134 to +136 (Member, PR author):

We make use of `expected_pipe_slice` only when the device is `"cpu"`. For accelerators, the tests will run much faster anyway (even on MPS). So, I think this is the way to go here. But I welcome any other ideas too.
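For context on how the precomputed slice gets used, here is a minimal sketch of the core logic behind a `test_ip_adapter_single(expected_pipe_slice=...)`, in the style of the diffusers tester mixins. This is illustrative only, not the library's exact implementation: `make_reference_image` is a hypothetical stand-in for the baseline inference, and only the shape of the idea is taken from this PR — when a CPU reference slice is supplied, the expensive reference pass is skipped.

```python
import numpy as np

def check_ip_adapter_single(pipe, inputs, expected_pipe_slice=None):
    # Sketch of the mixin test's core logic (illustrative, not the
    # library's exact code).
    image = pipe(**inputs).images[0]
    # Compare only a tiny corner of the output: the last 3x3 patch of
    # the last channel, flattened to 9 values.
    image_slice = image[-3:, -3:, -1].flatten()

    if expected_pipe_slice is None:
        # Expensive path (accelerators): produce a fresh reference image
        # and slice it the same way. `make_reference_image` is a
        # hypothetical helper standing in for the baseline inference.
        reference = make_reference_image(pipe, inputs)
        expected_pipe_slice = reference[-3:, -3:, -1].flatten()

    # Cheap path (CPU): one forward pass compared to hard-coded numbers.
    max_diff = np.abs(image_slice - expected_pipe_slice).max()
    assert max_diff < 1e-4, "IP-Adapter output drifted from the reference slice"
```

The subclasses in this diff do the device gating (`if torch_device == "cpu":`) before delegating, so accelerator runs always take the reference-inference path.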
The diff resumes:

```diff
+            expected_pipe_slice = np.array(
+                [
+                    0.5541, 0.5802, 0.5074, 0.4583, 0.4729, 0.5374, 0.4051, 0.4495, 0.4480,
+                    0.5292, 0.6322, 0.6265, 0.5455, 0.4771, 0.5795, 0.5845, 0.4172, 0.6066,
+                    0.6535, 0.4113, 0.6833, 0.5736, 0.3589, 0.5730, 0.4205, 0.3786, 0.5323,
+                ]
+            )
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
+
     def test_inference_batch_single_identical(
         self,
         batch_size=2,
@@ -299,6 +335,9 @@ def test_xformers_attention_forwardGenerator_pass(self):
         max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
         self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")
 
+    def test_vae_slicing(self):
+        return super().test_vae_slicing(image_count=2)
+
 
 @slow
 @require_torch_gpu
```
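The `test_vae_slicing(image_count=2)` override above is itself a speed-up: the base test presumably renders `image_count` images twice — once with batched VAE decoding, once with sliced decoding — so a smaller count directly cuts the work. A hedged sketch of that pattern, with `get_dummy_components`/`get_dummy_inputs` as assumed mixin-style helpers (`enable_vae_slicing` is a real `DiffusionPipeline` method):

```python
import numpy as np

def test_vae_slicing(self, image_count=4):
    # Sketch (not the library's exact code): decode `image_count` images
    # with and without sliced VAE decoding and check the outputs agree.
    components = self.get_dummy_components()            # assumed helper
    pipe = self.pipeline_class(**components).to(torch_device)

    inputs = self.get_dummy_inputs(torch_device)
    inputs["prompt"] = [inputs["prompt"]] * image_count
    output_batched = pipe(**inputs).images              # whole batch through the VAE

    pipe.enable_vae_slicing()                           # decode one image at a time
    inputs = self.get_dummy_inputs(torch_device)        # fresh seeded generator
    inputs["prompt"] = [inputs["prompt"]] * image_count
    output_sliced = pipe(**inputs).images

    max_diff = np.abs(np.array(output_batched) - np.array(output_sliced)).max()
    assert max_diff < 3e-3, "VAE slicing should not change the decoded images"
```

Passing `image_count=2` halves both passes, which is the whole optimization for this fast test.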
A second test file gets the same treatment:

```diff
@@ -191,6 +191,15 @@ def get_dummy_inputs(self, device, seed=0):
     def test_attention_slicing_forward_pass(self):
         return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
 
+    def test_ip_adapter_single(self, from_ssd1b=False, expected_pipe_slice=None):
+        if not from_ssd1b:
+            expected_pipe_slice = None
+            if torch_device == "cpu":
+                expected_pipe_slice = np.array(
+                    [0.7331, 0.5907, 0.5667, 0.6029, 0.5679, 0.5968, 0.4033, 0.4761, 0.5090]
+                )
+        return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
```
Collaborator: Should we pass `device="cpu"` to `test_ip_adapter_single` when using the slice? Would it help precision?

Member (PR author): See: #7477 (comment).
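Background on the precision question: `torch.Generator` streams are device-dependent — the same seed produces different random numbers on CPU versus CUDA — so hard-coded reference slices are only portable when the noise comes from a CPU generator. This is also why the diff below pins `device = "cpu"` "to ensure determinism". A tiny self-contained illustration:

```python
import torch

# The same seed gives different random streams on CPU and CUDA, so
# reference values baked into a test are only stable across machines
# if the noise is drawn from a CPU generator.
gen = torch.Generator(device="cpu").manual_seed(0)
sample_a = torch.randn(4, generator=gen)

gen.manual_seed(0)
sample_b = torch.randn(4, generator=gen)

# Reproducible across runs and machines (CPU generator, fixed seed):
assert torch.equal(sample_a, sample_b)
```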
The diff continues:

```diff
     @unittest.skipIf(
         torch_device != "cuda" or not is_xformers_available(),
         reason="XFormers attention is only available with CUDA and `xformers` installed",
@@ -1042,6 +1051,12 @@ def test_controlnet_sdxl_guess(self):
         # make sure that it's equal
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4
 
+    def test_ip_adapter_single(self):
+        expected_pipe_slice = None
+        if torch_device == "cpu":
+            expected_pipe_slice = np.array([0.6832, 0.5703, 0.5460, 0.6300, 0.5856, 0.6034, 0.4494, 0.4613, 0.5036])
+        return super().test_ip_adapter_single(from_ssd1b=True, expected_pipe_slice=expected_pipe_slice)
+
     def test_controlnet_sdxl_lcm(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
```
Review reply on the added keyword arguments: Convenience options. They are not harmful.