diff --git a/tests/pipelines/amused/test_amused.py b/tests/pipelines/amused/test_amused.py
index f03751e2f830..9a9e2551d642 100644
--- a/tests/pipelines/amused/test_amused.py
+++ b/tests/pipelines/amused/test_amused.py
@@ -38,17 +38,17 @@ class AmusedPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def get_dummy_components(self):
         torch.manual_seed(0)
         transformer = UVit2DModel(
-            hidden_size=32,
+            hidden_size=8,
             use_bias=False,
             hidden_dropout=0.0,
-            cond_embed_dim=32,
+            cond_embed_dim=8,
             micro_cond_encode_dim=2,
             micro_cond_embed_dim=10,
-            encoder_hidden_size=32,
+            encoder_hidden_size=8,
             vocab_size=32,
-            codebook_size=32,
-            in_channels=32,
-            block_out_channels=32,
+            codebook_size=8,
+            in_channels=8,
+            block_out_channels=8,
             num_res_blocks=1,
             downsample=True,
             upsample=True,
@@ -56,7 +56,7 @@ def get_dummy_components(self):
             num_hidden_layers=1,
             num_attention_heads=1,
             attention_dropout=0.0,
-            intermediate_size=32,
+            intermediate_size=8,
             layer_norm_eps=1e-06,
             ln_elementwise_affine=True,
         )
@@ -64,17 +64,17 @@ def get_dummy_components(self):
         torch.manual_seed(0)
         vqvae = VQModel(
             act_fn="silu",
-            block_out_channels=[32],
+            block_out_channels=[8],
             down_block_types=[
                 "DownEncoderBlock2D",
             ],
             in_channels=3,
-            latent_channels=32,
-            layers_per_block=2,
-            norm_num_groups=32,
-            num_vq_embeddings=32,
+            latent_channels=8,
+            layers_per_block=1,
+            norm_num_groups=8,
+            num_vq_embeddings=8,
             out_channels=3,
-            sample_size=32,
+            sample_size=8,
             up_block_types=[
                 "UpDecoderBlock2D",
             ],
@@ -85,14 +85,14 @@ def get_dummy_components(self):
         text_encoder_config = CLIPTextConfig(
             bos_token_id=0,
             eos_token_id=2,
-            hidden_size=32,
-            intermediate_size=64,
+            hidden_size=8,
+            intermediate_size=8,
             layer_norm_eps=1e-05,
-            num_attention_heads=8,
-            num_hidden_layers=3,
+            num_attention_heads=1,
+            num_hidden_layers=1,
             pad_token_id=1,
             vocab_size=1000,
-            projection_dim=32,
+            projection_dim=8,
         )
         text_encoder = CLIPTextModelWithProjection(text_encoder_config)
         tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
diff --git a/tests/pipelines/amused/test_amused_img2img.py b/tests/pipelines/amused/test_amused_img2img.py
index efbca1f437a4..24bc34d330e9 100644
--- a/tests/pipelines/amused/test_amused_img2img.py
+++ b/tests/pipelines/amused/test_amused_img2img.py
@@ -42,17 +42,17 @@ class AmusedImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def get_dummy_components(self):
         torch.manual_seed(0)
         transformer = UVit2DModel(
-            hidden_size=32,
+            hidden_size=8,
             use_bias=False,
             hidden_dropout=0.0,
-            cond_embed_dim=32,
+            cond_embed_dim=8,
             micro_cond_encode_dim=2,
             micro_cond_embed_dim=10,
-            encoder_hidden_size=32,
+            encoder_hidden_size=8,
             vocab_size=32,
-            codebook_size=32,
-            in_channels=32,
-            block_out_channels=32,
+            codebook_size=8,
+            in_channels=8,
+            block_out_channels=8,
             num_res_blocks=1,
             downsample=True,
             upsample=True,
@@ -60,7 +60,7 @@ def get_dummy_components(self):
             num_hidden_layers=1,
             num_attention_heads=1,
             attention_dropout=0.0,
-            intermediate_size=32,
+            intermediate_size=8,
             layer_norm_eps=1e-06,
             ln_elementwise_affine=True,
         )
@@ -68,17 +68,17 @@ def get_dummy_components(self):
         torch.manual_seed(0)
         vqvae = VQModel(
             act_fn="silu",
-            block_out_channels=[32],
+            block_out_channels=[8],
             down_block_types=[
                 "DownEncoderBlock2D",
             ],
             in_channels=3,
-            latent_channels=32,
-            layers_per_block=2,
-            norm_num_groups=32,
-            num_vq_embeddings=32,
+            latent_channels=8,
+            layers_per_block=1,
+            norm_num_groups=8,
+            num_vq_embeddings=32,  # reducing this to 16 or 8 -> RuntimeError: "cdist_cuda" not implemented for 'Half'
             out_channels=3,
-            sample_size=32,
+            sample_size=8,
             up_block_types=[
                 "UpDecoderBlock2D",
             ],
@@ -89,14 +89,14 @@ def get_dummy_components(self):
         text_encoder_config = CLIPTextConfig(
             bos_token_id=0,
             eos_token_id=2,
-            hidden_size=32,
-            intermediate_size=64,
+            hidden_size=8,
+            intermediate_size=8,
             layer_norm_eps=1e-05,
-            num_attention_heads=8,
-            num_hidden_layers=3,
+            num_attention_heads=1,
+            num_hidden_layers=1,
             pad_token_id=1,
             vocab_size=1000,
-            projection_dim=32,
+            projection_dim=8,
         )
         text_encoder = CLIPTextModelWithProjection(text_encoder_config)
         tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
diff --git a/tests/pipelines/amused/test_amused_inpaint.py b/tests/pipelines/amused/test_amused_inpaint.py
index d397f8d81297..d0c1ed09c706 100644
--- a/tests/pipelines/amused/test_amused_inpaint.py
+++ b/tests/pipelines/amused/test_amused_inpaint.py
@@ -42,17 +42,17 @@ class AmusedInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def get_dummy_components(self):
         torch.manual_seed(0)
         transformer = UVit2DModel(
-            hidden_size=32,
+            hidden_size=8,
             use_bias=False,
             hidden_dropout=0.0,
-            cond_embed_dim=32,
+            cond_embed_dim=8,
             micro_cond_encode_dim=2,
             micro_cond_embed_dim=10,
-            encoder_hidden_size=32,
+            encoder_hidden_size=8,
             vocab_size=32,
-            codebook_size=32,
-            in_channels=32,
-            block_out_channels=32,
+            codebook_size=32,  # codebook size needs to be consistent with num_vq_embeddings for inpaint tests
+            in_channels=8,
+            block_out_channels=8,
             num_res_blocks=1,
             downsample=True,
             upsample=True,
@@ -60,7 +60,7 @@ def get_dummy_components(self):
             num_hidden_layers=1,
             num_attention_heads=1,
             attention_dropout=0.0,
-            intermediate_size=32,
+            intermediate_size=8,
             layer_norm_eps=1e-06,
             ln_elementwise_affine=True,
         )
@@ -68,17 +68,17 @@ def get_dummy_components(self):
         torch.manual_seed(0)
         vqvae = VQModel(
             act_fn="silu",
-            block_out_channels=[32],
+            block_out_channels=[8],
             down_block_types=[
                 "DownEncoderBlock2D",
             ],
             in_channels=3,
-            latent_channels=32,
-            layers_per_block=2,
-            norm_num_groups=32,
-            num_vq_embeddings=32,
+            latent_channels=8,
+            layers_per_block=1,
+            norm_num_groups=8,
+            num_vq_embeddings=32,  # reducing this to 16 or 8 -> RuntimeError: "cdist_cuda" not implemented for 'Half'
             out_channels=3,
-            sample_size=32,
+            sample_size=8,
             up_block_types=[
                 "UpDecoderBlock2D",
             ],
@@ -89,14 +89,14 @@ def get_dummy_components(self):
         text_encoder_config = CLIPTextConfig(
             bos_token_id=0,
             eos_token_id=2,
-            hidden_size=32,
-            intermediate_size=64,
+            hidden_size=8,
+            intermediate_size=8,
             layer_norm_eps=1e-05,
-            num_attention_heads=8,
-            num_hidden_layers=3,
+            num_attention_heads=1,
+            num_hidden_layers=1,
             pad_token_id=1,
             vocab_size=1000,
-            projection_dim=32,
+            projection_dim=8,
         )
         text_encoder = CLIPTextModelWithProjection(text_encoder_config)
         tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")