diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index e8ea37970e04..be9203b4d699 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -557,6 +557,9 @@ def _sliced_attention(self, query, key, value, sequence_length, dim):
         return hidden_states
 
     def _memory_efficient_attention_xformers(self, query, key, value):
+        query = query.contiguous()
+        key = key.contiguous()
+        value = value.contiguous()
         hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=None)
         hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
         return hidden_states
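
For context: the added `.contiguous()` calls matter because the head-splitting reshapes upstream can hand xformers tensors that are views with non-standard strides, and fused kernels like `memory_efficient_attention` generally require densely packed inputs. Below is a minimal, self-contained PyTorch sketch of what `.contiguous()` changes; it does not need xformers, and the shapes are illustrative, not taken from the PR.

```python
import torch

# Illustrative shapes only: (batch * heads, seq_len, head_dim).
q = torch.randn(16, 64, 40)

# transpose() returns a view that only reinterprets strides; no data
# moves, so the tensor is no longer laid out densely in memory.
q_t = q.transpose(1, 2)
print(q_t.is_contiguous())  # False

# .contiguous() materializes a dense copy with standard row-major
# strides, which is what fused attention kernels expect. It returns
# the tensor unchanged when it is already contiguous.
q_c = q_t.contiguous()
print(q_c.is_contiguous())  # True
assert torch.equal(q_c, q_t)  # same values, different memory layout
```

The copy has a memory cost, but it is only paid when the input is actually non-contiguous, which is why calling it unconditionally at the top of `_memory_efficient_attention_xformers` is cheap in the common case.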