
Commit 32a5d70

Support attn2==None for xformers (huggingface#1759)
1 parent 429e544 commit 32a5d70

File tree

1 file changed (+2 -1 lines changed)


src/diffusers/models/attention.py

Lines changed: 2 additions & 1 deletion
@@ -473,7 +473,8 @@ def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_atten
             except Exception as e:
                 raise e
         self.attn1._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
-        self.attn2._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
+        if self.attn2 is not None:
+            self.attn2._use_memory_efficient_attention_xformers = use_memory_efficient_attention_xformers
 
     def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, attention_mask=None):
         # 1. Self-Attention
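
The change itself is a one-line None check. For context, below is a minimal, self-contained sketch of the situation the guard handles; the _Attn and _Block classes are hypothetical stand-ins, not the actual diffusers BasicTransformerBlock. The point is that a block built without a cross-attention layer leaves attn2 as None, so setting the flag on it unconditionally would raise an AttributeError.

# Hypothetical sketch, not the real diffusers classes.
class _Attn:
    def __init__(self):
        self._use_memory_efficient_attention_xformers = False

class _Block:
    def __init__(self, with_cross_attention: bool):
        self.attn1 = _Attn()
        # attn2 only exists when cross-attention is requested; otherwise it stays None.
        self.attn2 = _Attn() if with_cross_attention else None

    def set_use_memory_efficient_attention_xformers(self, use_xformers: bool):
        self.attn1._use_memory_efficient_attention_xformers = use_xformers
        # Before the commit, the attribute was assigned unconditionally, which fails
        # with AttributeError when attn2 is None. The fix adds this None check.
        if self.attn2 is not None:
            self.attn2._use_memory_efficient_attention_xformers = use_xformers

# Usage: toggling xformers on a block without cross-attention no longer errors.
block = _Block(with_cross_attention=False)
block.set_use_memory_efficient_attention_xformers(True)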
