We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent c7b4acf · commit b267d28 — Copy full SHA for b267d28
src/diffusers/models/attention.py
@@ -959,6 +959,7 @@ def forward(
959
960
encoded_states = []
961
tokens_start = 0
962
+ # attention_mask is not used yet
963
for i in range(2):
964
# for each of the two transformers, pass the corresponding condition tokens
965
condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
@@ -967,7 +968,6 @@ def forward(
967
968
input_states,
969
encoder_hidden_states=condition_state,
970
timestep=timestep,
- attention_mask=attention_mask,
971
return_dict=False,
972
)[0]
973
encoded_states.append(encoded_state - input_states)
0 commit comments