1 file changed: +0 −4 lines
```diff
@@ -222,9 +222,6 @@ def set_use_memory_efficient_attention_xformers(
                 )
                 processor.load_state_dict(self.processor.state_dict())
                 processor.to(self.processor.to_q_lora.up.weight.device)
-                print(
-                    f"is_lora is set to {is_lora}, type: LoRAXFormersAttnProcessor: {isinstance(processor, LoRAXFormersAttnProcessor)}"
-                )
             elif is_custom_diffusion:
                 processor = CustomDiffusionXFormersAttnProcessor(
                     train_kv=self.processor.train_kv,
```
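The deleted `print()` was ad-hoc debug output in library code. If the diagnostic is still worth keeping, here is a minimal sketch of a logger-based alternative using the stdlib `logging` module (diffusers wraps the same machinery in `diffusers.utils.logging`); the helper name `log_processor_choice` is hypothetical, not part of the library:

```python
import logging

logger = logging.getLogger(__name__)  # diffusers uses diffusers.utils.logging.get_logger

def log_processor_choice(processor: object, is_lora: bool) -> None:
    # Same information as the deleted print(), but emitted at DEBUG level so
    # it stays silent unless the caller explicitly enables verbose logging.
    logger.debug(
        "is_lora=%s, processor type=%s", is_lora, type(processor).__name__
    )
```

Debug-level logging keeps the diagnostic available on demand without writing to every user's stdout, which is why removing the raw `print()` calls here is the right call.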
```diff
@@ -262,7 +259,6 @@ def set_use_memory_efficient_attention_xformers(
             # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
             # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
             # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
-            print("Still defaulting to: AttnProcessor2_0 :O")
             processor = (
                 AttnProcessor2_0()
                 if hasattr(F, "scaled_dot_product_attention") and self.scale_qk
```
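The comment block in this hunk explains the default-processor choice: prefer `AttnProcessor2_0` when torch 2.x provides `F.scaled_dot_product_attention` and the layer keeps its default scale. A minimal standalone sketch of that selection logic, assuming the `diffusers.models.attention_processor` import path of current diffusers releases; the free-standing `scale_qk` parameter stands in for the instance attribute checked in the real method:

```python
import torch.nn.functional as F
from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0

def default_attn_processor(scale_qk: bool = True):
    # torch >= 2.0 exposes F.scaled_dot_product_attention, which dispatches to
    # native Flash/memory-efficient attention kernels. It is only chosen when
    # the layer keeps the default scale, mirroring the scale_qk check (and the
    # TODO about dropping it once torch 2.1's `scale` argument can be relied on).
    if hasattr(F, "scaled_dot_product_attention") and scale_qk:
        return AttnProcessor2_0()
    return AttnProcessor()
```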