@@ -1142,8 +1142,8 @@ def test_lora_fusion_is_not_affected_by_unloading(self):
         images_with_unloaded_lora = sd_pipe(**pipeline_inputs, generator=torch.manual_seed(0)).images
         images_with_unloaded_lora_slice = images_with_unloaded_lora[0, -3:, -3:, -1]

-        assert np.allclose(
-            lora_image_slice, images_with_unloaded_lora_slice
+        assert (
+            np.abs(lora_image_slice - images_with_unloaded_lora_slice).max() < 2e-1
         ), "`unload_lora_weights()` should have no effect on the semantics of the results as the LoRA parameters were fused."

     def test_fuse_lora_with_different_scales(self):
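
The hunk above trades `np.allclose` for an explicit max-abs-diff bound, presumably because the default tolerances (rtol=1e-5, atol=1e-8) are far stricter than the cross-hardware variance of pipeline outputs. A minimal sketch of the two assertion styles, with made-up slice values (the 0.05 drift is illustrative only):

import numpy as np

lora_image_slice = np.full((3, 3), 0.50)
unloaded_slice = np.full((3, 3), 0.55)  # e.g. drift across GPUs

# np.allclose with default tolerances rejects this small drift.
assert not np.allclose(lora_image_slice, unloaded_slice)

# The explicit bound keeps the tolerance visible and easy to tune.
assert np.abs(lora_image_slice - unloaded_slice).max() < 2e-1
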
@@ -1345,9 +1345,9 @@ def dummy_input(self):
         num_channels = 4
         sizes = (32, 32)

-        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
+        noise = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
         time_step = torch.tensor([10]).to(torch_device)
-        encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device)
+        encoder_hidden_states = floats_tensor((batch_size, 4, 32), rng=random.Random(0)).to(torch_device)

         return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states}

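Passing a fixed `rng` makes the dummy inputs deterministic across calls (the same change is applied to the spatio-temporal `dummy_input` further down, and it assumes `random` is imported in the test module). A rough sketch of the idea, with `make_floats` as a hypothetical stand-in for the `floats_tensor` test helper, assuming it draws from an optional `random.Random` instance:

import random
import torch

def make_floats(shape, scale=1.0, rng=None):  # hypothetical stand-in
    rng = rng or random.Random()
    n = 1
    for dim in shape:
        n *= dim
    values = [rng.random() * scale for _ in range(n)]
    return torch.tensor(values, dtype=torch.float).view(shape)

a = make_floats((2, 4, 32, 32), rng=random.Random(0))
b = make_floats((2, 4, 32, 32), rng=random.Random(0))
assert torch.equal(a, b)  # same seed, identical tensors on every call
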
@@ -1554,7 +1554,7 @@ def test_lora_on_off(self, expected_max_diff=1e-3):
         torch_device != "cuda" or not is_xformers_available(),
         reason="XFormers attention is only available with CUDA and `xformers` installed",
     )
-    def test_lora_xformers_on_off(self, expected_max_diff=1e-3):
+    def test_lora_xformers_on_off(self, expected_max_diff=1e-4):
         # enable deterministic behavior for gradient checkpointing
         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

@@ -1594,9 +1594,9 @@ def dummy_input(self):
         num_frames = 4
         sizes = (32, 32)

-        noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device)
+        noise = floats_tensor((batch_size, num_channels, num_frames) + sizes, rng=random.Random(0)).to(torch_device)
         time_step = torch.tensor([10]).to(torch_device)
-        encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device)
+        encoder_hidden_states = floats_tensor((batch_size, 4, 32), rng=random.Random(0)).to(torch_device)

         return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states}

@@ -1686,7 +1686,7 @@ def test_lora_save_load(self):
         with torch.no_grad():
             new_sample = new_model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample

-        assert (sample - new_sample).abs().max() < 1e-3
+        assert (sample - new_sample).abs().max() < 5e-3

         # LoRA and no LoRA should NOT be the same
         assert (sample - old_sample).abs().max() > 1e-4
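
The loosened 5e-3 bound still sits next to the lower bound two lines later, so the test keeps both directions of the contract: reloaded LoRA weights must reproduce the original LoRA output closely, while remaining measurably different from the LoRA-free output. A toy illustration with hand-picked tensors:

import torch

sample = torch.full((4,), 1.000)      # output with LoRA
new_sample = torch.full((4,), 1.002)  # output after save/load round-trip
old_sample = torch.full((4,), 0.900)  # output without LoRA

assert (sample - new_sample).abs().max() < 5e-3  # round-trip stays close
assert (sample - old_sample).abs().max() > 1e-4  # LoRA actually changed things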