Skip to content

Commit c803a8f

Browse files
[LCM] Fix img2img (huggingface#5698)
* [LCM] Fix img2img
* make fix-copies
* make fix-copies
* make fix-copies
* up
1 parent d384265 commit c803a8f

File tree

3 files changed: +6 −6 lines changed

src/diffusers/models/attention_processor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -378,7 +378,7 @@ def set_processor(self, processor: "AttnProcessor", _remove_lora: bool = False)
378378
_remove_lora (`bool`, *optional*, defaults to `False`):
379379
Set to `True` to remove LoRA layers from the model.
380380
"""
381 -        if hasattr(self, "processor") and _remove_lora and self.to_q.lora_layer is not None:
381 +        if not USE_PEFT_BACKEND and hasattr(self, "processor") and _remove_lora and self.to_q.lora_layer is not None:
382382
deprecate(
383383
"set_processor to offload LoRA",
384384
"0.26.0",

src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -738,7 +738,7 @@ def __call__(
738738
if original_inference_steps is not None
739739
else self.scheduler.config.original_inference_steps
740740
)
741 -        latent_timestep = torch.tensor(int(strength * original_inference_steps))
741 +        latent_timestep = timesteps[:1]
742742
latents = self.prepare_latents(
743743
image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
744744
)

tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,7 @@ def test_lcm_onestep(self):
133133
assert image.shape == (1, 32, 32, 3)
134134

135135
image_slice = image[0, -3:, -3:, -1]
136 -        expected_slice = np.array([0.5865, 0.2854, 0.2828, 0.7473, 0.6006, 0.4580, 0.4397, 0.6415, 0.6069])
136 +        expected_slice = np.array([0.4388, 0.3717, 0.2202, 0.7213, 0.6370, 0.3664, 0.5815, 0.6080, 0.4977])
137137
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
138138

139139
def test_lcm_multistep(self):
@@ -150,7 +150,7 @@ def test_lcm_multistep(self):
150150
assert image.shape == (1, 32, 32, 3)
151151

152152
image_slice = image[0, -3:, -3:, -1]
153 -        expected_slice = np.array([0.4903, 0.3304, 0.3503, 0.5241, 0.5153, 0.4585, 0.3222, 0.4764, 0.4891])
153 +        expected_slice = np.array([0.4150, 0.3719, 0.2479, 0.6333, 0.6024, 0.3778, 0.5036, 0.5420, 0.4678])
154154
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
155155

156156
def test_inference_batch_single_identical(self):
@@ -237,7 +237,7 @@ def test_lcm_onestep(self):
237237
assert image.shape == (1, 512, 512, 3)
238238

239239
image_slice = image[0, -3:, -3:, -1].flatten()
240 -        expected_slice = np.array([0.1025, 0.0911, 0.0984, 0.0981, 0.0901, 0.0918, 0.1055, 0.0940, 0.0730])
240 +        expected_slice = np.array([0.1950, 0.1961, 0.2308, 0.1786, 0.1837, 0.2320, 0.1898, 0.1885, 0.2309])
241241
assert np.abs(image_slice - expected_slice).max() < 1e-3
242242

243243
def test_lcm_multistep(self):
@@ -253,5 +253,5 @@ def test_lcm_multistep(self):
253253
assert image.shape == (1, 512, 512, 3)
254254

255255
image_slice = image[0, -3:, -3:, -1].flatten()
256 -        expected_slice = np.array([0.01855, 0.01855, 0.01489, 0.01392, 0.01782, 0.01465, 0.01831, 0.02539, 0.0])
256 +        expected_slice = np.array([0.3756, 0.3816, 0.3767, 0.3718, 0.3739, 0.3735, 0.3863, 0.3803, 0.3563])
257257
assert np.abs(image_slice - expected_slice).max() < 1e-3

0 commit comments

Comments (0)