
Commit ba59e92

Fix memory issues in tests (huggingface#5183)
* fix memory issues
* set _offload_gpu_id
* set gpu offload id
1 parent 02247d9 commit ba59e92

4 files changed (+36, -19 lines)

src/diffusers/pipelines/pipeline_utils.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -1360,7 +1360,7 @@ def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[t
         )
 
         # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0
-        self._offload_gpu_id = gpu_id or torch_device.index or self._offload_gpu_id or 0
+        self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0)
 
         device_type = torch_device.type
         device = torch.device(f"{device_type}:{self._offload_gpu_id}")
@@ -1445,7 +1445,7 @@ def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Un
         )
 
         # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0
-        self._offload_gpu_id = gpu_id or torch_device.index or self._offload_gpu_id or 0
+        self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0)
 
         device_type = torch_device.type
         device = torch.device(f"{device_type}:{self._offload_gpu_id}")
```
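
The one functional change here swaps `self._offload_gpu_id or 0` for `getattr(self, "_offload_gpu_id", 0)`, so the fallback chain works even when the attribute has never been set. Below is a minimal sketch of that resolution order outside the library; the class and method names are illustrative only, not diffusers API.

```python
# Minimal sketch (not the diffusers implementation) of the fallback chain the new line
# encodes: explicit gpu_id -> index of the passed device -> previously stored id -> 0.
class OffloadIdDemo:  # hypothetical class, for illustration only
    def resolve_offload_gpu_id(self, gpu_id=None, torch_device=None):
        # getattr() falls back to 0 when the attribute has never been set,
        # which a bare `self._offload_gpu_id` access cannot do.
        device_index = torch_device.index if torch_device is not None else None
        self._offload_gpu_id = gpu_id or device_index or getattr(self, "_offload_gpu_id", 0)
        return self._offload_gpu_id


demo = OffloadIdDemo()
assert demo.resolve_offload_gpu_id() == 0          # nothing passed, nothing stored -> 0
assert demo.resolve_offload_gpu_id(gpu_id=1) == 1  # explicit id wins
assert demo.resolve_offload_gpu_id() == 1          # previously stored id is reused
```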

tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py

Lines changed: 15 additions & 7 deletions
```diff
@@ -37,6 +37,7 @@
     floats_tensor,
     load_image,
     nightly,
+    numpy_cosine_similarity_distance,
     require_torch_gpu,
     slow,
     torch_device,
@@ -303,18 +304,19 @@ def setUpClass(cls):
         raw_image = load_image(
             "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
         )
-
-        raw_image = raw_image.convert("RGB").resize((768, 768))
+        raw_image = raw_image.convert("RGB").resize((256, 256))
 
         cls.raw_image = raw_image
 
     def test_stable_diffusion_diffedit_full(self):
         generator = torch.manual_seed(0)
 
         pipe = StableDiffusionDiffEditPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
+            "stabilityai/stable-diffusion-2-1-base", safety_checker=None, torch_dtype=torch.float16
         )
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+        pipe.scheduler.clip_sample = True
+
         pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
         pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
@@ -330,7 +332,11 @@ def test_stable_diffusion_diffedit_full(self):
         )
 
         inv_latents = pipe.invert(
-            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
+            prompt=source_prompt,
+            image=self.raw_image,
+            inpaint_strength=0.7,
+            generator=generator,
+            num_inference_steps=5,
         ).latents
 
         image = pipe(
@@ -340,19 +346,21 @@ def test_stable_diffusion_diffedit_full(self):
             generator=generator,
             negative_prompt=source_prompt,
             inpaint_strength=0.7,
-            output_type="numpy",
+            num_inference_steps=5,
+            output_type="np",
         ).images[0]
 
         expected_image = (
             np.array(
                 load_image(
                     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                     "/diffedit/pears.png"
-                ).resize((768, 768))
+                ).resize((256, 256))
             )
             / 255
         )
-        assert np.abs((expected_image - image).max()) < 5e-1
+
+        assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 2e-1
 
 
 @nightly
```
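
The slow DiffEdit test now runs the smaller `stabilityai/stable-diffusion-2-1-base` checkpoint at 256x256 with 5 steps, and compares against the reference image with `numpy_cosine_similarity_distance` instead of a max absolute pixel difference. The real helper is imported from diffusers' testing utilities; the sketch below is only an illustration of what such a cosine-distance check amounts to.

```python
# Hedged sketch of a cosine-distance comparison between two images:
# 1 - cosine similarity of the flattened arrays.
import numpy as np


def cosine_distance(a: np.ndarray, b: np.ndarray) -> float:
    a = a.flatten().astype(np.float64)
    b = b.flatten().astype(np.float64)
    return 1.0 - float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))


# Identical images have distance ~0; the test only requires the distance
# to stay below a loose threshold (2e-1) at the reduced 256x256 resolution.
img = np.random.rand(256, 256, 3)
assert cosine_distance(img, img) < 1e-6
```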

tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py

Lines changed: 12 additions & 5 deletions
```diff
@@ -405,13 +405,20 @@ def test_stable_diffusion_text2img_pipeline_unflawed(self):
             pipe.scheduler.config, timestep_spacing="trailing", rescale_betas_zero_snr=True
         )
         pipe.to(torch_device)
-        pipe.enable_attention_slicing()
+        pipe.enable_model_cpu_offload()
         pipe.set_progress_bar_config(disable=None)
 
         prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
 
         generator = torch.Generator("cpu").manual_seed(0)
-        output = pipe(prompt=prompt, guidance_scale=7.5, guidance_rescale=0.7, generator=generator, output_type="np")
+        output = pipe(
+            prompt=prompt,
+            guidance_scale=7.5,
+            num_inference_steps=10,
+            guidance_rescale=0.7,
+            generator=generator,
+            output_type="np",
+        )
         image = output.images[0]
 
         assert image.shape == (768, 768, 3)
@@ -443,7 +450,7 @@ def test_download_local(self):
 
         pipe = StableDiffusionPipeline.from_single_file(filename, torch_dtype=torch.float16)
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-        pipe.to("cuda")
+        pipe.enable_model_cpu_offload()
 
         image_out = pipe("test", num_inference_steps=1, output_type="np").images[0]
 
@@ -460,15 +467,15 @@ def test_download_ckpt_diff_format_is_same(self):
         pipe_single.enable_model_cpu_offload()
 
         generator = torch.Generator(device="cpu").manual_seed(0)
-        image_ckpt = pipe_single("a turtle", num_inference_steps=5, generator=generator, output_type="np").images[0]
+        image_ckpt = pipe_single("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0]
 
         pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
         pipe.unet.set_attn_processor(AttnProcessor())
         pipe.enable_model_cpu_offload()
 
         generator = torch.Generator(device="cpu").manual_seed(0)
-        image = pipe("a turtle", num_inference_steps=5, generator=generator, output_type="np").images[0]
+        image = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0]
 
         max_diff = numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten())
         assert max_diff < 1e-3
```
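
The common thread in these tests is replacing `pipe.to("cuda")` (and attention slicing) with `pipe.enable_model_cpu_offload()` and cutting the step count, so each sub-model occupies the GPU only while it runs. A sketch of that pattern as a standalone script is below; it assumes a CUDA machine, and the model id and prompt simply mirror the tests.

```python
# Memory-conscious slow-test pattern (illustrative, assumes CUDA is available).
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
)
# Instead of pipe.to("cuda"), move each sub-model to the GPU only while it runs.
pipe.enable_model_cpu_offload()

generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe(
    "a turtle",
    num_inference_steps=2,  # few steps keep runtime and peak memory low
    generator=generator,
    output_type="np",
).images[0]
```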

tests/pipelines/text_to_video/test_video_to_video.py

Lines changed: 7 additions & 5 deletions
```diff
@@ -31,6 +31,7 @@
     enable_full_determinism,
     floats_tensor,
     is_flaky,
+    numpy_cosine_similarity_distance,
     skip_mps,
     slow,
     torch_device,
@@ -198,17 +199,18 @@ def test_progress_bar(self):
 @skip_mps
 class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
     def test_two_step_model(self):
-        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
+        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
         pipe.enable_model_cpu_offload()
 
         # 10 frames
         generator = torch.Generator(device="cpu").manual_seed(0)
-        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
-        video = video.to("cuda")
+        video = torch.randn((1, 10, 3, 320, 576), generator=generator)
 
         prompt = "Spiderman is surfing"
 
         video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
 
-        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
-        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
+        expected_array = np.array([-0.9770508, -0.8027344, -0.62646484, -0.8334961, -0.7573242])
+        output_array = video_frames.cpu().numpy()[0, 0, 0, 0, -5:]
+
+        assert numpy_cosine_similarity_distance(expected_array, output_array) < 1e-2
```
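
Besides switching to the smaller `cerspense/zeroscope_v2_576w` checkpoint, the test shrinks the input video from 1024x576 to 320x576 and no longer moves it to CUDA, leaving device placement to the offloaded pipeline. A back-of-the-envelope check of how much the input tensor alone shrinks, under the stated float32 assumption:

```python
# Plain-Python size comparison of the old and new test inputs (no GPU needed).
import math

old_shape = (1, 10, 3, 1024, 576)
new_shape = (1, 10, 3, 320, 576)


def mib(shape, bytes_per_elem=4):  # torch.randn defaults to float32
    return math.prod(shape) * bytes_per_elem / 2**20


print(f"old input: {mib(old_shape):.1f} MiB, new input: {mib(new_shape):.1f} MiB")
# ~67.5 MiB -> ~21.1 MiB for the input alone; the larger savings come from running
# the smaller 576w checkpoint at 320x576 instead of zeroscope_v2_XL at 1024x576.
```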
