Commit c78ee14

Move more slow tests to nightly (huggingface#5220)
* move to nightly
* fix mistake

Parent: 622f35b

14 files changed (+27, -28 lines)
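
Every file in this commit makes the same mechanical change: integration tests previously decorated with @slow are re-decorated with @nightly, and the corresponding import is switched from slow to nightly in diffusers.utils.testing_utils. Both decorators gate a test on an environment variable, so the effect is to move these tests out of the per-merge slow CI job and into the scheduled nightly job. Below is a minimal sketch of how such env-var-gated markers are conventionally implemented in Hugging Face testing utilities; the helper parse_flag_from_env and the RUN_SLOW / RUN_NIGHTLY variable names follow that convention, but treat the details as an illustration rather than the verbatim library code.

import os
import unittest


def parse_flag_from_env(key, default=False):
    # Interpret "yes"/"true"/"t"/"1" (case-insensitive) as truthy.
    value = os.environ.get(key, str(default)).lower()
    return value in ("yes", "true", "t", "1")


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)


def slow(test_case):
    # Runs on the regular slow CI job (RUN_SLOW=1); skipped everywhere else.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def nightly(test_case):
    # Runs only on the scheduled nightly CI job (RUN_NIGHTLY=1).
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)

Under this gating, a moved suite such as AudioLDMPipelineSlowTests is skipped both by default and by the RUN_SLOW=1 job, and can be run locally with something like RUN_NIGHTLY=1 python -m pytest tests/pipelines/audioldm/test_audioldm.py.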

tests/pipelines/audio_diffusion/test_audio_diffusion.py
Lines changed: 2 additions & 2 deletions

@@ -29,7 +29,7 @@
     UNet2DConditionModel,
     UNet2DModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
 
 
 enable_full_determinism()
@@ -95,7 +95,7 @@ def dummy_vqvae_and_unet(self):
         )
         return vqvae, unet
 
-    @slow
+    @nightly
     def test_audio_diffusion(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         mel = Mel(

tests/pipelines/audioldm/test_audioldm.py
Lines changed: 2 additions & 2 deletions

@@ -37,7 +37,7 @@
     UNet2DConditionModel,
 )
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device
 
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -369,7 +369,7 @@ def test_xformers_attention_forwardGenerator_pass(self):
         self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
 
 
-@slow
+@nightly
 class AudioLDMPipelineSlowTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()

tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
Lines changed: 2 additions & 2 deletions

@@ -33,8 +33,8 @@
     floats_tensor,
     load_image,
     load_numpy,
+    nightly,
     require_torch_gpu,
-    slow,
     torch_device,
 )
 
@@ -232,7 +232,7 @@ def test_float16_inference(self):
         super().test_float16_inference(expected_max_diff=2e-1)
 
 
-@slow
+@nightly
 @require_torch_gpu
 class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):

tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py
Lines changed: 2 additions & 2 deletions

@@ -20,7 +20,7 @@
 from transformers import CLIPTextConfig, CLIPTextModel
 
 from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device
 
 
 enable_full_determinism()
@@ -96,7 +96,7 @@ def test_inference_uncond(self):
         assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
 
 
-@slow
+@nightly
 @require_torch
 class LDMPipelineIntegrationTests(unittest.TestCase):
     def test_inference_uncond(self):

tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
Lines changed: 2 additions & 2 deletions

@@ -20,13 +20,13 @@
 import torch
 
 from diffusers import StableDiffusionKDiffusionPipeline
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
 
 
 enable_full_determinism()
 
 
-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):

tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
Lines changed: 2 additions & 2 deletions

@@ -28,7 +28,7 @@
     StableDiffusionModelEditingPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
@@ -184,7 +184,7 @@ def test_attention_slicing_forward_pass(self):
         super().test_attention_slicing_forward_pass(expected_max_diff=5e-3)
 
 
-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionModelEditingSlowTests(unittest.TestCase):
     def tearDown(self):

tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py
Lines changed: 2 additions & 2 deletions

@@ -29,8 +29,8 @@
 )
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
+    nightly,
     require_torch_gpu,
-    slow,
     torch_device,
 )
 
@@ -188,7 +188,7 @@ def test_stable_diffusion_paradigms_negative_prompt(self):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
 
-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionParadigmsPipelineSlowTests(unittest.TestCase):
     def tearDown(self):

tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
Lines changed: 2 additions & 2 deletions

@@ -26,7 +26,7 @@
     StableDiffusionSAGPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
@@ -115,7 +115,7 @@ def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=3e-3)
 
 
-@slow
+@nightly
 @require_torch_gpu
 class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):

tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
Lines changed: 2 additions & 2 deletions

@@ -28,10 +28,10 @@
 )
 from diffusers.utils.testing_utils import (
     load_numpy,
+    nightly,
     numpy_cosine_similarity_distance,
     require_torch_gpu,
     skip_mps,
-    slow,
 )
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -187,7 +187,7 @@ def test_save_load_optional_components(self):
 
 
 @require_torch_gpu
-@slow
+@nightly
 class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
     # Attend and excite requires being able to run a backward pass at
     # inference time. There's no deterministic backward operator for pad

tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
Lines changed: 1 addition & 2 deletions

@@ -39,7 +39,6 @@
     nightly,
     numpy_cosine_similarity_distance,
     require_torch_gpu,
-    slow,
     torch_device,
 )
 
@@ -292,7 +291,7 @@ def test_inversion_dpm(self):
 
 
 @require_torch_gpu
-@slow
+@nightly
 class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
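
As a standalone sanity check of the skip behavior described above (using a hypothetical inline nightly marker, not the diffusers package), the snippet below reports its test as skipped unless RUN_NIGHTLY is set:

import os
import unittest

# Mirror CI behavior: nightly tests are skipped when RUN_NIGHTLY is unset.
RUN_NIGHTLY = os.environ.get("RUN_NIGHTLY", "0").lower() in ("1", "true", "yes")

nightly = unittest.skipUnless(RUN_NIGHTLY, "test is nightly")


@nightly
class ExampleIntegrationTests(unittest.TestCase):
    def test_expensive_pipeline(self):
        # Stand-in for a real GPU integration test.
        self.assertTrue(True)


if __name__ == "__main__":
    # Reports the test as skipped unless invoked with RUN_NIGHTLY=1.
    unittest.main()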
