Skip to content

Commit 6999693

Browse files
yiyixuxu and co-author authored
speed up Shap-E fast test (huggingface#5686)
skip rendering
Co-authored-by: yiyixuxu <yixu310@gmail.com>
1 parent 9ae9059 commit 6999693

File tree

3 files changed

+12
-34
lines changed

3 files changed

+12
-34
lines changed

tests/pipelines/shap_e/test_shap_e.py

Lines changed: 6 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ def get_dummy_inputs(self, device, seed=0):
160160
"generator": generator,
161161
"num_inference_steps": 1,
162162
"frame_size": 32,
163-
"output_type": "np",
163+
"output_type": "latent",
164164
}
165165
return inputs
166166

@@ -176,24 +176,12 @@ def test_shap_e(self):
176176

177177
output = pipe(**self.get_dummy_inputs(device))
178178
image = output.images[0]
179-
image_slice = image[0, -3:, -3:, -1]
180-
181-
assert image.shape == (20, 32, 32, 3)
182-
183-
expected_slice = np.array(
184-
[
185-
0.00039216,
186-
0.00039216,
187-
0.00039216,
188-
0.00039216,
189-
0.00039216,
190-
0.00039216,
191-
0.00039216,
192-
0.00039216,
193-
0.00039216,
194-
]
195-
)
179+
image = image.cpu().numpy()
180+
image_slice = image[-3:, -3:]
181+
182+
assert image.shape == (32, 16)
196183

184+
expected_slice = np.array([-1.0000, -0.6241, 1.0000, -0.8978, -0.6866, 0.7876, -0.7473, -0.2874, 0.6103])
197185
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
198186

199187
def test_inference_batch_consistent(self):

tests/pipelines/shap_e/test_shap_e_img2img.py

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ def get_dummy_inputs(self, device, seed=0):
181181
"generator": generator,
182182
"num_inference_steps": 1,
183183
"frame_size": 32,
184-
"output_type": "np",
184+
"output_type": "latent",
185185
}
186186
return inputs
187187

@@ -197,22 +197,12 @@ def test_shap_e(self):
197197

198198
output = pipe(**self.get_dummy_inputs(device))
199199
image = output.images[0]
200-
image_slice = image[0, -3:, -3:, -1]
200+
image_slice = image[-3:, -3:].cpu().numpy()
201201

202-
assert image.shape == (20, 32, 32, 3)
202+
assert image.shape == (32, 16)
203203

204204
expected_slice = np.array(
205-
[
206-
0.00039216,
207-
0.00039216,
208-
0.00039216,
209-
0.00039216,
210-
0.00039216,
211-
0.00039216,
212-
0.00039216,
213-
0.00039216,
214-
0.00039216,
215-
]
205+
[-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723]
216206
)
217207

218208
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

tests/pipelines/test_pipelines_common.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -493,7 +493,7 @@ def _test_inference_batch_single_identical(
493493

494494
assert output_batch[0].shape[0] == batch_size
495495

496-
max_diff = np.abs(output_batch[0][0] - output[0][0]).max()
496+
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
497497
assert max_diff < expected_max_diff
498498

499499
def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
@@ -702,7 +702,7 @@ def _test_attention_slicing_forward_pass(
702702
self.assertLess(max_diff, expected_max_diff, "Attention slicing should not affect the inference results")
703703

704704
if test_mean_pixel_difference:
705-
assert_mean_pixel_difference(output_with_slicing[0], output_without_slicing[0])
705+
assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0]))
706706

707707
@unittest.skipIf(
708708
torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),

0 commit comments

Comments (0)