
Commit c8d86e9

chiral-carbon and DN6 authored

Remove code snippets containing is_safetensors_available() (huggingface#4521)

* [WIP] Remove code snippets containing `is_safetensors_available()`
* Modifying `import_utils.py`
* update pipeline tests for safetensor default
* fix test related to cached requests
* address import nits

Co-authored-by: Dhruv Nair <[email protected]>
1 parent b28cd3f commit c8d86e9

File tree: 12 files changed, +58 additions, -143 deletions

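Net effect for callers: `safetensors` becomes a hard dependency, so the library imports it directly and an unspecified `use_safetensors` now resolves to `True` (with pickle fallback allowed) instead of consulting `is_safetensors_available()`. A minimal sketch of the intended loading behavior after this change; the repository id is only an example:

from diffusers import DiffusionPipeline

# Leaving use_safetensors unset prefers .safetensors weights and quietly
# falls back to .bin weights when none are present (allow_pickle=True).
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Passing use_safetensors=True no longer trips an availability check; it only
# fails if the repository ships no .safetensors weights at all.
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True)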

examples/community/checkpoint_merger.py

Lines changed: 3 additions & 9 deletions
@@ -2,14 +2,8 @@
 import os
 from typing import Dict, List, Union
 
+import safetensors.torch
 import torch
-
-from diffusers.utils import is_safetensors_available
-
-
-if is_safetensors_available():
-    import safetensors.torch
-
 from huggingface_hub import snapshot_download
 
 from diffusers import DiffusionPipeline, __version__
@@ -229,14 +223,14 @@ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]
             update_theta_0 = getattr(module, "load_state_dict")
             theta_1 = (
                 safetensors.torch.load_file(checkpoint_path_1)
-                if (is_safetensors_available() and checkpoint_path_1.endswith(".safetensors"))
+                if (checkpoint_path_1.endswith(".safetensors"))
                 else torch.load(checkpoint_path_1, map_location="cpu")
             )
             theta_2 = None
             if checkpoint_path_2:
                 theta_2 = (
                     safetensors.torch.load_file(checkpoint_path_2)
-                    if (is_safetensors_available() and checkpoint_path_2.endswith(".safetensors"))
+                    if (checkpoint_path_2.endswith(".safetensors"))
                     else torch.load(checkpoint_path_2, map_location="cpu")
                 )
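With the availability guard gone, the merger chooses a loader purely by file extension. A standalone sketch of that selection pattern (the checkpoint path is a placeholder):

import safetensors.torch
import torch

def load_weights(checkpoint_path: str):
    # .safetensors checkpoints go through safetensors; everything else uses torch.load.
    if checkpoint_path.endswith(".safetensors"):
        return safetensors.torch.load_file(checkpoint_path)
    return torch.load(checkpoint_path, map_location="cpu")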

scripts/convert_original_audioldm_to_diffusers.py

Lines changed: 1 addition & 4 deletions
@@ -38,7 +38,7 @@
     PNDMScheduler,
     UNet2DConditionModel,
 )
-from diffusers.utils import is_omegaconf_available, is_safetensors_available
+from diffusers.utils import is_omegaconf_available
 from diffusers.utils.import_utils import BACKENDS_MAPPING
 
 
@@ -824,9 +824,6 @@ def load_pipeline_from_original_audioldm_ckpt(
     from omegaconf import OmegaConf
 
     if from_safetensors:
-        if not is_safetensors_available():
-            raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
         from safetensors import safe_open
 
         checkpoint = {}

scripts/convert_tiny_autoencoder_to_diffusers.py

Lines changed: 1 addition & 7 deletions
@@ -1,12 +1,6 @@
 import argparse
 
-from diffusers.utils import is_safetensors_available
-
-
-if is_safetensors_available():
-    import safetensors.torch
-else:
-    raise ImportError("Please install `safetensors`.")
+import safetensors.torch
 
 from diffusers import AutoencoderTiny

src/diffusers/commands/fp16_safetensors.py

Lines changed: 2 additions & 7 deletions
@@ -27,7 +27,7 @@
 from huggingface_hub import hf_hub_download
 from packaging import version
 
-from ..utils import is_safetensors_available, logging
+from ..utils import logging
 from . import BaseDiffusersCLICommand
 
 
@@ -68,12 +68,7 @@ def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool, use_auth_tok
         self.local_ckpt_dir = f"/tmp/{ckpt_id}"
         self.fp16 = fp16
 
-        if is_safetensors_available():
-            self.use_safetensors = use_safetensors
-        else:
-            raise ImportError(
-                "When `use_safetensors` is set to True, the `safetensors` library needs to be installed. Install it via `pip install safetensors`."
-            )
+        self.use_safetensors = use_safetensors
 
         if not self.use_safetensors and not self.fp16:
             raise NotImplementedError(
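The command now assumes `safetensors` is importable and only validates that at least one conversion is requested, e.g. an invocation along the lines of `diffusers-cli fp16_safetensors --ckpt_id <repo_id> --fp16 --use_safetensors` (flag names inferred from the constructor arguments above; the checkpoint id is a placeholder).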

src/diffusers/loaders.py

Lines changed: 8 additions & 25 deletions
@@ -22,6 +22,7 @@
 from typing import Callable, Dict, List, Optional, Union
 
 import requests
+import safetensors
 import torch
 import torch.nn.functional as F
 from huggingface_hub import hf_hub_download
@@ -34,16 +35,12 @@
     deprecate,
     is_accelerate_available,
     is_omegaconf_available,
-    is_safetensors_available,
     is_transformers_available,
     logging,
 )
 from .utils.import_utils import BACKENDS_MAPPING
 
 
-if is_safetensors_available():
-    import safetensors
-
 if is_transformers_available():
     from transformers import CLIPTextModel, CLIPTextModelWithProjection, PreTrainedModel, PreTrainedTokenizer
 
@@ -261,14 +258,10 @@ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict
         network_alphas = kwargs.pop("network_alphas", None)
         is_network_alphas_none = network_alphas is None
 
-        if use_safetensors and not is_safetensors_available():
-            raise ValueError(
-                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors"
-            )
-
         allow_pickle = False
+
         if use_safetensors is None:
-            use_safetensors = is_safetensors_available()
+            use_safetensors = True
             allow_pickle = True
 
         user_agent = {
@@ -757,14 +750,9 @@ def load_textual_inversion(
         weight_name = kwargs.pop("weight_name", None)
         use_safetensors = kwargs.pop("use_safetensors", None)
 
-        if use_safetensors and not is_safetensors_available():
-            raise ValueError(
-                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors"
-            )
-
         allow_pickle = False
         if use_safetensors is None:
-            use_safetensors = is_safetensors_available()
+            use_safetensors = True
             allow_pickle = True
 
         user_agent = {
@@ -1014,14 +1002,9 @@ def lora_state_dict(
         unet_config = kwargs.pop("unet_config", None)
         use_safetensors = kwargs.pop("use_safetensors", None)
 
-        if use_safetensors and not is_safetensors_available():
-            raise ValueError(
-                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors"
-            )
-
         allow_pickle = False
         if use_safetensors is None:
-            use_safetensors = is_safetensors_available()
+            use_safetensors = True
             allow_pickle = True
 
         user_agent = {
@@ -1853,7 +1836,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
 
         torch_dtype = kwargs.pop("torch_dtype", None)
 
-        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
+        use_safetensors = kwargs.pop("use_safetensors", None)
 
         pipeline_name = cls.__name__
         file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
@@ -2050,7 +2033,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
 
         torch_dtype = kwargs.pop("torch_dtype", None)
 
-        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
+        use_safetensors = kwargs.pop("use_safetensors", None)
 
         file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
         from_safetensors = file_extension == "safetensors"
@@ -2223,7 +2206,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
 
         torch_dtype = kwargs.pop("torch_dtype", None)
 
-        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
+        use_safetensors = kwargs.pop("use_safetensors", None)
 
         file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
         from_safetensors = file_extension == "safetensors"
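All of the loaders above (`load_attn_procs`, `load_textual_inversion`, `lora_state_dict`) now share the same default resolution: an unspecified `use_safetensors` becomes `True` with `allow_pickle=True`, so legacy `.bin` weights still load when no `.safetensors` file exists. A condensed sketch of that shared logic, with names mirroring the kwargs in the diff:

def resolve_weight_format(use_safetensors=None):
    # None means "prefer safetensors, but quietly fall back to pickled weights".
    allow_pickle = False
    if use_safetensors is None:
        use_safetensors = True
        allow_pickle = True
    return use_safetensors, allow_pickle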

src/diffusers/models/modeling_utils.py

Lines changed: 2 additions & 13 deletions
@@ -21,6 +21,7 @@
 from functools import partial
 from typing import Any, Callable, List, Optional, Tuple, Union
 
+import safetensors
 import torch
 from torch import Tensor, device, nn
 
@@ -36,7 +37,6 @@
     _get_model_file,
     deprecate,
     is_accelerate_available,
-    is_safetensors_available,
     is_torch_version,
     logging,
 )
@@ -56,9 +56,6 @@
     from accelerate.utils import set_module_tensor_to_device
     from accelerate.utils.versions import is_torch_version
 
-if is_safetensors_available():
-    import safetensors
-
 
 def get_parameter_device(parameter: torch.nn.Module):
     try:
@@ -296,9 +293,6 @@ def save_pretrained(
             variant (`str`, *optional*):
                 If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
         """
-        if safe_serialization and not is_safetensors_available():
-            raise ImportError("`safe_serialization` requires the `safetensors library: `pip install safetensors`.")
-
         if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
@@ -454,14 +448,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
         variant = kwargs.pop("variant", None)
         use_safetensors = kwargs.pop("use_safetensors", None)
 
-        if use_safetensors and not is_safetensors_available():
-            raise ValueError(
-                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors"
-            )
-
         allow_pickle = False
         if use_safetensors is None:
-            use_safetensors = is_safetensors_available()
+            use_safetensors = True
             allow_pickle = True
 
         if low_cpu_mem_usage and not is_accelerate_available():

src/diffusers/pipelines/pipeline_utils.py

Lines changed: 2 additions & 8 deletions
@@ -52,7 +52,6 @@
     is_accelerate_available,
     is_accelerate_version,
     is_compiled_module,
-    is_safetensors_available,
     is_torch_version,
     is_transformers_available,
     logging,
@@ -899,7 +898,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
         offload_state_dict = kwargs.pop("offload_state_dict", False)
         low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
         variant = kwargs.pop("variant", None)
-        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
+        use_safetensors = kwargs.pop("use_safetensors", None)
         load_connected_pipeline = kwargs.pop("load_connected_pipeline", False)
 
         # 1. Download the checkpoints and configs
@@ -1311,14 +1310,9 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
         use_onnx = kwargs.pop("use_onnx", None)
         load_connected_pipeline = kwargs.pop("load_connected_pipeline", False)
 
-        if use_safetensors and not is_safetensors_available():
-            raise ValueError(
-                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors"
-            )
-
         allow_pickle = False
         if use_safetensors is None:
-            use_safetensors = is_safetensors_available()
+            use_safetensors = True
             allow_pickle = True
 
         allow_patterns = None
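`DiffusionPipeline.download` follows the same rule, so pre-fetching a repository now grabs the `.safetensors` weights by default and only falls back to `.bin` files when none are available. Illustrative usage; the repository id is an example:

from diffusers import DiffusionPipeline

local_dir = DiffusionPipeline.download("runwayml/stable-diffusion-v1-5")  # returns the local snapshot path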

src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py

Lines changed: 1 addition & 7 deletions
@@ -50,7 +50,7 @@
     PNDMScheduler,
     UnCLIPScheduler,
 )
-from ...utils import is_accelerate_available, is_omegaconf_available, is_safetensors_available, logging
+from ...utils import is_accelerate_available, is_omegaconf_available, logging
 from ...utils.import_utils import BACKENDS_MAPPING
 from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
 from ..paint_by_example import PaintByExampleImageEncoder
@@ -1225,9 +1225,6 @@ def download_from_original_stable_diffusion_ckpt(
     from omegaconf import OmegaConf
 
     if from_safetensors:
-        if not is_safetensors_available():
-            raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
         from safetensors.torch import load_file as safe_load
 
         checkpoint = safe_load(checkpoint_path, device="cpu")
@@ -1650,9 +1647,6 @@ def download_controlnet_from_original_ckpt(
     from omegaconf import OmegaConf
 
     if from_safetensors:
-        if not is_safetensors_available():
-            raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
         from safetensors import safe_open
 
         checkpoint = {}
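When `from_safetensors` is set, the conversion helpers now go straight to the `safetensors` readers without a backend check. A minimal sketch of the `safe_open` read pattern used above, assuming a local `.safetensors` checkpoint:

from safetensors import safe_open

checkpoint = {}
with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    for key in f.keys():
        checkpoint[key] = f.get_tensor(key)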

src/diffusers/utils/__init__.py

Lines changed: 0 additions & 1 deletion
@@ -64,7 +64,6 @@
     is_note_seq_available,
     is_omegaconf_available,
     is_onnx_available,
-    is_safetensors_available,
     is_scipy_available,
     is_tensorboard_available,
     is_tf_available,

src/diffusers/utils/import_utils.py

Lines changed: 0 additions & 4 deletions
@@ -306,10 +306,6 @@ def is_torch_available():
     return _torch_available
 
 
-def is_safetensors_available():
-    return _safetensors_available
-
-
 def is_tf_available():
     return _tf_available
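Downstream code that still wants a guard can probe for the package itself now that the helper is gone; diffusers simply assumes it is installed. A rough stand-in using only the standard library:

import importlib.util

def safetensors_installed() -> bool:
    # Approximate replacement for the removed is_safetensors_available() helper.
    return importlib.util.find_spec("safetensors") is not None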
