@@ -270,7 +270,7 @@ def save_pretrained(
270270 save_function : Optional [Callable ] = None ,
271271 safe_serialization : bool = True ,
272272 variant : Optional [str ] = None ,
273- max_shard_size : Union [int , str ] = "5GB " ,
273+ max_shard_size : Union [int , str ] = "10GB " ,
274274 push_to_hub : bool = False ,
275275 ** kwargs ,
276276 ):
@@ -293,10 +293,13 @@ def save_pretrained(
293293 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
294294 variant (`str`, *optional*):
295295 If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
296- max_shard_size (`int` or `str`, defaults to `"5GB "`):
296+ max_shard_size (`int` or `str`, defaults to `"10GB "`):
297297 The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size
298298 lower than this limit. If expressed as a string, needs to be digits followed by a unit (like `"5GB"`).
299- If expressed as an integer, the unit is bytes.
299+ If expressed as an integer, the unit is bytes. Note that this limit will be decreased after a certain
300+ period of time (starting from Oct 2024) to give users sufficient time to upgrade to the latest version of
301+ `diffusers`. This is done to establish a common default size for this argument across different libraries
302+ in the Hugging Face ecosystem (`transformers` and `accelerate`, for example).
300303 push_to_hub (`bool`, *optional*, defaults to `False`):
301304 Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
302305 repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
0 commit comments