@@ -30,10 +30,10 @@ class MultiAdapter(ModelMixin):
     MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to
     user-assigned weighting.
 
-    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
-    implements for all the model (such as downloading or saving, etc.)
+    This model inherits from [`ModelMixin`]. Check the superclass documentation for common methods such as downloading
+    or saving.
 
-    Parameters:
+    Args:
         adapters (`List[T2IAdapter]`, *optional*, defaults to None):
             A list of `T2IAdapter` model instances.
     """
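For orientation, here is a minimal sketch of how this wrapper is typically assembled. The checkpoint names are illustrative assumptions, not something this diff pins down:

from diffusers import MultiAdapter, T2IAdapter

# Any compatible T2IAdapter checkpoints work here; these two names are
# assumptions for illustration (one adapter per control signal).
canny_adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_canny_sd14v1")
depth_adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_depth_sd14v1")

# MultiAdapter wraps both and merges their outputs per user-assigned weights.
multi_adapter = MultiAdapter(adapters=[canny_adapter, depth_adapter])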
@@ -77,11 +77,13 @@ def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None
         r"""
         Args:
             xs (`torch.Tensor`):
-                (batch, channel, height, width) input images for multiple adapter models concated along dimension 1,
-                `channel` should equal to `num_adapter` * "number of channel of image".
+                A tensor of shape (batch, channel, height, width) representing input images for multiple adapter
+                models, concatenated along dimension 1 (the channel dimension). The `channel` dimension should equal
+                `num_adapter` * the number of channels per image.
+
             adapter_weights (`List[float]`, *optional*, defaults to None):
-                List of floats representing the weight which will be multiply to each adapter's output before adding
-                them together.
+                A list of floats representing the weights which will be multiplied by each adapter's output before
+                summing them together. If `None`, equal weights will be used for all adapters.
         """
         if adapter_weights is None:
             adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter)
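A sketch of the weighting behaviour documented above, reusing the `multi_adapter` built in the earlier sketch; the shapes assume two adapters that each take a 3-channel control image:

import torch

# Control images concatenated along dim 1, so channel == num_adapter * 3 == 6.
canny = torch.randn(1, 3, 512, 512)
depth = torch.randn(1, 3, 512, 512)
xs = torch.cat([canny, depth], dim=1)

# Omitting adapter_weights falls back to equal weights (1 / num_adapter each).
features_equal = multi_adapter(xs)

# An explicit list is converted to a tensor and biases the merged features.
features_biased = multi_adapter(xs, adapter_weights=[0.7, 0.3])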
@@ -109,24 +111,24 @@ def save_pretrained(
         variant: Optional[str] = None,
     ):
         """
-        Save a model and its configuration file to a directory, so that it can be re-loaded using the
+        Save a model and its configuration file to a specified directory, allowing it to be re-loaded with the
         `[`~models.adapter.MultiAdapter.from_pretrained`]` class method.
 
-        Arguments:
+        Args:
             save_directory (`str` or `os.PathLike`):
-                Directory to which to save. Will be created if it doesn't exist.
-            is_main_process (`bool`, *optional*, defaults to `True`):
-                Whether the process calling this is the main process or not. Useful when in distributed training like
-                TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
-                the main process to avoid race conditions.
+                The directory where the model will be saved. If the directory does not exist, it will be created.
+            is_main_process (`bool`, *optional*, defaults to `True`):
+                Whether the calling process is the main process. Useful in distributed training (e.g., on TPUs) when
+                this function must be called on all processes; in that case, set `is_main_process=True` only for the
+                main process to avoid race conditions.
             save_function (`Callable`):
-                The function to use to save the state dictionary. Useful on distributed training like TPUs when one
-                need to replace `torch.save` by another method. Can be configured with the environment variable
-                `DIFFUSERS_SAVE_MODE`.
-            safe_serialization (`bool`, *optional*, defaults to `True`):
-                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+                The function used to save the state dictionary. Useful in distributed training (e.g., on TPUs) to
+                replace `torch.save` with another method. Can also be configured with the `DIFFUSERS_SAVE_MODE`
+                environment variable.
+            safe_serialization (`bool`, *optional*, defaults to `True`):
+                If `True`, save the model using `safetensors`. If `False`, save the model with `pickle`.
             variant (`str`, *optional*):
-                If specified, weights are saved in the format pytorch_model.<variant>.bin.
+                If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
         """
         idx = 0
         model_path_to_save = save_directory
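A short usage sketch of the layout this method produces (the path is an assumption; per the loop seeded by `idx` and `model_path_to_save`, each wrapped adapter lands in an index-suffixed sibling of the given directory):

# Writes the first adapter under the given path and the rest under
# index-suffixed siblings (adapter, adapter_1, ...).
multi_adapter.save_pretrained("./my_model_directory/adapter")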
@@ -145,19 +147,17 @@ def save_pretrained(
     @classmethod
     def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
         r"""
-        Instantiate a pretrained MultiAdapter model from multiple pre-trained adapter models.
+        Instantiate a pretrained `MultiAdapter` model from multiple pre-trained adapter models.
 
         The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
-        the model, you should first set it back in training mode with `model.train()`.
+        the model, set it back to training mode using `model.train()`.
 
-        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
-        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
-        task.
+        Warnings:
+            *Weights from XXX not initialized from pretrained model* means that the weights of XXX are not pretrained
+            with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task.
+            *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, so those weights are discarded.
 
-        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
-        weights are discarded.
-
-        Parameters:
+        Args:
             pretrained_model_path (`os.PathLike`):
                 A path to a *directory* containing model weights saved using
                 [`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`.
@@ -175,20 +175,20 @@ def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
                 more information about each option see [designing a device
                 map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
             max_memory (`Dict`, *optional*):
-                A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
-                GPU and the available CPU RAM if unset.
+                A dictionary mapping device identifiers to their maximum memory. Defaults to the maximum memory
+                available for each GPU and the available CPU RAM if unset.
             low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                 Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
                 also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
                 model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
                 setting this argument to `True` will raise an error.
             variant (`str`, *optional*):
-                If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
-                ignored when using `from_flax`.
+                If specified, load weights from a `variant` file (*e.g.* `pytorch_model.<variant>.bin`). `variant` will
+                be ignored when using `from_flax`.
             use_safetensors (`bool`, *optional*, defaults to `None`):
-                If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the
-                `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from
-                `safetensors` weights. If set to `False`, loading will *not* use `safetensors`.
+                If `None`, the `safetensors` weights will be downloaded if available **and** if the `safetensors`
+                library is installed. If `True`, the model will be forcibly loaded from `safetensors` weights. If
+                `False`, `safetensors` is not used.
         """
         idx = 0
         adapters = []
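The matching load, sketched under the assumption that the directory was produced by `save_pretrained` above:

import torch
from diffusers import MultiAdapter

# Loads adapter, adapter_1, ... until no further indexed directory is found.
multi_adapter = MultiAdapter.from_pretrained(
    "./my_model_directory/adapter",
    torch_dtype=torch.float16,  # optional kwarg, shown purely for illustration
)
multi_adapter.train()  # from_pretrained returns the model in eval mode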
@@ -223,22 +223,22 @@ class T2IAdapter(ModelMixin, ConfigMixin):
     and
     [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235).
 
-    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
-    implements for all the model (such as downloading or saving, etc.)
+    This model inherits from [`ModelMixin`]. Check the superclass documentation for common methods such as
+    downloading or saving.
 
-    Parameters:
-        in_channels (`int`, *optional*, defaults to 3):
-            Number of channels of Aapter's input(*control image*). Set this parameter to 1 if you're using gray scale
-            image as *control image*.
+    Args:
+        in_channels (`int`, *optional*, defaults to `3`):
+            The number of channels in the adapter's input (*control image*). Set it to 1 if you're using a grayscale
+            image.
         channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
-            The number of channel of each downsample block's output hidden state. The `len(block_out_channels)` will
-            also determine the number of downsample blocks in the Adapter.
-        num_res_blocks (`int`, *optional*, defaults to 2):
+            The number of channels in each downsample block's output hidden state. The `len(block_out_channels)`
+            determines the number of downsample blocks in the adapter.
+        num_res_blocks (`int`, *optional*, defaults to `2`):
             Number of ResNet blocks in each downsample block.
-        downscale_factor (`int`, *optional*, defaults to 8):
+        downscale_factor (`int`, *optional*, defaults to `8`):
             A factor that determines the total downscale factor of the Adapter.
         adapter_type (`str`, *optional*, defaults to `full_adapter`):
-            The type of Adapter to use. Choose either `full_adapter` or `full_adapter_xl` or `light_adapter`.
+            The adapter type (`full_adapter`, `full_adapter_xl`, or `light_adapter`) to use.
     """
 
     @register_to_config
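An instantiation sketch that simply restates the documented defaults, to make the parameter relationships concrete:

from diffusers import T2IAdapter

adapter = T2IAdapter(
    in_channels=3,                    # 1 for grayscale control images
    channels=[320, 640, 1280, 1280],  # one entry per downsample block
    num_res_blocks=2,                 # ResNet blocks per downsample block
    downscale_factor=8,
    adapter_type="full_adapter",      # or "full_adapter_xl" / "light_adapter"
)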
@@ -393,15 +393,15 @@ class AdapterBlock(nn.Module):
     An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and
     `FullAdapterXL` models.
 
-    Parameters:
+    Args:
         in_channels (`int`):
             Number of channels of AdapterBlock's input.
         out_channels (`int`):
             Number of channels of AdapterBlock's output.
         num_res_blocks (`int`):
            Number of ResNet blocks in the AdapterBlock.
         down (`bool`, *optional*, defaults to `False`):
-            Whether to perform downsampling on AdapterBlock's input.
+            If `True`, perform downsampling on AdapterBlock's input.
     """
 
     def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
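A small shape sketch of the `down` flag; the channel counts are arbitrary, and importing the helper from the internal module is an assumption based on the file under review (it is not public API):

import torch
from diffusers.models.adapter import AdapterBlock

block = AdapterBlock(in_channels=320, out_channels=640, num_res_blocks=2, down=True)
x = torch.randn(1, 320, 64, 64)
out = block(x)  # down=True halves the spatial size: (1, 640, 32, 32)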
@@ -440,7 +440,7 @@ class AdapterResnetBlock(nn.Module):
     r"""
     An `AdapterResnetBlock` is a helper model that implements a ResNet-like block.
 
-    Parameters:
+    Args:
         channels (`int`):
             Number of channels of AdapterResnetBlock's input and output.
     """
@@ -518,15 +518,15 @@ class LightAdapterBlock(nn.Module):
     A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the
     `LightAdapter` model.
 
-    Parameters:
+    Args:
         in_channels (`int`):
             Number of channels of LightAdapterBlock's input.
         out_channels (`int`):
             Number of channels of LightAdapterBlock's output.
         num_res_blocks (`int`):
             Number of LightAdapterResnetBlocks in the LightAdapterBlock.
         down (`bool`, *optional*, defaults to `False`):
-            Whether to perform downsampling on LightAdapterBlock's input.
+            If `True`, perform downsampling on LightAdapterBlock's input.
     """
 
     def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False):
@@ -561,7 +561,7 @@ class LightAdapterResnetBlock(nn.Module):
     A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different
     architecture than `AdapterResnetBlock`.
 
-    Parameters:
+    Args:
         channels (`int`):
             Number of channels of LightAdapterResnetBlock's input and output.
     """