
Commit f275625

yiyixuxu and sayakpaul authored
Fix a bug in AutoPipeline.from_pipe when switching pipeline with optional components (huggingface#6820)
* fix

* add tests

---------

Co-authored-by: yiyixuxu <yixu310@gmail,com>
Co-authored-by: Sayak Paul <[email protected]>
1 parent 0071478 commit f275625
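
For context, the user-facing behaviour this commit fixes can be sketched as follows. This is a minimal example mirroring the test added below; the tiny checkpoint hf-internal-testing/tiny-stable-diffusion-pipe and the dummy CLIP image encoder are the same ones the new test uses, and before this fix the from_pipe calls mishandled the optional image_encoder component when switching between tasks.

import torch
from transformers import CLIPVisionConfig, CLIPVisionModelWithProjection

from diffusers import AutoPipelineForImage2Image, AutoPipelineForText2Image

# A tiny CLIP vision tower stands in for a real image encoder (same trick as the new test).
torch.manual_seed(0)
image_encoder = CLIPVisionModelWithProjection(
    CLIPVisionConfig(
        hidden_size=1,
        projection_dim=1,
        num_hidden_layers=1,
        num_attention_heads=1,
        image_size=1,
        intermediate_size=1,
        patch_size=1,
    )
)

# Load a text-to-image pipeline with an *optional* component attached.
pipe = AutoPipelineForText2Image.from_pretrained(
    "hf-internal-testing/tiny-stable-diffusion-pipe",
    image_encoder=image_encoder,
)

# Switching tasks should carry the optional component over...
pipe = AutoPipelineForImage2Image.from_pipe(pipe)
assert pipe.image_encoder is not None

# ...while explicitly passing image_encoder=None should drop it again.
pipe = AutoPipelineForText2Image.from_pipe(pipe, image_encoder=None)
assert pipe.image_encoder is None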

File tree

3 files changed: +33 -14 lines changed

src/diffusers/loaders/ip_adapter.py
src/diffusers/pipelines/auto_pipeline.py
tests/pipelines/test_pipelines_auto.py

src/diffusers/loaders/ip_adapter.py

Lines changed: 1 addition & 2 deletions

@@ -166,8 +166,7 @@ def load_ip_adapter(
                     pretrained_model_name_or_path_or_dict,
                     subfolder=Path(subfolder, "image_encoder").as_posix(),
                 ).to(self.device, dtype=self.dtype)
-                self.image_encoder = image_encoder
-                self.register_to_config(image_encoder=["transformers", "CLIPVisionModelWithProjection"])
+                self.register_modules(image_encoder=image_encoder)
             else:
                 raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
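
The ip_adapter.py change replaces the manual attribute assignment plus register_to_config call with a single register_modules call, so the freshly loaded image encoder is registered the same way as every other pipeline component and is picked up consistently by pipeline.components, which from_pipe reads. As a rough illustration (a simplified approximation, not the library's exact implementation), register_modules does something along these lines:

# Simplified approximation of DiffusionPipeline.register_modules, for illustration only.
def register_modules(self, **kwargs):
    for name, module in kwargs.items():
        if module is None:
            # Keep a placeholder entry so the config still lists the component.
            register_dict = {name: (None, None)}
        else:
            # Record the source library and class name, e.g.
            # ("transformers", "CLIPVisionModelWithProjection").
            library = module.__module__.split(".")[0]
            register_dict = {name: (library, module.__class__.__name__)}

        # Persist the entry in the pipeline config (model_index.json) ...
        self.register_to_config(**register_dict)
        # ... and expose the component as an attribute, e.g. self.image_encoder.
        setattr(self, name, module)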

src/diffusers/pipelines/auto_pipeline.py

Lines changed: 3 additions & 12 deletions

@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import inspect
 from collections import OrderedDict

 from huggingface_hub.utils import validate_hf_hub_args
@@ -164,14 +163,6 @@ def get_model(pipeline_class_name):
     raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}")


-def _get_signature_keys(obj):
-    parameters = inspect.signature(obj.__init__).parameters
-    required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
-    optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
-    expected_modules = set(required_parameters.keys()) - {"self"}
-    return expected_modules, optional_parameters
-
-
 class AutoPipelineForText2Image(ConfigMixin):
     r"""
@@ -391,7 +382,7 @@ def from_pipe(cls, pipeline, **kwargs):
         )

         # define expected module and optional kwargs given the pipeline signature
-        expected_modules, optional_kwargs = _get_signature_keys(text_2_image_cls)
+        expected_modules, optional_kwargs = text_2_image_cls._get_signature_keys(text_2_image_cls)

         pretrained_model_name_or_path = original_config.pop("_name_or_path", None)

@@ -668,7 +659,7 @@ def from_pipe(cls, pipeline, **kwargs):
         )

         # define expected module and optional kwargs given the pipeline signature
-        expected_modules, optional_kwargs = _get_signature_keys(image_2_image_cls)
+        expected_modules, optional_kwargs = image_2_image_cls._get_signature_keys(image_2_image_cls)

         pretrained_model_name_or_path = original_config.pop("_name_or_path", None)

@@ -943,7 +934,7 @@ def from_pipe(cls, pipeline, **kwargs):
         )

         # define expected module and optional kwargs given the pipeline signature
-        expected_modules, optional_kwargs = _get_signature_keys(inpainting_cls)
+        expected_modules, optional_kwargs = inpainting_cls._get_signature_keys(inpainting_cls)

         pretrained_model_name_or_path = original_config.pop("_name_or_path", None)
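
This is the heart of the fix. The removed module-level _get_signature_keys only inspected __init__ defaults, so optional components such as image_encoder landed in optional_kwargs and from_pipe treated them as plain config values rather than modules to carry over. Each pipeline class's own _get_signature_keys classmethod is now used instead; to the best of my understanding (an assumption, not a quote of the library code), it additionally promotes names listed in the class's _optional_components into the expected modules, roughly like this:

import inspect


# The removed module-level helper: splits __init__ parameters purely by default value.
def _get_signature_keys(obj):
    parameters = inspect.signature(obj.__init__).parameters
    required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
    optional_parameters = {k for k, v in parameters.items() if v.default != inspect._empty}
    expected_modules = set(required_parameters.keys()) - {"self"}
    return expected_modules, optional_parameters


# Hypothetical sketch of the extra step the class-level method performs (assumption):
# anything the class declares in _optional_components counts as an expected module,
# even though its __init__ default is None, so from_pipe copies the actual object across.
def _get_signature_keys_with_optional_components(cls):
    expected_modules, optional_parameters = _get_signature_keys(cls)
    for name in list(optional_parameters):
        if name in getattr(cls, "_optional_components", []):
            expected_modules.add(name)
            optional_parameters.discard(name)
    return expected_modules, optional_parameters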

tests/pipelines/test_pipelines_auto.py

Lines changed: 29 additions & 0 deletions

@@ -21,6 +21,7 @@
 from pathlib import Path

 import torch
+from transformers import CLIPVisionConfig, CLIPVisionModelWithProjection

 from diffusers import (
     AutoPipelineForImage2Image,
@@ -48,6 +49,20 @@


 class AutoPipelineFastTest(unittest.TestCase):
+    @property
+    def dummy_image_encoder(self):
+        torch.manual_seed(0)
+        config = CLIPVisionConfig(
+            hidden_size=1,
+            projection_dim=1,
+            num_hidden_layers=1,
+            num_attention_heads=1,
+            image_size=1,
+            intermediate_size=1,
+            patch_size=1,
+        )
+        return CLIPVisionModelWithProjection(config)
+
     def test_from_pipe_consistent(self):
         pipe = AutoPipelineForText2Image.from_pretrained(
             "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False
@@ -204,6 +219,20 @@ def test_from_pipe_controlnet_new_task(self):
         assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline"
         assert "controlnet" in pipe_control_img2img.components

+    def test_from_pipe_optional_components(self):
+        image_encoder = self.dummy_image_encoder
+
+        pipe = AutoPipelineForText2Image.from_pretrained(
+            "hf-internal-testing/tiny-stable-diffusion-pipe",
+            image_encoder=image_encoder,
+        )
+
+        pipe = AutoPipelineForImage2Image.from_pipe(pipe)
+        assert pipe.image_encoder is not None
+
+        pipe = AutoPipelineForText2Image.from_pipe(pipe, image_encoder=None)
+        assert pipe.image_encoder is None
+

 @slow
 class AutoPipelineIntegrationTest(unittest.TestCase):
