
Commit bb4d605

add inpainting example script (huggingface#241)
* add inpainting
* added proper noising of init_latent as recommended by jackloomen (huggingface#241 (comment))
* move image preprocessing inside pipeline and allow non-512x512 masks
1 parent e5b5dea commit bb4d605

File tree

1 file changed: +193 −0 lines changed


examples/inference/inpainting.py

Lines changed: 193 additions & 0 deletions
@@ -0,0 +1,193 @@
import inspect
from typing import List, Optional, Union

import numpy as np
import torch

import PIL
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer


def preprocess(image):
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with a leading batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0  # scale from [0, 1] to [-1, 1]
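# Illustrative note (not in the original script): a 512x512 RGB PIL image comes
# back from preprocess() as a (1, 3, 512, 512) float tensor with values in [-1, 1].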

def preprocess_mask(mask):
    mask = mask.convert("L")
    w, h = mask.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // 8, h // 8), resample=PIL.Image.NEAREST)  # downsample to the latent resolution
    mask = np.array(mask).astype(np.float32) / 255.0
    mask = np.tile(mask, (4, 1, 1))  # repeat the single channel across the 4 latent channels
    mask = mask[None]  # add a batch dimension (the original transpose(0, 1, 2, 3) was a no-op)
    mask = 1 - mask  # repaint white, keep black
    mask = torch.from_numpy(mask)
    return mask
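# Illustrative note (not in the original script): a 512x512 white-on-black mask
# becomes a (1, 4, 64, 64) tensor here, matching the VAE latent shape for a
# 512x512 image; after the inversion above it holds 1.0 where the image is kept
# and 0.0 where it is repainted.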


class StableDiffusionInpaintingPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
    ):
        super().__init__()
        scheduler = scheduler.set_format("pt")
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        init_image: torch.FloatTensor,
        mask_image: torch.FloatTensor,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        offset = 0
        if accepts_offset:
            offset = 1
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # preprocess image
        init_image = preprocess(init_image).to(self.device)

        # encode the init image into latents and scale the latents
        init_latents = self.vae.encode(init_image).sample()
        init_latents = 0.18215 * init_latents
        init_latents_orig = init_latents

        # preprocess mask
        mask = preprocess_mask(mask_image).to(self.device)
        mask = torch.cat([mask] * batch_size)

        # expand init_latents to the batch size, then check that mask and latents match
        init_latents = torch.cat([init_latents] * batch_size)
        if not mask.shape == init_latents.shape:
            raise ValueError("The mask and init_image should be the same size!")

        # get the original timestep using init_timestep
        init_timestep = int(num_inference_steps * strength) + offset
        init_timestep = min(init_timestep, num_inference_steps)
        timesteps = self.scheduler.timesteps[-init_timestep]
        timesteps = torch.tensor([timesteps] * batch_size, dtype=torch.long, device=self.device)
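        # Worked example (illustrative): with num_inference_steps=50, strength=0.8
        # and offset=1, init_timestep = int(50 * 0.8) + 1 = 41, and t_start below
        # becomes max(50 - 41 + 1, 0) = 10, so denoising runs over the last 40 of
        # the 50 scheduled timesteps.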

        # add noise to latents using the timesteps
        noise = torch.randn(init_latents.shape, generator=generator, device=self.device)
        init_latents = self.scheduler.add_noise(init_latents, noise, timesteps)

        # get prompt text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
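            # Illustrative note: with the CLIP ViT-L/14 text encoder used by Stable
            # Diffusion v1, text_embeddings now has shape (2 * batch_size, 77, 768);
            # the first half is the unconditional batch, the second half the prompt batch.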

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        latents = init_latents
        t_start = max(num_inference_steps - init_timestep + offset, 0)
        for i, t in tqdm(enumerate(self.scheduler.timesteps[t_start:])):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)["prev_sample"]

            # masking: re-noise the original latents to the current timestep and paste
            # them back where mask == 1, so only the masked region is repainted
            init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, t)
            latents = (init_latents_proper * mask) + (latents * (1 - mask))

        # scale and decode the image latents with vae
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents)

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        # run safety checker
        safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(self.device)
        image, has_nsfw_concept = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        return {"sample": image, "nsfw_content_detected": has_nsfw_concept}
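
For reference, a minimal usage sketch for the pipeline above. It is not part of the commit: the checkpoint name, file paths, and prompt are assumptions, and it presumes access to the CompVis/stable-diffusion-v1-4 weights via a Hugging Face access token.

import torch
from PIL import Image

# Load the pipeline defined above; "CompVis/stable-diffusion-v1-4" is an assumed checkpoint.
pipe = StableDiffusionInpaintingPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", use_auth_token=True
).to("cuda")

init_image = Image.open("input.png").convert("RGB").resize((512, 512))
mask_image = Image.open("mask.png")  # white regions are repainted, black regions kept

with torch.autocast("cuda"):
    result = pipe(
        prompt="a red brick wall",
        init_image=init_image,
        mask_image=mask_image,
        strength=0.75,
        guidance_scale=7.5,
    )
result["sample"][0].save("inpainted.png")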
