nightfury committed
Commit aa114a9
1 Parent(s): d34c157

Create new file

Files changed (1)
  1. inpainting.py +194 -0
inpainting.py ADDED
@@ -0,0 +1,194 @@
import inspect
from typing import List, Optional, Union

import numpy as np
import torch

import PIL
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer


def preprocess_image(image):
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


def preprocess_mask(mask):
    mask = mask.convert("L")
    w, h = mask.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // 8, h // 8), resample=PIL.Image.NEAREST)  # downsample to the latent resolution
    mask = np.array(mask).astype(np.float32) / 255.0
    mask = np.tile(mask, (4, 1, 1))  # repeat the mask over the 4 latent channels
    mask = mask[None]  # add a batch dimension (the original transpose(0, 1, 2, 3) was an identity permutation)
    mask = 1 - mask  # repaint white, keep black
    mask = torch.from_numpy(mask)
    return mask

class StableDiffusionInpaintingPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
    ):
        super().__init__()
        scheduler = scheduler.set_format("pt")
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        init_image: PIL.Image.Image,  # expected as a PIL image (see preprocess_image)
        mask_image: PIL.Image.Image,  # expected as a PIL image (see preprocess_mask)
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
    ):

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        offset = 0
        if accepts_offset:
            offset = 1
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # preprocess image
        init_image = preprocess_image(init_image).to(self.device)

        # encode the init image into latents and scale the latents
        init_latent_dist = self.vae.encode(init_image).latent_dist
        init_latents = init_latent_dist.sample(generator=generator)
        init_latents = 0.18215 * init_latents

        # expand init_latents to the batch size and keep an unmodified copy for re-masking
        init_latents = torch.cat([init_latents] * batch_size)
        init_latents_orig = init_latents

        # preprocess mask
        mask = preprocess_mask(mask_image).to(self.device)
        mask = torch.cat([mask] * batch_size)

        # check sizes
        if not mask.shape == init_latents.shape:
            raise ValueError("The mask and init_image should be the same size!")

        # get the original timestep using init_timestep
        init_timestep = int(num_inference_steps * strength) + offset
        init_timestep = min(init_timestep, num_inference_steps)
        timesteps = self.scheduler.timesteps[-init_timestep]
        timesteps = torch.tensor([timesteps] * batch_size, dtype=torch.long, device=self.device)

        # add noise to latents using the timesteps
        noise = torch.randn(init_latents.shape, generator=generator, device=self.device)
        init_latents = self.scheduler.add_noise(init_latents, noise, timesteps)

        # get prompt text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier-free guidance
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # For classifier-free guidance we need both the unconditional and the conditional
            # noise predictions. Concatenating the unconditional and text embeddings into a
            # single batch lets the U-Net produce both in one forward pass per step.
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502)
        # and should be in [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        latents = init_latents
        t_start = max(num_inference_steps - init_timestep + offset, 0)
        for i, t in tqdm(enumerate(self.scheduler.timesteps[t_start:])):
            # expand the latents if we are doing classifier-free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)["prev_sample"]

            # masking: re-noise the original latents to the current timestep and paste them back
            # wherever the mask says "keep", so only the masked (white) region is actually repainted
            init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, t)
            latents = (init_latents_proper * mask) + (latents * (1 - mask))

        # scale and decode the image latents with vae
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        # run safety checker
        safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(self.device)
        image, has_nsfw_concept = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        return {"sample": image, "nsfw_content_detected": has_nsfw_concept}
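
A minimal usage sketch (not part of the committed file): it assumes this file is importable as `inpainting`, that you have access to the `CompVis/stable-diffusion-v1-4` weights, and that `init.png` / `mask.png` are placeholder files you supply yourself. Per `preprocess_mask`, white pixels in the mask are repainted and black pixels are kept.

import torch
from PIL import Image

from inpainting import StableDiffusionInpaintingPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the standard Stable Diffusion v1.4 components into this custom pipeline class.
pipe = StableDiffusionInpaintingPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    use_auth_token=True,  # the v1.4 weights were gated at the time of this commit
).to(device)

init_image = Image.open("init.png").convert("RGB").resize((512, 512))  # placeholder path
mask_image = Image.open("mask.png").convert("RGB").resize((512, 512))  # placeholder path

generator = torch.Generator(device=device).manual_seed(0)
result = pipe(
    prompt="a cat sitting on a bench",
    init_image=init_image,
    mask_image=mask_image,
    strength=0.75,
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=generator,
)
result["sample"][0].save("inpainted.png")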