krrishD committed
Commit eafa34e
1 Parent(s): 8ba8f58

Update app.py

Files changed (1)
  1. app.py +2 -160
app.py CHANGED
@@ -14,170 +14,12 @@ from tqdm.auto import tqdm
  from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
  import gradio as gr
  import random
+ import StableDiffusionInpaintingPipelineCustom
 
  device = "cuda"
  model_path = "CompVis/stable-diffusion-v1-4"
 
- class StableDiffusionInpaintingPipeline(DiffusionPipeline):
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: Union[DDIMScheduler, PNDMScheduler],
-         safety_checker: StableDiffusionSafetyChecker,
-         feature_extractor: CLIPFeatureExtractor,
-     ):
-         super().__init__()
-         scheduler = scheduler.set_format("pt")
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-
-     @torch.no_grad()
-     def __call__(
-         self,
-         prompt: Union[str, List[str]],
-         init_image: torch.FloatTensor,
-         mask_image: torch.FloatTensor,
-         strength: float = 0.8,
-         num_inference_steps: Optional[int] = 50,
-         guidance_scale: Optional[float] = 7.5,
-         eta: Optional[float] = 0.0,
-         generator: Optional[torch.Generator] = None,
-         output_type: Optional[str] = "pil",
-     ):
-
-         if isinstance(prompt, str):
-             batch_size = 1
-         elif isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if strength < 0 or strength > 1:
-             raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
-
-         # set timesteps
-         accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
-         extra_set_kwargs = {}
-         offset = 0
-         if accepts_offset:
-             offset = 1
-             extra_set_kwargs["offset"] = 1
-
-         self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
-
-         # preprocess image
-         init_image = preprocess_image(init_image).to(self.device)
-
-         # encode the init image into latents and scale the latents
-         init_latents = self.vae.encode(init_image).sample()
-         init_latents = 0.18215 * init_latents
-
-         # prepare init_latents noise to latents
-         init_latents = torch.cat([init_latents] * batch_size)
-         init_latents_orig = init_latents
-
-         # preprocess mask
-         mask = preprocess_mask(mask_image).to(self.device)
-         mask = torch.cat([mask] * batch_size)
-
-         # check sizes
-         if not mask.shape == init_latents.shape:
-             raise ValueError(f"The mask and init_image should be the same size!")
-
-
-         # get the original timestep using init_timestep
-         init_timestep = int(num_inference_steps * strength) + offset
-         init_timestep = min(init_timestep, num_inference_steps)
-         timesteps = self.scheduler.timesteps[-init_timestep]
-         timesteps = torch.tensor([timesteps] * batch_size, dtype=torch.long, device=self.device)
-
-         # add noise to latents using the timesteps
-         noise = torch.randn(init_latents.shape, generator=generator, device=self.device)
-         init_latents = self.scheduler.add_noise(init_latents, noise, timesteps)
-
-         # get prompt text embeddings
-         text_input = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             truncation=True,
-             return_tensors="pt",
-         )
-         text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
-
-         # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier free guidance.
-         do_classifier_free_guidance = guidance_scale > 1.0
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance:
-             max_length = text_input.input_ids.shape[-1]
-             uncond_input = self.tokenizer(
-                 [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
-             )
-             uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
-         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-         # and should be between [0, 1]
-         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         extra_step_kwargs = {}
-         if accepts_eta:
-             extra_step_kwargs["eta"] = eta
-
-         latents = init_latents
-         t_start = max(num_inference_steps - init_timestep + offset, 0)
-         for i, t in tqdm(enumerate(self.scheduler.timesteps[t_start:])):
-             # expand the latents if we are doing classifier free guidance
-             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-
-             # predict the noise residual
-             noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
-
-             # perform guidance
-             if do_classifier_free_guidance:
-                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-             # compute the previous noisy sample x_t -> x_t-1
-             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)["prev_sample"]
-
-             # masking
-             init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, t)
-             latents = (init_latents_proper * mask) + (latents * (1 - mask))
-
-         # scale and decode the image latents with vae
-         latents = 1 / 0.18215 * latents
-         image = self.vae.decode(latents)
-
-         image = (image / 2 + 0.5).clamp(0, 1)
-         image = image.cpu().permute(0, 2, 3, 1).numpy()
-
-         # run safety checker
-         safety_cheker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(self.device)
-         image, has_nsfw_concept = self.safety_checker(images=image, clip_input=safety_cheker_input.pixel_values)
-
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         return {"sample": image, "nsfw_content_detected": has_nsfw_concept}
-
+ StableDiffusionInpaintingPipeline = StableDiffusionInpaintingPipelineCustom.StableDiffusionInpaintingPipeline
  pipe = StableDiffusionInpaintingPipeline.from_pretrained(
      model_path,
      revision="fp16",