TharunSivamani committed on
Commit fb4198f • 1 Parent(s): cc399a4

final commit

Files changed (3)
  1. README.md +4 -3
  2. app.py +287 -0
  3. requirements.txt +9 -0
README.md CHANGED
@@ -1,12 +1,13 @@
 ---
 title: Stable Diffusion
-emoji: 📊
-colorFrom: yellow
-colorTo: purple
+emoji: 📉
+colorFrom: blue
+colorTo: blue
 sdk: gradio
 sdk_version: 3.47.1
 app_file: app.py
 pinned: false
+license: mit
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,287 @@
+from base64 import b64encode
+
+import gradio as gr
+import numpy
+import torch
+import torchvision.transforms as T
+
+from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
+from PIL import Image
+from torch import autocast
+from torchvision import transforms as tfms
+from tqdm.auto import tqdm
+from transformers import CLIPTextModel, CLIPTokenizer, logging
+
+torch.manual_seed(1)
+logging.set_verbosity_error()
+
+torch_device = "cpu"
+
+# Load the autoencoder model which will be used to decode the latents into image space.
+vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
+
+# Load the tokenizer and text encoder to tokenize and encode the text.
+tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
+text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
+
+# The UNet model for generating the latents.
+unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
+
+# The noise scheduler
+scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
+
+# Move the models to the target device (CPU in this Space)
+vae = vae.to(torch_device)
+text_encoder = text_encoder.to(torch_device)
+unet = unet.to(torch_device)
+
+# CLIP's token and position embedding layers, used to build custom prompt embeddings
+token_emb_layer = text_encoder.text_model.embeddings.token_embedding
+pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
+position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
+position_embeddings = pos_emb_layer(position_ids)
+
+def pil_to_latent(input_im):
+    # Single image -> single latent in a batch (so shape 1, 4, 64, 64)
+    with torch.no_grad():
+        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)  # Note scaling to [-1, 1]
+    return 0.18215 * latent.latent_dist.sample()
+
+def latents_to_pil(latents):
+    # Batch of latents -> list of PIL images
+    latents = (1 / 0.18215) * latents
+    with torch.no_grad():
+        image = vae.decode(latents).sample
+    image = (image / 2 + 0.5).clamp(0, 1)
+    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
+    images = (image * 255).round().astype("uint8")
+    pil_images = [Image.fromarray(image) for image in images]
+    return pil_images
+
+def get_output_embeds(input_embeddings):
+    # CLIP's text model uses a causal mask, so we prepare it here:
+    bsz, seq_len = input_embeddings.shape[:2]
+    causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)
+
+    # Getting the output embeddings involves calling the model with output_hidden_states=True
+    # so that it doesn't just return the pooled final predictions:
+    encoder_outputs = text_encoder.text_model.encoder(
+        inputs_embeds=input_embeddings,
+        attention_mask=None,  # We aren't using an attention mask, so that can be None
+        causal_attention_mask=causal_attention_mask.to(torch_device),
+        output_attentions=None,
+        output_hidden_states=True,  # We want the output embeddings, not the final pooled output
+        return_dict=None,
+    )
+
+    # We're interested in the output hidden state only
+    output = encoder_outputs[0]
+
+    # There is a final layer norm we need to pass these through
+    output = text_encoder.text_model.final_layer_norm(output)
+
+    # And now they're ready!
+    return output
+
+def generate_with_embs(text_embeddings, seed, max_length):
+    height = 512                         # default height of Stable Diffusion
+    width = 512                          # default width of Stable Diffusion
+    num_inference_steps = 30             # Number of denoising steps
+    guidance_scale = 7.5                 # Scale for classifier-free guidance
+    generator = torch.manual_seed(seed)  # Seed generator to create the initial latent noise
+    batch_size = 1
+
+    # Unconditional (empty prompt) embeddings for classifier-free guidance
+    uncond_input = tokenizer(
+        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
+    )
+    with torch.no_grad():
+        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
+    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+    # Prep scheduler
+    set_timesteps(scheduler, num_inference_steps)
+
+    # Prep latents
+    latents = torch.randn(
+        (batch_size, unet.in_channels, height // 8, width // 8),
+        generator=generator,
+    )
+    latents = latents.to(torch_device)
+    latents = latents * scheduler.init_noise_sigma
+
+    # Denoising loop
+    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
+        # Expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
+        latent_model_input = torch.cat([latents] * 2)
+        sigma = scheduler.sigmas[i]
+        latent_model_input = scheduler.scale_model_input(latent_model_input, t)
+
+        # Predict the noise residual
+        with torch.no_grad():
+            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
+
+        # Perform guidance
+        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+        # Compute the previous noisy sample x_t -> x_t-1
+        latents = scheduler.step(noise_pred, t, latents).prev_sample
+
+    return latents_to_pil(latents)[0]
+
+# Prep scheduler
+def set_timesteps(scheduler, num_inference_steps):
+    scheduler.set_timesteps(num_inference_steps)
+    scheduler.timesteps = scheduler.timesteps.to(torch.float32)  # minor fix to ensure MPS compatibility, fixed in diffusers PR 3925
+
+def eos_pos(prompt):
+    # Position right after the last prompt token for a whitespace-separated prompt (index 0 is the start token)
+    return len(prompt.split()) + 1
+
+def embed_style(prompt, style_embed, style_seed):
+    # Tokenize
+    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
+    input_ids = text_input.input_ids.to(torch_device)
+
+    # Get token embeddings
+    token_embeddings = token_emb_layer(input_ids)
+
+    # The new embedding: the learned style embedding
+    replacement_token_embedding = style_embed.to(torch_device)
+
+    # Insert this into the token embeddings at the end-of-prompt position
+    token_embeddings[0, torch.tensor(eos_pos(prompt))] = replacement_token_embedding.to(torch_device)
+
+    # Combine with position embeddings
+    input_embeddings = token_embeddings + position_embeddings
+
+    # Feed through the encoder to get the final output embeddings
+    modified_output_embeddings = get_output_embeds(input_embeddings)
+
+    # And generate an image with this:
+    max_length = text_input.input_ids.shape[-1]
+    return generate_with_embs(modified_output_embeddings, style_seed, max_length)
+
+def custom_loss(image):
+    # Colorfulness metric: mean standard deviation of the decoded image values
+    std_dev = torch.std(image, dim=(1, 2))
+    loss = torch.mean(std_dev)
+    return loss
+
+def generate_image_on_loss(prompt, seed):
+
+    height = 64                           # reduced from the 512 default to keep CPU inference manageable
+    width = 64                            # reduced from the 512 default to keep CPU inference manageable
+    num_inference_steps = 50              # Number of denoising steps
+    guidance_scale = 8                    # Scale for classifier-free guidance
+    generator = torch.manual_seed(seed)   # Seed generator to create the initial latent noise
+    batch_size = 1
+    loss_scale = 200
+
+    # Prep text
+    text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
+    with torch.no_grad():
+        text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
+
+    # And the unconditional input as before:
+    max_length = text_input.input_ids.shape[-1]
+    uncond_input = tokenizer(
+        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
+    )
+    with torch.no_grad():
+        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
+    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+    # Prep scheduler
+    set_timesteps(scheduler, num_inference_steps + 1)
+
+    # Prep latents
+    latents = torch.randn(
+        (batch_size, unet.in_channels, height // 8, width // 8),
+        generator=generator,
+    )
+    latents = latents.to(torch_device)
+    latents = latents * scheduler.init_noise_sigma
+
+    sched_out = None
+
+    # Denoising loop
+    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
+        # Expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
+        latent_model_input = torch.cat([latents] * 2)
+        sigma = scheduler.sigmas[i]
+        latent_model_input = scheduler.scale_model_input(latent_model_input, t)
+
+        # Predict the noise residual
+        with torch.no_grad():
+            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
+
+        # Perform classifier-free guidance
+        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+        ### ADDITIONAL GUIDANCE ###
+        if i % 5 == 0 and i > 0:
+            # Require grad on the latents
+            latents = latents.detach().requires_grad_()
+
+            # Get the predicted x0 (rewind the step index so this extra step() call doesn't advance the scheduler state)
+            scheduler._step_index -= 1
+            latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
+
+            # Decode to image space
+            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5  # range (0, 1)
+
+            # Calculate loss
+            loss = custom_loss(denoised_images) * loss_scale
+
+            # Occasionally print it out
+            print(i, "loss:", loss.item())
+
+            # Get gradient of the loss with respect to the latents
+            cond_grad = torch.autograd.grad(loss, latents)[0]
+
+            # Modify the latents based on this gradient
+            latents = latents.detach() - cond_grad * sigma**2
+
+            # To PIL images (intermediate previews; not returned)
+            im_t0 = latents_to_pil(latents_x0)[0]
+            im_next = latents_to_pil(latents)[0]
+
+        # Now step with scheduler
+        latents = scheduler.step(noise_pred, t, latents).prev_sample
+
+    return latents_to_pil(latents)[0]
+
+def generate_image_from_prompt(prompt, style):
+    style_list = ['dreamy_painting.bin', 'egorey.bin', 'fairy_tale_painting.bin', 'matrix.bin', 'pjablonski_style.bin']
+    style_seeds = [16, 64, 32, 128, 8]
+
+    style_file = style + '.bin'
+    idx = style_list.index(style_file)
+
+    style_seed = style_seeds[idx]
+    style_dict = torch.load(style_file)
+    style_embed = [val for val in style_dict.values()]
+
+    generated_image = embed_style(prompt, style_embed[0], style_seed)
+    loss_generated_img = generate_image_on_loss(prompt, style_seed)
+
+    return [generated_image, loss_generated_img]
+
+demo = gr.Interface(
+    fn=generate_image_from_prompt,
+    inputs=[
+        gr.Textbox(label="Prompt", placeholder="Enter your prompt"),
+        gr.Dropdown(
+            ["dreamy_painting", "egorey", "fairy_tale_painting", "matrix", "pjablonski_style"], value="dreamy_painting", label="Pretrained Styles"
+        )
+    ],
+    outputs=[
+        gr.Gallery(label="Generated images", show_label=False, elem_id="gallery", columns=[2], rows=[1], object_fit="contain", height="512")
+    ],
+    examples=[
+        ["a cat climbing a tree", "dreamy_painting"]
+    ]
+)
+
+demo.launch(debug=True)
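
For context, `generate_image_from_prompt` above expects each style `.bin` to be a learned-embedding file shipped alongside app.py, loadable with `torch.load` as a small dict whose single value is the style's CLIP token embedding. A minimal sketch of inspecting one and calling the app directly (the file name, dict layout, and embedding shape are assumptions inferred from the code above, not part of this commit):

import torch

# Assumed layout: a one-entry dict {placeholder_token: embedding_tensor},
# e.g. {"<dreamy-painting>": tensor of shape (768,)} for CLIP ViT-L/14.
style_dict = torch.load("dreamy_painting.bin", map_location="cpu")
token, embedding = next(iter(style_dict.items()))
print(token, tuple(embedding.shape))

# With app.py imported, the same call the Gradio UI makes would be:
# styled_img, loss_img = generate_image_from_prompt("a cat climbing a tree", "dreamy_painting")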
requirements.txt ADDED
@@ -0,0 +1,9 @@
+gradio
+torch
+transformers==4.25.1
+diffusers
+ftfy
+accelerate
+torchvision
+scipy
+tqdm
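
One note on the pin: `get_output_embeds` in app.py calls the private helper `text_model._build_causal_attention_mask`, which is present in transformers 4.25.1 but has been removed in newer releases, so loosening `transformers==4.25.1` would likely break the Space. An illustrative local sanity check (not part of this commit):

from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
# If this attribute is missing, the installed transformers release is too new for app.py.
assert hasattr(text_encoder.text_model, "_build_causal_attention_mask")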