Linoy Tsaban committed on
Commit
b34b4e8
1 Parent(s): 9ad499d

add ddpm inversion (#4)


- add ddpm inversion (e24d40d1c2c8663f66146b7451847baa9ba55c08)
- Update preprocess_utils.py (248a53d9dad0242311a3673b4b6c6e9e4188e6fd)
- Update tokenflow_pnp.py (2bd26712f2ff49f7fd34429b0a6d2677fa918139)

Files changed (3)
  1. app.py +68 -37
  2. preprocess_utils.py +262 -28
  3. tokenflow_pnp.py +90 -31
app.py CHANGED
@@ -61,20 +61,22 @@ def prep(config):
61
  model_key = "stabilityai/stable-diffusion-2-depth"
62
  toy_scheduler = DDIMScheduler.from_pretrained(model_key, subfolder="scheduler")
63
  toy_scheduler.set_timesteps(config["save_steps"])
64
- print("config[save_steps]", config["save_steps"])
65
  timesteps_to_save, num_inference_steps = get_timesteps(toy_scheduler, num_inference_steps=config["save_steps"],
66
  strength=1.0,
67
  device=device)
68
- print("YOOOO timesteps to save", timesteps_to_save)
69
 
70
  # seed_everything(config["seed"])
71
  if not config["frames"]: # original non demo setting
72
  save_path = os.path.join(config["save_dir"],
 
73
  f'sd_{config["sd_version"]}',
74
  Path(config["data_path"]).stem,
75
  f'steps_{config["steps"]}',
76
  f'nframes_{config["n_frames"]}')
77
  os.makedirs(os.path.join(save_path, f'latents'), exist_ok=True)
78
  add_dict_to_yaml_file(os.path.join(config["save_dir"], 'inversion_prompts.yaml'), Path(config["data_path"]).stem, config["inversion_prompt"])
79
  # save inversion prompt in a txt file
80
  with open(os.path.join(save_path, 'inversion_prompt.txt'), 'w') as f:
@@ -82,43 +84,53 @@ def prep(config):
82
  else:
83
  save_path = None
84
 
85
- model = Preprocess(device, config,
 
86
  vae=vae,
87
  text_encoder=text_encoder,
88
  scheduler=scheduler,
89
  tokenizer=tokenizer,
90
  unet=unet)
91
- print(type(model.config["batch_size"]))
92
- frames, latents, total_inverted_latents, rgb_reconstruction = model.extract_latents(
93
  num_steps=model.config["steps"],
94
  save_path=save_path,
95
  batch_size=model.config["batch_size"],
96
  timesteps_to_save=timesteps_to_save,
97
  inversion_prompt=model.config["inversion_prompt"],
98
  )
99
 
100
-
101
- return frames, latents, total_inverted_latents, rgb_reconstruction
102
 
103
  def preprocess_and_invert(input_video,
104
  frames,
105
  latents,
106
  inverted_latents,
 
107
  seed,
108
  randomize_seed,
109
  do_inversion,
110
- # save_dir: str = "latents",
111
  steps,
112
  n_timesteps = 50,
113
  batch_size: int = 8,
114
  n_frames: int = 40,
115
  inversion_prompt:str = '',
 
116
 
117
  ):
118
  sd_version = "2.1"
119
- height = 512
120
  weidth: int = 512
121
- print("n timesteps", n_timesteps)
122
  if do_inversion or randomize_seed:
123
  preprocess_config = {}
124
  preprocess_config['H'] = height
@@ -134,30 +146,37 @@ def preprocess_and_invert(input_video,
134
  preprocess_config['frames'] = video_to_frames(input_video)
135
  preprocess_config['data_path'] = input_video.split(".")[0]
136
 
137
 
138
  if randomize_seed:
139
  seed = randomize_seed_fn()
140
  seed_everything(seed)
141
 
142
- frames, latents, total_inverted_latents, rgb_reconstruction = prep(preprocess_config)
143
- print(total_inverted_latents.keys())
144
- print(len(total_inverted_latents.keys()))
145
- frames = gr.State(value=frames)
146
- latents = gr.State(value=latents)
147
- inverted_latents = gr.State(value=total_inverted_latents)
148
  do_inversion = False
149
 
150
- return frames, latents, inverted_latents, do_inversion
151
 
152
 
153
  def edit_with_pnp(input_video,
154
  frames,
155
  latents,
156
  inverted_latents,
 
157
  seed,
158
  randomize_seed,
159
  do_inversion,
160
  steps,
 
161
  prompt: str = "a marble sculpture of a woman running, Venus de Milo",
162
  # negative_prompt: str = "ugly, blurry, low res, unrealistic, unaesthetic",
163
  pnp_attn_t: float = 0.5,
@@ -183,14 +202,18 @@ def edit_with_pnp(input_video,
183
  config["pnp_attn_t"] = pnp_attn_t
184
  config["pnp_f_t"] = pnp_f_t
185
  config["pnp_inversion_prompt"] = inversion_prompt
186
 
187
 
188
  if do_inversion:
189
- frames, latents, inverted_latents, do_inversion = preprocess_and_invert(
190
  input_video,
191
  frames,
192
  latents,
193
  inverted_latents,
 
194
  seed,
195
  randomize_seed,
196
  do_inversion,
@@ -198,7 +221,8 @@ def edit_with_pnp(input_video,
198
  n_timesteps,
199
  batch_size,
200
  n_frames,
201
- inversion_prompt)
 
202
  do_inversion = False
203
 
204
 
@@ -207,12 +231,13 @@ def edit_with_pnp(input_video,
207
  seed_everything(seed)
208
 
209
 
210
- editor = TokenFlow(config=config,pipe=tokenflow_pipe, frames=frames.value, inverted_latents=inverted_latents.value)
211
  edited_frames = editor.edit_video()
212
-
213
- save_video(edited_frames, 'tokenflow_PnP_fps_30.mp4', fps=n_fps)
 
214
  # path = export_to_video(edited_frames)
215
- return 'tokenflow_PnP_fps_30.mp4', frames, latents, inverted_latents, do_inversion
216
 
217
  ########
218
  # demo #
@@ -238,6 +263,7 @@ with gr.Blocks(css="style.css") as demo:
238
  frames = gr.State()
239
  inverted_latents = gr.State()
240
  latents = gr.State()
 
241
  do_inversion = gr.State(value=True)
242
 
243
  with gr.Row():
@@ -252,15 +278,7 @@ with gr.Blocks(css="style.css") as demo:
252
  label="Describe your edited video",
253
  max_lines=1, value=""
254
  )
255
- # with gr.Group(visible=False) as share_btn_container:
256
- # with gr.Group(elem_id="share-btn-container"):
257
- # community_icon = gr.HTML(community_icon_html, visible=True)
258
- # loading_icon = gr.HTML(loading_icon_html, visible=False)
259
- # share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)
260
-
261
-
262
- # with gr.Row():
263
- # inversion_progress = gr.Textbox(visible=False, label="Inversion progress")
264
 
265
  with gr.Row():
266
  run_button = gr.Button("Edit your video!", visible=True)
@@ -274,8 +292,10 @@ with gr.Blocks(css="style.css") as demo:
274
  randomize_seed = gr.Checkbox(label='Randomize seed', value=False)
275
  gudiance_scale = gr.Slider(label='Guidance Scale', minimum=1, maximum=30,
276
  value=7.5, step=0.5, interactive=True)
277
- steps = gr.Slider(label='Inversion steps', minimum=10, maximum=500,
278
- value=200, step=1, interactive=True)
279
 
280
  with gr.Column(min_width=100):
281
  inversion_prompt = gr.Textbox(lines=1, label="Inversion prompt", interactive=True, placeholder="")
@@ -284,7 +304,7 @@ with gr.Blocks(css="style.css") as demo:
284
  n_frames = gr.Slider(label='Num frames', minimum=2, maximum=200,
285
  value=24, step=1, interactive=True)
286
  n_timesteps = gr.Slider(label='Diffusion steps', minimum=25, maximum=100,
287
- value=25, step=25, interactive=True)
288
  n_fps = gr.Slider(label='Frames per second', minimum=1, maximum=60,
289
  value=10, step=1, interactive=True)
290
 
@@ -300,6 +320,11 @@ with gr.Blocks(css="style.css") as demo:
300
  fn = reset_do_inversion,
301
  outputs = [do_inversion],
302
  queue = False)
303
 
304
  inversion_prompt.change(
305
  fn = reset_do_inversion,
@@ -326,6 +351,7 @@ with gr.Blocks(css="style.css") as demo:
326
  frames,
327
  latents,
328
  inverted_latents,
 
329
  seed,
330
  randomize_seed,
331
  do_inversion,
@@ -333,11 +359,13 @@ with gr.Blocks(css="style.css") as demo:
333
  n_timesteps,
334
  batch_size,
335
  n_frames,
336
- inversion_prompt
 
337
  ],
338
  outputs = [frames,
339
  latents,
340
  inverted_latents,
 
341
  do_inversion
342
 
343
  ])
@@ -347,10 +375,12 @@ with gr.Blocks(css="style.css") as demo:
347
  frames,
348
  latents,
349
  inverted_latents,
 
350
  seed,
351
  randomize_seed,
352
  do_inversion,
353
  steps,
 
354
  prompt,
355
  pnp_attn_t,
356
  pnp_f_t,
@@ -360,7 +390,7 @@ with gr.Blocks(css="style.css") as demo:
360
  gudiance_scale,
361
  inversion_prompt,
362
  n_fps ],
363
- outputs = [output_video, frames, latents, inverted_latents, do_inversion]
364
  )
365
 
366
  gr.Examples(
@@ -371,4 +401,5 @@ with gr.Blocks(css="style.css") as demo:
371
  )
372
 
373
  demo.queue()
 
374
  demo.launch()
 
61
  model_key = "stabilityai/stable-diffusion-2-depth"
62
  toy_scheduler = DDIMScheduler.from_pretrained(model_key, subfolder="scheduler")
63
  toy_scheduler.set_timesteps(config["save_steps"])
 
64
  timesteps_to_save, num_inference_steps = get_timesteps(toy_scheduler, num_inference_steps=config["save_steps"],
65
  strength=1.0,
66
  device=device)
67
+
68
 
69
  # seed_everything(config["seed"])
70
  if not config["frames"]: # original non demo setting
71
  save_path = os.path.join(config["save_dir"],
72
+ f'inversion_{config["inversion"]}',
73
  f'sd_{config["sd_version"]}',
74
  Path(config["data_path"]).stem,
75
  f'steps_{config["steps"]}',
76
  f'nframes_{config["n_frames"]}')
77
  os.makedirs(os.path.join(save_path, f'latents'), exist_ok=True)
78
+ if config["inversion"] == 'ddpm':
79
+ os.makedirs(os.path.join(save_path, f'latents'), exist_ok=True)
80
  add_dict_to_yaml_file(os.path.join(config["save_dir"], 'inversion_prompts.yaml'), Path(config["data_path"]).stem, config["inversion_prompt"])
81
  # save inversion prompt in a txt file
82
  with open(os.path.join(save_path, 'inversion_prompt.txt'), 'w') as f:
 
84
  else:
85
  save_path = None
86
 
87
+ model = Preprocess(device,
88
+ config,
89
  vae=vae,
90
  text_encoder=text_encoder,
91
  scheduler=scheduler,
92
  tokenizer=tokenizer,
93
  unet=unet)
94
+
95
+ frames_and_latents, rgb_reconstruction = model.extract_latents(
96
  num_steps=model.config["steps"],
97
  save_path=save_path,
98
  batch_size=model.config["batch_size"],
99
  timesteps_to_save=timesteps_to_save,
100
  inversion_prompt=model.config["inversion_prompt"],
101
+ inversion_type=model.config["inversion"],
102
+ skip_steps=model.config["skip_steps"],
103
+ reconstruction=model.config["reconstruct"]
104
  )
105
 
106
+ if model.config["inversion"] == 'ddpm':
107
+ frames, latents, total_inverted_latents, zs = frames_and_latents
108
+ return frames, latents, total_inverted_latents, zs, rgb_reconstruction
109
+ else:
110
+ frames, latents, total_inverted_latents = frames_and_latents
111
+ return frames, latents, total_inverted_latents, rgb_reconstruction
112
+
113
 
114
  def preprocess_and_invert(input_video,
115
  frames,
116
  latents,
117
  inverted_latents,
118
+ zs,
119
  seed,
120
  randomize_seed,
121
  do_inversion,
 
122
  steps,
123
  n_timesteps = 50,
124
  batch_size: int = 8,
125
  n_frames: int = 40,
126
  inversion_prompt:str = '',
127
+ skip_steps: int = 15,
128
 
129
  ):
130
  sd_version = "2.1"
131
+ height: int = 512
132
  weidth: int = 512
133
+
134
  if do_inversion or randomize_seed:
135
  preprocess_config = {}
136
  preprocess_config['H'] = height
 
146
  preprocess_config['frames'] = video_to_frames(input_video)
147
  preprocess_config['data_path'] = input_video.split(".")[0]
148
 
149
+ preprocess_config['inversion'] = 'ddpm'
150
+ preprocess_config['skip_steps'] = skip_steps
151
+ preprocess_config['reconstruct'] = False
152
+
153
+
154
 
155
  if randomize_seed:
156
  seed = randomize_seed_fn()
157
  seed_everything(seed)
158
 
159
+ frames, latents, total_inverted_latents, zs, rgb_reconstruction = prep(preprocess_config)
160
+
161
+ frames = gr.State(value = frames)
162
+ latents = gr.State(value = latents)
163
+ inverted_latents = gr.State(value = total_inverted_latents)
164
+ zs = gr.State(value = zs)
165
  do_inversion = False
166
 
167
+ return frames, latents, inverted_latents, zs, do_inversion
168
 
169
 
170
  def edit_with_pnp(input_video,
171
  frames,
172
  latents,
173
  inverted_latents,
174
+ zs,
175
  seed,
176
  randomize_seed,
177
  do_inversion,
178
  steps,
179
+ skip_steps: int = 15,
180
  prompt: str = "a marble sculpture of a woman running, Venus de Milo",
181
  # negative_prompt: str = "ugly, blurry, low res, unrealistic, unaesthetic",
182
  pnp_attn_t: float = 0.5,
 
202
  config["pnp_attn_t"] = pnp_attn_t
203
  config["pnp_f_t"] = pnp_f_t
204
  config["pnp_inversion_prompt"] = inversion_prompt
205
+ config["inversion"] = "ddpm"
206
+ config["skip_steps"] = skip_steps
207
+
208
 
209
 
210
  if do_inversion:
211
+ frames, latents, inverted_latents, zs, do_inversion = preprocess_and_invert(
212
  input_video,
213
  frames,
214
  latents,
215
  inverted_latents,
216
+ zs,
217
  seed,
218
  randomize_seed,
219
  do_inversion,
 
221
  n_timesteps,
222
  batch_size,
223
  n_frames,
224
+ inversion_prompt,
225
+ skip_steps)
226
  do_inversion = False
227
 
228
 
 
231
  seed_everything(seed)
232
 
233
 
234
+ editor = TokenFlow(config=config, pipe=tokenflow_pipe, frames=frames.value, inverted_latents=inverted_latents.value, zs=zs.value)
235
  edited_frames = editor.edit_video()
236
+
237
+ edit_video_path = f'tokenflow_PnP_fps_{n_fps}.mp4'
238
+ save_video(edited_frames, edit_video_path, fps=n_fps)
239
  # path = export_to_video(edited_frames)
240
+ return edit_video_path, frames, latents, inverted_latents, zs, do_inversion
241
 
242
  ########
243
  # demo #
 
263
  frames = gr.State()
264
  inverted_latents = gr.State()
265
  latents = gr.State()
266
+ zs = gr.State()
267
  do_inversion = gr.State(value=True)
268
 
269
  with gr.Row():
 
278
  label="Describe your edited video",
279
  max_lines=1, value=""
280
  )
281
+
 
282
 
283
  with gr.Row():
284
  run_button = gr.Button("Edit your video!", visible=True)
 
292
  randomize_seed = gr.Checkbox(label='Randomize seed', value=False)
293
  gudiance_scale = gr.Slider(label='Guidance Scale', minimum=1, maximum=30,
294
  value=7.5, step=0.5, interactive=True)
295
+ steps = gr.Slider(label='Inversion steps', minimum=10, maximum=200,
296
+ value=50, step=1, interactive=True)
297
+ skip_steps = gr.Slider(label='Skip Steps', minimum=5, maximum=25,
298
+ value=5, step=1, interactive=True)
299
 
300
  with gr.Column(min_width=100):
301
  inversion_prompt = gr.Textbox(lines=1, label="Inversion prompt", interactive=True, placeholder="")
 
304
  n_frames = gr.Slider(label='Num frames', minimum=2, maximum=200,
305
  value=24, step=1, interactive=True)
306
  n_timesteps = gr.Slider(label='Diffusion steps', minimum=25, maximum=100,
307
+ value=50, step=25, interactive=True)
308
  n_fps = gr.Slider(label='Frames per second', minimum=1, maximum=60,
309
  value=10, step=1, interactive=True)
310
 
 
320
  fn = reset_do_inversion,
321
  outputs = [do_inversion],
322
  queue = False)
323
+
324
+ steps.change(
325
+ fn = reset_do_inversion,
326
+ outputs = [do_inversion],
327
+ queue = False)
328
 
329
  inversion_prompt.change(
330
  fn = reset_do_inversion,
 
351
  frames,
352
  latents,
353
  inverted_latents,
354
+ zs,
355
  seed,
356
  randomize_seed,
357
  do_inversion,
 
359
  n_timesteps,
360
  batch_size,
361
  n_frames,
362
+ inversion_prompt,
363
+ skip_steps
364
  ],
365
  outputs = [frames,
366
  latents,
367
  inverted_latents,
368
+ zs,
369
  do_inversion
370
 
371
  ])
 
375
  frames,
376
  latents,
377
  inverted_latents,
378
+ zs,
379
  seed,
380
  randomize_seed,
381
  do_inversion,
382
  steps,
383
+ skip_steps,
384
  prompt,
385
  pnp_attn_t,
386
  pnp_f_t,
 
390
  gudiance_scale,
391
  inversion_prompt,
392
  n_fps ],
393
+ outputs = [output_video, frames, latents, inverted_latents, zs, do_inversion]
394
  )
395
 
396
  gr.Examples(
 
401
  )
402
 
403
  demo.queue()
404
+
405
  demo.launch()
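
Note on the app.py wiring above: the DDPM path adds one new piece of Gradio state (zs, the per-step noise maps) and one new slider (skip_steps), and both are threaded through preprocess_and_invert and edit_with_pnp alongside the existing frames/latents/inverted_latents state. The sketch below only illustrates that wiring pattern under simplified assumptions: run_inversion and run_edit are hypothetical stand-ins for the demo's handlers, and state is returned as plain values rather than re-wrapped in gr.State as the demo does.

    import gradio as gr

    def reset_do_inversion():
        # any change to an inversion parameter invalidates the cached latents/zs
        return True

    def run_inversion(video, zs, do_inversion, skip_steps):
        if do_inversion:
            # placeholder payload standing in for the DDPM noise maps
            zs = f"zs for {video} (skip_steps={skip_steps})"
            do_inversion = False
        return zs, do_inversion

    def run_edit(video, prompt, zs, do_inversion, skip_steps):
        zs, do_inversion = run_inversion(video, zs, do_inversion, skip_steps)
        return f"edited {video!r} with {prompt!r} using {zs}", zs, do_inversion

    with gr.Blocks() as demo:
        zs = gr.State()                      # new state object, like zs in app.py
        do_inversion = gr.State(value=True)
        video = gr.Textbox(label="Video path")
        prompt = gr.Textbox(label="Edit prompt")
        skip_steps = gr.Slider(5, 25, value=5, step=1, label="Skip Steps")
        output = gr.Textbox(label="Result")
        run_button = gr.Button("Edit")

        # changing skip_steps forces a re-inversion, like steps.change above
        skip_steps.change(fn=reset_do_inversion, outputs=[do_inversion], queue=False)
        run_button.click(fn=run_edit,
                         inputs=[video, prompt, zs, do_inversion, skip_steps],
                         outputs=[output, zs, do_inversion])

    demo.launch()
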
preprocess_utils.py CHANGED
@@ -1,5 +1,6 @@
1
  from transformers import CLIPTextModel, CLIPTokenizer, logging
2
  from diffusers import AutoencoderKL, UNet2DConditionModel, DDIMScheduler
 
3
  # suppress partial model loading warning
4
  logging.set_verbosity_error()
5
 
@@ -12,6 +13,8 @@ from torchvision.io import write_video
12
  from pathlib import Path
13
  from utils import *
14
  import torchvision.transforms as T
 
 
15
 
16
 
17
  def get_timesteps(scheduler, num_inference_steps, strength, device):
@@ -64,7 +67,9 @@ class Preprocess(nn.Module):
64
  self.text_encoder = text_encoder
65
  self.unet = unet
66
  self.scheduler=scheduler
 
67
  self.total_inverted_latents = {}
 
68
 
69
  self.paths, self.frames, self.latents = self.get_data(self.config["data_path"], self.config["n_frames"])
70
  print("self.frames", self.frames.shape)
@@ -163,14 +168,34 @@ class Preprocess(nn.Module):
163
  )[0]
164
  return noise_pred
165
 
166
  @torch.no_grad()
167
  def get_text_embeds(self, prompt, negative_prompt, device="cuda"):
168
- text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length,
169
- truncation=True, return_tensors='pt')
170
- text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0]
171
- uncond_input = self.tokenizer(negative_prompt, padding='max_length', max_length=self.tokenizer.model_max_length,
172
- return_tensors='pt')
173
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(device))[0]
174
  text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
175
  return text_embeddings
176
 
@@ -192,7 +217,7 @@ class Preprocess(nn.Module):
192
  for i in range(0, len(imgs), batch_size):
193
  posterior = self.vae.encode(imgs[i:i + batch_size]).latent_dist
194
  latent = posterior.mean if deterministic else posterior.sample()
195
- latents.append(latent * 0.18215)
196
  latents = torch.cat(latents)
197
  return latents
198
 
@@ -264,6 +289,137 @@ class Preprocess(nn.Module):
264
  self.total_inverted_latents[f'noisy_latents_{t}'] = latent_frames.clone()
265
 
266
  return latent_frames
267
 
268
  @torch.no_grad()
269
  def ddim_sample(self, x, cond, batch_size):
@@ -295,6 +451,8 @@ class Preprocess(nn.Module):
295
  pred_x0 = (x_batch - sigma * eps) / mu
296
  x[b:b + batch_size] = mu_prev * pred_x0 + sigma_prev * eps
297
  return x
 
 
298
 
299
  @torch.no_grad()
300
  def extract_latents(self,
@@ -303,31 +461,89 @@ class Preprocess(nn.Module):
303
  batch_size,
304
  timesteps_to_save,
305
  inversion_prompt='',
306
- reconstruct=False):
307
  self.scheduler.set_timesteps(num_steps)
308
  cond = self.get_text_embeds(inversion_prompt, "")[1].unsqueeze(0)
309
  latent_frames = self.latents
310
- print("latent_frames", latent_frames.shape)
311
-
312
- inverted_x= self.ddim_inversion(cond,
313
- latent_frames,
314
- save_path,
315
- batch_size=batch_size,
316
- save_latents=True if save_path else False,
317
- timesteps_to_save=timesteps_to_save)
318
 
 
 
319
 
320
-
321
- # print("total_inverted_latents", len(total_inverted_latents.keys()))
322
-
323
- if reconstruct:
324
- latent_reconstruction = self.ddim_sample(inverted_x, cond, batch_size=batch_size)
325
 
326
- rgb_reconstruction = self.decode_latents(latent_reconstruction)
327
- return self.frames, self.latents, self.total_inverted_latents, rgb_reconstruction
 
328
 
329
- return self.frames, self.latents, self.total_inverted_latents, None
 
 
330
 
331
 
332
  def prep(opt):
333
  # timesteps to save
@@ -348,11 +564,14 @@ def prep(opt):
348
  seed_everything(opt["seed"])
349
  if not opt["frames"]: # original non demo setting
350
  save_path = os.path.join(opt["save_dir"],
 
351
  f'sd_{opt["sd_version"]}',
352
  Path(opt["data_path"]).stem,
353
  f'steps_{opt["steps"]}',
354
  f'nframes_{opt["n_frames"]}')
355
  os.makedirs(os.path.join(save_path, f'latents'), exist_ok=True)
 
 
356
  add_dict_to_yaml_file(os.path.join(opt["save_dir"], 'inversion_prompts.yaml'), Path(opt["data_path"]).stem, opt["inversion_prompt"])
357
  # save inversion prompt in a txt file
358
  with open(os.path.join(save_path, 'inversion_prompt.txt'), 'w') as f:
@@ -360,16 +579,31 @@ def prep(opt):
360
  else:
361
  save_path = None
362
 
363
- model = Preprocess(device, opt)
364
 
365
- frames, latents, total_inverted_latents, rgb_reconstruction = model.extract_latents(
366
  num_steps=model.config["steps"],
367
  save_path=save_path,
368
  batch_size=model.config["batch_size"],
369
  timesteps_to_save=timesteps_to_save,
370
  inversion_prompt=model.config["inversion_prompt"],
371
  )
372
 
373
-
374
- return frames, latents, total_inverted_latents, rgb_reconstruction
375
 
 
1
  from transformers import CLIPTextModel, CLIPTokenizer, logging
2
  from diffusers import AutoencoderKL, UNet2DConditionModel, DDIMScheduler
3
+ from diffusers.utils.torch_utils import randn_tensor
4
  # suppress partial model loading warning
5
  logging.set_verbosity_error()
6
 
 
13
  from pathlib import Path
14
  from utils import *
15
  import torchvision.transforms as T
16
+ import cv2
17
+ import numpy as np
18
 
19
 
20
  def get_timesteps(scheduler, num_inference_steps, strength, device):
 
67
  self.text_encoder = text_encoder
68
  self.unet = unet
69
  self.scheduler=scheduler
70
+
71
  self.total_inverted_latents = {}
72
+ self.noise_total = None # will contain all zs if inversion == 'ddpm', var name chosen to match the save path of zs used in pr https://github.com/omerbt/TokenFlow/pull/24/files#
73
 
74
  self.paths, self.frames, self.latents = self.get_data(self.config["data_path"], self.config["n_frames"])
75
  print("self.frames", self.frames.shape)
 
168
  )[0]
169
  return noise_pred
170
 
171
+ @torch.no_grad()
172
+ def encode_text(self, prompts, device=None):
173
+ if device is None:
174
+ device = self.device
175
+ text_inputs = self.tokenizer(
176
+ prompts,
177
+ padding="max_length",
178
+ max_length=self.tokenizer.model_max_length,
179
+ return_tensors="pt",
180
+ )
181
+ text_input_ids = text_inputs.input_ids
182
+
183
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
184
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length:])
185
+ print(
186
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
187
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
188
+ )
189
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
190
+ text_embeddings = self.text_encoder(text_input_ids.to(device))[0]
191
+
192
+ return text_embeddings
193
+
194
  @torch.no_grad()
195
  def get_text_embeds(self, prompt, negative_prompt, device="cuda"):
196
+ text_embeddings = self.encode_text(prompt, device=device)
197
+ uncond_embeddings = self.encode_text(negative_prompt, device=device)
198
+
199
  text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
200
  return text_embeddings
201
 
 
217
  for i in range(0, len(imgs), batch_size):
218
  posterior = self.vae.encode(imgs[i:i + batch_size]).latent_dist
219
  latent = posterior.mean if deterministic else posterior.sample()
220
+ latents.append(latent * self.vae.config.scaling_factor)
221
  latents = torch.cat(latents)
222
  return latents
223
 
 
289
  self.total_inverted_latents[f'noisy_latents_{t}'] = latent_frames.clone()
290
 
291
  return latent_frames
292
+
293
+ @torch.no_grad()
294
+ def ddpm_inversion(self, cond,
295
+ latent_frames,
296
+ batch_size,
297
+ num_inversion_steps,
298
+ save_path=None,
299
+ save_latents=True,
300
+ eta: float = 1.0,
301
+ skip_steps=20):
302
+ timesteps = self.scheduler.timesteps
303
+ return_inverted_latents = self.config["frames"] is not None
304
+
305
+ variance_noise_shape = (
306
+ num_inversion_steps,
307
+ *latent_frames.shape)
308
+ x0 = latent_frames
309
+
310
+ t_to_idx = {int(v): k for k, v in enumerate(timesteps)}
311
+ xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=cond.dtype)
312
+
313
+ for t in reversed(timesteps):
314
+ idx = t_to_idx[int(t)]
315
+ for b in range(0, x0.shape[0], batch_size):
316
+ x_batch = x0[b:b + batch_size]
317
+
318
+ noise = randn_tensor(shape=x_batch.shape, device=self.device, dtype=x0.dtype)
319
+ xts[idx, b:b + batch_size] = self.scheduler.add_noise(x_batch, noise, t)
320
+
321
+ xts = torch.cat([xts, x0.unsqueeze(0)], dim=0)
322
+
323
+ zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=cond.dtype)
324
+
325
+ for t in tqdm(timesteps):
326
+ idx = t_to_idx[int(t)]
327
+ # 1. predict noise residual
328
+ for b in range(0, x0.shape[0], batch_size):
329
+ xt = xts[idx, b:b + batch_size]
330
+
331
+ cond_batch = cond.repeat(xt.shape[0], 1, 1)
332
+ noise_pred = self.unet(xt, timestep=t, encoder_hidden_states=cond_batch).sample
333
+
334
+ xtm1 = xts[idx + 1, b:b + batch_size]
335
+ z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, eta)
336
+ zs[idx, b:b + batch_size] = z
337
+
338
+ # correction to avoid error accumulation
339
+ xts[idx + 1, b:b + batch_size] = xtm1_corrected
340
+
341
+ if save_latents:
342
+ torch.save(xts[idx], os.path.join(save_path, 'latents', f'noisy_latents_{t}.pt'))
343
+
344
+ if return_inverted_latents:
345
+ self.total_inverted_latents[f'noisy_latents_{t}'] = xts[idx].clone()
346
+
347
+ if save_path:
348
+ torch.save(xts[idx], os.path.join(save_path, 'latents', f'noisy_latents_{t}.pt'))
349
+ torch.save(zs, os.path.join(save_path, 'latents', f'noise_total.pt'))
350
+
351
+ if return_inverted_latents:
352
+ self.total_inverted_latents[f'noisy_latents_{t}'] = xts[idx].clone()
353
+ self.noise_total = zs.clone()
354
+
355
+ return xts[skip_steps].expand(latent_frames.shape[0], -1, -1, -1), zs
356
+
357
+ def prepare_extra_step_kwargs(self, eta):
358
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
359
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
360
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
361
+ # and should be between [0, 1]
362
+
363
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
364
+ extra_step_kwargs = {}
365
+ if accepts_eta:
366
+ extra_step_kwargs["eta"] = eta
367
+
368
+ # check if the scheduler accepts generator
369
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
370
+ return extra_step_kwargs
371
+
372
+ @torch.no_grad()
373
+ def ddpm_sample(self, init_latents, cond, batch_size, num_inversion_steps, skip_steps, eta, zs_all,
374
+ guidance_scale=0):
375
+ use_ddpm = True
376
+ do_classifier_free_guidance = guidance_scale > 1.0
377
+
378
+ total_latents = init_latents
379
+ self.scheduler.set_timesteps(num_inversion_steps, device=self.device)
380
+ timesteps = self.scheduler.timesteps
381
+ zs_total = zs_all[skip_steps:]
382
+
383
+ if use_ddpm:
384
+ t_to_idx = {int(v): k for k, v in enumerate(timesteps[-zs_total.shape[0]:])}
385
+ timesteps = timesteps[-zs_total.shape[0]:]
386
+
387
+ num_warmup_steps = len(timesteps) - num_inversion_steps * self.scheduler.order
388
+ extra_step_kwargs = self.prepare_extra_step_kwargs(eta)
389
+
390
+ for i, t in enumerate(tqdm(timesteps)):
391
+ for b in range(0, total_latents.shape[0], batch_size):
392
+ latents = total_latents[b:b + batch_size]
393
+ if do_classifier_free_guidance:
394
+ latent_model_input = torch.cat([latents] * 2)
395
+ else:
396
+ latent_model_input = latents
397
+ cond_batch = cond.repeat(latents.shape[0], 1, 1)
398
+
399
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
400
+
401
+ noise_pred = self.unet(
402
+ latent_model_input,
403
+ t,
404
+ encoder_hidden_states=cond_batch,
405
+ return_dict=False,
406
+ )[0]
407
+
408
+ if do_classifier_free_guidance:
409
+ noise_pred_out = noise_pred.chunk(2) # [b,4, 64, 64]
410
+ noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]
411
+
412
+ # default text guidance
413
+ noise_guidance = guidance_scale * (noise_pred_text - noise_pred_uncond)
414
+
415
+ noise_pred = noise_pred_uncond + noise_guidance
416
+
417
+ idx = t_to_idx[int(t)]
418
+ zs = zs_total[idx, b:b + batch_size]
419
+ latents = self.scheduler.step(noise_pred, t, latents, variance_noise=zs,
420
+ **extra_step_kwargs).prev_sample
421
+ total_latents[b:b + batch_size] = latents
422
+ return total_latents
423
 
424
  @torch.no_grad()
425
  def ddim_sample(self, x, cond, batch_size):
 
451
  pred_x0 = (x_batch - sigma * eps) / mu
452
  x[b:b + batch_size] = mu_prev * pred_x0 + sigma_prev * eps
453
  return x
454
+
455
+
456
 
457
  @torch.no_grad()
458
  def extract_latents(self,
 
461
  batch_size,
462
  timesteps_to_save,
463
  inversion_prompt='',
464
+ skip_steps=20,
465
+ inversion_type='ddim',
466
+ eta=1.0,
467
+ reconstruction=False):
468
+
469
  self.scheduler.set_timesteps(num_steps)
470
  cond = self.get_text_embeds(inversion_prompt, "")[1].unsqueeze(0)
471
  latent_frames = self.latents
472
+
473
+ if inversion_type == 'ddim':
474
+ inverted_x= self.ddim_inversion(cond,
475
+ latent_frames,
476
+ save_path,
477
+ batch_size=batch_size,
478
+ save_latents=True if save_path else False,
479
+ timesteps_to_save=timesteps_to_save)
480
+
481
+ if reconstruction:
482
+ latent_reconstruction = self.ddim_sample(inverted_x, cond, batch_size=batch_size)
483
+
484
+ rgb_reconstruction = self.decode_latents(latent_reconstruction)
485
+ return (self.frames, self.latents, self.total_inverted_latents), rgb_reconstruction
486
+
487
+ else:
488
+ return (self.frames, self.latents, self.total_inverted_latents), None
489
+
490
+ elif inversion_type == 'ddpm':
491
+ inverted_x, zs = self.ddpm_inversion(cond,
492
+ latent_frames,
493
+ save_path= save_path,
494
+ batch_size=batch_size,
495
+ save_latents=True if save_path else False,
496
+ num_inversion_steps=num_steps,
497
+ eta=eta,
498
+ skip_steps=skip_steps)
499
+
500
+ cond = self.encode_text(inversion_prompt)
501
+ if reconstruction:
502
+ latent_reconstruction = self.ddpm_sample(init_latents=inverted_x,
503
+ cond=cond, batch_size=batch_size,
504
+ num_inversion_steps=num_steps, skip_steps=skip_steps,
505
+ eta=eta, zs_all=zs)
506
+ rgb_reconstruction = self.decode_latents(latent_reconstruction)
507
+ return (self.frames, self.latents, self.total_inverted_latents, self.noise_total), rgb_reconstruction
508
+ else:
509
+ return (self.frames, self.latents, self.total_inverted_latents, self.noise_total), None
510
 
511
+ else:
512
+ raise NotImplementedError()
513
 
514
+ def compute_noise(scheduler, prev_latents, latents, timestep, noise_pred, eta):
515
+ # 1. get previous step value (=t-1)
516
+ prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps
517
+
518
+ # 2. compute alphas, betas
519
+ alpha_prod_t = scheduler.alphas_cumprod[timestep]
520
+ alpha_prod_t_prev = (
521
+ scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod
522
+ )
523
+
524
+ beta_prod_t = 1 - alpha_prod_t
525
 
526
+ # 3. compute predicted original sample from predicted noise also called
527
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
528
+ pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
529
 
530
+ # 4. Clip "predicted x_0"
531
+ if scheduler.config.clip_sample:
532
+ pred_original_sample = torch.clamp(pred_original_sample, -1, 1)
533
 
534
+ # 5. compute variance: "sigma_t(η)" -> see formula (16)
535
+ # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
536
+ variance = scheduler._get_variance(timestep, prev_timestep)
537
+ std_dev_t = eta * variance ** (0.5)
538
+
539
+ # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
540
+ pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** (0.5) * noise_pred
541
+
542
+ # modifed so that updated xtm1 is returned as well (to avoid error accumulation)
543
+ mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
544
+ noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta)
545
+
546
+ return noise, mu_xt + (eta * variance ** 0.5) * noise
547
 
548
  def prep(opt):
549
  # timesteps to save
 
564
  seed_everything(opt["seed"])
565
  if not opt["frames"]: # original non demo setting
566
  save_path = os.path.join(opt["save_dir"],
567
+ f'inversion_{opt["inversion"]}',
568
  f'sd_{opt["sd_version"]}',
569
  Path(opt["data_path"]).stem,
570
  f'steps_{opt["steps"]}',
571
  f'nframes_{opt["n_frames"]}')
572
  os.makedirs(os.path.join(save_path, f'latents'), exist_ok=True)
573
+ if opt["inversion"] == 'ddpm':
574
+ os.makedirs(os.path.join(save_path, f'latents'), exist_ok=True)
575
  add_dict_to_yaml_file(os.path.join(opt["save_dir"], 'inversion_prompts.yaml'), Path(opt["data_path"]).stem, opt["inversion_prompt"])
576
  # save inversion prompt in a txt file
577
  with open(os.path.join(save_path, 'inversion_prompt.txt'), 'w') as f:
 
579
  else:
580
  save_path = None
581
 
582
+ model = Preprocess(device,
583
+ opt,
584
+ vae=vae,
585
+ text_encoder=text_encoder,
586
+ scheduler=scheduler,
587
+ tokenizer=tokenizer,
588
+ unet=unet)
589
 
590
+ frames_and_latents, rgb_reconstruction = model.extract_latents(
591
  num_steps=model.config["steps"],
592
  save_path=save_path,
593
  batch_size=model.config["batch_size"],
594
  timesteps_to_save=timesteps_to_save,
595
  inversion_prompt=model.config["inversion_prompt"],
596
+ inversion_type=model.config["inversion"],
597
+ skip_steps=model.config["skip_steps"],
598
+ reconstruction=model.config["reconstruct"]
599
  )
600
 
601
+ if model.config["inversion"] == 'ddpm':
602
+ frames, latents, total_inverted_latents, zs = frames_and_latents
603
+ return frames, latents, total_inverted_latents, zs, rgb_reconstruction
604
+ else:
605
+ frames, latents, total_inverted_latents = frames_and_latents
606
+ return frames, latents, total_inverted_latents, rgb_reconstruction
607
+
608
+
609
 
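
For reference, the ddpm_inversion and compute_noise additions above implement an edit-friendly DDPM inversion: every timestep gets its own independently noised copy of the clean latent, and compute_noise then solves for the noise map z_t that makes the scheduler's step reproduce the stored x_{t-1} exactly (the corrected x_{t-1} is written back to avoid error accumulation). In the notation of the code (alpha_prod_* corresponds to \bar\alpha, variance to Var(t), with the previous index written as t-1 for brevity), the relations are, as a summary rather than additional commit code:

    % independent noising of the clean latent x_0 for every timestep t (first loop)
    x_t = \sqrt{\bar\alpha_t}\, x_0 + \sqrt{1-\bar\alpha_t}\; n_t, \qquad n_t \sim \mathcal{N}(0, I)

    % predicted x_0 and the step mean (formula (12) of arXiv:2010.02502)
    \hat x_0 = \frac{x_t - \sqrt{1-\bar\alpha_t}\,\epsilon_\theta(x_t,t)}{\sqrt{\bar\alpha_t}}, \qquad
    \mu_t = \sqrt{\bar\alpha_{t-1}}\,\hat x_0 + \sqrt{1-\bar\alpha_{t-1}-\sigma_t^2}\;\epsilon_\theta(x_t,t), \qquad
    \sigma_t = \eta\,\sqrt{\mathrm{Var}(t)}

    % noise map returned by compute_noise, and the corrected previous latent
    z_t = \frac{x_{t-1} - \mu_t}{\sigma_t}, \qquad x_{t-1} \leftarrow \mu_t + \sigma_t z_t

At sampling time (ddpm_sample here, and TokenFlow's sample_loop below) these z_t are passed back into scheduler.step as variance_noise, so the stored trajectory is reproduced when the prompt and injected features are unchanged.
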
tokenflow_pnp.py CHANGED
@@ -9,6 +9,7 @@ import torchvision.transforms as T
9
  import argparse
10
  from PIL import Image
11
  import yaml
 
12
  from tqdm import tqdm
13
  from transformers import logging
14
  from diffusers import DDIMScheduler, StableDiffusionPipeline
@@ -25,9 +26,9 @@ VAE_BATCH_SIZE = 10
25
  class TokenFlow(nn.Module):
26
  def __init__(self, config,
27
  pipe,
28
- frames=None,
29
- # latents = None,
30
- inverted_latents = None):
31
  super().__init__()
32
  self.config = config
33
  self.device = config["device"]
@@ -61,7 +62,16 @@ class TokenFlow(nn.Module):
61
  print('SD model loaded')
62
 
63
  # data
64
- self.frames, self.inverted_latents = frames, inverted_latents
 
65
  self.latents_path = self.get_latents_path()
66
 
67
  # load frames
@@ -120,15 +130,13 @@ class TokenFlow(nn.Module):
120
 
121
  def get_latents_path(self):
122
  read_from_files = self.frames is None
123
- # read_from_files = True
124
  if read_from_files:
125
  latents_path = os.path.join(self.config["latents_path"], f'sd_{self.config["sd_version"]}',
126
  Path(self.config["data_path"]).stem, f'steps_{self.config["n_inversion_steps"]}')
127
  latents_path = [x for x in glob.glob(f'{latents_path}/*') if '.' not in Path(x).name]
128
  n_frames = [int([x for x in latents_path[i].split('/') if 'nframes' in x][0].split('_')[1]) for i in range(len(latents_path))]
129
- print("n_frames", n_frames)
130
  latents_path = latents_path[np.argmax(n_frames)]
131
- print("latents_path", latents_path)
132
  self.config["n_frames"] = min(max(n_frames), self.config["n_frames"])
133
 
134
  else:
@@ -138,9 +146,8 @@ class TokenFlow(nn.Module):
138
  if self.config["n_frames"] % self.config["batch_size"] != 0:
139
  # make n_frames divisible by batch_size
140
  self.config["n_frames"] = self.config["n_frames"] - (self.config["n_frames"] % self.config["batch_size"])
141
- print("Number of frames: ", self.config["n_frames"])
142
  if read_from_files:
143
- print("YOOOOOOO", os.path.join(latents_path, 'latents'))
144
  return os.path.join(latents_path, 'latents')
145
  else:
146
  return None
@@ -206,37 +213,61 @@ class TokenFlow(nn.Module):
206
  # encode to latents
207
  latents = self.encode_imgs(frames, deterministic=True).to(torch.float16).to(self.device)
208
  # get noise
209
- eps = self.get_ddim_eps(latents, range(self.config["n_frames"])).to(torch.float16).to(self.device)
210
  if not read_from_files:
211
  return None, frames, latents, eps
212
  return paths, frames, latents, eps
213
 
214
  def get_ddim_eps(self, latent, indices):
215
  read_from_files = self.inverted_latents is None
216
- # read_from_files = True
217
  if read_from_files:
218
  noisest = max([int(x.split('_')[-1].split('.')[0]) for x in glob.glob(os.path.join(self.latents_path, f'noisy_latents_*.pt'))])
219
- print("noisets:", noisest)
220
- print("indecies:", indices)
221
  latents_path = os.path.join(self.latents_path, f'noisy_latents_{noisest}.pt')
222
  noisy_latent = torch.load(latents_path)[indices].to(self.device)
223
-
224
- # path = os.path.join('test_latents', f'noisy_latents_{noisest}.pt')
225
- # f_noisy_latent = torch.load(path)[indices].to(self.device)
226
- # print(f_noisy_latent==noisy_latent)
227
  else:
228
  noisest = max([int(key.split("_")[-1]) for key in self.inverted_latents.keys()])
229
- print("noisets:", noisest)
230
- print("indecies:", indices)
231
  noisy_latent = self.inverted_latents[f'noisy_latents_{noisest}'][indices]
232
 
233
  alpha_prod_T = self.scheduler.alphas_cumprod[noisest]
234
  mu_T, sigma_T = alpha_prod_T ** 0.5, (1 - alpha_prod_T) ** 0.5
235
  eps = (noisy_latent - mu_T * latent) / sigma_T
236
  return eps
237
 
238
  @torch.no_grad()
239
- def denoise_step(self, x, t, indices):
240
  # register the time step and features in pnp injection modules
241
  read_files = self.inverted_latents is None
242
 
@@ -264,21 +295,31 @@ class TokenFlow(nn.Module):
264
  noise_pred = noise_pred_uncond + self.config["guidance_scale"] * (noise_pred_cond - noise_pred_uncond)
265
 
266
  # compute the denoising step with the reference model
267
- denoised_latent = self.scheduler.step(noise_pred, t, x)['prev_sample']
 
268
  return denoised_latent
269
 
270
  @torch.autocast(dtype=torch.float16, device_type='cuda')
271
- def batched_denoise_step(self, x, t, indices):
272
  batch_size = self.config["batch_size"]
273
  denoised_latents = []
274
- pivotal_idx = torch.randint(batch_size, (len(x)//batch_size,)) + torch.arange(0,len(x),batch_size)
275
-
276
  register_pivotal(self, True)
277
- self.denoise_step(x[pivotal_idx], t, indices[pivotal_idx])
278
  register_pivotal(self, False)
279
  for i, b in enumerate(range(0, len(x), batch_size)):
280
  register_batch_idx(self, i)
281
- denoised_latents.append(self.denoise_step(x[b:b + batch_size], t, indices[b:b + batch_size]))
282
  denoised_latents = torch.cat(denoised_latents)
283
  return denoised_latents
284
 
@@ -309,7 +350,13 @@ class TokenFlow(nn.Module):
309
 
310
  self.init_method(conv_injection_t=pnp_f_t, qk_injection_t=pnp_attn_t)
311
 
312
- noisy_latents = self.scheduler.add_noise(self.latents, self.eps, self.scheduler.timesteps[0])
313
  edited_frames = self.sample_loop(noisy_latents, torch.arange(self.config["n_frames"]))
314
 
315
  if save_files:
@@ -321,12 +368,24 @@ class TokenFlow(nn.Module):
321
  return edited_frames
322
 
323
  def sample_loop(self, x, indices):
324
- save_files = self.inverted_latents is None # if we're in the original non-demo setting
325
- # save_files = True
326
  if save_files:
327
  os.makedirs(f'{self.config["output_path"]}/img_ode', exist_ok=True)
328
- for i, t in enumerate(tqdm(self.scheduler.timesteps, desc="Sampling")):
329
- x = self.batched_denoise_step(x, t, indices)
330
 
331
  decoded_latents = self.decode_latents(x)
332
  if save_files:
 
9
  import argparse
10
  from PIL import Image
11
  import yaml
12
+ import inspect
13
  from tqdm import tqdm
14
  from transformers import logging
15
  from diffusers import DDIMScheduler, StableDiffusionPipeline
 
26
  class TokenFlow(nn.Module):
27
  def __init__(self, config,
28
  pipe,
29
+ frames = None,
30
+ inverted_latents = None, #X0,...,XT,
31
+ zs = None):
32
  super().__init__()
33
  self.config = config
34
  self.device = config["device"]
 
62
  print('SD model loaded')
63
 
64
  # data
65
+ self.inversion = config['inversion']
66
+ if self.inversion == 'ddpm':
67
+ self.skip_steps = config['skip_steps']
68
+ self.eta = 1.0
69
+ else:
70
+ self.eta = 0.0
71
+ self.extra_step_kwargs = self.prepare_extra_step_kwargs(self.eta)
72
+
73
+ # data
74
+ self.frames, self.inverted_latents, self.zs = frames, inverted_latents, zs
75
  self.latents_path = self.get_latents_path()
76
 
77
  # load frames
 
130
 
131
  def get_latents_path(self):
132
  read_from_files = self.frames is None
 
133
  if read_from_files:
134
  latents_path = os.path.join(self.config["latents_path"], f'sd_{self.config["sd_version"]}',
135
  Path(self.config["data_path"]).stem, f'steps_{self.config["n_inversion_steps"]}')
136
  latents_path = [x for x in glob.glob(f'{latents_path}/*') if '.' not in Path(x).name]
137
  n_frames = [int([x for x in latents_path[i].split('/') if 'nframes' in x][0].split('_')[1]) for i in range(len(latents_path))]
 
138
  latents_path = latents_path[np.argmax(n_frames)]
139
+
140
  self.config["n_frames"] = min(max(n_frames), self.config["n_frames"])
141
 
142
  else:
 
146
  if self.config["n_frames"] % self.config["batch_size"] != 0:
147
  # make n_frames divisible by batch_size
148
  self.config["n_frames"] = self.config["n_frames"] - (self.config["n_frames"] % self.config["batch_size"])
149
+
150
  if read_from_files:
 
151
  return os.path.join(latents_path, 'latents')
152
  else:
153
  return None
 
213
  # encode to latents
214
  latents = self.encode_imgs(frames, deterministic=True).to(torch.float16).to(self.device)
215
  # get noise
216
+ if self.inversion == 'ddim':
217
+ eps = self.get_ddim_eps(latents, range(self.config["n_frames"])).to(torch.float16).to(self.device)
218
+ elif self.inversion == 'ddpm':
219
+ eps = self.get_ddpm_noise()
220
+ else:
221
+ raise NotImplementedError()
222
+
223
  if not read_from_files:
224
  return None, frames, latents, eps
225
  return paths, frames, latents, eps
226
 
227
  def get_ddim_eps(self, latent, indices):
228
  read_from_files = self.inverted_latents is None
 
229
  if read_from_files:
230
  noisest = max([int(x.split('_')[-1].split('.')[0]) for x in glob.glob(os.path.join(self.latents_path, f'noisy_latents_*.pt'))])
 
 
231
  latents_path = os.path.join(self.latents_path, f'noisy_latents_{noisest}.pt')
232
  noisy_latent = torch.load(latents_path)[indices].to(self.device)
233
  else:
234
  noisest = max([int(key.split("_")[-1]) for key in self.inverted_latents.keys()])
 
 
235
  noisy_latent = self.inverted_latents[f'noisy_latents_{noisest}'][indices]
236
 
237
  alpha_prod_T = self.scheduler.alphas_cumprod[noisest]
238
  mu_T, sigma_T = alpha_prod_T ** 0.5, (1 - alpha_prod_T) ** 0.5
239
  eps = (noisy_latent - mu_T * latent) / sigma_T
240
  return eps
241
+
242
+ def get_ddpm_noise(self):
243
+ read_from_files = self.inverted_latents is None
244
+ idx_to_t = {int(k): int(v) for k, v in enumerate(self.scheduler.timesteps)}
245
+ t = idx_to_t[self.skip_steps]
246
+ if read_from_files:
247
+ x0_path = os.path.join(self.latents_path, f'noisy_latents_{t}.pt')
248
+ zs_path = os.path.join(self.latents_path, f'noise_total.pt')
249
+ x0 = torch.load(x0_path)[:self.config["n_frames"]].to(self.device)
250
+ zs = torch.load(zs_path)[self.skip_steps:, :self.config["n_frames"]].to(self.device)
251
+ else:
252
+ x0 = self.inverted_latents[f'noisy_latents_{t}'][:self.config["n_frames"]].to(self.device)
253
+ zs = self.zs[self.skip_steps:, :self.config["n_frames"]].to(self.device)
254
+ return x0, zs
255
+
256
+ def prepare_extra_step_kwargs(self, eta):
257
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
258
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
259
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
260
+ # and should be between [0, 1]
261
+
262
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
263
+ extra_step_kwargs = {}
264
+ if accepts_eta:
265
+ extra_step_kwargs["eta"] = eta
266
+
267
+ return extra_step_kwargs
268
 
269
  @torch.no_grad()
270
+ def denoise_step(self, x, t, indices, zs=None):
271
  # register the time step and features in pnp injection modules
272
  read_files = self.inverted_latents is None
273
 
 
295
  noise_pred = noise_pred_uncond + self.config["guidance_scale"] * (noise_pred_cond - noise_pred_uncond)
296
 
297
  # compute the denoising step with the reference model
298
+ denoised_latent = self.scheduler.step(noise_pred, t, x, variance_noise=zs, **self.extra_step_kwargs)[
299
+ 'prev_sample']
300
  return denoised_latent
301
 
302
  @torch.autocast(dtype=torch.float16, device_type='cuda')
303
+ def batched_denoise_step(self, x, t, indices, zs=None):
304
  batch_size = self.config["batch_size"]
305
  denoised_latents = []
306
+ pivotal_idx = torch.randint(batch_size, (len(x) // batch_size,)) + torch.arange(0, len(x), batch_size)
307
+
308
  register_pivotal(self, True)
309
+ if zs is None:
310
+ zs_input = None
311
+ else:
312
+ zs_input = zs[pivotal_idx]
313
+ self.denoise_step(x[pivotal_idx], t, indices[pivotal_idx], zs_input)
314
  register_pivotal(self, False)
315
  for i, b in enumerate(range(0, len(x), batch_size)):
316
  register_batch_idx(self, i)
317
+ if zs is None:
318
+ zs_input = None
319
+ else:
320
+ zs_input = zs[b:b + batch_size]
321
+ denoised_latents.append(self.denoise_step(x[b:b + batch_size], t, indices[b:b + batch_size]
322
+ , zs_input))
323
  denoised_latents = torch.cat(denoised_latents)
324
  return denoised_latents
325
 
 
350
 
351
  self.init_method(conv_injection_t=pnp_f_t, qk_injection_t=pnp_attn_t)
352
 
353
+ if self.inversion == 'ddim':
354
+ noisy_latents = self.scheduler.add_noise(self.latents, self.eps, self.scheduler.timesteps[0])
355
+ elif self.inversion == 'ddpm':
356
+ noisy_latents = self.eps[0]
357
+ else:
358
+ raise NotImplementedError()
359
+
360
  edited_frames = self.sample_loop(noisy_latents, torch.arange(self.config["n_frames"]))
361
 
362
  if save_files:
 
368
  return edited_frames
369
 
370
  def sample_loop(self, x, indices):
371
+ save_files = self.inverted_latents is None # if we're in the original non-demo setting
 
372
  if save_files:
373
  os.makedirs(f'{self.config["output_path"]}/img_ode', exist_ok=True)
374
+
375
+ timesteps = self.scheduler.timesteps
376
+ if self.inversion == 'ddpm':
377
+ zs_total = self.eps[1]
378
+
379
+ t_to_idx = {int(v): k for k, v in enumerate(timesteps[-zs_total.shape[0]:])}
380
+ timesteps = timesteps[-zs_total.shape[0]:]
381
+
382
+ for i, t in enumerate(tqdm(timesteps, desc="Sampling")):
383
+ if self.inversion == 'ddpm':
384
+ idx = t_to_idx[int(t)]
385
+ zs = zs_total[idx]
386
+ else:
387
+ zs = None
388
+ x = self.batched_denoise_step(x, t, indices, zs)
389
 
390
  decoded_latents = self.decode_latents(x)
391
  if save_files:
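
Taken together, the three files wire the new path end to end: extract_latents(..., inversion_type='ddpm', skip_steps=k) returns the inverted latents plus the per-step noise maps, TokenFlow receives them through its new zs argument, starts sampling from the latent at skip_steps, and feeds the matching zs slice into every scheduler step. A rough driver sketch follows; it assumes the repo's classes with the signatures visible in this diff and a config dict shaped like the one app.py builds, so treat it as an illustration rather than a verified script.

    # illustrative only: config keys and class behaviour are assumed from the diff above
    from preprocess_utils import Preprocess
    from tokenflow_pnp import TokenFlow

    def run_ddpm_tokenflow(config, pipe, device="cuda"):
        model = Preprocess(device, config)          # non-demo path loads the SD components itself
        (frames, latents, inverted_latents, zs), _ = model.extract_latents(
            num_steps=config["steps"],
            save_path=None,                         # keep everything in memory (demo-style)
            batch_size=config["batch_size"],
            timesteps_to_save=None,                 # unused on the DDPM branch
            inversion_prompt=config["inversion_prompt"],
            inversion_type="ddpm",                  # the branch added by this commit
            skip_steps=config["skip_steps"],
            reconstruction=False,
        )
        config["inversion"] = "ddpm"                # TokenFlow uses this to pick eta=1.0 and the zs path
        editor = TokenFlow(config=config, pipe=pipe,
                           frames=frames, inverted_latents=inverted_latents, zs=zs)
        return editor.edit_video()
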