Linoy Tsaban committed
Commit e24d40d
Parent(s): 9ad499d

add ddpm inversion

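For context: unlike DDIM inversion, the DDPM inversion added here (in the style of edit-friendly DDPM inversion, Huberman-Spiegelglas et al. 2023) records a per-timestep noise map z_t alongside the inverted latents, which is why the diff below threads a new `zs` state through `prep`, `preprocess_and_invert`, `edit_with_pnp`, and the Gradio UI, and adds a `skip_steps` control. The following is a minimal sketch of the idea only, not the code in this repo; `ddpm_inversion_sketch`, `unet`, `alphas_cumprod`, and the eta=1 variance schedule are all assumptions for illustration:

    import torch

    @torch.no_grad()
    def ddpm_inversion_sketch(x0, unet, alphas_cumprod, timesteps, text_emb):
        # Toy edit-friendly DDPM inversion (NOT this repo's implementation):
        # build a noisy trajectory for x0, then solve at every step for the
        # noise map z_t that a reverse DDPM step needs to land back on it.
        xts = {0: x0}
        for t in timesteps:                      # timesteps sorted high -> low
            a = alphas_cumprod[t]
            xts[int(t)] = a.sqrt() * x0 + (1 - a).sqrt() * torch.randn_like(x0)

        zs = []
        for i, t in enumerate(timesteps):
            t_prev = int(timesteps[i + 1]) if i + 1 < len(timesteps) else 0
            x_t, x_prev = xts[int(t)], xts[t_prev]
            eps = unet(x_t, t, encoder_hidden_states=text_emb).sample
            a_t, a_prev = alphas_cumprod[t], alphas_cumprod[t_prev]
            pred_x0 = (x_t - (1 - a_t).sqrt() * eps) / a_t.sqrt()
            # eta=1 (DDPM) posterior std and mean
            sigma = ((1 - a_prev) / (1 - a_t)).sqrt() * (1 - a_t / a_prev).sqrt()
            mu = a_prev.sqrt() * pred_x0 + (1 - a_prev - sigma ** 2).sqrt() * eps
            zs.append((x_prev - mu) / sigma)     # the recorded noise map z_t
        return xts, zs

At edit time the saved maps replace fresh noise in the reverse process, so sampling under a new prompt stays pinned to the source video's noise trajectory; presumably that is what `TokenFlow(..., zs=zs.value)` consumes in this commit. One reverse step under the same assumptions:

    def ddpm_edit_step(x_t, eps_edit, z_t, a_t, a_prev):
        # eps_edit: UNet prediction under the *edit* prompt; z_t: saved map.
        pred_x0 = (x_t - (1 - a_t).sqrt() * eps_edit) / a_t.sqrt()
        sigma = ((1 - a_prev) / (1 - a_t)).sqrt() * (1 - a_t / a_prev).sqrt()
        mu = a_prev.sqrt() * pred_x0 + (1 - a_prev - sigma ** 2).sqrt() * eps_edit
        return mu + sigma * z_t                  # deterministic given z_t

The new `skip_steps` slider matches the method's usual strength control: starting the edit a few steps below T trades edit strength against fidelity to the source. The exact semantics live in `Preprocess.extract_latents`, which is not part of this diff.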
Files changed (1): app.py (+68 −37)
app.py CHANGED
@@ -61,20 +61,22 @@ def prep(config):
     model_key = "stabilityai/stable-diffusion-2-depth"
     toy_scheduler = DDIMScheduler.from_pretrained(model_key, subfolder="scheduler")
     toy_scheduler.set_timesteps(config["save_steps"])
-    print("config[save_steps]", config["save_steps"])
     timesteps_to_save, num_inference_steps = get_timesteps(toy_scheduler, num_inference_steps=config["save_steps"],
                                                            strength=1.0,
                                                            device=device)
-    print("YOOOO timesteps to save", timesteps_to_save)
+
 
     # seed_everything(config["seed"])
     if not config["frames"]: # original non demo setting
         save_path = os.path.join(config["save_dir"],
+                                 f'inversion_{config[inversion]}',
                                  f'sd_{config["sd_version"]}',
                                  Path(config["data_path"]).stem,
                                  f'steps_{config["steps"]}',
                                  f'nframes_{config["n_frames"]}')
         os.makedirs(os.path.join(save_path, f'latents'), exist_ok=True)
+        if opt[inversion] == 'ddpm':
+            os.makedirs(os.path.join(save_path, f'latents'), exist_ok=True)
         add_dict_to_yaml_file(os.path.join(config["save_dir"], 'inversion_prompts.yaml'), Path(config["data_path"]).stem, config["inversion_prompt"])
         # save inversion prompt in a txt file
         with open(os.path.join(save_path, 'inversion_prompt.txt'), 'w') as f:
@@ -82,43 +84,53 @@ def prep(config):
     else:
         save_path = None
 
-    model = Preprocess(device, config,
+    model = Preprocess(device,
+                       config,
                        vae=vae,
                        text_encoder=text_encoder,
                        scheduler=scheduler,
                        tokenizer=tokenizer,
                        unet=unet)
-    print(type(model.config["batch_size"]))
-    frames, latents, total_inverted_latents, rgb_reconstruction = model.extract_latents(
+
+    frames_and_latents, rgb_reconstruction = model.extract_latents(
                                          num_steps=model.config["steps"],
                                          save_path=save_path,
                                          batch_size=model.config["batch_size"],
                                          timesteps_to_save=timesteps_to_save,
                                          inversion_prompt=model.config["inversion_prompt"],
+                                         inversion_type=model.config["inversion"],
+                                         skip_steps=model.config["skip_steps"],
+                                         reconstruction=model.config["reconstruct"]
     )
 
-
-    return frames, latents, total_inverted_latents, rgb_reconstruction
+    if model.config["inversion"] == 'ddpm':
+        frames, latents, total_inverted_latents, zs = frames_and_latents
+        return frames, latents, total_inverted_latents, zs, rgb_reconstruction
+    else:
+        frames, latents, total_inverted_latents = frames_and_latents
+        return frames, latents, total_inverted_latents, rgb_reconstruction
+
 
 def preprocess_and_invert(input_video,
                           frames,
                           latents,
                           inverted_latents,
+                          zs,
                           seed,
                           randomize_seed,
                           do_inversion,
-                          # save_dir: str = "latents",
                           steps,
                           n_timesteps = 50,
                           batch_size: int = 8,
                           n_frames: int = 40,
                           inversion_prompt:str = '',
+                          skip_steps: int = 15,
 
 ):
     sd_version = "2.1"
-    height = 512
+    height: int = 512
     weidth: int = 512
-    print("n timesteps", n_timesteps)
+
     if do_inversion or randomize_seed:
         preprocess_config = {}
         preprocess_config['H'] = height
@@ -134,30 +146,37 @@ def preprocess_and_invert(input_video,
         preprocess_config['frames'] = video_to_frames(input_video)
         preprocess_config['data_path'] = input_video.split(".")[0]
 
+        preprocess_config['inversion'] = 'ddpm'
+        preprocess_config['skip_steps'] = skip_steps
+        preprocess_config['reconstruct'] = False
+
+
 
         if randomize_seed:
             seed = randomize_seed_fn()
         seed_everything(seed)
 
-        frames, latents, total_inverted_latents, rgb_reconstruction = prep(preprocess_config)
-        print(total_inverted_latents.keys())
-        print(len(total_inverted_latents.keys()))
-        frames = gr.State(value=frames)
-        latents = gr.State(value=latents)
-        inverted_latents = gr.State(value=total_inverted_latents)
+        frames, latents, total_inverted_latents, zs, rgb_reconstruction = prep(preprocess_config)
+
+        frames = gr.State(value = frames)
+        latents = gr.State(value = latents)
+        inverted_latents = gr.State(value = total_inverted_latents)
+        zs = gr.State(value = zs)
         do_inversion = False
 
-    return frames, latents, inverted_latents, do_inversion
+    return frames, latents, inverted_latents, zs, do_inversion
 
 
 def edit_with_pnp(input_video,
                   frames,
                   latents,
                   inverted_latents,
+                  zs,
                   seed,
                   randomize_seed,
                   do_inversion,
                   steps,
+                  skip_steps: int = 15,
                   prompt: str = "a marble sculpture of a woman running, Venus de Milo",
                   # negative_prompt: str = "ugly, blurry, low res, unrealistic, unaesthetic",
                   pnp_attn_t: float = 0.5,
@@ -183,14 +202,18 @@ def edit_with_pnp(input_video,
    config["pnp_attn_t"] = pnp_attn_t
    config["pnp_f_t"] = pnp_f_t
    config["pnp_inversion_prompt"] = inversion_prompt
+   config["inversion"] = "ddpm"
+   config["skip_steps"] = skip_steps
+
 
 
    if do_inversion:
-       frames, latents, inverted_latents, do_inversion = preprocess_and_invert(
+       frames, latents, inverted_latents, zs, do_inversion = preprocess_and_invert(
            input_video,
            frames,
            latents,
            inverted_latents,
+           zs,
            seed,
            randomize_seed,
            do_inversion,
@@ -198,7 +221,8 @@ def edit_with_pnp(input_video,
            n_timesteps,
            batch_size,
            n_frames,
-           inversion_prompt)
+           inversion_prompt,
+           skip_steps)
        do_inversion = False
 
 
@@ -207,12 +231,13 @@ def edit_with_pnp(input_video,
    seed_everything(seed)
 
 
-   editor = TokenFlow(config=config,pipe=tokenflow_pipe, frames=frames.value, inverted_latents=inverted_latents.value)
+   editor = TokenFlow(config=config,pipe=tokenflow_pipe, frames=frames.value, inverted_latents=inverted_latents.value, zs= zs.value)
    edited_frames = editor.edit_video()
-
-   save_video(edited_frames, 'tokenflow_PnP_fps_30.mp4', fps=n_fps)
+
+   edit_video_path = f'tokenflow_PnP_fps_{n_fps}.mp4'
+   save_video(edited_frames, edit_video_path, fps=n_fps)
    # path = export_to_video(edited_frames)
-   return 'tokenflow_PnP_fps_30.mp4', frames, latents, inverted_latents, do_inversion
+   return edit_video_path, frames, latents, inverted_latents, zs, do_inversion
 
 ########
 # demo #
@@ -238,6 +263,7 @@ with gr.Blocks(css="style.css") as demo:
    frames = gr.State()
    inverted_latents = gr.State()
    latents = gr.State()
+   zs = gr.State()
    do_inversion = gr.State(value=True)
 
    with gr.Row():
@@ -252,15 +278,7 @@ with gr.Blocks(css="style.css") as demo:
                        label="Describe your edited video",
                        max_lines=1, value=""
                        )
-       # with gr.Group(visible=False) as share_btn_container:
-           # with gr.Group(elem_id="share-btn-container"):
-               # community_icon = gr.HTML(community_icon_html, visible=True)
-               # loading_icon = gr.HTML(loading_icon_html, visible=False)
-               # share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)
-
-
-       # with gr.Row():
-           # inversion_progress = gr.Textbox(visible=False, label="Inversion progress")
+
 
        with gr.Row():
            run_button = gr.Button("Edit your video!", visible=True)
@@ -274,8 +292,10 @@ with gr.Blocks(css="style.css") as demo:
                randomize_seed = gr.Checkbox(label='Randomize seed', value=False)
                gudiance_scale = gr.Slider(label='Guidance Scale', minimum=1, maximum=30,
                                           value=7.5, step=0.5, interactive=True)
-               steps = gr.Slider(label='Inversion steps', minimum=10, maximum=500,
-                                 value=200, step=1, interactive=True)
+               steps = gr.Slider(label='Inversion steps', minimum=10, maximum=200,
+                                 value=50, step=1, interactive=True)
+               skip_steps = gr.Slider(label='Skip Steps', minimum=5, maximum=25,
+                                      value=5, step=1, interactive=True)
 
            with gr.Column(min_width=100):
                inversion_prompt = gr.Textbox(lines=1, label="Inversion prompt", interactive=True, placeholder="")
@@ -284,7 +304,7 @@ with gr.Blocks(css="style.css") as demo:
                n_frames = gr.Slider(label='Num frames', minimum=2, maximum=200,
                                     value=24, step=1, interactive=True)
                n_timesteps = gr.Slider(label='Diffusion steps', minimum=25, maximum=100,
-                                       value=25, step=25, interactive=True)
+                                       value=50, step=25, interactive=True)
                n_fps = gr.Slider(label='Frames per second', minimum=1, maximum=60,
                                  value=10, step=1, interactive=True)
 
@@ -300,6 +320,11 @@ with gr.Blocks(css="style.css") as demo:
                   fn = reset_do_inversion,
                   outputs = [do_inversion],
                   queue = False)
+
+   steps.change(
+                  fn = reset_do_inversion,
+                  outputs = [do_inversion],
+                  queue = False)
 
    inversion_prompt.change(
                   fn = reset_do_inversion,
@@ -326,6 +351,7 @@ with gr.Blocks(css="style.css") as demo:
                      frames,
                      latents,
                      inverted_latents,
+                     zs,
                      seed,
                      randomize_seed,
                      do_inversion,
@@ -333,11 +359,13 @@ with gr.Blocks(css="style.css") as demo:
                      n_timesteps,
                      batch_size,
                      n_frames,
-                     inversion_prompt
+                     inversion_prompt,
+                     skip_steps
                      ],
           outputs = [frames,
                      latents,
                      inverted_latents,
+                     zs,
                      do_inversion
 
           ])
@@ -347,10 +375,12 @@ with gr.Blocks(css="style.css") as demo:
                     frames,
                     latents,
                     inverted_latents,
+                    zs,
                     seed,
                     randomize_seed,
                     do_inversion,
                     steps,
+                    skip_steps,
                     prompt,
                     pnp_attn_t,
                     pnp_f_t,
@@ -360,7 +390,7 @@ with gr.Blocks(css="style.css") as demo:
                     gudiance_scale,
                     inversion_prompt,
                     n_fps ],
-          outputs = [output_video, frames, latents, inverted_latents, do_inversion]
+          outputs = [output_video, frames, latents, inverted_latents, zs, do_inversion]
           )
 
    gr.Examples(
@@ -371,4 +401,5 @@ with gr.Blocks(css="style.css") as demo:
    )
 
 demo.queue()
+
 demo.launch()