GrayShine committed on
Commit
4f488cf
1 Parent(s): f43e5e5

Delete backend.py

Files changed (1)
  1. backend.py +0 -395
backend.py DELETED
@@ -1,395 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
-
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- """
- Sample new videos from a pre-trained DiT.
- """
- import os
- import sys
- import math
- try:
-     import utils
-
-     from diffusion import create_diffusion
-     from download import find_model
- except ImportError:
-     # fall back to the package root when run from a subdirectory
-     sys.path.append(os.path.split(sys.path[0])[0])
-     import utils
-     from diffusion import create_diffusion
-     from download import find_model
-
- import torch
- torch.backends.cuda.matmul.allow_tf32 = True
- torch.backends.cudnn.allow_tf32 = True
- import argparse
- import torchvision
-
- from einops import rearrange
- from models import get_models
- from torchvision.utils import save_image
- from diffusers.models import AutoencoderKL
- from models.clip import TextEmbedder
- from omegaconf import OmegaConf
- from PIL import Image
- import numpy as np
- from torchvision import transforms
- sys.path.append("..")
- from datasets import video_transforms
- from decord import VideoReader
- from utils import mask_generation_before
- from natsort import natsorted
- from diffusers.utils.import_utils import is_xformers_available
- from tca.tca_transform import tca_transform_model
-
-
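- # get_input builds the conditioning frames for masked video generation.
- # It returns (video_frames, n): an (f, c, h, w) tensor normalized to [-1, 1]
- # and the number of frames reserved as autoregressive conditioning.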
- def get_input(args):
-     input_path = args.input_path
-     transform_video = transforms.Compose([
-         video_transforms.ToTensorVideo(),  # TCHW
-         video_transforms.ResizeVideo((args.image_h, args.image_w)),
-         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
-     ])
-     temporal_sample_func = video_transforms.TemporalRandomCrop(args.num_frames * args.frame_interval)
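-     # mask_type naming convention (inferred from the branches below):
-     #   'firstN'         - condition on the first N frames
-     #   'onelastN'       - condition on N copies of a first and a last image
-     #   'video_onelastN' - condition on N frames from a first and a last video
-     #   'all'            - mask everything (pure text-to-video)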
-     if input_path is not None:
-         print(f'loading video from {input_path}')
-         if os.path.isdir(input_path):
-             file_list = os.listdir(input_path)
-             video_frames = []
-             if args.mask_type.startswith('onelast'):
-                 num = int(args.mask_type.split('onelast')[-1])
-                 # get first and last frame
-                 first_frame_path = os.path.join(input_path, natsorted(file_list)[0])
-                 last_frame_path = os.path.join(input_path, natsorted(file_list)[-1])
-                 first_frame = torch.as_tensor(np.array(Image.open(first_frame_path).convert("RGB"), dtype=np.uint8, copy=True)).unsqueeze(0)
-                 last_frame = torch.as_tensor(np.array(Image.open(last_frame_path).convert("RGB"), dtype=np.uint8, copy=True)).unsqueeze(0)
-                 for i in range(num):
-                     video_frames.append(first_frame)
-                 # add zeros to frames
-                 num_zeros = args.num_frames - 2 * num
-                 for i in range(num_zeros):
-                     zeros = torch.zeros_like(first_frame)
-                     video_frames.append(zeros)
-                 for i in range(num):
-                     video_frames.append(last_frame)
-                 n = 0
-                 video_frames = torch.cat(video_frames, dim=0).permute(0, 3, 1, 2)  # f,c,h,w
-                 video_frames = transform_video(video_frames)
-             elif args.mask_type.startswith('video_onelast'):
-                 num = int(args.mask_type.split('onelast')[-1])
-                 first_frame_path = os.path.join(input_path, natsorted(file_list)[0])
-                 last_frame_path = os.path.join(input_path, natsorted(file_list)[-1])
-                 video_reader_first = VideoReader(first_frame_path)
-                 video_reader_last = VideoReader(last_frame_path)
-                 total_frames_first = len(video_reader_first)
-                 total_frames_last = len(video_reader_last)
-                 start_frame_ind_f, end_frame_ind_f = temporal_sample_func(total_frames_first)
-                 start_frame_ind_l, end_frame_ind_l = temporal_sample_func(total_frames_last)
-                 frame_indice_f = np.linspace(start_frame_ind_f, end_frame_ind_f - 1, args.num_frames, dtype=int)
-                 frame_indice_l = np.linspace(start_frame_ind_l, end_frame_ind_l - 1, args.num_frames, dtype=int)
-                 video_frames_first = torch.from_numpy(video_reader_first.get_batch(frame_indice_f).asnumpy()).permute(0, 3, 1, 2).contiguous()
-                 video_frames_last = torch.from_numpy(video_reader_last.get_batch(frame_indice_l).asnumpy()).permute(0, 3, 1, 2).contiguous()
-                 video_frames_first = transform_video(video_frames_first)  # f,c,h,w
-                 video_frames_last = transform_video(video_frames_last)
-                 num_zeros = args.num_frames - 2 * num
-                 video_frames.append(video_frames_first[-num:])
-                 for i in range(num_zeros):
-                     zeros = torch.zeros_like(video_frames_first[0]).unsqueeze(0)
-                     video_frames.append(zeros)
-                 video_frames.append(video_frames_last[:num])
-                 video_frames = torch.cat(video_frames, dim=0)
-                 n = num
-             else:
-                 # frames from a directory of images; os.listdir returns bare
-                 # names, so join with input_path and sort for a stable order
-                 for file in natsorted(file_list):
-                     if file.endswith('jpg') or file.endswith('png'):
-                         image = torch.as_tensor(np.array(Image.open(os.path.join(input_path, file)).convert("RGB"), dtype=np.uint8, copy=True)).unsqueeze(0)
-                         video_frames.append(image)
-                     else:
-                         continue
-                 n = 0
-                 video_frames = torch.cat(video_frames, dim=0).permute(0, 3, 1, 2)  # f,c,h,w
-                 video_frames = transform_video(video_frames)
-             return video_frames, n
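-         # single-file input: one image ('.jpg'/'.png') or a video ('.mp4')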
-         elif os.path.isfile(input_path):
-             _, full_file_name = os.path.split(input_path)
-             file_name, extension = os.path.splitext(full_file_name)
-             if extension == '.jpg' or extension == '.png':
-                 print("reading video from an image")
-                 video_frames = []
-                 num = int(args.mask_type.split('first')[-1])
-                 first_frame = torch.as_tensor(np.array(Image.open(input_path).convert("RGB"), dtype=np.uint8, copy=True)).unsqueeze(0)
-                 for i in range(num):
-                     video_frames.append(first_frame)
-                 num_zeros = args.num_frames - num
-                 for i in range(num_zeros):
-                     zeros = torch.zeros_like(first_frame)
-                     video_frames.append(zeros)
-                 n = 0
-                 video_frames = torch.cat(video_frames, dim=0).permute(0, 3, 1, 2)  # f,c,h,w
-                 # keep the aspect ratio: scale by the smaller of the two ratios
-                 H_scale = args.image_h / video_frames.shape[2]
-                 W_scale = args.image_w / video_frames.shape[3]
-                 scale_ = min(H_scale, W_scale)
-                 # bilinear interpolation needs a float tensor; cast back to uint8 afterwards
-                 video_frames = torch.nn.functional.interpolate(video_frames.float(), scale_factor=scale_, mode="bilinear", align_corners=False).to(torch.uint8)
-                 video_frames = transform_video(video_frames)
-                 return video_frames, n
-             elif extension == '.mp4':
-                 video_reader = VideoReader(input_path)
-                 total_frames = len(video_reader)
-                 start_frame_ind, end_frame_ind = temporal_sample_func(total_frames)
-                 frame_indice = np.linspace(start_frame_ind, end_frame_ind - 1, args.num_frames, dtype=int)
-                 video_frames = torch.from_numpy(video_reader.get_batch(frame_indice).asnumpy()).permute(0, 3, 1, 2).contiguous()
-                 video_frames = transform_video(video_frames)
-                 n = args.researve_frame  # frames reserved for conditioning (name as spelled in the config)
-                 del video_reader
-                 return video_frames, n
-             else:
-                 raise TypeError(f'{extension} is not supported !!')
-         else:
-             raise ValueError('Please check your input path!!')
-     else:
-         print('no input video given, falling back to text-to-video')
-         # placeholder frames (args.num_frames rather than a hard-coded 16); mask_type 'all' masks them out entirely
-         video_frames = torch.zeros(args.num_frames, 3, args.latent_h, args.latent_w, dtype=torch.uint8)
-         args.mask_type = 'all'
-         video_frames = transform_video(video_frames)
-         n = 0
-         return video_frames, n
-
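- # auto_inpainting runs one denoising pass: the conditioning frames enter
- # through masked_video/mask (encoded to VAE latents and, when use_mask is
- # set, concatenated inside the model) and the decoded clip is returned in
- # pixel space.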
- def auto_inpainting(args, video_input, masked_video, mask, prompt, vae, text_encoder, diffusion, model, device):
-     b, f, c, h, w = video_input.shape
-     latent_h = args.image_size[0] // 8
-     latent_w = args.image_size[1] // 8
-
-     # prepare inputs
-     if args.use_fp16:
-         z = torch.randn(1, 4, args.num_frames, args.latent_h, args.latent_w, dtype=torch.float16, device=device)  # b,c,f,h,w
-         masked_video = masked_video.to(dtype=torch.float16)
-         mask = mask.to(dtype=torch.float16)
-     else:
-         z = torch.randn(1, 4, args.num_frames, args.latent_h, args.latent_w, device=device)  # b,c,f,h,w
-
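-     # encode the conditioning frames into VAE latent space (scaled by 0.18215)
-     # and downsample the mask to the latent resolution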
-     masked_video = rearrange(masked_video, 'b f c h w -> (b f) c h w').contiguous()
-     masked_video = vae.encode(masked_video).latent_dist.sample().mul_(0.18215)
-     masked_video = rearrange(masked_video, '(b f) c h w -> b c f h w', b=b).contiguous()
-     mask = torch.nn.functional.interpolate(mask[:, :, 0, :], size=(latent_h, latent_w)).unsqueeze(1)
-
-     # classifier-free guidance: duplicate the batch; the second half is
-     # conditioned on the negative prompt
-     if args.do_classifier_free_guidance:
-         masked_video = torch.cat([masked_video] * 2)
-         mask = torch.cat([mask] * 2)
-         z = torch.cat([z] * 2)
-         prompt_all = [prompt] + [args.negative_prompt]
-     else:
-         prompt_all = [prompt]
-
-     text_prompt = text_encoder(text_prompts=prompt_all, train=False)
-     model_kwargs = dict(encoder_hidden_states=text_prompt,
-                         class_labels=None,
-                         cfg_scale=args.cfg_scale,
-                         use_fp16=args.use_fp16)  # tav unet
-
-     # Sample latents:
-     if args.sample_method == 'ddim':
-         samples = diffusion.ddim_sample_loop(
-             model.forward_with_cfg, z.shape, z, clip_denoised=False, model_kwargs=model_kwargs, progress=True, device=device,
-             mask=mask, x_start=masked_video, use_concat=args.use_mask
-         )
-     elif args.sample_method == 'ddpm':
-         samples = diffusion.p_sample_loop(
-             model.forward_with_cfg, z.shape, z, clip_denoised=False, model_kwargs=model_kwargs, progress=True, device=device,
-             mask=mask, x_start=masked_video, use_concat=args.use_mask
-         )
-     if args.do_classifier_free_guidance:
-         samples, _ = samples.chunk(2, dim=0)  # keep the conditional half, [1, 4, 16, 32, 32]
-     if args.use_fp16:
-         samples = samples.to(dtype=torch.float16)
-
-     video_clip = samples[0].permute(1, 0, 2, 3).contiguous()  # [16, 4, 32, 32]
-     video_clip = vae.decode(video_clip / 0.18215).sample  # [16, 3, 256, 256]
-     return video_clip
-
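- # auto_inpainting_temp_split is a variant that feeds a second, temporally
- # split prompt stream (encoder_temporal_hidden_states) and triples the batch
- # for its two-level classifier-free guidance.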
- def auto_inpainting_temp_split(args, video_input, masked_video, mask, prompt, vae, text_encoder, diffusion, model, device):
-     b, f, c, h, w = video_input.shape
-     latent_h = args.image_size[0] // 8
-     latent_w = args.image_size[1] // 8
-
-     if args.use_fp16:
-         z = torch.randn(1, 4, args.num_frames, args.latent_h, args.latent_w, dtype=torch.float16, device=device)  # b,c,f,h,w
-         masked_video = masked_video.to(dtype=torch.float16)
-         mask = mask.to(dtype=torch.float16)
-     else:
-         z = torch.randn(1, 4, args.num_frames, args.latent_h, args.latent_w, device=device)  # b,c,f,h,w
-
-     masked_video = rearrange(masked_video, 'b f c h w -> (b f) c h w').contiguous()
-     masked_video = vae.encode(masked_video).latent_dist.sample().mul_(0.18215)
-     masked_video = rearrange(masked_video, '(b f) c h w -> b c f h w', b=b).contiguous()
-     mask = torch.nn.functional.interpolate(mask[:, :, 0, :], size=(latent_h, latent_w)).unsqueeze(1)
-
-     if args.do_classifier_free_guidance:
-         masked_video = torch.cat([masked_video] * 3)
-         mask = torch.cat([mask] * 3)
-         z = torch.cat([z] * 3)
-         prompt_all = [prompt] + [prompt] + [args.negative_prompt]
-         prompt_temp = [prompt] + [""] + [""]
-     else:
-         prompt_all = [prompt]
-         prompt_temp = [prompt]  # was undefined on this path; mirror prompt_all
-
-     text_prompt = text_encoder(text_prompts=prompt_all, train=False)
-     temporal_text_prompt = text_encoder(text_prompts=prompt_temp, train=False)
-     model_kwargs = dict(encoder_hidden_states=text_prompt,
-                         class_labels=None,
-                         cfg_scale=args.cfg_scale,
-                         use_fp16=args.use_fp16,
-                         encoder_temporal_hidden_states=temporal_text_prompt)  # tav unet
-
-     # Sample latents:
-     if args.sample_method == 'ddim':
-         samples = diffusion.ddim_sample_loop(
-             model.forward_with_cfg_temp_split, z.shape, z, clip_denoised=False, model_kwargs=model_kwargs, progress=True, device=device,
-             mask=mask, x_start=masked_video, use_concat=args.use_mask
-         )
-     elif args.sample_method == 'ddpm':
-         samples = diffusion.p_sample_loop(
-             model.forward_with_cfg_temp_split, z.shape, z, clip_denoised=False, model_kwargs=model_kwargs, progress=True, device=device,
-             mask=mask, x_start=masked_video, use_concat=args.use_mask
-         )
-     if args.do_classifier_free_guidance:
-         samples, _ = samples.chunk(2, dim=0)  # first chunk holds the conditional samples
-     if args.use_fp16:
-         samples = samples.to(dtype=torch.float16)
-
-     video_clip = samples[0].permute(1, 0, 2, 3).contiguous()  # [16, 4, 32, 32]
-     video_clip = vae.decode(video_clip / 0.18215).sample  # [16, 3, 256, 256]
-     return video_clip
-
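- # main loads the DiT, VAE, and text encoder, builds the conditioning input,
- # then samples one clip per prompt, reusing the tail of each clip as
- # conditioning for the next.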
- def main(args):
-     print("--------------------------begin running--------------------------", flush=True)
-     if args.gpu is not None:
-         os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
-     # Setup PyTorch:
-     if args.seed is not None:  # 'if args.seed' would silently skip seed 0
-         torch.manual_seed(args.seed)
-     torch.set_grad_enabled(False)
-     device = "cuda" if torch.cuda.is_available() else "cpu"
-
-     if args.ckpt is None:
-         assert args.model == "DiT-XL/2", "Only DiT-XL/2 models are available for auto-download."
-         assert args.image_size in [256, 512]
-         assert args.num_classes == 1000
-
-     # Load model; the VAE downsamples by 8x, so latents are image_size // 8 per side:
-     latent_h = args.image_size[0] // 8
-     latent_w = args.image_size[1] // 8
-     args.image_h = args.image_size[0]
-     args.image_w = args.image_size[1]
-     args.latent_h = latent_h
-     args.latent_w = latent_w
-     print('loading model')
-     model = get_models(args.use_mask, args).to(device)
-     model = tca_transform_model(model).to(device)
-
-     if args.use_compile:
-         model = torch.compile(model)
-     if args.enable_xformers_memory_efficient_attention:
-         if is_xformers_available():
-             model.enable_xformers_memory_efficient_attention()
-         else:
-             raise ValueError("xformers is not available. Make sure it is installed correctly.")
-
-     # Auto-download a pre-trained model or load a custom DiT checkpoint from train.py:
-     ckpt_path = args.ckpt or f"DiT-XL-2-{args.image_size}x{args.image_size}.pt"
-     state_dict = find_model(ckpt_path)
-     model.load_state_dict(state_dict)
-     print('loading succeeded')
-
-     model.eval()  # important!
-     pretrained_model_path = args.pretrained_model_path
-     diffusion = create_diffusion(str(args.num_sampling_steps))
-     vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").to(device)
-     # os.path.join avoids requiring a trailing slash in pretrained_model_path
-     text_encoder = TextEmbedder(tokenizer_path=os.path.join(pretrained_model_path, "tokenizer"),
-                                 encoder_path=os.path.join(pretrained_model_path, "text_encoder")).to(device)
-     if args.use_fp16:
-         print('Warning: using half precision for inference!')
-         vae.to(dtype=torch.float16)
-         model.to(dtype=torch.float16)
-         text_encoder.to(dtype=torch.float16)
-
-     # Text prompts to condition the model with (feel free to change):
-     prompts = args.text_prompt
-     class_name = [p + args.additional_prompt for p in prompts]
-
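-     # Autoregressive sampling: the first clip is conditioned on the input
-     # frames; each later clip is conditioned on the last researve_frame
-     # frames of the previous clip, re-masked below.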
-     if args.use_autoregressive:
-         os.makedirs(args.save_img_path, exist_ok=True)
-         video_input, researve_frames = get_input(args)  # f,c,h,w
-         video_input = video_input.to(device).unsqueeze(0)  # b,f,c,h,w
-         mask = mask_generation_before(args.mask_type, video_input.shape, video_input.dtype, device)  # b,f,c,h,w
-         # TODO: change the first3 to last3
-         if args.mask_type.startswith('first') and researve_frames != 0:
-             masked_video = torch.cat([video_input[:, -researve_frames:], video_input[:, :-researve_frames]], dim=1) * (mask == 0)
-         else:
-             masked_video = video_input * (mask == 0)
-
-         all_video = []
-         if researve_frames != 0:
-             all_video.append(video_input)
-         for idx, prompt in enumerate(class_name):
-             if idx == 0:
-                 video_clip = auto_inpainting(args, video_input, masked_video, mask, prompt, vae, text_encoder, diffusion, model, device)
-                 video_clip_ = video_clip.unsqueeze(0)
-                 all_video.append(video_clip_[:, researve_frames:])
-             else:
-                 researve_frames = args.researve_frame
-                 if args.mask_type.startswith('first') and researve_frames != 0:
-                     # shift the previous clip so its last frames become the new conditioning
-                     masked_video = torch.cat([video_clip_[:, -researve_frames:], video_clip_[:, :-researve_frames]], dim=1) * (mask == 0)
-                 else:
-                     masked_video = video_input * (mask == 0)
-                 video_clip = auto_inpainting(args, video_clip.unsqueeze(0), masked_video, mask, prompt, vae, text_encoder, diffusion, model, device)
-                 video_clip_ = video_clip.unsqueeze(0)
-                 all_video.append(video_clip_[:, researve_frames:])
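-             # denormalize from [-1, 1] to uint8 RGB and write the clip to disk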
-             video_ = ((video_clip * 0.5 + 0.5) * 255).add_(0.5).clamp_(0, 255).to(dtype=torch.uint8).cpu().permute(0, 2, 3, 1)
-             if args.mask_type.startswith('video_onelast'):
-                 torchvision.io.write_video(os.path.join(args.save_img_path, 'clip_video_%04d.mp4' % idx), video_[researve_frames:-researve_frames], fps=8)
-             else:
-                 torchvision.io.write_video(os.path.join(args.save_img_path, 'clip_video_%04d.mp4' % idx), video_, fps=8)
-         if args.mask_type.startswith('first') and researve_frames != 0:
-             # stitch the non-overlapping segments into one video
-             all_video = torch.cat(all_video, dim=1).squeeze(0)
-             video_ = ((all_video * 0.5 + 0.5) * 255).add_(0.5).clamp_(0, 255).to(dtype=torch.uint8).cpu().permute(0, 2, 3, 1)
-             torchvision.io.write_video(os.path.join(args.save_img_path, 'complete_video.mp4'), video_, fps=8)
-         print(f'saved in {args.save_img_path}')
-         return os.path.join(args.save_img_path, 'clip_video_%04d.mp4' % idx)
-
-
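- # Entry point for the demo backend: wraps main() with a single prompt string
- # and returns the path of the generated clip.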
- def call_main(input):
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--config", type=str, default="./configs/sample_mask.yaml")
-     args = parser.parse_args()
-     omega_conf = OmegaConf.load(args.config)
-     omega_conf.text_prompt = [input]
-     return main(omega_conf)