fffiloni committed on
Commit
cb76ea2
1 Parent(s): 9f11b17

Update inference.py

Browse files
Files changed (1) hide show
  1. inference.py +8 -6
inference.py CHANGED
@@ -101,8 +101,8 @@ if __name__ == "__main__":
101
  video = read_video(video_path=args.video_path, video_length=args.video_length, width=args.width, height=args.height)
102
 
103
  # Save source video
104
- original_pixels = rearrange(video, "(b f) c h w -> b c f h w", b=1)
105
- save_videos_grid(original_pixels, os.path.join(args.output_path, "source_video.mp4"), rescale=True)
106
 
107
 
108
  # Step 2. Parse a video to conditional frames
@@ -111,11 +111,11 @@ if __name__ == "__main__":
111
  pil_annotation = [pil_annot[0] for pil_annot in pil_annotation]
112
 
113
  # Save condition video
114
- video_cond = [np.array(p).astype(np.uint8) for p in pil_annotation]
115
- imageio.mimsave(os.path.join(args.output_path, f"{args.condition}_condition.mp4"), video_cond, fps=args.fps)
116
 
117
  # Reduce memory (optional)
118
- del annotator; torch.cuda.empty_cache()
119
 
120
  # Step 3. inference
121
 
@@ -132,4 +132,6 @@ if __name__ == "__main__":
132
  generator=generator, guidance_scale=12.5, negative_prompt=NEG_PROMPT,
133
  width=args.width, height=args.height
134
  ).videos
135
- save_videos_grid(sample, f"{args.output_path}/{args.temp_chunk_path}.mp4")
 
 
 
101
  video = read_video(video_path=args.video_path, video_length=args.video_length, width=args.width, height=args.height)
102
 
103
  # Save source video
104
+ # original_pixels = rearrange(video, "(b f) c h w -> b c f h w", b=1)
105
+ # save_videos_grid(original_pixels, os.path.join(args.output_path, "source_video.mp4"), rescale=True)
106
 
107
 
108
  # Step 2. Parse a video to conditional frames
 
111
  pil_annotation = [pil_annot[0] for pil_annot in pil_annotation]
112
 
113
  # Save condition video
114
+ #video_cond = [np.array(p).astype(np.uint8) for p in pil_annotation]
115
+ #imageio.mimsave(os.path.join(args.output_path, f"{args.condition}_condition.mp4"), video_cond, fps=args.fps)
116
 
117
  # Reduce memory (optional)
118
+ #del annotator; torch.cuda.empty_cache()
119
 
120
  # Step 3. inference
121
 
 
132
  generator=generator, guidance_scale=12.5, negative_prompt=NEG_PROMPT,
133
  width=args.width, height=args.height
134
  ).videos
135
+ save_videos_grid(sample, f"{args.output_path}/{args.temp_chunk_path}.mp4")
136
+ del pipe
137
+ torch.cuda.empty_cache()