Yw22 committed on
Commit cf97d5b
1 Parent(s): a8913c5
Files changed (1)
  1. app.py +15 -9
app.py CHANGED
@@ -20,13 +20,13 @@ import spaces
 from PIL import Image
 from omegaconf import OmegaConf
 from einops import rearrange, repeat
-from torchvision import transforms
+from torchvision import transforms,utils
 from transformers import CLIPTextModel, CLIPTokenizer
 from diffusers import AutoencoderKL, DDIMScheduler
 
 from pipelines.pipeline_imagecoductor import ImageConductorPipeline
 from modules.unet import UNet3DConditionFlowModel
-from utils.gradio_utils import ensure_dirname, split_filename, visualize_drag, image2pil, image2arr
+from utils.gradio_utils import ensure_dirname, split_filename, visualize_drag, image2pil, save_videos_grid
 from utils.utils import create_image_controlnet, create_flow_controlnet, interpolate_trajectory, load_weights, load_model, bivariate_Gaussian
 from utils.lora_utils import add_LoRA_to_controlnet
 from utils.visualizer import Visualizer, vis_flow_to_video
@@ -382,9 +382,12 @@ class ImageConductor:
             eval_mode = True,
         ).videos
 
-        outputs_path = os.path.join(output_dir, f'output_{i}_{id}.mp4')
-        vis_video = (rearrange(sample[0], 'c t h w -> t h w c') * 255.).clip(0, 255)
-        torchvision.io.write_video(outputs_path, vis_video, fps=8, video_codec='h264', options={'crf': '10'})
+        # outputs_path = os.path.join(output_dir, f'output_{i}_{id}.mp4')
+        # vis_video = (rearrange(sample[0], 'c t h w -> t h w c') * 255.).clip(0, 255)
+        # torchvision.io.write_video(outputs_path, vis_video, fps=8, video_codec='h264', options={'crf': '10'})
+
+        outputs_path = os.path.join(output_dir, f'output_{i}_{id}.gif')
+        save_videos_grid(sample[0][None], outputs_path)
 
         return {output_image: visualized_drag, output_video: outputs_path}
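The hunk above replaces the torchvision.io.write_video mp4 export (which needs an h264 encoder on the host) with a save_videos_grid call that writes an animated GIF; sample[0][None] re-adds the batch dimension the helper expects. The repo's utils.gradio_utils implementation is not shown in this diff, so the following is only a minimal sketch of the AnimateDiff-style helper this import conventionally refers to; the rescale, n_rows, and fps parameters and their defaults are assumptions.

# Minimal sketch of an AnimateDiff-style save_videos_grid; the actual helper
# in utils.gradio_utils may differ in signature and defaults.
import os

import imageio
import numpy as np
import torch
import torchvision
from einops import rearrange


def save_videos_grid(videos: torch.Tensor, path: str,
                     rescale: bool = False, n_rows: int = 6, fps: int = 8) -> None:
    """Tile a (b, c, t, h, w) video batch into per-frame grids and save as GIF."""
    videos = rearrange(videos, "b c t h w -> t b c h w")
    frames = []
    for frame in videos:
        grid = torchvision.utils.make_grid(frame, nrow=n_rows)  # (c, H, W)
        grid = grid.permute(1, 2, 0)                            # (H, W, c)
        if rescale:
            grid = (grid + 1.0) / 2.0  # map [-1, 1] back to [0, 1]
        frames.append((grid.detach().cpu().numpy() * 255).astype(np.uint8))

    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    # Older imageio releases accept fps= for GIFs; newer ones prefer duration=.
    imageio.mimsave(path, frames, fps=fps)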
 
@@ -619,10 +622,13 @@ with block:
             examples_type = gr.Textbox(label="Examples Type (Ignore) ", value="", visible=False)
 
         with gr.Column(scale=7):
-            output_video = gr.Video(
-                label="Output Video",
-                width=384,
-                height=256)
+            # output_video = gr.Video(
+            #     label="Output Video",
+            #     width=384,
+            #     height=256)
+            output_video = gr.Image(label="Output Video",
+                                    height=256,
+                                    width=384,)
 
 
     with gr.Row():
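Since the pipeline output is now a GIF rather than an mp4, the final hunk swaps the gr.Video component for gr.Image: Gradio serves the returned filepath and the browser animates the GIF natively, so no video codec is needed in the Space. Below is a minimal sketch of that wiring, with a hypothetical run_demo callback standing in for the app's actual generation function.

import gradio as gr

def run_demo():
    # Hypothetical stand-in for the app's generation callback; it would
    # return the .gif path produced by save_videos_grid above.
    return "outputs/output_0_demo.gif"

with gr.Blocks() as demo:
    output_video = gr.Image(label="Output Video", height=256, width=384)
    run_button = gr.Button("Run")
    # gr.Image accepts a filepath; an animated GIF plays in the browser's <img> tag.
    run_button.click(fn=run_demo, outputs=output_video)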