adamirus committed on
Commit
ff6dd17
1 Parent(s): b5bb1f7

Upload folder using huggingface_hub

Files changed (3)
  1. README.md +5 -2
  2. __pycache__/app.cpython-310.pyc +0 -0
  3. app.py +5 -5
README.md CHANGED
@@ -9,11 +9,14 @@ app_file: app.py
 pinned: false
 license: mit
 ---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
 @InProceedings{VideoFusion,
 author = {Luo, Zhengxiong and Chen, Dayou and Zhang, Yingya and Huang, Yan and Wang, Liang and Shen, Yujun and Zhao, Deli and Zhou, Jingren and Tan, Tieniu},
 title = {VideoFusion: Decomposed Diffusion Models for High-Quality Video Generation},
 booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
 month = {June},
 year = {2023}
-}
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+}
 
__pycache__/app.cpython-310.pyc ADDED
Binary file (900 Bytes).
 
app.py CHANGED
@@ -3,12 +3,12 @@ import gradio as gr
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
 
-pipeline = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
+
 
 def generate_video(prompt):
     # load pipeline
-    pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
-    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+    pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16").to("cuda")
+    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config).to("cuda")
 
     # optimize for GPU memory
     pipe.enable_model_cpu_offload()
@@ -21,5 +21,5 @@ def generate_video(prompt):
     video_path = export_to_video(video_frames)
     return video_path
 
-iface = gr.Interface(fn=generate_video, inputs="text", outputs="file")
-iface.launch()
+demo = gr.Interface(fn=generate_video, inputs="text", outputs="file")
+demo.launch(share=True)
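
Note on the added app.py lines (a reviewer sketch, not part of the commit): diffusers schedulers such as DPMSolverMultistepScheduler are configuration objects rather than torch modules, so chaining .to("cuda") onto from_config(...) is expected to raise an AttributeError, and pipe.enable_model_cpu_offload() already manages device placement, which makes the explicit .to("cuda") on the pipeline redundant. A minimal corrected app.py could look like the sketch below; it assumes import torch sits at the top of the file, and the inference call is reconstructed from the model's standard example because lines 15-20 fall outside the diff, so num_inference_steps=25 is an assumption.

import torch
import gradio as gr
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video


def generate_video(prompt):
    # load the pipeline in fp16; no .to("cuda") here, because
    # enable_model_cpu_offload() below manages device placement,
    # and schedulers do not implement .to() at all
    pipe = DiffusionPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b",
        torch_dtype=torch.float16,
        variant="fp16",
    )
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

    # optimize for GPU memory (requires the accelerate package)
    pipe.enable_model_cpu_offload()

    # run inference; num_inference_steps=25 is an assumption taken from the
    # model's example code (newer diffusers releases return a batch, so
    # .frames[0] may be needed instead of .frames)
    video_frames = pipe(prompt, num_inference_steps=25).frames

    # write the frames to an .mp4 file and hand the path back to Gradio
    video_path = export_to_video(video_frames)
    return video_path


demo = gr.Interface(fn=generate_video, inputs="text", outputs="file")
demo.launch()

On a Hugging Face Space, share=True in demo.launch() is usually unnecessary, since the Space already serves the app publicly.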