fffiloni commited on
Commit
12cfb57
1 Parent(s): f017d32

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -22
app.py CHANGED
@@ -7,7 +7,7 @@ pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dt
7
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
8
  pipe.enable_model_cpu_offload()
9
 
10
- def infer(prompt, num_inference_steps):
11
  #prompt = "Darth Vader is surfing on waves"
12
  video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
13
  video_path = export_to_video(video_frames)
@@ -21,33 +21,24 @@ a {text-decoration-line: underline; font-weight: 600;}
21
 
22
  with gr.Blocks(css=css) as demo:
23
  with gr.Column(elem_id="col-container"):
24
- gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
25
- <div
26
- style="
27
- display: inline-flex;
28
- align-items: center;
29
- gap: 0.8rem;
30
- font-size: 1.75rem;
31
- "
32
- >
33
- <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
34
- Zeroscope Text-to-Video
35
- </h1>
36
- </div>
37
- <p style="margin-bottom: 10px; font-size: 94%">
38
- A watermark-free Modelscope-based video model optimized for producing high-quality 16:9 compositions and a smooth video output. <br />
39
- This model was trained using 9,923 clips and 29,769 tagged frames at 24 frames, 576x320 resolution.
40
- [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg)](https://huggingface.co/spaces/fffiloni/zeroscope?duplicate=true)
41
- </p>
42
- </div>""")
43
 
44
  prompt_in = gr.Textbox(label="Prompt", placeholder="Darth Vader is surfing on waves")
45
- inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False)
46
  submit_btn = gr.Button("Submit")
47
  video_result = gr.Video(label="Video Output")
48
 
49
  submit_btn.click(fn=infer,
50
- inputs=[prompt_in, inference_steps],
51
  outputs=[video_result])
52
 
53
  demo.queue(max_size=12).launch()
 
7
  pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
8
  pipe.enable_model_cpu_offload()
9
 
10
+ def infer(prompt):
11
  #prompt = "Darth Vader is surfing on waves"
12
  video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
13
  video_path = export_to_video(video_frames)
 
21
 
22
  with gr.Blocks(css=css) as demo:
23
  with gr.Column(elem_id="col-container"):
24
+ gr.Markdown(
25
+ """
26
+ # Zeroscope Text-to-Video
27
+
28
+ A watermark-free Modelscope-based video model optimized for producing high-quality 16:9 compositions and a smooth video output. <br />
29
+ This model was trained using 9,923 clips and 29,769 tagged frames at 24 frames, 576x320 resolution.
30
+ [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg)](https://huggingface.co/spaces/fffiloni/zeroscope?duplicate=true)
31
+
32
+ """
33
+ )
 
 
 
 
 
 
 
 
 
34
 
35
  prompt_in = gr.Textbox(label="Prompt", placeholder="Darth Vader is surfing on waves")
36
+ #inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False)
37
  submit_btn = gr.Button("Submit")
38
  video_result = gr.Video(label="Video Output")
39
 
40
  submit_btn.click(fn=infer,
41
+ inputs=[prompt_in],
42
  outputs=[video_result])
43
 
44
  demo.queue(max_size=12).launch()