test #3, opened by ayazii2

- README.md +1 -1
- app.py +17 -24
- requirements.txt +0 -2
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🐠
 colorFrom: blue
 colorTo: gray
 sdk: gradio
-sdk_version:
+sdk_version: 3.35.2
 app_file: app.py
 pinned: false
 duplicated_from: fffiloni/zeroscope
app.py CHANGED
@@ -1,10 +1,8 @@
 import gradio as gr
-import numpy as np
-import tempfile
-import imageio
-
+from share_btn import community_icon_html, loading_icon_html, share_js
 import torch
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+from diffusers.utils import export_to_video
 
 pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
 pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
@@ -17,21 +15,12 @@ def create_image_caption(image_init):
     print("cap: " + cap)
     return cap
 
-def export_to_video(frames: np.ndarray, fps: int) -> str:
-    frames = np.clip((frames * 255), 0, 255).astype(np.uint8)
-    out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
-    writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
-    for frame in frames:
-        writer.append_data(frame)
-    writer.close()
-    return out_file.name
-
 def infer(image_init):
     prompt = create_image_caption(image_init)
-    video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
-    video_path = export_to_video(video_frames
+    video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
+    video_path = export_to_video(video_frames)
     print(video_path)
-    return prompt, video_path
+    return prompt, video_path, gr.Group.update(visible=True)
 
 css = """
 #col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
@@ -113,18 +102,22 @@ with gr.Blocks(css=css) as demo:
         """
        )
 
-        image_init = gr.Image(label="Image Init",
+        image_init = gr.Image(label="Image Init",type="filepath", source="upload", elem_id="image-init")
         #inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False)
         submit_btn = gr.Button("Submit")
         coca_cap = gr.Textbox(label="Caption", placeholder="CoCa Caption will be displayed here", elem_id="coca-cap-in")
         video_result = gr.Video(label="Video Output", elem_id="video-output")
 
-
-
-
-
-
-
+        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
+            community_icon = gr.HTML(community_icon_html)
+            loading_icon = gr.HTML(loading_icon_html)
+            share_button = gr.Button("Share to community", elem_id="share-btn")
+
+        submit_btn.click(fn=infer,
+                         inputs=[image_init],
+                         outputs=[coca_cap, video_result, share_group])
+
+        share_button.click(None, [], [], _js=share_js)
 
-demo.queue(max_size=12).launch(
+demo.queue(max_size=12).launch()
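
The main functional change in app.py is dropping the hand-rolled imageio/tempfile exporter in favor of the helper bundled with diffusers. A minimal sketch of the new export path, assuming a diffusers release that ships diffusers.utils.export_to_video and a CUDA device; the prompt below is illustrative, since in the Space the prompt comes from a CoCa caption of the uploaded image:

import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video

pipe = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_576w", torch_dtype=torch.float16
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")  # assumption: GPU inference, as on the Space

# Same generation parameters as infer(): 24 frames at 576x320, 40 steps.
video_frames = pipe(
    "a goldfish swimming in a bowl",  # illustrative prompt
    num_inference_steps=40,
    height=320,
    width=576,
    num_frames=24,
).frames

# export_to_video writes the frames to a temporary .mp4 and returns its
# path, which is what the removed imageio/tempfile helper used to do.
video_path = export_to_video(video_frames)
print(video_path)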
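
The other change is UI wiring: the share group starts hidden and infer() now returns a third value, gr.Group.update(visible=True), routed to it, so the share button only appears once a result exists. A stripped-down sketch of that pattern under the pinned Gradio 3.x API (share_btn.py and its share_js are assumed to exist in the Space; the stub below stands in for the real inference):

import gradio as gr

def infer_stub(image_path):
    # Stand-in for the Space's infer(); the third return value is the
    # update that reveals the hidden share group.
    return "a coca caption", "output.mp4", gr.Group.update(visible=True)

with gr.Blocks() as demo:
    image_init = gr.Image(label="Image Init", type="filepath", source="upload")
    submit_btn = gr.Button("Submit")
    coca_cap = gr.Textbox(label="Caption")
    video_result = gr.Video(label="Video Output")

    # Hidden at load; made visible by the extra output of infer_stub.
    with gr.Group(visible=False) as share_group:
        share_button = gr.Button("Share to community")

    submit_btn.click(fn=infer_stub,
                     inputs=[image_init],
                     outputs=[coca_cap, video_result, share_group])

demo.queue(max_size=12).launch()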
requirements.txt CHANGED
@@ -3,6 +3,4 @@ transformers
 accelerate
 torch==2.0.1
 opencv-python
-imageio[ffmpeg]==2.34.1
-numpy
 