import gradio as gr

from text_to_animation.model import ControlAnimationModel
from webui.app_pose import create_demo as create_demo_pose
from webui.app_text_to_video import create_demo as create_demo_text_to_video
from webui.app_control_animation import create_demo as create_demo_animation
import argparse
import os
import jax.numpy as jnp

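# Detect whether the app is running on a HuggingFace Space (Spaces set the SPACE_AUTHOR_NAME environment variable).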
huggingspace_name = os.environ.get("SPACE_AUTHOR_NAME")
on_huggingspace = huggingspace_name is not None

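# Load the ControlAnimation model onto the GPU with half-precision (float16) weights.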
model = ControlAnimationModel(device="cuda", dtype=jnp.float16)

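# Command-line flags: --public_access exposes the app through a public Gradio share link when running locally.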
parser = argparse.ArgumentParser()
parser.add_argument(
    "--public_access",
    action="store_true",
    help="if enabled, the app can be access from a public url",
    default=False,
)
args = parser.parse_args()


title = """
<div style="text-align: center; max-width: 1200px; margin: 20px auto;">
<h1 style="font-weight: 900; font-size: 3rem; margin: 0rem">Control Animation</h1>
</div>
"""

description = """
<div style="text-align: center; max-width: 1200px; margin: 20px auto;">
<h2 style="font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem">
Our code is inspired by <a href="https://www.humphreyshi.com/home">Text2Video-Zero</a> and the <a href="https://github.com/huggingface/diffusers">Diffusers</a> library.
</h2>
</div>
"""

notice = """
<p>For faster inference without waiting in the queue, you may duplicate this Space and upgrade to a GPU in the settings.
<br/>
<a href="https://github.com/Pie31415/control-animation">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
</p>
"""

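# Build the Gradio UI: page header, the duplicate-Space notice (only on HuggingFace), and the Control Animation tab.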
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(title)
    gr.Markdown(description)

    if on_huggingspace:
        gr.HTML(notice)

    # NOTE: In our final demo we should consider removing zero-shot t2v and pose conditional
    with gr.Tab("Control Animation"):
        create_demo_animation(model)
    # with gr.Tab("Zero-Shot Text2Video"):
    #     create_demo_text_to_video(model)
    # with gr.Tab("Pose Conditional"):
    #     create_demo_pose(model)

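# On a HuggingFace Space, enable request queueing; when running locally, optionally expose a public share link.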
if on_huggingspace:
    demo.queue(max_size=20)
    demo.launch(debug=True)
else:
    _, _, link = demo.queue(api_open=False).launch(
        file_directories=["temporal"], share=args.public_access, debug=True
    )
    print(link)