import argparse
import os

import gradio as gr
import jax.numpy as jnp

from text_to_animation.model import ControlAnimationModel
from webui.app_control_animation import create_demo as create_demo_animation

# Detect whether the app is running inside a Hugging Face Space.
huggingspace_name = os.environ.get("SPACE_AUTHOR_NAME")
on_huggingspace = huggingspace_name is not None

model = ControlAnimationModel(dtype=jnp.float16)

parser = argparse.ArgumentParser()
parser.add_argument(
    "--public_access",
    action="store_true",
    help="if enabled, the app can be accessed from a public URL",
    default=False,
)
args = parser.parse_args()

title = """

# Control Animation
"""

description = """

Our code takes inspiration from Text2Video-Zero and the Diffusers library.
"""

notice = """

For faster inference without waiting in the queue, you may duplicate the Space and upgrade to a GPU in the settings.
Duplicate Space
"""

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    if on_huggingspace:
        gr.HTML(notice)

    with gr.Tab("Control Animation"):
        create_demo_animation(model)

if on_huggingspace:
    # On Hugging Face Spaces: queue requests and launch without a public share link.
    demo.queue(max_size=20)
    demo.launch(debug=True)
else:
    # Local run: optionally expose a public URL with --public_access.
    _, _, link = demo.queue(api_open=False).launch(
        file_directories=["temporal"],
        share=args.public_access,
        debug=True,
    )
    print(link)