File size: 5,822 Bytes
cc50ae5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fab1585
cc50ae5
 
 
 
2c31dbf
cc50ae5
 
 
 
 
 
553eb81
 
2c31dbf
 
 
 
 
 
cc50ae5
87b2569
2c31dbf
 
553eb81
2c31dbf
 
cc50ae5
553eb81
 
 
 
 
 
cc50ae5
 
2c31dbf
 
 
 
 
 
 
 
 
fab1585
cc50ae5
 
 
 
 
 
 
2c31dbf
cc50ae5
 
 
 
 
5e7f462
90f0083
ebbffeb
cc50ae5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91488c4
 
 
 
 
 
 
 
 
cc50ae5
 
 
 
27dc24e
 
 
 
 
 
 
 
cc50ae5
 
4181fbb
91488c4
2c31dbf
cc50ae5
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
import gradio as gr
import torch
import os
import spaces
import uuid

from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from PIL import Image

# Constants: UI display name -> Hugging Face repo id of each base checkpoint.
bases = {
    "Cartoon": "frankjoshua/toonyou_beta6",
    "Realistic": "emilianJR/epiCRealism",
    "3d": "Lykon/DreamShaper",
    "Anime": "Yntec/mistoonAnime2",
}

# Preload models — this demo requires CUDA, so fail fast at import time.
if not torch.cuda.is_available():
    raise NotImplementedError("No GPU detected!")

device = "cuda"
dtype = torch.float16

# Motion LoRA currently loaded into the pipelines (None until first generation).
motion_loaded = None


def _build_pipe(repo_id):
    """Load one AnimateDiff pipeline and attach the Lightning-style Euler scheduler."""
    p = AnimateDiffPipeline.from_pretrained(repo_id, torch_dtype=dtype).to(device)
    p.scheduler = EulerDiscreteScheduler.from_config(
        p.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
    )
    return p


# One ready-to-use pipeline per base model, keyed by display name.
pipes = {name: _build_pipe(repo) for name, repo in bases.items()}

# Function
@spaces.GPU(duration=60,queue=False)
def generate_image(prompt, base="Realistic", motion="Default", step=8, progress=gr.Progress()):
    """Generate a short video clip from a text prompt.

    Args:
        prompt: User's text prompt; it is prefixed with the base style name.
        base: Key into the module-level ``pipes`` dict ("Cartoon", "Realistic",
            "3d", "Anime").
        motion: Motion-LoRA repo id to apply, or "" for no motion LoRA.
        step: Number of inference steps; selects the matching
            AnimateDiff-Lightning UNet checkpoint (1/2/4/8).
        progress: Injected by Gradio for progress reporting (Gradio convention —
            the shared default instance is intentional).

    Returns:
        Path to the exported .mp4 file under /tmp.
    """
    # Only `motion_loaded` is assigned; `pipes` is read-only, so no global needed.
    global motion_loaded

    pipe = pipes[base]

    # Swap the motion LoRA only when the selection changed since the last call.
    if motion_loaded != motion:
        pipe.unload_lora_weights()
        if motion != "":
            pipe.load_lora_weights(motion, adapter_name="motion")
            pipe.set_adapters(["motion"], [0.7])
        motion_loaded = motion

    # Load the step-count-specific Lightning UNet weights.
    repo = "ByteDance/AnimateDiff-Lightning"
    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
    # Prefer the locally cached checkpoint; fall back to downloading it.
    # (Narrowed from a bare `except:` that swallowed KeyboardInterrupt/SystemExit.)
    try:
        ckpt_path = hf_hub_download(repo, ckpt, local_files_only=True)
    except Exception:
        ckpt_path = hf_hub_download(repo, ckpt)
    pipe.unet.load_state_dict(load_file(ckpt_path, device=device), strict=False)

    # Generate the frames and export them as a video file.
    output = pipe(prompt=f"{base} image of {prompt}", guidance_scale=1.2, num_inference_steps=step)

    name = str(uuid.uuid4()).replace("-", "")
    path = f"/tmp/{name}.mp4"
    export_to_video(output.frames[0], path, fps=10)
    return path



# Gradio Interface
with gr.Blocks(css="style.css") as demo:
    # Fixed broken HTML: missing `<` in `Examples</p>` and unclosed
    # </span>, </strong>, and </a> tags.
    gr.HTML(
        "<h1><center>Instant⚡Video</center></h1>" +
        "<p><center><span style='color: red;'>You may change the steps from 4 to 8, if you didn't get satisfied results.</span></center></p>" +
        "<p><center><strong>First Video Generating takes time then Videos generate faster.</strong></center></p>" +
        "<p><center>To get best results Make Sure to Write prompts in style as Given in Examples</p>" +
        "<p><a href='https://huggingface.co/spaces/KingNish/Instant-Video/discussions/1' >Must Share you Best Results with Community - Click HERE</a></p>"
    )
    with gr.Group():
        with gr.Row():
            prompt = gr.Textbox(
                label='Prompt'
            )
        with gr.Row():
            select_base = gr.Dropdown(
                label='Base model',
                choices=[
                    "Cartoon",
                    "Realistic",
                    "3d",
                    "Anime",
                ],
                # Fix: was `value=base_loaded`, an undefined name that raised
                # NameError at startup (only `motion_loaded` exists in this file).
                value="Realistic",
                interactive=True
            )
            select_motion = gr.Dropdown(
                label='Motion',
                choices=[
                    ("Default", ""),
                    ("Zoom in", "guoyww/animatediff-motion-lora-zoom-in"),
                    ("Zoom out", "guoyww/animatediff-motion-lora-zoom-out"),
                    ("Tilt up", "guoyww/animatediff-motion-lora-tilt-up"),
                    ("Tilt down", "guoyww/animatediff-motion-lora-tilt-down"),
                    ("Pan left", "guoyww/animatediff-motion-lora-pan-left"),
                    ("Pan right", "guoyww/animatediff-motion-lora-pan-right"),
                    ("Roll left", "guoyww/animatediff-motion-lora-rolling-anticlockwise"),
                    ("Roll right", "guoyww/animatediff-motion-lora-rolling-clockwise"),
                ],
                value="guoyww/animatediff-motion-lora-zoom-in",
                interactive=True
            )
            select_step = gr.Dropdown(
                label='Inference steps',
                choices=[
                    ('1-Step', 1),
                    ('2-Step', 2),
                    ('4-Step', 4),
                    ('8-Step', 8),
                ],
                value=4,
                interactive=True
            )
            submit = gr.Button(
                scale=1,
                variant='primary'
            )
    video = gr.Video(
        label='AnimateDiff-Lightning',
        autoplay=True,
        height=512,
        width=512,
        elem_id="video_output"
    )

    # Trigger generation from either the button or pressing Enter in the textbox.
    gr.on(triggers=[
            submit.click,
            prompt.submit
    ],
        fn = generate_image,
        inputs = [prompt, select_base, select_motion, select_step],
        outputs = [video],
        api_name = "instant_video",
        queue = False
    )

    # Each example row is a list of input values (here only the prompt).
    gr.Examples(
        examples=[
        ["Focus: Eiffel Tower (Animate: Clouds moving)"], #Atmosphere Movement Example
        ["Focus: Trees In forest (Animate: Lion running)"], #Object Movement Example
        ["Focus: Astronaut in Space"], #Normal
        ["Focus: Group of Birds in sky (Animate:  Birds Moving) (Shot From distance)"], #Camera distance
        ["Focus:  Statue of liberty (Shot from Drone) (Animate: Drone coming toward statue)"], #Camera Movement
        ["Focus: Panda in Forest (Animate: Drinking Tea)"], #Doing Something
        ["Focus: Kids Playing (Season: Winter)"], #Atmosphere or Season
        # Fix: this row was a set literal `{...}`; examples must be lists.
        ["Focus: Cars in Street (Season: Rain, Daytime) (Shot from Distance) (Movement: Cars running)"] #Mixture
    ],
        fn=generate_image,
        inputs=[prompt],
        outputs=[video],
        cache_examples="lazy",
)

demo.queue().launch()