# Copyright 2023 ByteDance and/or its affiliates.
#
# Copyright (2023) MagicAnimate Authors
#
# ByteDance, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from ByteDance or
# its affiliates is strictly prohibited.
import imageio
import numpy as np
import gradio as gr
import os
from PIL import Image

from demo.animate import MagicAnimate

from huggingface_hub import snapshot_download

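# Download the pretrained weights (base Stable Diffusion, the fine-tuned VAE,
# and the MagicAnimate checkpoints) into local directories at startup.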
snapshot_download(repo_id="runwayml/stable-diffusion-v1-5", local_dir="./stable-diffusion-v1-5")
snapshot_download(repo_id="stabilityai/sd-vae-ft-mse", local_dir="./sd-vae-ft-mse")
snapshot_download(repo_id="zcxu-eric/MagicAnimate", local_dir="./MagicAnimate")

is_spaces = "SPACE_ID" in os.environ
# True only when running in the official shared Space (zcxu-eric/magicanimate),
# where example outputs are cached.
true_for_shared_ui = is_spaces and "zcxu-eric/magicanimate" in os.environ["SPACE_ID"]


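# Build the MagicAnimate pipeline once at startup so every request reuses the
# same loaded models instead of re-initializing them.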
animator = MagicAnimate()

def animate(reference_image, motion_sequence_state, seed=1, steps=25, guidance_scale=7.5):
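    # Note: seed, steps, and guidance_scale arrive as strings from the Textbox
    # components below; the pipeline is assumed to coerce them to numbers.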
    return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)

with gr.Blocks() as demo:

    gr.HTML(
        """
        <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
        <a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
        </a>
        <div>
            <h1>MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model</h1>
            <h5 style="margin: 0;">If you like our project, please give us a star ✨ on GitHub for the latest updates.</h5>
            <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
                <a href="https://arxiv.org/abs/2311.16498"><img src="https://img.shields.io/badge/Arxiv-2311.16498-red"></a>
                <a href='https://showlab.github.io/magicanimate'><img src='https://img.shields.io/badge/Project_Page-MagicAnimate-green' alt='Project Page'></a>
                <a href='https://github.com/magic-research/magic-animate'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
            </div>
        </div>
        </div>
        """)
    animation = gr.Video(format="mp4", label="Animation Results", autoplay=True)

    with gr.Row():
        reference_image  = gr.Image(label="Reference Image")
        motion_sequence  = gr.Video(format="mp4", label="Motion Sequence", max_length=5)

        with gr.Column():
            random_seed         = gr.Textbox(label="Random seed", value=1, info="default: 1")
            sampling_steps      = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
            guidance_scale      = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
            submit              = gr.Button("Animate")

    def read_video(video):
        # Sanity-check that imageio can open the upload (this raises on an
        # unreadable file), then return the path unchanged.
        imageio.get_reader(video).get_meta_data()
        return video

    def read_image(image, size=512):
        # Resize the uploaded reference image to the square resolution the
        # model expects (512x512 by default).
        return np.array(Image.fromarray(image).resize((size, size)))

    # when the user uploads a new motion sequence video
    motion_sequence.upload(
        read_video,
        motion_sequence,
        motion_sequence,
        queue=False
    )
    # when a new reference image is uploaded
    reference_image.upload(
        read_image,
        reference_image,
        reference_image,
        queue=False
    )
    # when the `submit` button is clicked
    submit.click(
        animate,
        [reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale],
        animation
    )

    # Examples
    gr.Markdown("## Examples")
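    # Example outputs are pre-computed only on the official shared Space
    # (see cache_examples below); local runs generate them on demand.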
    gr.Examples(
        fn=animate,
        examples=[
            ["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"],
            ["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"],
            ["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"],
            ["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"],
            ["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"],
        ],
        inputs=[reference_image, motion_sequence],
        outputs=animation,
        cache_examples=true_for_shared_ui
    )

# demo.queue(max_size=15, api_open=False)
demo.launch(share=True, show_api=False)