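"""ControlAnimation model wrapper (Flax/JAX).

Loads Stable Diffusion components plus an openpose ControlNet, replicates
their parameters across the available devices, and exposes helpers to
generate candidate starting frames and full videos from a pose video.
"""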
import gc
import os
from enum import Enum
from typing import List

import numpy as np
import jax
import jax.numpy as jnp
import einops
from flax import jax_utils

from transformers import CLIPTokenizer, CLIPFeatureExtractor, FlaxCLIPTextModel
from diffusers import (
    FlaxDDIMScheduler,
    FlaxAutoencoderKL,
    FlaxUNet2DConditionModel as VanillaFlaxUNet2DConditionModel,
)
from text_to_animation.models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from diffusers import FlaxControlNetModel

from text_to_animation.pipelines.text_to_video_pipeline_flax import (
    FlaxTextToVideoPipeline,
)

import utils.utils as utils
import utils.gradio_utils as gradio_utils

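# True when running in a Hugging Face Space owned by PAIR.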
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"

def unshard(x):
    # Merge the leading device axis back into the batch axis: (d, b, ...) -> (d * b, ...).
    return einops.rearrange(x, "d b ... -> (d b) ...")


class ModelType(Enum):
    Text2Video = 1
    ControlNetPose = 2
    StableDiffusion = 3


def replicate_devices(array):
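    # Add a leading axis and repeat the array once per device, so it can be fed to a pmapped function.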
    return jnp.expand_dims(array, 0).repeat(jax.device_count(), 0)


class ControlAnimationModel:
    def __init__(self, dtype, **kwargs):
        self.dtype = dtype
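        # Base PRNG key; split per call to derive fresh sampling seeds.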
        self.rng = jax.random.PRNGKey(0)
        self.pipe = None
        self.model_type = None

        self.states = {}
        self.model_name = ""

    def set_model(
        self,
        model_id: str,
        **kwargs,
    ):
        if hasattr(self, "pipe") and self.pipe is not None:
            del self.pipe
            self.pipe = None
        gc.collect()

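        # Load the openpose ControlNet, converted from its PyTorch checkpoint; its weights stay in fp16.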
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "fusing/stable-diffusion-v1-5-controlnet-openpose",
            from_pt=True,
            dtype=jnp.float16,
        )

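        # Load the remaining components (scheduler, tokenizer, feature extractor, UNet, VAE, text encoder) from model_id.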
        scheduler, scheduler_state = FlaxDDIMScheduler.from_pretrained(
            model_id, subfolder="scheduler", from_pt=True
        )
        tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
        feature_extractor = CLIPFeatureExtractor.from_pretrained(
            model_id, subfolder="feature_extractor"
        )
        unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", from_pt=True, dtype=self.dtype
        )
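        # Instantiate a vanilla UNet from the same config; from_config builds the module without loading weights.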
        unet_vanilla = VanillaFlaxUNet2DConditionModel.from_config(
            model_id, subfolder="unet", from_pt=True, dtype=self.dtype
        )
        vae, vae_params = FlaxAutoencoderKL.from_pretrained(
            model_id, subfolder="vae", from_pt=True, dtype=self.dtype
        )
        text_encoder = FlaxCLIPTextModel.from_pretrained(
            model_id, subfolder="text_encoder", from_pt=True, dtype=self.dtype
        )
        self.pipe = FlaxTextToVideoPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            unet_vanilla=unet_vanilla,
            controlnet=controlnet,
            scheduler=scheduler,
            safety_checker=None,
            feature_extractor=feature_extractor,
        )
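        # Gather every component's parameters and replicate them across devices for pmapped inference.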
        self.params = {
            "unet": unet_params,
            "vae": vae_params,
            "scheduler": scheduler_state,
            "controlnet": controlnet_params,
            "text_encoder": text_encoder.params,
        }
        self.p_params = jax_utils.replicate(self.params)
        self.model_name = model_id

    def generate_initial_frames(
        self,
        prompt: str,
        video_path: str,
        n_prompt: str = "",
        num_imgs: int = 4,
        resolution: int = 512,
        model_id: str = "runwayml/stable-diffusion-v1-5",
    ) -> List[np.ndarray]:
        self.set_model(model_id=model_id)

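        # Resolve the selected motion template to its pose-video file path.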
        video_path = gradio_utils.motion_to_video_path(video_path)

        added_prompt = "high quality, best quality, HD, clay stop-motion, claymation, HQ, masterpiece, art, smooth"
        prompts = added_prompt + ", " + prompt

        added_n_prompt = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, deformed body, bloated, ugly"
        negative_prompts = added_n_prompt + ", " + n_prompt

        video, fps = utils.prepare_video(
            video_path, resolution, None, self.dtype, False, output_fps=4
        )
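        # The template video already contains pose frames, so pose detection is skipped.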
        control = utils.pre_process_pose(video, apply_pose_detect=False)

        # Split the base key so repeated calls draw fresh seeds instead of reusing the same ones.
        self.rng, subkey = jax.random.split(self.rng)
        seeds = jax.random.randint(subkey, [num_imgs], 0, 65536)
        prngs = [jax.random.PRNGKey(int(seed)) for seed in seeds]
        print(f"Sampling seeds: {[int(seed) for seed in seeds]}")
        images = self.pipe.generate_starting_frames(
            params=self.p_params,
            prngs=prngs,
            controlnet_image=control,
            prompt=prompts,
            neg_prompt=negative_prompts,
        )

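        # Convert the generated frames to NumPy arrays.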
        images = [np.array(images[i]) for i in range(images.shape[0])]

        return images

    def generate_video_from_frame(self, controlnet_video, prompt, seed, neg_prompt=""):
        """Generate a video from the ControlNet pose frames, using the provided seed."""
        prng_seed = jax.random.PRNGKey(seed)
        len_vid = controlnet_video.shape[0]
        # print(f"Generating video from prompt {'<aardman> style '+ prompt}, with {controlnet_video.shape[0]} frames and prng seed {seed}")
        added_prompt = "high quality, best quality, HD, clay stop-motion, claymation, HQ, masterpiece, art, smooth"
        prompts = added_prompt + ", " + prompt

        added_n_prompt = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, deformed body, bloated, ugly"
        negative_prompts = added_n_prompt + ", " + neg_prompt

        # prompt_ids = self.pipe.prepare_text_inputs(["aardman style "+ prompt]*len_vid)
        # n_prompt_ids = self.pipe.prepare_text_inputs([neg_prompt]*len_vid)

        prompt_ids = self.pipe.prepare_text_inputs([prompts] * len_vid)
        n_prompt_ids = self.pipe.prepare_text_inputs([negative_prompts] * len_vid)
        # Broadcast the same key to every device (per-device keys via
        # jax.random.split(prng_seed, jax.device_count()) would be an alternative).
        prng = replicate_devices(prng_seed)
        image = replicate_devices(controlnet_video)
        prompt_ids = replicate_devices(prompt_ids)
        n_prompt_ids = replicate_devices(n_prompt_ids)
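        # Fixed motion-field and background-smoothing strengths, replicated to all devices.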
        motion_field_strength_x = replicate_devices(jnp.array(3))
        motion_field_strength_y = replicate_devices(jnp.array(4))
        smooth_bg_strength = replicate_devices(jnp.array(0.8))
        vid = self.pipe(
            image=image,
            prompt_ids=prompt_ids,
            neg_prompt_ids=n_prompt_ids,
            params=self.p_params,
            prng_seed=prng,
            jit=True,
            smooth_bg_strength=smooth_bg_strength,
            motion_field_strength_x=motion_field_strength_x,
            motion_field_strength_y=motion_field_strength_y,
        ).images[0]
        return utils.create_gif(np.array(vid), 4, path=None, watermark=None)
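
# Example usage (a minimal sketch; the dtype, prompt, and motion name below are
# illustrative assumptions, not values defined by this module):
#
#   model = ControlAnimationModel(dtype=jnp.float16)
#   frames = model.generate_initial_frames(
#       prompt="a clay figure dancing",
#       video_path="Motion 1",
#   )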