import torch
from typing import Any
from transformers.agents.tools import Tool
from transformers.utils import is_accelerate_available

from diffusers import DiffusionPipeline

if is_accelerate_available():
    from accelerate import PartialState

TEXT_TO_VIDEO_DESCRIPTION = (
    "This is a tool that creates a video according to a text description. "
    "It takes an optional input `seconds` which is the duration of the video. "
    "The default is two seconds. The tool outputs a video object."
)


class TextToVideoTool(Tool):
    default_checkpoint = "damo-vilab/text-to-video-ms-1.7b"
    description = TEXT_TO_VIDEO_DESCRIPTION
    name = "video_generator"
    inputs = {"prompt": {"type": str, "description": "contains the image description"}}
    output_type = Any
    
    def __init__(self, device=None, **hub_kwargs) -> None:
        if not is_accelerate_available():
            raise ImportError("Accelerate should be installed in order to use tools.")

        super().__init__()

        self.device = device
        self.pipeline = None
        self.hub_kwargs = hub_kwargs

    def setup(self):
        # Resolve the target device lazily, so the tool can be constructed
        # before a device is chosen.
        if self.device is None:
            self.device = PartialState().default_device

        # Load the text-to-video diffusion pipeline from the fp16 checkpoint
        # variant and move it onto the selected device.
        self.pipeline = DiffusionPipeline.from_pretrained(
            self.default_checkpoint, variant="fp16"
        )
        self.pipeline.to(self.device)

        self.is_initialized = True

    def __call__(self, prompt, seconds=2):
        # Load the pipeline on first use.
        if not self.is_initialized:
            self.setup()

        # The requested duration is mapped to a frame count at 8 frames per
        # second of video.
        return self.pipeline(prompt, num_frames=8 * seconds).frames
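

if __name__ == "__main__":
    # Minimal usage sketch (assumes `accelerate` and `diffusers` are installed
    # and a suitable device is available; `export_to_video` is the helper from
    # `diffusers.utils`, which may need an extra backend such as opencv-python).
    from diffusers.utils import export_to_video

    tool = TextToVideoTool()
    frames = tool("A panda surfing on a wave at sunset", seconds=2)

    # Depending on the diffusers version, `.frames` may be a nested list of
    # frames per prompt; if so, pass `frames[0]` instead.
    export_to_video(frames, "panda_surfing.mp4")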