#!/usr/bin/env python

from __future__ import annotations

import os
import random

import gradio as gr
import numpy as np
import torch

from model import Model

DESCRIPTION = "# [UniDiffuser](https://github.com/thu-ml/unidiffuser)"

SPACE_ID = os.getenv("SPACE_ID")
if SPACE_ID is not None:
    DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶</p>"

model = Model()

MAX_SEED = np.iinfo(np.int32).max


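# Pick a fresh random seed when "Randomize seed" is checked; otherwise keep the given one.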
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


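# Build the UI for a single tab. The hidden "mode" dropdown passes the generation mode
# (t2i, i2t, joint, ...) through to model.run; the prompt/image inputs and the result
# components are shown or hidden depending on that mode.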
def create_demo(mode_name: str) -> gr.Blocks:
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                mode = gr.Dropdown(
                    label="Mode",
                    choices=[
                        "t2i",
                        "i2t",
                        "joint",
                        "i",
                        "t",
                        "i2t2i",
                        "t2i2t",
                    ],
                    value=mode_name,
                    visible=False,
                )
                prompt = gr.Text(label="Prompt", max_lines=1, visible=mode_name in ["t2i", "t2i2t"])
                image = gr.Image(label="Input image", type="pil", visible=mode_name in ["i2t", "i2t2i"])
                run_button = gr.Button("Run")
                with gr.Accordion("Advanced options", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    num_steps = gr.Slider(
                        label="Steps",
                        minimum=1,
                        maximum=100,
                        value=20,
                        step=1,
                    )
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=0.1,
                        maximum=30.0,
                        value=8.0,
                        step=0.1,
                    )
            with gr.Column():
                result_image = gr.Image(label="Generated image", visible=mode_name in ["t2i", "i", "joint", "i2t2i"])
                result_text = gr.Text(label="Generated text", visible=mode_name in ["i2t", "t", "joint", "t2i2t"])
        inputs = [
            mode,
            prompt,
            image,
            seed,
            num_steps,
            guidance_scale,
        ]
        outputs = [
            result_image,
            result_text,
        ]

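        # Wire both triggers (Enter in the prompt box and the Run button): first resolve
        # the seed, then call model.run with the shared input list.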
        prompt.submit(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
        ).then(
            fn=model.run,
            inputs=inputs,
            outputs=outputs,
        )
        run_button.click(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
        ).then(
            fn=model.run,
            inputs=inputs,
            outputs=outputs,
            api_name=f"run_{mode_name}",
        )
    return demo


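# Top-level app: one tab per UniDiffuser mode.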
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Tabs():
        with gr.TabItem("text2image"):
            create_demo("t2i")
        with gr.TabItem("image2text"):
            create_demo("i2t")
        with gr.TabItem("image variation"):
            create_demo("i2t2i")
        with gr.TabItem("joint generation"):
            create_demo("joint")
        with gr.TabItem("image generation"):
            create_demo("i")
        with gr.TabItem("text generation"):
            create_demo("t")
        with gr.TabItem("text variation"):
            create_demo("t2i2t")

if __name__ == "__main__":
    demo.queue(max_size=15).launch()