#!/usr/bin/env python

from __future__ import annotations

import os
import pathlib
import shlex
import subprocess

import gradio as gr


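# Optional setup (kept commented out): download the ControlNet annotator
# checkpoints with wget. The pathlib/shlex/subprocess imports above are only
# needed if this block is re-enabled.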
# base_url = 'https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/'
# names = [
#     'body_pose_model.pth',
#     'dpt_hybrid-midas-501f0c75.pt',
#     'hand_pose_model.pth',
#     'mlsd_large_512_fp32.pth',
#     'mlsd_tiny_512_fp32.pth',
#     'network-bsds500.pth',
#     'upernet_global_small.pth',
# ]
# for name in names:
#     command = f'wget {base_url}{name} -O {name}'
#     out_path = pathlib.Path(f'ControlNet/annotator/ckpts/{name}')
#     if out_path.exists():
#         continue
#     subprocess.run(shlex.split(command), cwd='ControlNet/annotator/ckpts/')

from gradio_sketch import create_demo as create_demo_sketch
from gradio_pose import create_demo as create_demo_pose
from gradio_seg import create_demo as create_demo_seg

from model import Model

MAX_IMAGES = 1
description = '''This is an unofficial demo for T2I-Adapter. [Paper](https://arxiv.org/abs/2302.08453)  [GitHub](https://github.com/TencentARC/T2I-Adapter) 
'''
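# On Hugging Face Spaces the SPACE_ID environment variable is set; use it to
# append a "Duplicate Space" badge linking to the Space duplication page.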
if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
    description += f'''<p>For faster inference without waiting in the queue, you may duplicate this Space and upgrade to a GPU in the settings.<br/>
<a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
</p>
'''

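# A single Model instance is shared by all tabs; each tab below wires up one of
# its process_* methods.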
model = Model()

with gr.Blocks(css='style.css') as demo:
    gr.Markdown("## T2I Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models.")
    gr.Markdown(description)
    with gr.Tabs():
        with gr.TabItem('Sketch'):
            create_demo_sketch(model.process_sketch)
        with gr.TabItem('Pose'):
            create_demo_pose(model.process_pose)
        with gr.TabItem('Segmentation'):
            create_demo_seg(model.process_seg)


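# Queue incoming requests and keep the backend API routes closed
# (api_open=False) so direct requests cannot skip the queue, then launch.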
demo.queue(api_open=False).launch()