File size: 3,191 Bytes
cd9ed02
3996268
f268def
3996268
 
 
 
cd9ed02
fb6c2da
 
2254a67
fb6c2da
 
 
 
 
 
2254a67
fb6c2da
fc5fab1
fb6c2da
 
 
 
a056b0b
fb6c2da
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ee11c4c
fb6c2da
 
 
 
 
 
 
 
 
 
af3233a
 
 
 
 
 
 
fb6c2da
 
 
 
 
 
 
 
a056b0b
 
2254a67
 
fb6c2da
3996268
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import os
import shlex
import subprocess

# Runtime environment setup for the hosted demo.
# mmcv-full must be installed through openmim ("mim") so that the prebuilt
# wheel matching the container's torch/CUDA combination is selected; the
# remaining dependencies (openmim, mmpose, mmdet, gradio, psutil) are
# expected to be preinstalled (e.g. via requirements.txt).
# Run via subprocess with a tokenized argv rather than os.system's shell
# string, matching how the download commands below are invoked.
subprocess.run(shlex.split('mim install mmcv-full==1.7.0'))

from demo.model import Model_all
import gradio as gr
from demo.demos import create_demo_keypose, create_demo_sketch, create_demo_draw, create_demo_seg, create_demo_depth
import torch
import subprocess
import shlex
from huggingface_hub import hf_hub_url

# Hugging Face Hub checkpoints: repo id -> list of files to fetch.
urls = {
    'TencentARC/T2I-Adapter':['models/t2iadapter_keypose_sd14v1.pth', 'models/t2iadapter_seg_sd14v1.pth', 'models/t2iadapter_sketch_sd14v1.pth', 'models/t2iadapter_depth_sd14v1.pth'],
    'CompVis/stable-diffusion-v-1-4-original':['sd-v1-4.ckpt'],
    'andite/anything-v4.0':['anything-v4.0-pruned.ckpt', 'anything-v4.0.vae.pt'],
}
# Direct-URL checkpoints for the mmdet/mmpose/deeplab auxiliary models.
urls_mmpose = [
    'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
    'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth',
    'https://github.com/kazuto1011/deeplab-pytorch/releases/download/v1.0/deeplabv2_resnet101_msc-cocostuff164k-100000.pth'
]


def _download_if_missing(url):
    """Fetch *url* into models/ via wget, skipping files already on disk.

    The target filename is the last path component of the URL, matching the
    layout the model loader expects.
    """
    name_ckp = url.split('/')[-1]
    save_path = os.path.join('models', name_ckp)
    if not os.path.exists(save_path):
        subprocess.run(shlex.split(f'wget {url} -O {save_path}'))


os.makedirs('models', exist_ok=True)
for repo, files in urls.items():
    for file in files:
        _download_if_missing(hf_hub_url(repo, file))

for url in urls_mmpose:
    _download_if_missing(url)

# Prefer GPU inference when CUDA is present; fall back to CPU otherwise.
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'
# Single model wrapper exposing one process_* callback per modality.
model = Model_all(device)

# Markdown header rendered at the top of the Gradio page.
DESCRIPTION = '''# T2I-Adapter (Sketch & Keypose & Segmentation)
[Paper](https://arxiv.org/abs/2302.08453)               [GitHub](https://github.com/TencentARC/T2I-Adapter) 

This gradio demo is for a simple experience of T2I-Adapter:
- Keypose/Sketch to Image Generation
- Image to Image Generation 
- Support the base model of Stable Diffusion v1.4 and Anything 4.0
'''

# Assemble the Gradio UI: description, "duplicate this Space" banner, and one
# tab per supported conditioning modality, then start the server.
with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)

    gr.HTML("""
    <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
    <br/>
    <a href="https://huggingface.co/spaces/Adapter/T2I-Adapter?duplicate=true">
    <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
    <p/>""")

    # (tab label, demo builder, model callback) for each modality.
    tab_specs = [
        ('Keypose', create_demo_keypose, model.process_keypose),
        ('Sketch', create_demo_sketch, model.process_sketch),
        ('Draw', create_demo_draw, model.process_draw),
        ('Segmentation', create_demo_seg, model.process_seg),
        ('Depth', create_demo_depth, model.process_depth),
    ]
    with gr.Tabs():
        for label, build_tab, process_fn in tab_specs:
            with gr.TabItem(label):
                build_tab(process_fn)

demo.queue().launch(debug=True, server_name='0.0.0.0')