#!/usr/bin/env python
from __future__ import annotations

import os
import pathlib
import shlex
import subprocess

import gradio as gr

# One-time download of the ControlNet annotator checkpoints (disabled):
# base_url = 'https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/'
# names = [
#     'body_pose_model.pth',
#     'dpt_hybrid-midas-501f0c75.pt',
#     'hand_pose_model.pth',
#     'mlsd_large_512_fp32.pth',
#     'mlsd_tiny_512_fp32.pth',
#     'network-bsds500.pth',
#     'upernet_global_small.pth',
# ]
# for name in names:
#     out_path = pathlib.Path(f'ControlNet/annotator/ckpts/{name}')
#     if out_path.exists():
#         continue
#     command = f'wget {base_url}{name} -O {name}'
#     subprocess.run(shlex.split(command), cwd='ControlNet/annotator/ckpts/')
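# A sketch of the same download via huggingface_hub instead of shelling out
# to wget (an assumption, not part of the original file; requires a
# huggingface_hub version that supports `local_dir`):
#
#     from huggingface_hub import hf_hub_download
#     for name in names:
#         hf_hub_download(repo_id='lllyasviel/ControlNet',
#                         filename=f'annotator/ckpts/{name}',
#                         local_dir='ControlNet')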

from gradio_sketch import create_demo as create_demo_sketch
from gradio_pose import create_demo as create_demo_pose
from gradio_seg import create_demo as create_demo_seg
from model import Model

MAX_IMAGES = 1

description = '''This is an unofficial demo for T2I-Adapter.

[Paper](https://arxiv.org/abs/2302.08453) [GitHub](https://github.com/TencentARC/T2I-Adapter)
'''
if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
    description += f'''
<p>For faster inference without waiting in the queue, you can duplicate this Space and upgrade to a GPU in the settings.
<a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true">Duplicate Space</a></p>
'''

model = Model()

with gr.Blocks(css='style.css') as demo:
    gr.Markdown('## T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models')
    gr.Markdown(description)

    with gr.Tabs():
        with gr.TabItem('Sketch'):
            create_demo_sketch(model.process_sketch)
        with gr.TabItem('Pose'):
            create_demo_pose(model.process_pose)
        with gr.TabItem('Segmentation'):
            create_demo_seg(model.process_seg)

demo.queue(api_open=False).launch()
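# Notes: `demo.queue(api_open=False)` enables request queuing while keeping
# the backend's REST routes closed, so direct API calls cannot bypass the
# queue. Run locally with `python app.py`; Gradio serves the UI on
# http://127.0.0.1:7860 by default.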