#!/usr/bin/env python

from __future__ import annotations

import os
import pathlib
import shlex
import subprocess

import gradio as gr

# When running on Spaces, apply the bundled patch to the ControlNet submodule.
if os.getenv('SYSTEM') == 'spaces':
    with open('patch') as f:
        subprocess.run(shlex.split('patch -p1'), stdin=f, cwd='ControlNet')

# Download the annotator checkpoints that ControlNet expects, skipping any
# that are already present.
base_url = 'https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/'
names = [
    'body_pose_model.pth',
    'dpt_hybrid-midas-501f0c75.pt',
    'hand_pose_model.pth',
    'mlsd_large_512_fp32.pth',
    'mlsd_tiny_512_fp32.pth',
    'network-bsds500.pth',
    'upernet_global_small.pth',
]
for name in names:
    out_path = pathlib.Path(f'ControlNet/annotator/ckpts/{name}')
    if out_path.exists():
        continue
    command = f'wget {base_url}{name} -O {name}'
    subprocess.run(shlex.split(command), cwd='ControlNet/annotator/ckpts/')

from app_depth import create_demo as create_demo_depth
from model import Model, download_all_controlnet_weights

DESCRIPTION = '''# RoomGPT
Redesign your room using the power of AI
'''

SPACE_ID = os.getenv('SPACE_ID')

MAX_IMAGES = 3
DEFAULT_NUM_IMAGES = min(MAX_IMAGES, 1)

# Pre-download all ControlNet weights when running on Spaces.
if os.getenv('SYSTEM') == 'spaces':
    download_all_controlnet_weights()

DEFAULT_MODEL_ID = os.getenv('DEFAULT_MODEL_ID',
                             'runwayml/stable-diffusion-v1-5')
model = Model(base_model_id=DEFAULT_MODEL_ID)

with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    create_demo_depth(model.process_depth,
                      max_images=MAX_IMAGES,
                      default_num_images=DEFAULT_NUM_IMAGES)

demo.queue(api_open=False).launch(file_directories=['/tmp'])