import os
import sys
from pathlib import Path

import gradio as gr
import torch
from PIL import Image

from utils_ootd import get_mask_location

# Make the repository root importable so the preprocess/ and ootd/ packages resolve.
PROJECT_ROOT = Path(__file__).absolute().parents[1].absolute()
sys.path.insert(0, str(PROJECT_ROOT))

from preprocess.openpose.run_openpose import OpenPose
from preprocess.humanparsing.run_parsing import Parsing
from ootd.inference_ootd_hd import OOTDiffusionHD
from ootd.inference_ootd_dc import OOTDiffusionDC

import spaces

# HD (half-body) models live on GPU 0; DC (full-body) models live on GPU 1.
openpose_model_hd = OpenPose(0)
parsing_model_hd = Parsing(0)
ootd_model_hd = OOTDiffusionHD(0)

openpose_model_dc = OpenPose(1)
parsing_model_dc = Parsing(1)
ootd_model_dc = OOTDiffusionDC(1)

category_dict = ['upperbody', 'lowerbody', 'dress']
category_dict_utils = ['upper_body', 'lower_body', 'dresses']

example_path = os.path.join(os.path.dirname(__file__), 'examples')
model_hd = os.path.join(example_path, 'model/model_1.png')
garment_hd = os.path.join(example_path, 'garment/03244_00.jpg')
model_dc = os.path.join(example_path, 'model/model_8.png')
garment_dc = os.path.join(example_path, 'garment/048554_1.jpg')


@spaces.GPU
def process_hd(vton_img, garm_img, n_samples, n_steps, image_scale, seed):
    """Half-body try-on. Currently not wired into the UI below."""
    model_type = 'hd'
    category = 0  # 0: upperbody; 1: lowerbody; 2: dress

    with torch.no_grad():
        openpose_model_hd.preprocessor.body_estimation.model.to('cuda')
        ootd_model_hd.pipe.to('cuda')
        ootd_model_hd.image_encoder.to('cuda')
        ootd_model_hd.text_encoder.to('cuda')

        garm_img = Image.open(garm_img).resize((768, 1024))
        vton_img = Image.open(vton_img).resize((768, 1024))
        keypoints = openpose_model_hd(vton_img.resize((384, 512)))
        model_parse, _ = parsing_model_hd(vton_img.resize((384, 512)))

        # Build the inpainting mask from the parsing map and pose keypoints,
        # then grey out the masked region of the person image.
        mask, mask_gray = get_mask_location(model_type, category_dict_utils[category], model_parse, keypoints)
        mask = mask.resize((768, 1024), Image.NEAREST)
        mask_gray = mask_gray.resize((768, 1024), Image.NEAREST)
        masked_vton_img = Image.composite(mask_gray, vton_img, mask)

        images = ootd_model_hd(
            model_type=model_type,
            category=category_dict[category],
            image_garm=garm_img,
            image_vton=masked_vton_img,
            mask=mask,
            image_ori=vton_img,
            num_samples=n_samples,
            num_steps=n_steps,
            image_scale=image_scale,
            seed=seed,
        )

    return images


@spaces.GPU
def process_dc(vton_img, garm_img, category):
    """Full-body try-on, driven by the garment-category dropdown in the UI."""
    model_type = 'dc'
    if category == 'Upper-body':
        category = 0
    elif category == 'Lower-body':
        category = 1
    else:
        category = 2

    with torch.no_grad():
        openpose_model_dc.preprocessor.body_estimation.model.to('cuda')
        ootd_model_dc.pipe.to('cuda')
        ootd_model_dc.image_encoder.to('cuda')
        ootd_model_dc.text_encoder.to('cuda')

        garm_img = Image.open(garm_img).resize((768, 1024))
        vton_img = Image.open(vton_img).resize((768, 1024))
        keypoints = openpose_model_dc(vton_img.resize((384, 512)))
        model_parse, _ = parsing_model_dc(vton_img.resize((384, 512)))

        mask, mask_gray = get_mask_location(model_type, category_dict_utils[category], model_parse, keypoints)
        mask = mask.resize((768, 1024), Image.NEAREST)
        mask_gray = mask_gray.resize((768, 1024), Image.NEAREST)
        masked_vton_img = Image.composite(mask_gray, vton_img, mask)

        images = ootd_model_dc(
            model_type=model_type,
            category=category_dict[category],
            image_garm=garm_img,
            image_vton=masked_vton_img,
            mask=mask,
            image_ori=vton_img,
            num_samples=1,
            num_steps=20,
            image_scale=2.0,
            seed=-1,
        )

    return images
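# Illustrative only (not part of the original app): process_dc can also be
# called directly, without the UI. The paths reuse the bundled example assets
# above; the output filename is an assumption, as is the premise that the
# returned list holds PIL images:
#
#   images = process_dc(
#       vton_img=os.path.join(example_path, 'model/model_8.png'),
#       garm_img=os.path.join(example_path, 'garment/048554_1.jpg'),
#       category='Upper-body',
#   )
#   images[0].save('ootd_dc_output.png')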
gr.Image(label="Model", sources='upload', type="filepath", height=384, width=300) example = gr.Examples( label="Select for Upper/Lower Body", inputs=vton_img_dc, examples_per_page=7, examples=[ os.path.join(example_path, 'model/model_8.png'), os.path.join(example_path, 'model/049713_0.jpg'), ] ) example = gr.Examples( label="Select for Full Body Dress", inputs=vton_img_dc, examples_per_page=7, examples=[ os.path.join(example_path, 'model/model_9.png'), os.path.join(example_path, 'model/053514_0.jpg'), ] ) with gr.Column(): garm_img_dc = gr.Image(label="Garment", sources='upload', type="filepath", height=384, width=300) category_dc = gr.Dropdown(label="Garment category (important option!!!)", choices=["Upper-body", "Lower-body", "Dress"], value="Upper-body") example = gr.Examples( label="Examples (upper-body)", inputs=garm_img_dc, examples_per_page=7, examples=[ os.path.join(example_path, 'garment/01260_00.jpg'), os.path.join(example_path, 'garment/01430_00.jpg'), os.path.join(example_path, 'garment/02783_00.jpg'), os.path.join(example_path, 'garment/03751_00.jpg'), os.path.join(example_path, 'garment/06429_00.jpg'), os.path.join(example_path, 'garment/06802_00.jpg'), os.path.join(example_path, 'garment/07429_00.jpg'), os.path.join(example_path, 'garment/08348_00.jpg'), os.path.join(example_path, 'garment/09933_00.jpg'), os.path.join(example_path, 'garment/11028_00.jpg'), os.path.join(example_path, 'garment/11351_00.jpg'), os.path.join(example_path, 'garment/11791_00.jpg'), os.path.join(example_path, 'garment/048554_1.jpg'), os.path.join(example_path, 'garment/049920_1.jpg'), os.path.join(example_path, 'garment/049965_1.jpg'), os.path.join(example_path, 'garment/049949_1.jpg'), os.path.join(example_path, 'garment/050181_1.jpg'), os.path.join(example_path, 'garment/049805_1.jpg'), os.path.join(example_path, 'garment/050105_1.jpg'), ] ) example = gr.Examples( label="Examples (lower-body)", inputs=garm_img_dc, examples_per_page=7, examples=[ os.path.join(example_path, 'garment/051827_1.jpg'), os.path.join(example_path, 'garment/051946_1.jpg'), os.path.join(example_path, 'garment/051473_1.jpg'), os.path.join(example_path, 'garment/051515_1.jpg'), os.path.join(example_path, 'garment/051517_1.jpg'), os.path.join(example_path, 'garment/051988_1.jpg'), os.path.join(example_path, 'garment/051412_1.jpg'), ] ) example = gr.Examples( label="Examples (dress)", inputs=garm_img_dc, examples_per_page=7, examples=[ os.path.join(example_path, 'garment/053290_1.jpg'), os.path.join(example_path, 'garment/053744_1.jpg'), os.path.join(example_path, 'garment/053742_1.jpg'), os.path.join(example_path, 'garment/053786_1.jpg'), os.path.join(example_path, 'garment/053790_1.jpg'), os.path.join(example_path, 'garment/053319_1.jpg'), os.path.join(example_path, 'garment/052234_1.jpg'), ] ) with gr.Column(): result_gallery_dc = gr.Gallery(label='Output', show_label=False, elem_id="gallery", preview=True, scale=1) with gr.Column(): run_button_dc = gr.Button(value="Run") ips_dc = [vton_img_dc, garm_img_dc, category_dc] run_button_dc.click(fn=process_dc, inputs=ips_dc, outputs=[result_gallery_dc]) block.launch()