# Make the parent directory importable so the shared CLI helper
# (util_for_argparse) resolves no matter where the script is launched from.
import os,sys
osa = os.path.abspath  # shorthand: absolute path
osd = os.path.dirname  # shorthand: parent directory
cur_dir = osd(osa(__file__))  # directory containing this script
par_dir = osd(cur_dir)        # one level up (presumably the project root)
sys.path.insert(0,par_dir)

from util_for_argparse import get_cuda_port_args

# Parsed CLI arguments; downstream code reads args.cuda and args.port.
args = get_cuda_port_args()


# Pin the visible GPU(s) from the CLI. This MUST run before `import torch`
# further down: the CUDA runtime reads CUDA_VISIBLE_DEVICES once, at
# initialization. (The redundant `import os,sys` — already done at the top
# of the file — has been removed.)
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

from utils.util_for_os import osj

# Tagbliton/kontext_extract_clothes_lora
# LoRA checkpoint layout helpers (plain defs instead of named lambdas, PEP 8 E731).
def par_lora_dir(clo_type):
    """Return the training-output directory for the given clothing type."""
    return f'/mnt/nas/shengjie/extractclo_{clo_type}_rank16_output_0923'


def lora_dir(par_lora_dir, id):
    """Return the checkpoint sub-directory for step *id* under *par_lora_dir*."""
    return f'{par_lora_dir}/checkpoint-{id}'


# Filename of the saved LoRA weights inside each checkpoint directory.
lora_name = 'pytorch_lora_weights.safetensors'
# Training step of the checkpoint to serve.
lora_id = 5000

def get_lora_path(lora_dir, lora_name):
    """Join a LoRA checkpoint directory with the weights filename."""
    return osj(lora_dir, lora_name)


# Heavy imports happen only after CUDA_VISIBLE_DEVICES is set above.
import torch
from diffusers import FluxKontextPipeline

import gc

from utils.MODEL_CKP import FLUX_KONTEXT
from utils.util_flux import process_img_1024

# Load the base Flux-Kontext pipeline once at module import time; LoRA
# adapters are swapped onto this shared instance per request in
# inference_one_img below.
pipeline = FluxKontextPipeline.from_pretrained(FLUX_KONTEXT, 
                                           torch_dtype=torch.bfloat16)
pipeline.to("cuda")

# Clothing type of the LoRA currently loaded on the pipeline (None = none yet).
cur_type = None


def get_prompt(clo_type):
    """Build the try-off trigger prompt for the given clothing type."""
    return f"[{clo_type}tryoff] tryoff {clo_type} clothing from control image"

def inference_one_img(  extra_type='',
                        img_input=None,):
    """Run one try-off pass using the LoRA matching *extra_type*.

    Args:
        extra_type: clothing type (e.g. 'upper', 'lower', 'dresses');
            selects which LoRA checkpoint is active on the shared pipeline.
        img_input: PIL control image (assumed RGB — TODO confirm against
            process_img_1024).

    Returns:
        A PIL image resized back to the control image's processed size.
    """
    target_shape = (1024, 1024)  # (w, h) canvas passed to process_img_1024

    # Release cached GPU memory from the previous request before running.
    torch.cuda.empty_cache()
    gc.collect()

    # NOTE: lora_id was listed in `global` before but is only read, never
    # assigned, so the declaration was unnecessary.
    global cur_type
    if extra_type != cur_type:
        # BUGFIX: drop the previously loaded adapter before loading a new
        # one. Calling load_lora_weights repeatedly with the default adapter
        # name either stacks adapters or raises a duplicate-adapter error
        # on the second type switch.
        if cur_type is not None:
            pipeline.unload_lora_weights()

        lora_path = get_lora_path( lora_dir(par_lora_dir(extra_type), lora_id) , 
                                    lora_name )
        pipeline.load_lora_weights(lora_path)

        cur_type = extra_type

    device = pipeline.device
    weight_dtype = pipeline.dtype

    autocast_ctx = torch.autocast(device.type, weight_dtype)

    prom = get_prompt(extra_type)

    # Letterbox/resize the control image to the 1024x1024 working canvas.
    img_ori_pil = process_img_1024('',img_pil=img_input, target_shape=target_shape)

    with autocast_ctx:
        image = pipeline(
            prompt=prom,
            image=img_ori_pil,
            num_inference_steps=20,
            guidance_scale=2.5,
            # Fixed seed so repeated runs on the same input are reproducible.
            generator=torch.Generator(device=device).manual_seed(42),
            max_sequence_length=512,
            height=img_ori_pil.height,
            width=img_ori_pil.width,
        ).images[0]

    # Map the generated image back to the control image's resolution.
    image = process_img_1024( '', image, 
                            img_ori_pil.size,)

    return image

import gradio as gr

import os
# Keep Gradio's uploaded/generated temp files on the NAS volume.
os.environ['GRADIO_TEMP_DIR'] = '/mnt/nas/shengjie/tmp'


# Selectable clothing types (e.g. 'upper', 'lower', 'dresses').
CLO_TYPES = ['upper', 'lower', 'dresses']

def gradio_inference(extra_type, img_input):
    """Gradio callback: forward the dropdown choice (str) and control image (PIL) to inference."""
    result = inference_one_img(extra_type=extra_type, img_input=img_input)
    return result

def start_gradio(port):
    """Build the Blocks UI and serve it on *port*, bound to all interfaces."""
    with gr.Blocks() as app:
        gr.Markdown("# FluxKontext Try-on Demo\n上传一张control image，并选择服饰类型，点击`生成`预览试穿效果。")
        with gr.Row():
            # Left column: inputs (control image + clothing type + submit).
            with gr.Column():
                img_input = gr.Image(label="Control Image", height=1024, width=1024, type="pil")
                extra_type = gr.Dropdown(choices=CLO_TYPES, value=CLO_TYPES[0], label="服饰类型 extra_type")
                submit_btn = gr.Button(value="生成")
            # Right column: generated try-off preview.
            with gr.Column():
                img_output = gr.Image(label="生成的试穿效果", height=1024, width=1024, type="pil")

        submit_btn.click(fn=gradio_inference, inputs=[extra_type, img_input], outputs=img_output)

    app.launch(server_name="0.0.0.0", server_port=port)

if __name__ == "__main__":
    # Serve the demo on the port provided via CLI args.
    start_gradio(args.port)