import os,sys
# Short aliases for the two path helpers used below.
osa = os.path.abspath
osd = os.path.dirname
# Directory containing this script.
cur_dir = osd(osa(__file__))
# Parent directory; prepended to sys.path so sibling packages
# (e.g. `utils`, imported later in this file) resolve when this
# file is run directly as a script.
par_dir = osd(cur_dir)
sys.path.insert(0,par_dir)

import argparse

def parse_args(argv=None):
    """Parse command-line arguments for the demo server.

    Args:
        argv: Optional list of argument strings. Defaults to None, in
            which case argparse reads ``sys.argv[1:]`` — identical to
            the original no-argument behavior.

    Returns:
        argparse.Namespace with attributes:
            cuda_id (str): required CUDA device id string, e.g. "0" or "0,1".
            port (int): server port, default 20025.
    """
    parser = argparse.ArgumentParser(description='示例脚本：接收CUDA设备和端口参数')

    parser.add_argument(
        '-c',
        '--cuda_id',
        type=str,
        required=True,  # the caller must always choose a device
        help='CUDA设备ID，例如 "0" 或 "0,1"（字符串类型）'
    )
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        default=20025,
        # BUG FIX: the help text previously said the default was 8000,
        # contradicting default=20025 above.
        help='端口号（整数类型，默认20025）'
    )

    return parser.parse_args(argv)

args = parse_args()

import os,sys,pdb
# Must be set before torch/CUDA initializes so only the requested
# device(s) are visible to the process.
os.environ['CUDA_VISIBLE_DEVICES']=args.cuda_id

from utils.util_for_os import ose,osj
import utils.util_for_huggingface 

# Tagbliton/kontext_extract_clothes_lora
# Root directory holding the LoRA training run's checkpoints.
par_lora_dir = '/mnt/nas/shengjie/posenv_output_0918'
# Checkpoint directory for a given training-step id.
lora_dir = lambda id: f'{par_lora_dir}/checkpoint-{id}'
# Filename of the LoRA weights inside each checkpoint directory.
lora_name = 'pytorch_lora_weights.safetensors'

def get_lora_path(lora_dir , lora_name):
    """Build the full path to a LoRA weights file.

    NOTE(review): `osj` comes from utils.util_for_os — presumably a
    shorthand for os.path.join; confirm in that module.
    """
    return osj( lora_dir , lora_name )

import torch
from diffusers import FluxKontextPipeline
from PIL import Image

import gc

from utils.MODEL_CKP import FLUX_KONTEXT
from utils.util_flux import process_img_1024,vertical_concat_images,horizontal_concat_images

# Load the base FLUX Kontext pipeline in bfloat16 and move it to the
# single CUDA device made visible via CUDA_VISIBLE_DEVICES above.
pipe = FluxKontextPipeline.from_pretrained(FLUX_KONTEXT, 
                                           torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Training-step id of the checkpoint to serve (NOTE: shadows builtin `id`).
id = 1500

lora_path = get_lora_path( lora_dir(id) , lora_name )

# Attach the LoRA weights on top of the base model.
pipe.load_lora_weights(lora_path)

def val_transformer(prom, input_img, steps):
    """Run one LoRA-augmented FLUX Kontext generation on a single image.

    Args:
        prom: text prompt passed to the pipeline.
        input_img: source image, preprocessed via process_img_1024 onto a
            fixed (2048, 1024) canvas before inference.
        steps: number of denoising steps.

    Returns:
        The generated PIL image, post-processed back to the
        preprocessed input's size.
    """
    # Reclaim GPU memory left over from any previous request.
    torch.cuda.empty_cache()
    gc.collect()

    # Fit the input onto the fixed 2048x1024 canvas expected by this demo.
    preprocessed = process_img_1024('', input_img, target_shape=(2048, 1024))

    # Autocast to the pipeline's own device type and dtype (bf16 on CUDA).
    with torch.autocast(pipe.device.type, pipe.dtype):
        generated = pipe(
            prompt=prom,
            image=preprocessed,
            num_inference_steps=steps,
            guidance_scale=3.5,
            generator=None,
            max_sequence_length=512,
            height=preprocessed.height,
            width=preprocessed.width,
        ).images[0]

    # Map the raw output back to the preprocessed input's dimensions.
    return process_img_1024('', generated, preprocessed.size,)

import gradio as gr

def infer_fn(prompt, input_img, steps):
    """Gradio click handler: validate inputs, then run inference.

    Returns None (blank output widget) when the image is missing or the
    prompt is empty/whitespace-only. BUG FIX: Gradio can deliver
    prompt=None for an untouched textbox, which previously raised
    AttributeError on ``.strip()``; ``not prompt`` now guards that case.
    """
    if input_img is None or not prompt or not prompt.strip():
        return None
    return val_transformer(prompt, input_img, steps)

def start_gradio():
    """Build the Gradio UI and serve it on 0.0.0.0 at the CLI-chosen port."""

    with gr.Blocks() as demo:
        gr.Markdown("# FLUX Kontext PoseEnv Demo")
        with gr.Row():
            # Left column: all inputs plus the trigger button.
            with gr.Column():
                prompt_box = gr.Textbox(label="Prompt", lines=2, placeholder="Enter your prompt here")
                image_box = gr.Image(label="Input Image", height=512, type="pil")
                steps_slider = gr.Slider(label="Steps", minimum=1, maximum=100, value=30, step=1)
                run_button = gr.Button("Submit")
            # Right column: the generated result.
            with gr.Column():
                result_box = gr.Image(label="Output Image", height=512, type="pil")

        run_button.click(
            fn=infer_fn,
            inputs=[prompt_box, image_box, steps_slider],
            outputs=result_box
        )

    demo.launch(server_name='0.0.0.0', server_port=args.port)


if __name__ == "__main__":
    # Launch the web demo only when executed directly as a script.
    start_gradio()