import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import gradio as gr
from diffusers import FluxKontextPipeline
import torch,gc
from PIL import Image
from util_flux import process_img_1024, vertical_concat_images, horizontal_concat_images
from util_for_os import osj, ose
from MODEL_CKP import FLUX_KONTEXT,FLUX_REDUX


# Constants
dtype = torch.bfloat16  # inference precision used when loading the pipeline
device = 'cuda'         # target device for the pipeline

# Module-level cache: the currently loaded checkpoint id and its pipeline.
# load_model_if_needed() reuses `pipe` when the same checkpoint is requested.
current_ckp = None
pipe = None

def load_model_if_needed(ckp_id):
    """Return a FluxKontextPipeline with the LoRA weights for ``ckp_id``.

    Uses the module-level ``current_ckp``/``pipe`` globals as a one-slot
    cache: if the requested checkpoint is already loaded, the cached
    pipeline is returned; otherwise the old pipeline is released and the
    new one is loaded from ``FLUX_KONTEXT`` plus the checkpoint's LoRA file.

    Args:
        ckp_id: Checkpoint id forwarded to ``get_lora_path``.

    Returns:
        The loaded (or cached) ``FluxKontextPipeline`` on ``device``.
    """
    global current_ckp, pipe

    # Cache hit: same checkpoint already loaded — nothing to do.
    if current_ckp == ckp_id and pipe is not None:
        print(f"CKP {ckp_id} 已加载，跳过重复加载")
        return pipe

    # Release the previous pipeline before loading a new one. Rebind to
    # None (instead of `del pipe`) so the global stays defined even if the
    # load below raises; collect Python refs first, then let CUDA reclaim
    # the freed blocks.
    if pipe is not None:
        pipe = None
        gc.collect()
        torch.cuda.empty_cache()

    print(f"正在加载 CKP {ckp_id}...")
    lora_path = get_lora_path(ckp_id)
    # Use the module-level dtype/device constants rather than re-hardcoding
    # torch.bfloat16 / "cuda" here, so config lives in one place.
    pipe = FluxKontextPipeline.from_pretrained(
        FLUX_KONTEXT,
        torch_dtype=dtype,
    ).to(device)
    pipe.load_lora_weights(lora_path)

    current_ckp = ckp_id  # only mark as current after a successful load
    return pipe


def get_lora_path(ckp_id):
    """Build the NAS path of the LoRA weights file for checkpoint ``ckp_id``.

    Note: ``ckp_id`` is in thousands of steps — e.g. 13 -> checkpoint-13000.
    """
    base_dir = '/mnt/nas/shengjie/tryoff_output20250717'
    return f'{base_dir}/checkpoint-{ckp_id}000/pytorch_lora_weights.safetensors'


def process_image(input_image, ckp_id, garment_type, seed=20250718):
    """Extract the specified garment from an uploaded image.

    Loads (or reuses) the pipeline for ``ckp_id``, preprocesses the input,
    and runs FluxKontext with garment-specific prompts.

    Args:
        input_image: Path to the uploaded image (Gradio ``type="filepath"``).
        ckp_id: Checkpoint id passed to ``load_model_if_needed``.
        garment_type: One of "upper", "lower", "fullbody" (interpolated
            into the prompts).
        seed: RNG seed for reproducible generation; defaults to the value
            previously hardcoded (20250718).

    Returns:
        Tuple ``(comparison, result)`` where ``comparison`` is the input and
        output side by side and ``result`` is the extracted garment image,
        or ``(None, None)`` on any failure (best-effort UI handler).
    """
    try:
        # Model loading is cached — cheap when ckp_id is unchanged.
        pipe = load_model_if_needed(ckp_id)

        # Resize/normalize the input to the pipeline's expected resolution.
        control_image = process_img_1024(input_image)

        # Garment-specific prompts; prompt_2 adds product-photo styling.
        prompt = f'An image of a garment. Focus on the {garment_type} garment the human wearing, and isolate it from the rest.'
        prompt2 = f'An image of a garment. Focus on the {garment_type} garment the human wearing, and isolate it from the rest. A high-quality product photo of the {garment_type} garment on a white background, centered, no model, studio lighting.'

        with torch.no_grad():
            result_image = pipe(
                prompt=prompt,
                prompt_2=prompt2,
                image=control_image,
                height=control_image.height,
                width=control_image.width,
                num_inference_steps=20,
                guidance_scale=4.5,
                generator=torch.Generator().manual_seed(seed),
            ).images[0]

        return horizontal_concat_images([control_image, result_image]), result_image

    except Exception as e:
        # Best-effort for the UI: report the failure but return empty
        # outputs. Log the full traceback — a bare str(e) hides where the
        # pipeline actually failed.
        import traceback
        traceback.print_exc()
        print(f"Error: {e}")
        return None, None


# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Garment Extraction Tool")
    gr.Markdown("Upload an image and select the garment type to extract")

    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="filepath", label="Upload Image")
            ckp_select = gr.Dropdown(
                choices=list(range(13, 29)),
                label="Checkpoint Selection",
                value=13,
            )

            with gr.Row():
                upper_btn = gr.Button("Extract Upper")
                lower_btn = gr.Button("Extract Lower")
                full_btn = gr.Button("Extract Full Body")

        with gr.Column():
            comparison_output = gr.Image(label="Comparison (Original vs Result)")
            result_output = gr.Image(label="Extracted Garment")

    # Wire each button to the shared handler. The garment type is bound as
    # a lambda default argument to avoid the late-binding closure pitfall.
    for button, garment in (
        (upper_btn, "upper"),
        (lower_btn, "lower"),
        (full_btn, "fullbody"),
    ):
        button.click(
            fn=lambda img, ckp, g=garment: process_image(img, ckp, g),
            inputs=[image_input, ckp_select],
            outputs=[comparison_output, result_output],
        )

if __name__ == "__main__":
    demo.launch(    
        server_port=20018,          # custom port
        server_name="0.0.0.0",    # bind all interfaces to allow external access
        share=False,              # do not create a public share link
        debug=True,                # debug mode
        prevent_thread_lock=True, # do not block the main thread
        )