import gradio as gr
import torch
import os
from torchvision import transforms
from peft import PeftModel
# from model_demo3 import build_llm_model, build_clip_model, MultiModalTaggingModel  
from model_ChineseClip_Q3 import *
from concurrent.futures import ThreadPoolExecutor
from torchvision.transforms.functional import InterpolationMode
from model_old.model_ChineseCLIPSimilarityCalculator import *
# Ensure CJK glyphs render correctly in any matplotlib output
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]

# Module-level state shared across Gradio callbacks
model = None               # MultiModalTaggingModel instance once loaded
tokenizer = None           # tokenizer paired with the loaded LLM
current_llm_lora = None    # path of the currently loaded LLM LoRA adapter
current_clip_lora = None   # path of the currently loaded CLIP LoRA adapter
current_proj_layer = None  # path of the currently loaded projection-layer .pt
device = "cuda" if torch.cuda.is_available() else "cpu"

# Single worker so at most one model load runs at a time
executor = ThreadPoolExecutor(max_workers=1)


def load_models_async(llm_lora_path, clip_lora_path, proj_layer_path):
    """Run ``load_models`` through the single-worker executor.

    Blocks until loading finishes and returns its status string; the
    single worker guarantees only one load is in flight at a time.
    """
    job = executor.submit(
        load_models, llm_lora_path, clip_lora_path, proj_layer_path
    )
    return job.result()


def load_best_model(llm_path, clip_path, llm_lora_path, clip_lora_path, proj_layer_path):
    """Build the multimodal model and attach fine-tuned weights (mirrors the test-code logic).

    Args:
        llm_path: base LLM checkpoint directory.
        clip_path: base Chinese-CLIP checkpoint directory.
        llm_lora_path: LoRA adapter directory for the LLM.
        clip_lora_path: LoRA adapter directory for the CLIP encoder.
        proj_layer_path: ``.pt`` state dict for the CLIP-to-LLM projection layer.

    Returns:
        Tuple of (model, tokenizer).

    Raises:
        FileNotFoundError: if any of the three fine-tuned weight paths is missing.
    """
    # Validate all three weight paths up front so a bad selection fails fast,
    # instead of only after the (slow, memory-hungry) base models are built.
    if not os.path.exists(llm_lora_path):
        raise FileNotFoundError(f"LLM LoRA路径不存在: {llm_lora_path}")
    if not os.path.exists(clip_lora_path):
        raise FileNotFoundError(f"CLIP LoRA路径不存在: {clip_lora_path}")
    if not os.path.exists(proj_layer_path):
        raise FileNotFoundError(f"投影层路径不存在: {proj_layer_path}")

    llm_model, tokenizer = build_llm_model(llm_path)
    clip_model, clip_processor = build_clip_model(clip_path)
    similarity_calculator = ChineseCLIPSimilarityCalculator(
        clip_model_path="pth/chinese-clip-large14",
        device=device,  # reuse the module-level device decision
    )
    model = MultiModalTaggingModel(
        clip_model=clip_model,
        llm_model=llm_model,
        tokenizer=tokenizer,
        processor=clip_processor,
        similarity_calculator=similarity_calculator,
    )

    model.llm = PeftModel.from_pretrained(model.llm, llm_lora_path)
    model.clip = PeftModel.from_pretrained(model.clip, clip_lora_path)
    # map_location="cpu" keeps loading device-agnostic (a GPU-saved checkpoint
    # would otherwise fail on a CPU-only host); the caller moves the whole
    # model to the target device afterwards.
    model.clip2llm_proj.load_state_dict(
        torch.load(proj_layer_path, map_location="cpu")
    )

    return model, tokenizer


def load_models(llm_lora_path, clip_lora_path, proj_layer_path):
    """Load model weights and cache them in the module globals.

    Never raises: returns a human-readable status string so the Gradio
    UI can display success or failure directly.
    """
    global model, tokenizer, current_llm_lora, current_clip_lora, current_proj_layer

    try:
        model, tokenizer = load_best_model(
            llm_path="pth/Qwen3-1.7B",
            clip_path="pth/chinese-clip-large14",
            llm_lora_path=llm_lora_path,
            clip_lora_path=clip_lora_path,
            proj_layer_path=proj_layer_path,
        )
        model.to(device)
        model.eval()

        # Remember which paths are loaded so generate_tags can detect changes.
        current_llm_lora = llm_lora_path
        current_clip_lora = clip_lora_path
        current_proj_layer = proj_layer_path

        return "模型加载成功: 三参数均已加载"
    except Exception as e:
        return f"加载失败: {str(e)}"


# 图像预处理 - 使用与测试代码完全一致的processor
# Image preprocessing — uses the exact same processor as the test code
def preprocess_image(image):
    """Run the model's CLIP processor on an input image.

    Returns the normalized ``pixel_values`` tensor on the target device,
    or ``None`` when no model has been loaded yet.
    """
    global model
    if model is None:
        return None

    # The full processor (resize + normalization) matches the test pipeline.
    batch = model.processor(images=image, return_tensors="pt").to(device)
    return batch["pixel_values"]


def _print_tag_sims(header, tags_all, sims_all):
    """Debug-print per-sample tag strings and their similarity scores."""
    print(header)
    for i, (tags_str, sims) in enumerate(zip(tags_all, sims_all)):
        print(f"样本 {i+1}:")
        print(f"标签: {tags_str}")
        print(f"对应的相似度: {sims}")
        print("---")


def generate_tags(image, llm_lora_path, clip_lora_path, proj_layer_path):
    """Generate tags for an image, reloading weights first if the selected paths changed.

    Returns:
        (tags_str, near_miss_tags_str): accepted tags and near-threshold
        tags as display strings. On failure the first element carries the
        error message and the second is empty.
    """
    global model, tokenizer, current_llm_lora, current_clip_lora, current_proj_layer
    if model is None:
        return "请先加载模型参数", ""

    # Reload when any of the three selected weight paths differs from the
    # currently loaded combination.
    if (llm_lora_path != current_llm_lora or
        clip_lora_path != current_clip_lora or
        proj_layer_path != current_proj_layer):
        status = load_models(llm_lora_path, clip_lora_path, proj_layer_path)
        if "失败" in status:
            return status, ""

    try:
        processed_img = preprocess_image(image)
        if processed_img is None:
            return "图像预处理失败", ""

        with torch.no_grad():
            # Model returns a 5-tuple:
            # (filtered_tags, filtered_sims, all_sims, near_miss_tags, near_miss_sims)
            model_output = model(images=processed_img, is_training=False, top_k=3)
            filtered_tags_all, filtered_sims_all, all_sims_all, near_miss_tags_all, near_miss_sims_all = model_output

            # Debug output for both tag groups (was duplicated inline).
            _print_tag_sims("===== 过滤后的标签及其相似度 =====",
                            filtered_tags_all, filtered_sims_all)
            _print_tag_sims("\n===== 接近阈值的标签及其相似度 =====",
                            near_miss_tags_all, near_miss_sims_all)

            # First batch entry only: the UI handles a single image at a time.
            if isinstance(filtered_tags_all, list) and filtered_tags_all:
                tags_str = filtered_tags_all[0] if filtered_tags_all[0] else "未生成有效标签"
            else:
                tags_str = "未生成有效标签"

            # Near-miss tags arrive as one comma-joined string per sample —
            # TODO confirm against the model's output format. Split, strip,
            # and drop empties before re-joining for display.
            if isinstance(near_miss_tags_all, list) and near_miss_tags_all:
                near_miss_tags = [
                    tag.strip()
                    for tag in near_miss_tags_all[0].split(",")
                    if tag.strip()
                ]
                near_miss_tags_str = ", ".join(near_miss_tags) if near_miss_tags else "无接近阈值标签"
            else:
                near_miss_tags_str = "无接近阈值标签"

            return tags_str, near_miss_tags_str

    except Exception as e:
        import traceback
        print(traceback.format_exc())
        return f"生成失败: {str(e)}", ""

def scan_params_from_dir(root_dir):
    """Recursively collect candidate weight paths under ``root_dir``.

    A directory containing ``adapter_config.json`` counts as a LoRA
    adapter and is classified by whether its path mentions ``llm_lora``
    or ``clip_lora``. Any ``*.pt`` file whose name mentions ``proj`` is
    a projection-layer candidate.

    Returns:
        Tuple of three lists: (llm_lora_dirs, clip_lora_dirs, proj_layer_files).
    """
    llm_lora_paths, clip_lora_paths, proj_layer_paths = [], [], []

    if not os.path.exists(root_dir):
        return llm_lora_paths, clip_lora_paths, proj_layer_paths

    for dirpath, _dirnames, filenames in os.walk(root_dir):
        if "adapter_config.json" in filenames:
            lowered = dirpath.lower()
            if "llm_lora" in lowered:
                llm_lora_paths.append(dirpath)
            elif "clip_lora" in lowered:
                clip_lora_paths.append(dirpath)

        proj_layer_paths.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(".pt") and "proj" in name.lower()
        )

    return llm_lora_paths, clip_lora_paths, proj_layer_paths


def create_interface():
    """Build the Gradio UI: parameter dropdowns, image input, and tag outputs."""
    root_dir = "output_model_chineseclip_3-1.7B"
    llm_choices, clip_choices, proj_choices = scan_params_from_dir(root_dir)

    # Prepend a placeholder when candidates exist; otherwise show a
    # single "not found" entry so the dropdown is never empty.
    llm_choices = ["请选择LLM LoRA适配器"] + llm_choices if llm_choices else ["未找到LLM LoRA适配器"]
    clip_choices = ["请选择CLIP LoRA适配器"] + clip_choices if clip_choices else ["未找到CLIP LoRA适配器"]
    proj_choices = ["请选择投影层参数"] + proj_choices if proj_choices else ["未找到投影层参数"]

    with gr.Blocks(title="工装标签生成器") as interface:
        gr.Markdown("# 工装图像标签生成器")
        gr.Markdown(f"从目录 `{root_dir}` 中选择三个参数，生成图像标签")

        with gr.Row():
            # Left column: inputs and action buttons.
            with gr.Column(scale=1):
                image_input = gr.Image(type="pil", label="上传图像")
                llm_lora = gr.Dropdown(choices=llm_choices, label="LLM LoRA适配器", value=llm_choices[0])
                clip_lora = gr.Dropdown(choices=clip_choices, label="CLIP LoRA适配器", value=clip_choices[0])
                proj_layer = gr.Dropdown(choices=proj_choices, label="投影层参数（.pt）", value=proj_choices[0])
                load_btn = gr.Button("加载参数", variant="secondary")
                generate_btn = gr.Button("生成标签", variant="primary")

            # Right column: status and results.
            with gr.Column(scale=1):
                status_output = gr.Textbox(label="加载状态")
                tags_output = gr.Textbox(label="生成的标签")
                near_miss_tags_output = gr.Textbox(label="可选其他标签")

        # Wire the buttons to their callbacks.
        load_btn.click(
            fn=load_models_async,
            inputs=[llm_lora, clip_lora, proj_layer],
            outputs=[status_output],
        )
        generate_btn.click(
            fn=generate_tags,
            inputs=[image_input, llm_lora, clip_lora, proj_layer],
            outputs=[tags_output, near_miss_tags_output],
        )

    return interface


# Entry point: build the UI and serve it.
if __name__ == "__main__":
    interface = create_interface()
    # NOTE(review): hard-coded LAN IP and port — confirm these match the deploy host
    interface.launch(server_name="192.168.1.163", server_port=7880)