import torch
import os
import yaml
from tqdm import tqdm
from torch.optim import AdamW
from accelerate import Accelerator
from model_old.model_demo2 import *
from data.balancedDataset import get_dataloaders
from transformers import get_scheduler
from safetensors.torch import load_file as load_safetensors
from peft import PeftModel
import json

def test(model, dataloader, accelerator):
    """Run inference over the test dataloader and collect per-sample results.

    Args:
        model: multimodal tagging model; calling it with is_training=False is
            expected to return a list of decoded tag strings (one per sample).
        dataloader: batches with keys "images", "prompts", "tags" and
            optionally "image_id".
        accelerator: Accelerate handle, used only to silence tqdm on
            non-main processes.

    Returns:
        dict with:
          - "metrics": global micro-averaged metrics from calculate_metrics
          - "samples": list of {"image_id", "true_tags", "predicted_tags"}
    """
    model.eval()
    sample_results = []    # per-sample details (image id, true/predicted tags)
    all_predictions = []   # flat list of predicted tag strings for metrics
    all_references = []    # flat list of reference tag strings for metrics

    for batch in tqdm(dataloader, desc="测试", disable=not accelerator.is_main_process):
        images = batch["images"]
        prompts = batch["prompts"]
        tags = batch["tags"]
        # Fallback ids when the dataset provides no "image_id". Offset by the
        # number of samples already seen so ids stay unique across batches
        # (the original per-batch fallback repeated "unknown_0", "unknown_1",
        # ... in every batch).
        seen = len(sample_results)
        image_ids = batch.get(
            "image_id",
            [f"unknown_{seen + i}" for i in range(len(images))],
        )

        with torch.no_grad():
            # is_training=False makes the model return decoded tag strings
            # directly (no manual batch_decode needed).
            generated_tags = model(images, prompts, is_training=False, top_k=5)
            # Normalize tag format: full-width commas -> ASCII, trim whitespace.
            generated_tags = [t.replace('，', ',').strip() for t in generated_tags]
            true_tags = [t.replace('，', ',').strip() for t in tags]

            # Record each sample's id, reference tags, and predicted tags.
            for img_id, pred, true in zip(image_ids, generated_tags, true_tags):
                sample_results.append({
                    "image_id": img_id,
                    "true_tags": true,
                    "predicted_tags": pred
                })

            # Accumulate data for the global metrics.
            all_predictions.extend(generated_tags)
            all_references.extend(true_tags)

    metrics = calculate_metrics(all_predictions, all_references)

    return {
        "metrics": metrics,
        "samples": sample_results
    }

def calculate_metrics(predictions, references):
    """Compute micro-averaged precision/recall/F1 and exact-match accuracy.

    Each element of *predictions* / *references* is a comma-separated tag
    string; samples are compared as sets of stripped, non-empty tags.
    """
    tp = fp = fn = 0
    exact_matches = 0

    for pred_str, ref_str in zip(predictions, references):
        predicted = {t.strip() for t in pred_str.split(',') if t.strip()}
        expected = {t.strip() for t in ref_str.split(',') if t.strip()}

        tp += len(predicted & expected)
        fp += len(predicted - expected)
        fn += len(expected - predicted)
        exact_matches += int(predicted == expected)

    # Guard every division against empty denominators.
    precision = tp / (tp + fp) if tp + fp > 0 else 0
    recall = tp / (tp + fn) if tp + fn > 0 else 0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall > 0 else 0)
    accuracy = exact_matches / len(predictions) if predictions else 0

    return {
        "exact_match_accuracy": accuracy,
        "micro_precision": precision,
        "micro_recall": recall,
        "micro_f1": f1
    }

def load_best_model(config):
    """Rebuild the model and load the best saved LoRA adapters and projection.

    Args:
        config: object with attributes baichuan_path, clip_path, output_dir.

    Returns:
        (model, tokenizer) tuple with adapters and projection weights attached.
    """
    llm_model, tokenizer = build_llm_model(config.baichuan_path)
    clip_model, clip_processor = build_clip_model(config.clip_path)

    model = MultiModalTaggingModel(
        clip_model=clip_model,
        llm_model=llm_model,
        tokenizer=tokenizer,
        processor=clip_processor
    )

    # Attach the LLM LoRA adapter (required).
    lora_path = os.path.join(config.output_dir, "llm_lorabylast/llm_lora_adapter")
    model.llm = PeftModel.from_pretrained(model.llm, lora_path)

    # Attach the CLIP LoRA adapter only if it was saved.
    clip_lora_path = os.path.join(config.output_dir, "clip_lorabylast/clip_lora_adapter")
    if os.path.exists(clip_lora_path):
        model.clip = PeftModel.from_pretrained(model.clip, clip_lora_path)

    # Load projection-layer weights. map_location="cpu" makes the load robust
    # when the checkpoint was saved on a device not available here (e.g. a GPU
    # checkpoint opened on a CPU-only node); accelerator.prepare() moves the
    # model to the right device afterwards.
    linear_layer_path = os.path.join(config.output_dir, "clip2llm_proj_bylast.pt")
    state_dict = torch.load(linear_layer_path, map_location="cpu")
    model.clip2llm_proj.load_state_dict(state_dict)

    return model, tokenizer

def run_test(config):
    """End-to-end evaluation: load the best model, run the test set, report
    metrics, and save per-sample results to JSON on the main process."""
    accelerator = Accelerator(mixed_precision="bf16")
    model, tokenizer = load_best_model(config)

    # Build only the test split's dataloader from the model's own processor.
    _, _, test_loader = get_dataloaders(config, model.processor, tokenizer)

    model, test_loader = accelerator.prepare(model, test_loader)
    results = test(model, test_loader, accelerator)

    # Reporting and persistence happen on the main process only.
    if accelerator.is_main_process:
        print("\n--- 测试结果 ---")
        for name, score in results["metrics"].items():
            print(f"{name}: {score:.4f}")

        # Persist detailed results (per-sample id, reference and predicted tags).
        output_path = os.path.join(config.output_dir, "test_resultsbylast.json")
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(results, f, ensure_ascii=False, indent=2)

        print(f"详细结果已保存到: {output_path}")

    return results

if __name__ == "__main__":
    torch.cuda.empty_cache()
    # Explicit encoding: the default is locale-dependent, which can break
    # parsing a UTF-8 YAML file (Chinese content) on non-UTF-8 systems.
    with open('Gongzhuang/config_GZ.yaml', 'r', encoding='utf-8') as f:
        config_data = yaml.safe_load(f)

    class Config:
        """Attribute-style view over the YAML config with script defaults."""
        def __init__(self, **entries):
            self.__dict__.update(entries)
            # Defaults applied when the YAML omits these keys.
            self.num_workers = entries.get('num_workers', 4)
            self.output_dir = entries.get('output_dir', 'output_model_6_6_128_3w_10/')
            self.gradient_accumulation_steps = entries.get('gradient_accumulation_steps', 16)
            self.warmup_steps = entries.get('warmup_steps', 120)

    config = Config(**config_data)
    os.makedirs(config.output_dir, exist_ok=True)
    print("\n[INFO] Starting final test...")
    run_test(config)
    print("\n[INFO] ------------END------------")