# --- 训练、验证、测试函数 (保持不变，已在上一轮修改中适配) ---
import torch
import os
import yaml
from tqdm import tqdm
from torch.optim import AdamW
from accelerate import Accelerator
# from model_demo3 import build_multimodal_model
# from model_chinese_clip import build_multimodal_model
from model_ChineseClip_Q3 import *
# from data.balancedDataset import *
from data.MultiModalTaggingDataset import *
from transformers import get_scheduler
import random
from safetensors.torch import load_file as load_safetensors
import gc
import torch.nn.functional as F
import logging
import re
def filter_qwen2_cache_warnings():
    """Suppress the Qwen2 "Caching is incompatible with gradient checkpointing"
    warning on the ``transformers`` logger and all of its registered children.

    A single shared filter instance is attached everywhere. (The previous
    recursive version created a new ``Qwen2Filter()`` per visit, and nested
    child loggers were visited multiple times, so duplicate filters piled up —
    ``Logger.addFilter`` only deduplicates identical instances.)
    """
    # Pattern matching the core text of the warning to silence.
    pattern = re.compile(r"Caching is incompatible with gradient checkpointing")

    class Qwen2Filter(logging.Filter):
        # Reject (return False for) records whose message contains the warning.
        def filter(self, record):
            return not pattern.search(record.getMessage())

    shared_filter = Qwen2Filter()
    # Root transformers logger first.
    logging.getLogger("transformers").addFilter(shared_filter)
    # Then every child logger currently known to the logging manager
    # (e.g. "transformers.models.qwen2"), each exactly once.
    for name in list(logging.Logger.manager.loggerDict):
        if name.startswith("transformers."):
            logging.getLogger(name).addFilter(shared_filter)

def get_dynamic_interval(step):
    """Return how many steps to wait between generated-tag supervision passes.

    Currently a constant: every step is eligible. The ``step`` argument is
    kept so a real schedule can be plugged in later without touching callers.
    """
    del step  # schedule is constant for now
    return 1

# Dynamic temperature schedule (temperature shrinks as training progresses).
def get_dynamic_temperature(base_temp, step):
    """Exponentially decay ``base_temp`` by 0.999 per step, floored at 0.07."""
    decayed = base_temp * 0.999 ** step
    return decayed if decayed > 0.07 else 0.07

# Dynamic similarity threshold (could be relaxed later in training to
# increase tag diversity).
def get_dynamic_threshold(step):
    """Return the similarity threshold used for tag filtering at ``step``.

    Both the early (< 2000 steps) and late phases currently resolve to the
    same value, so this is effectively a constant; the parameter remains so
    a step-dependent schedule can be reinstated without changing callers.
    """
    del step  # both phases use the same threshold at the moment
    return 0.38

def train_one_epoch(model, dataloader, optimizer, lr_scheduler, accelerator, temperature=0.15):
    """Run one training epoch and return averaged loss / accuracy metrics.

    Args:
        model: multimodal model exposing ``set_temperature``/``set_sim`` and a
            forward that returns a dict with ``'loss'`` (and, presumably,
            ``'gen_loss'``, ``'contrastive_loss'``, ``'acc_i2t'``,
            ``'acc_t2i'`` — TODO confirm against the model implementation).
        dataloader: yields batches with ``'images'``, ``'prompts'``, ``'tags'``.
        optimizer / lr_scheduler: stepped once per batch (Accelerate handles
            gradient accumulation inside ``accelerator.accumulate``).
        accelerator: HuggingFace Accelerate handle (backward, clipping, print).
        temperature: base contrastive temperature passed to the model.

    Returns:
        dict with ``avg_total_loss``, ``avg_gen_loss``, ``avg_contrastive_loss``,
        ``avg_i2t_acc``, ``avg_t2i_acc`` averaged over the epoch.
    """
    def _as_float(value):
        # Model outputs are tensors, but dict.get(key, 0) may return the int
        # default; the previous `.get(key, 0).item()` raised AttributeError
        # whenever the key was absent. Accept both tensors and plain numbers.
        return value.item() if hasattr(value, "item") else float(value)

    model.train()
    total_loss = 0
    gen_loss_sum = 0          # generation (LM) loss accumulator
    contrastive_loss_sum = 0  # contrastive loss accumulator
    i2t_acc_sum = 0           # image-to-text accuracy accumulator
    t2i_acc_sum = 0           # text-to-image accuracy accumulator
    step_count = 0            # number of processed batches
    torch.cuda.empty_cache()
    for step, batch in enumerate(tqdm(dataloader, desc="训练进度", disable=not accelerator.is_main_process)):
        images = batch["images"]
        prompts = batch["prompts"]
        tags = batch["tags"]

        current_interval = get_dynamic_interval(step)
        # Enable contrastive supervision on generated tags on a schedule;
        # step 0 is skipped so the model sees at least one plain step first.
        use_generated = (step % current_interval == 0) and (step > 0)
        model.set_temperature(temperature)

        sim = get_dynamic_threshold(step)
        model.set_sim(sim)

        with accelerator.accumulate(model):
            model_output = model(images, prompts, tags=tags, is_training=True, use_generated_tags=use_generated)
            loss = model_output['loss']
            accelerator.backward(loss)

            # Clip only when gradients are synchronized (end of accumulation).
            if accelerator.sync_gradients:
                accelerator.clip_grad_norm_(model.parameters(), max_norm=1)

            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

        # Accumulate per-batch metrics for the epoch averages.
        total_loss += loss.item()
        gen_loss_sum += _as_float(model_output.get('gen_loss', 0))
        contrastive_loss_sum += _as_float(model_output.get('contrastive_loss', 0))
        i2t_acc_sum += _as_float(model_output.get('acc_i2t', 0))
        t2i_acc_sum += _as_float(model_output.get('acc_t2i', 0))
        step_count += 1

        if step % 50 == 0:
            # Periodic detailed progress line on the main process.
            accelerator.print(f"[DEBUG] step {step}, loss = {loss.item():.4f}, "
                             f"gen_loss = {_as_float(model_output.get('gen_loss', 0)):.4f}, "
                             f"contrastive_loss = {_as_float(model_output.get('contrastive_loss', 0)):.4f}, "
                             f"acc_i2t = {_as_float(model_output.get('acc_i2t', 0)):.4f}, "
                             f"acc_t2i = {_as_float(model_output.get('acc_t2i', 0)):.4f}")

    return {
        "avg_total_loss": total_loss / step_count,
        "avg_gen_loss": gen_loss_sum / step_count,
        "avg_contrastive_loss": contrastive_loss_sum / step_count,
        "avg_i2t_acc": i2t_acc_sum / step_count,
        "avg_t2i_acc": t2i_acc_sum / step_count
    }

def evaluate(model, dataloader, accelerator):
    """Validate the model: loss, tag-level micro P/R/F1, and similarity stats.

    Per batch this runs two forward passes: one with ground-truth tags to get
    the validation loss, and one in generation mode (top_k=5) that returns the
    generated tags together with their similarity scores.

    Returns:
        (avg_loss, metrics) where metrics contains exact-match accuracy,
        micro precision/recall/F1 and three average-similarity statistics.
    """
    model.eval()
    total_loss = 0
    predictions = []          # generated tag strings (one comma-joined string per sample)
    references = []           # ground-truth tag strings
    filtered_sims_list = []   # similarities of tags that survived filtering (list per sample)
    all_sims_list = []        # similarities of all raw generated tags (list per sample)
    near_miss_tags_list = []  # tags filtered out but close to the threshold
    near_miss_sims_list = []  # their corresponding similarities

    # Print up to this many debug samples on the main process.
    num_debug_samples = 15
    printed_samples_count = 0 

    for batch in tqdm(dataloader, desc="验证", disable=not accelerator.is_main_process):
        images = batch["images"]
        prompts = batch["prompts"]
        tags = batch["tags"]

        # Fixed evaluation temperature / similarity threshold.
        model.set_temperature(0.07)
        model.set_sim(0.4)

        with torch.no_grad():
            # 1. Validation loss (teacher-forced pass with ground-truth tags).
            llm_output = model(images, prompts, tags=tags, is_training=False)
            loss = llm_output['loss']
            total_loss += loss.item()
            
            # 2. Generate tags and get their similarities (the generation-mode
            #    forward returns five parallel lists — see model implementation).
            generated_tags, filtered_sims, all_sims, near_miss_tags, near_miss_sims = model(
                images, prompts, is_training=False, top_k=5)

            # Normalize full-width commas and surrounding whitespace.
            generated_tags = [t.replace('，', ',').strip() for t in generated_tags]
            tags = [t.replace('，', ',').strip() for t in tags]

            # 3. Collect per-sample results.
            predictions.extend(generated_tags)
            references.extend(tags)
            filtered_sims_list.extend(filtered_sims)  # similarities of kept tags
            all_sims_list.extend(all_sims)            # similarities of all raw tags
            near_miss_tags_list.extend(near_miss_tags)  # near-threshold rejected tags
            near_miss_sims_list.extend(near_miss_sims)  # their similarities
            
            # 4. Debug printing (includes similarity values) on main process only.
            if accelerator.is_main_process and printed_samples_count < num_debug_samples:
                sample_in_batch_idx = random.randint(0, len(images) - 1)
                sample_pred = generated_tags[sample_in_batch_idx]
                sample_ref = tags[sample_in_batch_idx]
                sample_filtered_sims = filtered_sims[sample_in_batch_idx]
                sample_near_miss_tags = near_miss_tags[sample_in_batch_idx]
                sample_near_miss_sims = near_miss_sims[sample_in_batch_idx]
                
                print(f"\n--- 调试样本 {printed_samples_count + 1} ---")
                print(f"模型生成标签: {sample_pred}")
                print(f"过滤后标签的相似度: {[round(s, 4) for s in sample_filtered_sims]}")
                print(f"接近阈值但被过滤的标签: {sample_near_miss_tags}")
                print(f"对应相似度: {[round(s, 4) for s in sample_near_miss_sims]}")
                print(f"真实标签: {sample_ref}")
                printed_samples_count += 1
    
    # Set-based tag metrics over the whole validation set.
    avg_loss = total_loss / len(dataloader)
    total_tp, total_fp, total_fn = 0, 0, 0
    exact_match_count = 0
    
    for pred_str, ref_str in zip(predictions, references):
        pred_set = set(t.strip() for t in pred_str.split(',') if t.strip())
        ref_set = set(t.strip() for t in ref_str.split(',') if t.strip())
        
        total_tp += len(pred_set & ref_set)
        total_fp += len(pred_set - ref_set)
        total_fn += len(ref_set - pred_set)
        
        if pred_set == ref_set:
            exact_match_count += 1

    # Micro-averaged precision / recall / F1 and exact-match accuracy.
    micro_precision = total_tp / (total_tp + total_fp) if (total_tp + total_fp) > 0 else 0
    micro_recall = total_tp / (total_tp + total_fn) if (total_tp + total_fn) > 0 else 0
    micro_f1 = 2 * micro_precision * micro_recall / (micro_precision + micro_recall) if (micro_precision + micro_recall) > 0 else 0
    exact_match_accuracy = exact_match_count / len(predictions) if predictions else 0

    # ---------------- Similarity metrics ----------------
    # 1. Mean similarity of kept tags (relevance of what survived filtering).
    all_filtered_sims = [s for sims in filtered_sims_list for s in sims]  # flatten to 1-D
    avg_filtered_sim = sum(all_filtered_sims) / len(all_filtered_sims) if all_filtered_sims else 0.0

    # 2. Mean similarity of all raw tags (overall relevance, filtered ones included).
    all_original_sims = [s for sims in all_sims_list for s in sims]
    avg_original_sim = sum(all_original_sims) / len(all_original_sims) if all_original_sims else 0.0

    # 3. Mean similarity of generated tags that match a ground-truth tag.
    #    NOTE(review): assumes all_sims is index-aligned with the comma-split
    #    generated tags — confirm against the model's generation output.
    match_sims = []
    for pred_str, ref_str, all_sims in zip(predictions, references, all_sims_list):
        pred_tags = [t.strip() for t in pred_str.split(',') if t.strip()]
        ref_set = set(t.strip() for t in ref_str.split(',') if t.strip())
        # Indices of generated tags that appear in the reference set.
        for idx, tag in enumerate(pred_tags):
            if tag in ref_set and idx < len(all_sims):  # guard against length mismatch
                match_sims.append(all_sims[idx])
    avg_match_sim = sum(match_sims) / len(match_sims) if match_sims else 0.0

    # Bundle everything for the caller.
    return avg_loss, {
        "exact_match_accuracy": exact_match_accuracy,
        "micro_precision": micro_precision,
        "micro_recall": micro_recall,
        "micro_f1": micro_f1,
        # Similarity metrics
        "avg_filtered_similarity": avg_filtered_sim,       # mean similarity of kept tags
        "avg_original_similarity": avg_original_sim,       # mean similarity of all generated tags
        "avg_match_similarity": avg_match_sim              # mean similarity of reference-matching tags
    }




# --- Main entry point: config, setup, resume, training loop, checkpoints ---
if __name__ == "__main__":
    gc.collect()
    torch.cuda.empty_cache()
    with open('Gongzhuang/config_GZ.yaml', 'r') as f:
        config_data = yaml.safe_load(f)

    class Config:
        """Attribute-style view over the YAML config with safe defaults."""
        def __init__(self, **entries):
            # Expose every YAML entry as an attribute.
            self.__dict__.update(entries)
            # Defaults for keys config_GZ.yaml may omit.
            self.num_workers = entries.get('num_workers', 4)
            self.tag_weights_json = entries.get('tag_weights_json', 'augmented_train_weights.json')
            self.gradient_accumulation_steps = entries.get('gradient_accumulation_steps', 4)
            self.warmup_steps = entries.get('warmup_steps', 500)  # default: 500 warmup steps
            self.early_stopping_patience = entries.get('early_stopping_patience', 3)  # epochs without improvement
            self.early_stopping_metric = entries.get('early_stopping_metric', 'micro_f1')

    config = Config(**config_data)
    # Make sure the output directory exists.
    os.makedirs(config.output_dir, exist_ok=True)
    # Initialize Accelerate (bf16 mixed precision, tensorboard logging).
    accelerator = Accelerator(gradient_accumulation_steps=config.gradient_accumulation_steps, mixed_precision="bf16", log_with="tensorboard", project_dir=config.output_dir)

    # Build the model; processor and tokenizer come from the model instance.
    filter_qwen2_cache_warnings()
    model = build_multimodal_model(config)
    print("------------")
    processor = model.processor
    tokenizer = model.tokenizer
    # Data loaders.
    print("[INFO] Loading datas...")
    train_loader, val_loader, test_loader, _, _ = get_dataloaders(config, processor, tokenizer)

    # Optimizer and cosine LR schedule with warmup.
    print("------------")
    optimizer = AdamW(model.parameters(), lr=float(config.learning_rate), weight_decay=config.weight_decay)

    num_training_steps = config.max_epoch * (len(train_loader) // config.gradient_accumulation_steps)
    lr_scheduler = get_scheduler(
        name="cosine",
        optimizer=optimizer,
        num_warmup_steps=config.warmup_steps,
        num_training_steps=num_training_steps
    )

    # Let Accelerate wrap model, optimizer, loaders and scheduler.
    model, optimizer, train_loader, val_loader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_loader, val_loader, lr_scheduler
    )

    # Resume from checkpoint (model / optimizer / scheduler / epoch) if present.
    resume_checkpoint_path = os.path.join(config.output_dir, "last_checkpoint")
    start_epoch = 0
    if os.path.exists(resume_checkpoint_path):
        accelerator.print(f"[INFO] Resuming from checkpoint: {resume_checkpoint_path}")
        try:
            # Prefer model.safetensors when available.
            safetensors_path = os.path.join(resume_checkpoint_path, "model.safetensors")
            if os.path.exists(safetensors_path):
                model_state_dict = load_safetensors(safetensors_path)
                accelerator.print("[INFO] 从model.safetensors加载模型状态")
            else:
                # Fallback: pytorch_model.bin (if present).
                bin_path = os.path.join(resume_checkpoint_path, "pytorch_model.bin")
                if os.path.exists(bin_path):
                    model_state_dict = torch.load(bin_path, map_location=accelerator.device)
                    accelerator.print("[INFO] 从pytorch_model.bin加载模型状态")
                else:
                    raise FileNotFoundError("在检查点目录中未找到model.safetensors或pytorch_model.bin")
            # Drop quantization-specific keys (BitsAndBytes generates them dynamically).
            quant_related_keys = [k for k in model_state_dict.keys() if "quant" in k or "absmax" in k or "nested" in k]
            if quant_related_keys:
                accelerator.print(f"[WARNING] 过滤掉{len(quant_related_keys)}个量化相关键，例如：{quant_related_keys[:3]}")
                for k in quant_related_keys:
                    model_state_dict.pop(k, None)
            # Drop keys absent from the current model (e.g. a changed LoRA config).
            current_model_keys = set(model.state_dict().keys())
            unexpected_keys = [k for k in model_state_dict.keys() if k not in current_model_keys]
            if unexpected_keys:
                accelerator.print(f"[WARNING] 过滤掉{len(unexpected_keys)}个模型中不存在的键，例如：{unexpected_keys[:3]}")
                for k in unexpected_keys:
                    model_state_dict.pop(k, None)
            # Load the filtered state dict (strict=False for compatibility).
            model.load_state_dict(model_state_dict, strict=False)
            accelerator.print("[INFO] 模型状态字典加载完成（已过滤量化相关键）")
            # Optimizer state.
            optimizer_ckpt_path = os.path.join(resume_checkpoint_path, "optimizer.bin")
            if os.path.exists(optimizer_ckpt_path):
                optimizer_state_dict = torch.load(optimizer_ckpt_path, map_location=accelerator.device)
                optimizer.load_state_dict(optimizer_state_dict)
                accelerator.print("[INFO] 优化器状态加载完成")
            # LR scheduler state.
            if lr_scheduler is not None:
                scheduler_ckpt_path = os.path.join(resume_checkpoint_path, "scheduler.bin")
                if os.path.exists(scheduler_ckpt_path):
                    scheduler_state_dict = torch.load(scheduler_ckpt_path, map_location=accelerator.device)
                    lr_scheduler.load_state_dict(scheduler_state_dict)
                    accelerator.print("[INFO] 学习率调度器状态加载完成")
            # Restore the epoch counter.
            try:
                with open(os.path.join(resume_checkpoint_path, "epoch.txt"), "r") as f:
                    start_epoch = int(f.read().strip()) + 1
                accelerator.print(f"[INFO] 恢复到epoch {start_epoch}")
            except FileNotFoundError:
                accelerator.print("[WARNING] 未找到epoch.txt，从epoch 0开始")
        except Exception as e:
            accelerator.print(f"[ERROR] 加载检查点失败：{str(e)}")
            accelerator.print("[INFO] 从初始状态开始训练")
    else:
        accelerator.print("[INFO] 未找到检查点，从初始状态开始训练")

    accelerator.init_trackers("multimodal_tagging")
    # Training loop with early stopping and three checkpoint families
    # (best-by-F1, best-by-similarity, last).
    best_val_loss = float('inf')
    best_micro_f1 = -1.0
    best_avg_match_similarity = 0
    early_stop_counter = 0  # consecutive epochs without improvement
    print("---------------------------------")
    for epoch in range(start_epoch, config.max_epoch):

        train_loss = train_one_epoch(model, train_loader, optimizer, lr_scheduler, accelerator, temperature=config.temperature)

        avg_total_loss = train_loss['avg_total_loss']
        avg_gen_loss = train_loss['avg_gen_loss']
        avg_contrastive_loss = train_loss['avg_contrastive_loss']
        accelerator.print(f"Epoch {epoch+1}/{config.max_epoch} - avg_total_loss: {avg_total_loss:.4f} - avg_gen_loss: {avg_gen_loss:.4f} - avg_contrastive_loss: {avg_contrastive_loss:.4f}")
        print("---------------------------------")
        val_loss, metrics = evaluate(model, val_loader, accelerator)
        current_micro_f1 = metrics['micro_f1']
        current_avg_original_similarity = metrics['avg_original_similarity']
        current_avg_match_similarity = metrics['avg_match_similarity']
        accelerator.print(f"Epoch {epoch+1}/{config.max_epoch} - Val Loss: {val_loss:.4f} - Micro F1: {current_micro_f1:.4f} - avg_original_similarity: {current_avg_original_similarity:.4f} - avg_match_similarity: {current_avg_match_similarity:.4f}")

        # --- Early stopping ---
        is_improved = False
        if config.early_stopping_metric == 'micro_f1':
            # Monitor F1 (higher is better); best_micro_f1 is updated below
            # together with the best-by-F1 checkpoint.
            if current_micro_f1 > best_micro_f1:
                is_improved = True
        else:
            # Monitor validation loss (lower is better).
            if val_loss < best_val_loss:
                is_improved = True
                # BUGFIX: best_val_loss was never updated, so every epoch
                # compared against +inf and loss-based early stopping could
                # never trigger.
                best_val_loss = val_loss
        if is_improved:
            early_stop_counter = 0  # reset on improvement
        else:
            early_stop_counter += 1
            accelerator.print(f"[INFO] 早停计数器: {early_stop_counter}/{config.early_stopping_patience}")
            # Patience exhausted: stop training.
            if early_stop_counter >= config.early_stopping_patience:
                accelerator.print(f"[INFO] 早停触发：连续{config.early_stopping_patience}轮未提升，终止训练")
                break

        # Best-by-F1 checkpoint.
        if current_micro_f1 > best_micro_f1:
            best_micro_f1 = current_micro_f1
            accelerator.save_state(os.path.join(config.output_dir, "best_model_by_f1"))
            accelerator.print(f"Saving best model (by F1) with Micro F1: {best_micro_f1:.4f}")
            # LLM LoRA adapter.
            model.llm.save_pretrained(os.path.join(config.output_dir, "llm_lorabyF1/llm_lora_adapter"))
            # CLIP LoRA adapter (if CLIP was LoRA-fine-tuned).
            model.clip.save_pretrained(os.path.join(config.output_dir, "clip_lorabyF1/clip_lora_adapter"))
            linear_layer_path = os.path.join(config.output_dir, "clip2llm_proj_byF1.pt")
            torch.save(model.clip2llm_proj.state_dict(), linear_layer_path)

        # Best-by-match-similarity checkpoint.
        if current_avg_match_similarity > best_avg_match_similarity:
            best_avg_match_similarity = current_avg_match_similarity
            accelerator.save_state(os.path.join(config.output_dir, "best_model_by_sim"))
            accelerator.print(f"Saving best model (by sim) with sim: {best_avg_match_similarity:.4f}")
            # LLM LoRA adapter.
            model.llm.save_pretrained(os.path.join(config.output_dir, "llm_lorabysim/llm_lora_adapter"))
            # CLIP LoRA adapter (if CLIP was LoRA-fine-tuned).
            model.clip.save_pretrained(os.path.join(config.output_dir, "clip_lorabysim/clip_lora_adapter"))
            linear_layer_path = os.path.join(config.output_dir, "clip2llm_proj_bysim.pt")
            torch.save(model.clip2llm_proj.state_dict(), linear_layer_path)

        # Always refresh the "last" checkpoint (used for resuming) and the
        # last-epoch adapter exports.
        accelerator.save_state(os.path.join(config.output_dir, "last_checkpoint"))
        with open(os.path.join(config.output_dir, "last_checkpoint", "epoch.txt"), "w") as f:
            f.write(str(epoch))
        accelerator.save_state(os.path.join(config.output_dir, "best_model_by_last"))
        # LLM LoRA adapter.
        model.llm.save_pretrained(os.path.join(config.output_dir, "llm_lorabylast/llm_lora_adapter"))
        # CLIP LoRA adapter (if CLIP was LoRA-fine-tuned).
        model.clip.save_pretrained(os.path.join(config.output_dir, "clip_lorabylast/clip_lora_adapter"))
        linear_layer_path = os.path.join(config.output_dir, "clip2llm_proj_bylast.pt")
        torch.save(model.clip2llm_proj.state_dict(), linear_layer_path)

    # BUGFIX: end_training() used to be called inside the epoch loop, closing
    # the trackers after the first epoch; it belongs after training finishes.
    accelerator.end_training()

    # Final test stage (not implemented yet).
    print("\n[INFO] Starting final test...")

    print("\n[INFO] ------------END------------")
