"""
train_aligner.py

本脚本用于训练视觉→语言映射头，采用疾病+分期判断的问题构造方式，
使得模型能通过眼底图像对疾病类型和分期进行准确映射。
同时支持 Ascend (华为昇腾芯片) 与 GPU 环境，并增加调试回调保存模型生成结果。

注意：请参考之前讨论的内容，本代码中构造数据时采用了如下思路：
    – 对于异常眼底（疾病不为 normal），采用三种情况：
        1. 完全正确：问题与答案均为真实诊断（回答“是”）
        2. 疾病正确但分期错误：问题中给出错误的分期，答案说明“疾病类型正确，但更接近 XXX”
        3. 疾病错误：问题中给出其他疾病，答案为“否”
    – 对于正常眼底，仅随机选两种情况：
        A. 询问是否为“正常眼底”，答案“是”
        B. 询问是否为某种异常疾病，答案“否”

使用方法：
    python train_aligner.py --data_path /path/to/data_dir --pretrained_model ./pretrained/eyekowner --output_dir ./experiments/output

作者：zym1105
日期：2025-4-16
"""

import sklearn  # 避免共享库冲突，需最先导入
import argparse
import logging
import os
import random
import json
from functools import partial
from typing import Dict, List, Tuple
from collections import defaultdict
import numpy as np
import torch
from torch.optim import AdamW
from torch.utils.data import Dataset, random_split
from transformers import Trainer, TrainingArguments, TrainerCallback, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, EvalPrediction
from transformers.trainer_utils import EvalLoopOutput
from PIL import Image
from collections import defaultdict
from utils.aligner_training.promt_maker import PromptConstructor

# 导入我们预训练模型和processor
from janus.models import MultiModalityCausalLM, VLChatProcessor, model_name_to_cls
from custom_janus import EyeKnowner
# Global logging configuration
# Create the log directory
log_dir = './experiments/train_aligner'
os.makedirs(log_dir, exist_ok=True)
log_file_path = os.path.join(log_dir, 'report_log.txt')

# Clear any existing handlers (prevents duplicate output on re-initialization)
root_logger = logging.getLogger()
if root_logger.hasHandlers():
    root_logger.handlers.clear()

# Console handler (shows INFO and above only)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter('%(message)s'))

# File handler (records DEBUG and above)
file_handler = logging.FileHandler(log_file_path, mode='w', encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
    '%(asctime)s - %(levelname)s - %(message)s'))

# Configure the root logger with both handlers
logging.basicConfig(level=logging.DEBUG, handlers=[
                    console_handler, file_handler])

# Module-level logger
logger = logging.getLogger(__name__)

###########################################
# 参数解析与随机种子设置
###########################################


def parse_args():
    """Build and parse the command-line arguments for aligner training.

    Returns:
        argparse.Namespace holding data/model paths and training
        hyper-parameters.
    """
    parser = argparse.ArgumentParser(
        description="Train Vision-Language aligner for Disease & Stage Classification"
    )
    # Plain string options as (flag, default, help) triples.
    string_options = [
        ("--data_path", "/mnt/c/DocumentWorkSpace/public_processes/APTOS/",
         "包含images和annotations.json的路径"),
        ("--pretrained_model", "./pretrained/eyekowner",
         "预训练模型目录"),
        ("--output_dir", "./experiments/output",
         "训练输出模型保存路径"),
    ]
    for flag, default_value, help_text in string_options:
        parser.add_argument(flag, type=str, default=default_value,
                            help=help_text)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument(
        "--server", "-s", action='store_true', help='在Ascend服务器端运行')
    parser.add_argument("--debug_dir", type=str, default="./experiments/train_aligner",
                        help="调试输出目录，用于保存每1k步的生成结果")
    return parser.parse_args()


def set_seed(seed):
    """Seed the python, numpy and torch RNGs so results are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # CUDA has its own per-device generators; seed them all when present.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

###########################################
# Ascend NPU设备信息打印
###########################################


def print_device_info():
    """Print Ascend NPU availability; fall back to a CPU/GPU notice."""
    try:
        import torch_npu
    except ImportError:
        torch_npu = None

    # Guard clause: no torch_npu module or no usable device.
    if torch_npu is None or not torch_npu.npu.is_available():
        print("Ascend NPU 不可用，使用 CPU 或 GPU。")
        return

    num_npus = torch_npu.npu.device_count()
    print(f"Ascend NPU 可用, 数量: {num_npus}")
    for i in range(num_npus):
        print(f"NPU {i}: {torch_npu.npu.device(i)}")
    print("使用多个 NPU 进行训练。" if num_npus > 1 else "仅使用单个 NPU。")

###########################################
# 数据集构建：读取 annotations.json 并构造问题和答案
###########################################
class DiseaseMappingDataset(Dataset):
    """
    Custom dataset used to train the visual mapping head.

    Reads annotations from annotations.json; every record contains:
        - image_path: the image file path (completed by joining the data root)
        - diagnosis: the disease diagnosis (English label, e.g. "normal",
          "mild diabetic retinopathy")
    A question/answer pair is constructed from the diagnosis:
        - Normal fundus: randomly ask whether it is a "normal fundus"
          or a wrong disease (answer "no")
        - Abnormal fundus: randomly pick one of
            1. Disease type and stage both correct -> answer "yes"
            2. Disease type correct but stage wrong -> answer "disease type
               is correct, but closer to XX"
            3. Disease type wrong -> answer "no"
    """

    def __init__(self, data_path,
                 disease_mapping_file='./configs/public_dataset_aligner.json',
                 disease_stages_mapping_file='./configs/diease_stage.json',
                 log_dir='./experiments/train_aligner',
                 bos="<｜begin▁of▁sentence｜>",
                 eos="<｜end▁of▁sentence｜>"):
        """
        Initialize the dataset.

        :param data_path: root directory of the data
        :param disease_mapping_file: path of the English->Chinese disease map
        :param disease_stages_mapping_file: path of the disease stage map
        :param log_dir: directory where logs and data samples are written
        :param bos: begin-of-sentence token string
        :param eos: end-of-sentence token string
        """
        self.data = []
        self.bos_token = bos
        self.eos_token = eos
        self.data_path = data_path
        self.disease_mapping_file = disease_mapping_file
        self.disease_stages_mapping_file = disease_stages_mapping_file
        # Validation counters filled in by _check_and_prepare().
        self.diagnosis_check_stats = {
            "missing_field": 0,
            "empty_text": 0,
            "un_processed": defaultdict(int),
            "unmapped": defaultdict(int)
        }

        # Load the disease mapping and the stage mapping.
        raw_map = self._load_json(self.disease_mapping_file)
        self.disease_map: Dict[str, List[str]] = raw_map

        raw_stage = self._load_json(self.disease_stages_mapping_file)
        # Chinese name -> unified stage key
        self.stage_map: Dict[str, str] = raw_stage["diease_map"]
        self.stage_group: Dict[str, List[str]
                               ] = raw_stage["diease_stage"]  # stage key -> Chinese name list

        # Set up the log directory and ensure it exists.
        if (not torch.distributed.is_initialized()) or torch.distributed.get_rank() == 0:  # main process only
            # NOTE(review): makedirs targets the PARENT of log_dir, not log_dir
            # itself — presumably log_dir is created elsewhere; confirm.
            os.makedirs(os.path.dirname(log_dir), exist_ok=True)
            self.report_log_path = os.path.join(log_dir, 'report_log.txt')
            self.data_sample_dir = os.path.join(log_dir, 'data_sample')
            os.makedirs(self.data_sample_dir, exist_ok=True)  # make sure the sample dir exists

        # Load the data (single-directory or multi-subdirectory layout).
        self._load_data()

        # Validate and preprocess the records.
        self._check_and_prepare()
        logger.info(f"共加载有效记录 {len(self.data)} 条。")
        self.all_cn_diseases = [
            cn for cn_list in self.disease_map.values() for cn in cn_list
        ]
        # Initialize the prompt constructor.
        self.prompt_constructor = PromptConstructor(
            all_cn_diseases=self.all_cn_diseases,
            stage_map=self.stage_map,
            stage_group=self.stage_group
        )
        logger.info(f"共加载有效记录种类 {len(self.all_cn_diseases)} 种。")
        # Randomly pick 3 samples and dump them for inspection.
        if (not torch.distributed.is_initialized()) or torch.distributed.get_rank() == 0:  # samples written on main process only
            self._save_data_samples()

    def _load_json(self, file_path):
        """
        Load a JSON file.
        :param file_path: path of the JSON file
        :return: parsed content (as a dict)
        """
        with open(file_path, 'r', encoding="utf-8") as f:
            return json.load(f)

    def _load_data(self):
        """
        Load the annotation records and complete each record's image_path.
        """
        if os.path.isdir(os.path.join(self.data_path, "images")):
            # Single-dataset layout: data_path/{images, annotations.json}.
            annotations_file = os.path.join(self.data_path, "annotations.json")
            with open(annotations_file, "r", encoding="utf-8") as f:
                self.annotations = json.load(f)
            for key, val in self.annotations.items():
                img_path = val['image_path']
                self.annotations[key]["image_path"] = os.path.join(
                    self.data_path, img_path)
        else:
            # Multi-dataset layout: every sub-directory carries its own
            # annotations.json; merge them all into one dict.
            all_annotations = {}
            for subdir in os.listdir(self.data_path):
                subdir_path = os.path.join(self.data_path, subdir)
                if os.path.isdir(subdir_path):
                    ann_file = os.path.join(subdir_path, "annotations.json")
                    if os.path.exists(ann_file):
                        with open(ann_file, "r", encoding="utf-8") as f:
                            sub_annotations = json.load(f)
                        for key, val in sub_annotations.items():
                            val["image_path"] = os.path.join(
                                subdir_path, val["image_path"])
                        all_annotations.update(sub_annotations)
            self.annotations = all_annotations

    def _check_and_prepare(self):
        """
        Validate each record's diagnosis field and map the English diagnosis
        to a list of Chinese labels.

        Handles multiple diagnoses separated by commas and compounds joined
        with "and"; records whose mappings are all empty are skipped.

        Statistics accumulate in self.diagnosis_check_stats:
          - missing_field: records missing the diagnosis.text field
          - empty_text: records whose diagnosis.text is empty/whitespace
          - unmapped: unmappable English labels with occurrence counts

        Valid records are appended to self.data with the Chinese label list
        stored in record["diagnosis"]["text_list"] for __getitem__.
        """
        for key, record in self.annotations.items():
            # 1. Pull the raw English diagnosis text.
            diagnosis = record.get("diagnosis", {}).get("text", None)

            # 2. Skip (and count) missing or empty fields.
            if diagnosis is None:
                self.diagnosis_check_stats["missing_field"] += 1
                continue
            if not diagnosis.strip():
                self.diagnosis_check_stats["empty_text"] += 1
                continue

            # 3. Split comma-separated multi-diagnoses; trim and lowercase.
            diseases = [d.strip().lower()
                        for d in diagnosis.split(",") if d.strip()]
            if not diseases:
                self.diagnosis_check_stats["empty_text"] += 1
                continue

            # 4. Further split compound diagnoses joined with "and".
            all_diseases = []
            for d in diseases:
                if " and " in d:
                    # Contains "and": split on it.
                    parts = [part.strip()
                             for part in d.split("and") if part.strip()]
                    all_diseases.extend(parts)
                else:
                    all_diseases.append(d)

            # 5. Map each English label to its Chinese term list (may hold
            #    several Chinese terms).
            mapped_diseases: List[str] = []
            for eng in all_diseases:
                cn_list = self.disease_map.get(eng, None)
                if cn_list is None:
                    # No mapping at all: count it.
                    self.diagnosis_check_stats["unmapped"][eng] += 1
                elif len(cn_list) == 0:
                    self.diagnosis_check_stats['un_processed'][eng] += 1
                else:
                    # Append every Chinese term to the final list.
                    mapped_diseases.extend(cn_list)

            # 6. Skip the record entirely when nothing could be mapped.
            if not mapped_diseases:
                continue

            # 7. Optional: de-duplicate while preserving order.
            unique_mapped = list(dict.fromkeys(mapped_diseases))

            # 8. Store the Chinese label list on the record for __getitem__.
            record["diagnosis"]["text_list"] = unique_mapped
            self.data.append(record)

        if (not torch.distributed.is_initialized()) or torch.distributed.get_rank() == 0:  # log on main process only
            # 9. Log the validation statistics.
            logger.warning(
                f"[检查报告] 缺失 diagnosis.text 字段：{self.diagnosis_check_stats['missing_field']} 条")
            logger.warning(
                f"[检查报告] diagnosis.text 为空或无有效疾病：{self.diagnosis_check_stats['empty_text']} 条")
            if self.diagnosis_check_stats["unmapped"]:
                unmapped_sorted = sorted(
                    self.diagnosis_check_stats["unmapped"].items(),
                    key=lambda x: x[1],
                    reverse=True
                )
                logger.warning(
                    f"[检查报告] 无法映射的英文标签共 {len(unmapped_sorted)} 种（按出现频次排序）：")
                for eng, cnt in unmapped_sorted:
                    logger.warning(f"  - '{eng}'：{cnt} 次")
            if self.diagnosis_check_stats["un_processed"]:  # the rest goes to the DEBUG file handler only
                unprocessed_sorted = sorted(
                    self.diagnosis_check_stats["un_processed"].items(),
                    key=lambda x: x[1],
                    reverse=True
                )
                logger.debug(
                    f"[检查报告] 故意留空处理的共 {len(unprocessed_sorted)} 种（按出现频次排序）：")
                for eng, cnt in unprocessed_sorted:
                    logger.debug(f"  - '{eng}'：{cnt} 次")
            logger.info(f"一共加载数据{len(self.data)}条")

    def _save_data_samples(self):
        """
        Randomly pick 3 samples and save the image plus its QA pair.

        NOTE(review): random.sample raises ValueError when fewer than 3
        records were loaded — confirm the dataset is always large enough.
        """
        sample_indices = random.sample(range(len(self.data)), 3)
        for idx in sample_indices:
            sample = self.data[idx]
            img_path = sample.get("image_path", "")
            diagnosis = random.choice(sample.get(
                "diagnosis", {}).get("text_list", ["normal"]))

            # Construct the question and answer.
            question, answer = self.prompt_constructor.construct_prompt(diagnosis)

            # Load the image.
            image = Image.open(img_path).convert("RGB")

            # Save the image alongside the question/answer.
            img_filename = os.path.join(
                self.data_sample_dir, f"sample_{idx}.png")
            image.save(img_filename)

            # Write the question and answer to a text file.
            qa_filename = os.path.join(
                self.data_sample_dir, f"sample_{idx}_qa.txt")
            with open(qa_filename, 'w', encoding="utf-8") as f:
                f.write(f"Question: {question}\n")
                f.write(f"Answer: {answer}\n")

    def __len__(self):
        return len(self.data)


    def __getitem__(self, idx):
        """Return one training item: PIL image plus a generated QA pair."""
        record = self.data[idx]
        img_path = record.get("image_path", "")
        text_list = record.get("diagnosis", {}).get("text_list", ["normal"])

        # Randomly pick one disease as the primary diagnosis.
        diagnosis = random.choice(text_list)

        image = Image.open(img_path).convert("RGB")
        question, answer = self.prompt_constructor.construct_prompt(diagnosis)

        return {"image": image, "question": question, "answer": answer}

###########################################
# 自定义 Trainer：计算损失（交叉熵）及调试回调
###########################################


class CustomTrainer(Trainer):
    """Trainer with masked cross-entropy loss, periodic prediction dumps,
    and a custom evaluation loop computing token-level metrics."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Accuracy log file lives inside the configured output dir.
        self.acc_log_path = os.path.join(self.args.output_dir, "train_acc.log")
        # Optional: truncate any stale log on startup.
        with open(self.acc_log_path, "w") as f:
            f.write("step,accuracy\n")

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        """Cross-entropy over non-masked (label != -100) positions.

        Every 50 global steps the first sample's decoded prediction is
        dumped to ./experiments/debug/train_aligner/<step>.txt.
        """
        # Distributed synchronization
        if torch.distributed.is_initialized():
            torch.distributed.barrier()

        # Pop labels and drop non-model input keys
        labels = inputs.pop("labels")
        inputs.pop('sft_format', None)

        # Forward pass
        outputs = model(**inputs)
        logits = outputs.get("logits")                   # (B, L, V)

        # 1) Per-token loss, ignoring -100
        loss_fct = torch.nn.CrossEntropyLoss(
            reduction="none", ignore_index=-100)
        loss_raw = loss_fct(
            logits.view(-1, logits.size(-1)),             # (B*L, V)
            labels.view(-1)                                # (B*L,)
        )

        # Mask of positions that carry real labels
        valid_mask = labels.view(-1) != -100              # (B*L,)

        # Mean loss over valid positions
        loss = loss_raw[valid_mask].mean()
        if self.state.global_step % 50 == 0 :
            idx = 0  # only look at the first sample
            with torch.no_grad():
                pred_ids = torch.argmax(logits, dim=-1)  # (B, L)

                label_seq = labels[idx]
                pred_seq = pred_ids[idx]
                mask = label_seq != -100

                # === predictions/labels at valid positions (the part used by the loss) ===
                label_ids_valid = label_seq[mask].tolist()
                pred_ids_valid = pred_seq[mask].tolist()
                label_text = self.tokenizer.decode(label_ids_valid, skip_special_tokens=False)
                pred_text = self.tokenizer.decode(pred_ids_valid, skip_special_tokens=False)

                # === full predictions and labels (including masked positions) ===
                full_pred_ids = pred_seq.tolist()
                full_label_ids = label_seq.tolist()
                full_pred_text = self.tokenizer.decode(full_pred_ids, skip_special_tokens=False)
                # full_label_text = self.tokenizer.decode(full_label_ids, skip_special_tokens=False)

                # === write everything to a step-numbered file ===
                out_dir = "./experiments/debug/train_aligner"
                os.makedirs(out_dir, exist_ok=True)
                out_path = os.path.join(out_dir, f"{self.state.global_step}.txt")

                with open(out_path, "w", encoding="utf-8") as f:
                    f.write(f"[Step]: {self.state.global_step}\n")
                    f.write(f"[Logits shape]: {logits.shape}\n")
                    f.write(f"[Pred_ids shape]: {pred_ids.shape}\n\n")

                    f.write("[✅ Decoded prediction (only valid label positions)]:\n")
                    f.write(pred_text + "\n\n")
                    f.write("[🎯 Ground-truth label (only valid positions)]:\n")
                    f.write(label_text + "\n\n")

                    f.write("[🧾 FULL prediction (all positions)]:\n")
                    f.write(full_pred_text + "\n\n")
                    # f.write("[📚 FULL label (all positions)]:\n")
                    # f.write(full_label_text + "\n")
        return (loss, logits) if return_outputs else loss
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        """Single eval step.

        Returns a custom shape consumed only by evaluation_loop below:
        ((loss, flat_valid_preds, flat_valid_labels), full_pred_ids, full_labels).
        """
        inputs = self._prepare_inputs(inputs)
        labels = inputs.get("labels")

        with torch.no_grad():
            loss, outputs = self.compute_loss(
                model, inputs, return_outputs=True)
            logits = outputs if isinstance(
                outputs, torch.Tensor) else outputs.get("logits")

        # Move tensors to the CPU
        logits_cpu = logits.detach().cpu()
        labels_cpu = labels.detach().cpu()

        if prediction_loss_only:
            return (loss.detach(), None, None)

        # Keep only valid positions (label != -100)
        valid_mask = labels_cpu != -100
        pred_ids = logits_cpu.argmax(dim=-1)

        # Flatten to 1D predictions/labels at the valid positions
        pred_flat = pred_ids[valid_mask]
        label_flat = labels_cpu[valid_mask]

        return (loss.detach(), pred_flat, label_flat), pred_ids, labels_cpu

    def evaluation_loop(self, dataloader, description, prediction_loss_only=None, ignore_keys=None, metric_key_prefix="eval"):
        """Custom eval loop: averages loss, computes token metrics over all
        valid positions, and dumps a few decoded samples for inspection."""
        losses = []
        all_preds = []
        all_labels = []

        debug_preds = []
        debug_labels = []

        for step, inputs in enumerate(dataloader):
            (loss, pred_flat, label_flat), pred_ids_full, labels_full = self.prediction_step(
                self.model, inputs, False, ignore_keys=ignore_keys
            )
            losses.append(loss.item())
            all_preds.append(pred_flat)
            all_labels.append(label_flat)

            if len(debug_preds) < 10:
                debug_preds.append(pred_ids_full)
                debug_labels.append(labels_full)

        loss_avg = np.mean(losses)
        metrics = {f"{metric_key_prefix}_loss": loss_avg}

        # Concatenate all valid predictions/labels for the real metrics
        all_preds_flat = torch.cat(all_preds).numpy()
        all_labels_flat = torch.cat(all_labels).numpy()

        metrics.update(self.compute_metrics(
            EvalPrediction(predictions=all_preds_flat,
                           label_ids=all_labels_flat),
            loss=loss_avg
        ))

        # Dump the first few samples
        if debug_preds:
            debug_preds = torch.cat(debug_preds, dim=0).numpy()
            debug_labels = torch.cat(debug_labels, dim=0).numpy()
            self._debug_eval_samples(
                debug_preds, debug_labels, prefix=metric_key_prefix)

        return EvalLoopOutput(
            predictions=None,
            label_ids=None,
            metrics=metrics,
            num_samples=len(dataloader.dataset)
        )

    def _debug_eval_samples(self, preds, labels, prefix="eval", eval_debug_dir='./experiments/train_aligner/eval'):
        """Decode up to 3 eval samples (valid positions only) to a text file."""
        tokenizer = self.tokenizer
        out_dir = os.path.join(eval_debug_dir, f"{prefix}_samples")
        os.makedirs(out_dir, exist_ok=True)

        num_samples = min(3, preds.shape[0])

        with open(os.path.join(out_dir, f"{prefix}_step{self.state.global_step}.txt"), "w") as f:
            for idx in range(num_samples):
                pred_text = tokenizer.decode(
                    preds[idx][labels[idx] != -100], skip_special_tokens=False)
                label_text = tokenizer.decode(
                    labels[idx][labels[idx] != -100], skip_special_tokens=False)

                f.write("=" * 40 + "\n")
                f.write(f"✅ Sample [{idx}]:\n")
                f.write(f"🎯 Label:\n{label_text}\n\n")
                f.write(f"🤖 Prediction:\n{pred_text}\n\n")

    def save_model(self, output_dir=None, _internal_call=False):
        """Save as usual, then prune old checkpoints per save_total_limit."""
        # Fall back to the output_dir configured in training_args
        output_dir = output_dir if output_dir is not None else self.args.output_dir

        # Regular save first
        super().save_model(output_dir, _internal_call)

        # Then rotate checkpoints
        # _rotate_checkpoints takes use_mtime and output_dir
        self._rotate_checkpoints(use_mtime=True, output_dir=output_dir)


def compute_metrics(eval_preds: "EvalPrediction", loss=None):
    """Compute evaluation metrics from flattened token predictions.

    Args:
        eval_preds: object exposing ``predictions`` and ``label_ids`` as 1-D
            numpy integer arrays of equal length; positions whose label is
            -100 are ignored, mirroring the training loss mask.
        loss: optional mean eval loss; when provided, perplexity = exp(loss).

    Returns:
        dict that may contain "perplexity" and token-level "accuracy".
    """
    metrics = {}

    # Perplexity derives directly from the averaged cross-entropy loss.
    if loss is not None:
        metrics["perplexity"] = float(np.exp(loss))

    # Token-level accuracy over the valid (non -100) label positions.
    if eval_preds.predictions is not None and eval_preds.label_ids is not None:
        pred_tensor = torch.from_numpy(eval_preds.predictions).long()
        label_tensor = torch.from_numpy(eval_preds.label_ids).long()
        valid_mask = label_tensor != -100
        valid_total = valid_mask.sum().item()
        # BUGFIX: guard against an all-masked/empty batch which previously
        # raised ZeroDivisionError.
        if valid_total > 0:
            correct = (pred_tensor == label_tensor) & valid_mask
            metrics["accuracy"] = correct.sum().item() / valid_total
        else:
            metrics["accuracy"] = 0.0

    return metrics

###########################################
# 自定义调试回调
###########################################
class DebugCallback(TrainerCallback):
    """
    Debug callback: every `every_steps` steps, run generation with the
    current model on a random sample and save the question, reference
    answer, raw generated token ids, their length, and the decoded text
    into debug_dir.
    """

    def __init__(self, processor, dataset, debug_dir, every_steps=1000):
        # processor: VLChat-style processor (tokenizer + image preprocessing)
        # dataset: items shaped {"image", "question", "answer"}
        self.processor = processor
        self.dataset = dataset
        self.debug_dir = debug_dir
        self.every_steps = every_steps
        os.makedirs(debug_dir, exist_ok=True)

    def prepare_inputs(self, processor, model, image, question):
        """Build a single-image inference batch for `question` on model.device."""
        conversation = [
            {"role": "<|User|>", "content": "<image_placeholder>\n" + question},
            {"role": "<|Assistant|>", "content": ""},
        ]
        proc = processor(
            conversations=conversation,
            images=[image],
            return_tensors="pt",
            force_batchify=True,
            padding_cst_length=False
        ).to(model.device)
        # NOTE: adjust the dtypes below to match your model's requirements.
        proc = dict(proc)
        proc["pixel_values"] = proc["pixel_values"].to(dtype=torch.float16)
        for k in ["images_seq_mask", "images_emb_mask"]:
            if k in proc:
                proc[k] = proc[k].to(dtype=torch.bool)
        return proc

    def on_step_end(self, args, state, control, **kwargs):
        """Greedy-generate an answer for one random sample and dump it."""
        # Only rank 0 produces output
        if torch.distributed.is_initialized() and torch.distributed.get_rank() != 0:
            return

        if state.global_step % self.every_steps != 0:
            return

        model = kwargs["model"]
        # Pick one random sample
        sample = random.choice(self.dataset)
        inputs = self.prepare_inputs(
            self.processor, model, sample["image"], sample["question"]
        )
        # Generation
        inputs_embeds = kwargs["model"].prepare_inputs_embeds(**inputs)

        with torch.inference_mode(), torch.cuda.amp.autocast():
            outputs = kwargs["model"].language_model.generate(
                inputs_embeds=inputs_embeds,
                attention_mask=inputs["attention_mask"],
                pad_token_id=self.processor.tokenizer.eos_token_id,
                bos_token_id=self.processor.tokenizer.bos_token_id,
                eos_token_id=self.processor.tokenizer.eos_token_id,
                max_new_tokens=512,
                do_sample=False,
                use_cache=True,
            )

        # Raw generated token ids
        gen_ids = outputs[0]

        gen_ids = gen_ids.tolist()
        gen_len = len(gen_ids)

        # Decode
        gen_text = self.processor.tokenizer.decode(
            gen_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True
        )

        # Write the debug file
        debug_file = os.path.join(
            self.debug_dir, f"step_{state.global_step}.txt")
        with open(debug_file, "w", encoding="utf-8") as f:
            f.write("问题:\n" + sample["question"] + "\n\n")
            f.write("参考答案:\n" + sample["answer"] + "\n\n")
            f.write(f"生成长度: {gen_len}\n")
            f.write(f"生成 Token IDs: {gen_ids}\n")
            f.write("模型生成 Raw Text (含特殊 token):\n" + gen_text + "\n")
        logger.info(
            f"[DebugCallback] step {state.global_step} 输出保存至 {debug_file}")


###########################################
# 数据collate函数
###########################################
def collate_fn(batch, processor, debug=False, debug_file="./experiments/debug.txt"):
    """Collate {image, question, answer} samples into batched model inputs.

    The processor tokenizes each conversation; labels are a copy of the
    input ids with everything up to (and including) the image placeholder
    masked to -100, so the loss only covers the answer tokens.

    Args:
        batch: list of dataset items holding "image", "question", "answer".
        processor: VLChat-style processor with __call__, batchify and a
            tokenizer attribute (project type).
        debug: when True, dump the first sample's tokenization to
            `debug_file` and raise to stop after one batch.
        debug_file: path for the debug dump.

    Returns:
        dict of batched tensors, including the masked "labels".

    Raises:
        ValueError: if a sample contains no image placeholder token sequence.
        RuntimeError: deliberately, after the dump, when debug=True.
    """
    prepare_list = []

    # Token-id sequence of "<image_placeholder>\n" — must match the
    # processor's tokenizer; used to locate where the image region ends.
    image_placeholder_ids = [100580, 185]

    for item in batch:
        image, question, answer = item["image"], item["question"], item["answer"]

        prepare = processor(
            conversations=[
                {"role": "<|User|>", "content": "<image_placeholder>\n" + question},
                {"role": "<|Assistant|>", "content": answer}
            ],
            images=[image],
            force_batchify=False
        )
        prepare_list.append(prepare)

    # Batchify to a constant padded length.
    batched_output = processor.batchify(prepare_list, cst_length=350)
    encoded = dict(batched_output)
    input_ids = encoded["input_ids"]
    labels = input_ids.clone()

    # Per-sample placeholder end positions, kept for the debug dump.
    img_end_positions = []
    for i in range(input_ids.size(0)):
        ids_list = input_ids[i].tolist()
        img_end_idx = None

        # Sliding-window match for the placeholder ids; deliberately keep the
        # LAST match so multiple placeholders still mask everything before
        # the final image.
        for j in range(len(ids_list) - len(image_placeholder_ids) + 1):
            if ids_list[j: j + len(image_placeholder_ids)] == image_placeholder_ids:
                img_end_idx = j + len(image_placeholder_ids)

        if img_end_idx is None:
            raise ValueError(f"第{i}条数据中未找到 '<image_placeholder>\\n' 对应的token序列")

        img_end_positions.append(img_end_idx)
        labels[i, :img_end_idx] = -100  # mask the image tokens and all text before them

    encoded["labels"] = labels

    # === Debug dump of the first sample === #
    if debug:
        debug_idx = 0  # explicitly use the first sample
        debug_ids = input_ids[debug_idx].tolist()
        debug_labels = labels[debug_idx]
        kept_ids = [debug_ids[k]
                    for k in range(len(debug_ids)) if debug_labels[k] != -100]
        kept_txt = processor.tokenizer.decode(
            kept_ids, skip_special_tokens=False)
        toks = processor.tokenizer.convert_ids_to_tokens(debug_ids)
        full_text = processor.tokenizer.decode(
            debug_ids, skip_special_tokens=False)

        os.makedirs(os.path.dirname(debug_file), exist_ok=True)
        with open(debug_file, "w", encoding="utf-8") as f:
            f.write("=== FULL DECODED INPUT (含特殊 token) ===\n")
            f.write(full_text + "\n\n")
            f.write("=== INPUT IDs ===\n")
            f.write(f"{debug_ids}\n\n")
            f.write("=== TOKENS ===\n")
            f.write(f"{toks}\n\n")
            f.write("=== Debug collate_fn ===\n")
            f.write(f"Question: {batch[debug_idx]['question']}\n")
            f.write(f"Answer (gt): {batch[debug_idx]['answer']}\n")
            # BUGFIX: previously this reported the LAST sample's placeholder
            # end position; report the dumped sample's own position instead.
            f.write(f"<image_placeholder> 结束位置 idx: {img_end_positions[debug_idx]}\n")
            f.write(f"Kept IDs (unmasked): {kept_ids}\n")
            f.write(f'Kept decoded (含特殊 token): "{kept_txt}"\n')
        raise RuntimeError("Debug break after first batch")
    return encoded

###########################################
# 主流程：加载数据、模型、冻结参数、训练并保存模型
###########################################


def prepare_model_for_training(
    pretrained_model_path,
    state_dict_path="pretrained/Vilref/ViLReF_ViT.pt",
    state_dict_key="visual",
    vit_name="vit_b",
    aligner_input_dim=768,
    language_layers_to_unfreeze=2,
    aligner_lr=1e-5,
    language_lr=1e-6,
    dtype=torch.float16,
    logger=None
):
    """Load the pretrained model, swap in the ViLReF vision tower and a
    freshly initialized aligner, freeze everything except the aligner and
    the first `language_layers_to_unfreeze` language layers, and build an
    optimizer with per-group learning rates.

    Returns:
        (model, processor, optimizer) ready to hand to the Trainer.
    """
    # Processor and base model.
    processor = VLChatProcessor.from_pretrained(pretrained_model_path)
    model = EyeKnowner.from_pretrained(pretrained_model_path, torch_dtype=dtype)

    # Swap the vision encoder for a ViLReF ViT described by config params.
    model.config.vision_config = {
        "cls": "VilrefEncoder",
        "model_type": "vision",
        "params": {
            "state_dict_path": state_dict_path,
            "state_dict_key": state_dict_key,
            "vit_name": vit_name
        }
    }
    vision_cls = model_name_to_cls(model.config.vision_config["cls"])
    model.vision_model = vision_cls(**model.config.vision_config["params"])

    # Replace the aligner (randomly initialized) with the new input dim.
    aligner_config = model.config.aligner_config
    aligner_config.params["input_dim"] = aligner_input_dim
    aligner_cls = model_name_to_cls(aligner_config.cls)
    model.aligner = aligner_cls(aligner_config.params)

    # Decide trainability per parameter and tally sizes per group.
    aligner_params, language_params = [], []
    counters = {"total": 0, "trainable": 0, "aligner": 0, "language": 0}

    for name, param in model.named_parameters():
        count = param.numel()
        counters["total"] += count

        if "aligner." in name:
            param.requires_grad = True
            aligner_params.append(param)
            counters["trainable"] += count
            counters["aligner"] += count
        elif "language_model.model.layers." in name:
            # The layer index sits right after the "layers." prefix.
            layer_num = int(name.split(".")[3])
            if layer_num < language_layers_to_unfreeze:
                param.requires_grad = True
                language_params.append(param)
                counters["trainable"] += count
                counters["language"] += count
            else:
                param.requires_grad = False
        else:
            param.requires_grad = False

    if logger:
        def to_million(x):
            return round(x / 1e6, 2)

        logger.info("冻结参数完成：")
        logger.info(f"  👁️  总参数: {to_million(counters['total'])}M")
        logger.info(f"  🔧 可训练参数总计: {to_million(counters['trainable'])}M")
        logger.info(f"    - aligner部分: {to_million(counters['aligner'])}M")
        logger.info(
            f"    - 语言模型前{language_layers_to_unfreeze}层: {to_million(counters['language'])}M")

    # Two parameter groups so the fresh aligner learns faster than the
    # partially unfrozen language layers.
    optimizer = AdamW([
        {'params': aligner_params, 'lr': aligner_lr},
        {'params': language_params, 'lr': language_lr},
    ])

    return model, processor, optimizer
###########################################
# 主流程：加载数据、模型、冻结参数、训练并保存模型
###########################################


def main():
    """Entry point: parse args, build the dataset/model/optimizer, train the aligner, and save.

    Pipeline:
      1. Parse CLI args and fix random seeds.
      2. Optionally enable Huawei Ascend (NPU) support via ``torch_npu``.
      3. Build the disease-mapping dataset and split off 2% for evaluation.
      4. Load the pretrained model, freeze/unfreeze parameters, and build a
         grouped-LR optimizer plus a warmup + cosine LR scheduler.
      5. Train with a custom ``Trainer`` and save model + processor to
         ``args.output_dir``.

    Raises:
        ValueError: if ``--server`` is set but the Ascend (torch_npu) stack
            is missing or misconfigured.
    """
    args = parse_args()
    set_seed(args.seed)

    # On an Ascend server, importing torch_npu + transfer_to_npu transparently
    # remaps torch.cuda.* calls to the NPU backend.
    if args.server:
        try:
            import torch_npu  # noqa: F401
            from torch_npu.contrib import transfer_to_npu  # noqa: F401
        except ImportError as e:
            # Narrow except (was a bare `except:`) and chain the cause so the
            # original import failure stays visible in the traceback.
            raise ValueError("当前服务器不支持华为昇腾环境或者没有完整配置") from e
        print_device_info()

    # Build the dataset and hold out 2% for evaluation.
    dataset = DiseaseMappingDataset(args.data_path)

    total_size = len(dataset)
    eval_size = int(0.02 * total_size)
    train_size = total_size - eval_size
    train_dataset, eval_dataset = random_split(
        dataset, [train_size, eval_size], generator=torch.Generator().manual_seed(args.seed))

    logger.info(
        f"数据集加载完成！训练数据数数量:{len(train_dataset)}, 评估数据数量:{len(eval_dataset)}")

    model, processor, optimizer = prepare_model_for_training(
        pretrained_model_path=args.pretrained_model,
        state_dict_path="pretrained/Vilref/ViLReF_ViT.pt",
        aligner_input_dim=768,
        language_layers_to_unfreeze=4,
        aligner_lr=1e-4,
        language_lr=1e-6,
        dtype=torch.float16,
        logger=logger
    )

    # ===== LR scheduler =====
    gradient_accumulation_steps = 32
    # FIX: the schedule must cover the *training* split (the set actually
    # iterated per epoch), not the full dataset; clamp to >= 1 so warmup and
    # cosine decay stay well-defined even for tiny datasets.
    num_training_steps = max(
        1,
        args.epochs * len(train_dataset)
        // (args.batch_size * gradient_accumulation_steps),
    )
    num_warmup_steps = int(0.05 * num_training_steps)

    scheduler = get_cosine_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_cycles=0.5,  # single half-cosine decay, no oscillation
    )

    # Trainer configuration. NOTE(review): the per-group LRs set on the
    # optimizer take precedence; `learning_rate` here is informational.
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        learning_rate=args.lr,
        max_grad_norm=0.5,
        evaluation_strategy="steps",  # evaluate every `eval_steps` steps
        eval_steps=1000,
        save_strategy="epoch",        # checkpoint once per epoch
        save_total_limit=2,           # keep at most 2 checkpoints, delete oldest
        logging_steps=10,
        remove_unused_columns=False,
        dataloader_num_workers=8,
        report_to="none",
        dataloader_drop_last=True,
        dataloader_pin_memory=False,
        ddp_find_unused_parameters=True,
    )

    # Custom Trainer with our collator, grouped optimizer, and scheduler.
    trainer = CustomTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=partial(collate_fn, processor=processor),
        optimizers=(optimizer, scheduler),
        tokenizer=processor.tokenizer
    )

    logger.info("🚀 开始模型训练 ...")

    # The model weights are loaded in float16; autocast keeps mixed-precision
    # behavior consistent with the original setup (torch_npu remaps torch.cuda
    # on Ascend, so this also covers the NPU path).
    with torch.cuda.amp.autocast():
        trainer.train()

    logger.info("✅ 训练结束，正在保存模型 ...")
    trainer.save_model(args.output_dir)
    processor.save_pretrained(args.output_dir)


# Script entry point: run the full training pipeline only when executed
# directly (not when imported as a module).
if __name__ == "__main__":
    main()
