# Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/trainer_seq2seq.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
from types import MethodType
from typing import TYPE_CHECKING, Any, Optional, Union, List, Dict
from collections import defaultdict

import numpy as np
import torch
import torch.distributed as dist
from transformers import Seq2SeqTrainer
from typing_extensions import override

from ...extras import logging
from ...extras.constants import IGNORE_INDEX
from ...extras.packages import is_transformers_version_greater_than
from ..callbacks import SaveProcessorCallback
from ..trainer_utils import create_custom_optimizer, create_custom_scheduler

# Multimodal preprocessing helper (consistent with the official Qwen-VL example);
# only referenced by the disabled benchmark code below.
from qwen_vl_utils import process_vision_info


if TYPE_CHECKING:
    from torch.utils.data import Dataset
    from transformers import PreTrainedTokenizer, ProcessorMixin
    from transformers.trainer import PredictionOutput

    from ...hparams import FinetuningArguments


logger = logging.get_logger(__name__)


class CustomSeq2SeqTrainer(Seq2SeqTrainer):
    r"""Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE.

    Additionally, a (currently disabled, commented-out) hook is sketched below that
    would run one distributed multimodal benchmark inference pass after ``evaluate()``:
      - only enabled when ``benchmark_data_dir`` is passed to ``__init__``;
      - does not change any return value or logging behavior of ``evaluate()``;
      - uses the project logger instead of ``print`` so progress bars stay intact;
      - raises immediately if torch.distributed is not initialized (fail-fast policy,
        no risky fallback).
    """

    def __init__(
        self,
        finetuning_args: "FinetuningArguments",
        processor: Optional["ProcessorMixin"],
        gen_kwargs: Optional[dict[str, Any]] = None,
        # === optional benchmark parameters (backward-compatible with existing call sites) ===
        benchmark_data_dir: Optional[str] = None,
        benchmark_save_dir: str = "benchmark_outputs",
        # ======================================
        **kwargs,
    ) -> None:
        """Initialize the trainer.

        Args:
            finetuning_args: Project finetuning hyperparameters (e.g. ``use_badam``).
            processor: Optional multimodal processor; when given, it is saved via callback.
            gen_kwargs: Generation kwargs forwarded to ``generate`` during evaluation.
            benchmark_data_dir: Root of benchmark datasets (feature currently disabled below).
            benchmark_save_dir: Output directory for benchmark results (currently unused).
            **kwargs: Forwarded to ``Seq2SeqTrainer`` (must contain ``tokenizer``).
        """
        # transformers >= 4.46 renamed the `tokenizer` ctor argument to `processing_class`;
        # on older versions the attribute is set manually before super().__init__().
        if is_transformers_version_greater_than("4.46"):
            kwargs["processing_class"] = kwargs.pop("tokenizer")
        else:
            self.processing_class: "PreTrainedTokenizer" = kwargs.get("tokenizer")

        super().__init__(**kwargs)

        if processor is not None:
            # avoid wrong loss under gradient accumulation
            # https://github.com/huggingface/transformers/pull/36044#issuecomment-2746657112
            self.model_accepts_loss_kwargs = False

        self.finetuning_args = finetuning_args
        if gen_kwargs is not None:
            # https://github.com/huggingface/transformers/blob/v4.45.0/src/transformers/trainer_seq2seq.py#L287
            self._gen_kwargs = gen_kwargs

        if processor is not None:
            self.add_callback(SaveProcessorCallback(processor))

        if finetuning_args.use_badam:
            from badam import BAdamCallback, clip_grad_norm_old_version  # type: ignore

            # BAdam requires the pre-accelerate clip_grad_norm_ signature.
            self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
            self.add_callback(BAdamCallback)

        # =========================
        # Distributed benchmark configuration (currently disabled)
        # =========================
        # self._benchmark_enabled: bool = benchmark_data_dir is not None
        # self._benchmark_data_dir: Optional[str] = benchmark_data_dir
        # self._benchmark_save_dir: str = benchmark_save_dir
        # self._benchmark_processor: Optional["ProcessorMixin"] = processor  # must contain a tokenizer
        # self._benchmark_all_samples: List[Dict[str, Any]] = []
        # self._benchmark_rank_samples: Optional[List[Dict[str, Any]]] = None
        # self._benchmark_world_size: int = 1
        # self._benchmark_prepared: bool = False

        # if self._benchmark_enabled:
        #     # strictly validate the required components
        #     if self._benchmark_processor is None:
        #         raise ValueError("❌ Benchmark enabled but `processor` is None.")
        #     if not hasattr(self._benchmark_processor, "tokenizer"):
        #         raise ValueError("❌ Processor for benchmark must contain a tokenizer (e.g. AutoProcessor for Qwen2.5-VL).")
        #     if self._gen_kwargs is None:
        #         raise ValueError("❌ Benchmark enabled but `gen_kwargs` is None.")
        #     # pad/eos must be present. NOTE(review): dict.get() never raises KeyError,
        #     # so these lines do NOT fail fast as originally intended — index with
        #     # self._gen_kwargs["pad_token_id"] instead if a missing key should raise.
        #     _ = self._gen_kwargs.get("pad_token_id")
        #     _ = self._gen_kwargs.get("eos_token_id")

        #     # load all benchmark samples (light preprocessing only; images are not tensorized here)
        #     self._benchmark_all_samples = self._load_all_benchmarks(self._benchmark_data_dir)
        #     logger.info_rank0(f"Benchmark enabled. Loaded {len(self._benchmark_all_samples)} samples.")
        # else:
        #     raise ValueError("❌ Benchmark disabled. Please provide `benchmark_data_dir` to enable it.")
    @override
    def create_optimizer(self) -> "torch.optim.Optimizer":
        """Create the optimizer, consulting the LLaMA-Factory custom builder first.

        The custom builder runs only when no optimizer exists yet; afterwards the
        parent implementation finalizes creation (or builds the default optimizer
        if the custom builder produced none — confirm in ``trainer_utils``).
        """
        if self.optimizer is not None:
            return super().create_optimizer()
        self.optimizer = create_custom_optimizer(self.model, self.args, self.finetuning_args)
        return super().create_optimizer()

    @override
    def create_scheduler(
        self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
    ) -> "torch.optim.lr_scheduler.LRScheduler":
        # Side-effect call first, then defer to the stock HF scheduler creation.
        # NOTE(review): create_custom_scheduler's return value is ignored — it presumably
        # registers a custom LR schedule via self.args; confirm in ..trainer_utils.
        create_custom_scheduler(self.args, num_training_steps, optimizer)
        return super().create_scheduler(num_training_steps, optimizer)

    @override
    def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
        """Return a sequential sampler when shuffling is disabled, else defer to the parent."""
        if not self.finetuning_args.disable_shuffling:
            return super()._get_train_sampler(*args, **kwargs)
        # Deterministic order: iterate the training set front to back.
        return torch.utils.data.SequentialSampler(self.train_dataset)

    @override
    def compute_loss(self, model, inputs, *args, **kwargs):
        # Pure pass-through to the parent implementation; presumably kept as an
        # explicit hook for injecting custom loss logic later — confirm intent.
        return super().compute_loss(model, inputs, *args, **kwargs)

    @override
    def prediction_step(
        self,
        model: "torch.nn.Module",
        inputs: dict[str, Union["torch.Tensor", Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[list[str]] = None,
        **gen_kwargs,
    ) -> tuple[Optional[float], Optional["torch.Tensor"], Optional["torch.Tensor"]]:
        r"""Remove the prompt part in the generated tokens.

        Subclass and override to inject custom behavior.
        """
        use_generate = self.args.predict_with_generate
        # When generating, labels must not reach the model, so detach them from the batch.
        labels = inputs.pop("labels", None) if use_generate else inputs.get("labels")

        loss, generated_tokens, _ = super().prediction_step(
            model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys, **gen_kwargs
        )

        if use_generate and generated_tokens is not None:
            # The generated sequence echoes the prompt; blank it out with padding so
            # only the completion survives decoding.
            prompt_length = inputs["input_ids"].size(-1)
            generated_tokens[:, :prompt_length] = self.processing_class.pad_token_id
            generated_tokens = generated_tokens.contiguous()

        return loss, generated_tokens, labels

    def save_predictions(
        self, dataset: "Dataset", predict_results: "PredictionOutput", skip_special_tokens: bool = True
    ) -> None:
        r"""Save model predictions to `output_dir`.

        A custom behavior that not contained in Seq2SeqTrainer.
        """
        # Only the main process writes the file.
        if not self.is_world_process_zero():
            return

        output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl")
        logger.info_rank0(f"Saving prediction results to {output_prediction_file}")

        pad_token_id = self.processing_class.pad_token_id
        # IGNORE_INDEX positions cannot be decoded, so substitute the pad token first.
        labels = np.where(predict_results.label_ids != IGNORE_INDEX, predict_results.label_ids, pad_token_id)
        preds = np.where(predict_results.predictions != IGNORE_INDEX, predict_results.predictions, pad_token_id)

        for row_idx, pred_row in enumerate(preds):
            non_pad_positions = np.nonzero(pred_row != pad_token_id)[0]
            if len(non_pad_positions):  # rotate so leading pad tokens move to the end
                first = non_pad_positions[0]
                preds[row_idx] = np.concatenate((pred_row[first:], pred_row[:first]), axis=-1)

        decoded_inputs = self.processing_class.batch_decode(dataset["input_ids"], skip_special_tokens=False)
        decoded_preds = self.processing_class.batch_decode(preds, skip_special_tokens=skip_special_tokens)
        decoded_labels = self.processing_class.batch_decode(labels, skip_special_tokens=skip_special_tokens)

        # One JSON object per line: prompt / prediction / reference.
        with open(output_prediction_file, "w", encoding="utf-8") as f:
            for text, pred, label in zip(decoded_inputs, decoded_preds, decoded_labels):
                f.write(json.dumps({"prompt": text, "predict": pred, "label": label}, ensure_ascii=False) + "\n")

    # =====================================================
    # Distributed multimodal benchmark (auto-run after evaluate()) — currently disabled
    # =====================================================

    # @override
    # def evaluate(
    #     self,
    #     eval_dataset: Optional["Dataset"] = None,
    #     ignore_keys: Optional[list[str]] = None,
    #     metric_key_prefix: str = "eval",
    #     **kwargs,  # ✅ 支持 gen_kwargs 等冗余参数
    # ) -> dict:
    #     """
    #     保持原 evaluate 行为不变；在其**完成之后**，若启用了 benchmark，则执行一次分布式多模态推理与保存。
    #     - 不修改返回值；
    #     - 不更改现有日志/指标；
    #     - 使用项目 logger，避免 print 破坏进度条。
    #     """
    #     metrics = super().evaluate(
    #         eval_dataset=eval_dataset,
    #         ignore_keys=ignore_keys,
    #         metric_key_prefix=metric_key_prefix,
    #         **kwargs,  # ✅ 透传 gen_kwargs 等
    #     )

    #     if self._benchmark_enabled:
    #         self._maybe_prepare_benchmark_rank_split()
    #         self._run_distributed_benchmark_and_save(step=self.state.global_step)
    #     else:
    #         raise ValueError("❌ Benchmark disabled. Please provide `benchmark_data_dir` to enable it.") # 这里是一个魔改的调试，因为我们大多数情况下都是要计算分数的。如果不计算分数，我会手动在这里注释掉
    #     return metrics
    # -----------------------------
    # Benchmark: 准备与执行（内部）
    # -----------------------------
    # def _maybe_prepare_benchmark_rank_split(self) -> None:
    #     """按分布式 rank 均分样本；只做一次。"""
    #     if self._benchmark_prepared:
    #         return
    #     if not dist.is_initialized():
    #         # 遵从你的“自然报错”原则：启用 benchmark 必须有分布式
    #         raise RuntimeError("❌ Benchmark enabled but torch.distributed is not initialized.")

    #     rank = dist.get_rank()
    #     self._benchmark_world_size = dist.get_world_size()

    #     # 均匀切分（保留顺序；若需随机可在此处 random.shuffle）
    #     self._benchmark_rank_samples = self._benchmark_all_samples[rank::self._benchmark_world_size]
    #     logger.info_rank0(
    #         f"Prepared benchmark split: world_size={self._benchmark_world_size}, "
    #         f"avg_per_rank≈{len(self._benchmark_all_samples) / max(self._benchmark_world_size,1):.1f}"
    #     )
    #     self._benchmark_prepared = True

    # def _run_distributed_benchmark_and_save(self, step: int) -> None:
    #     """各 rank 并行推理，all_gather 汇总，rank0 分类保存到 benchmark_save_dir/eval_<step>/<benchmark>.json。"""
    #     assert self._benchmark_rank_samples is not None, "Benchmark rank samples not prepared."
    #     assert self._benchmark_processor is not None, "Processor required for benchmark."
    #     model = self.model

    #     # 评测前后切换模式
    #     model_was_training = model.training
    #     model.eval()

    #     # 推理
    #     results: List[Dict[str, Any]] = []
    #     with torch.no_grad():
    #         for idx, ex in enumerate(self._benchmark_rank_samples):
    #             try:
    #                 #  处理多模态输入
    #                 image_inputs, video_inputs = process_vision_info(ex["messages"])
    #                 inputs = self._benchmark_processor(
    #                     text=[ex["text"]],
    #                     images=image_inputs if image_inputs else None,
    #                     videos=video_inputs if video_inputs else None,
    #                     padding=True,
    #                     return_tensors="pt",
    #                 )
    #                 inputs = inputs.to(model.device)
        
    #                 # 3 调用生成
    #                 generated_ids = model.generate(**inputs, **self._gen_kwargs)
        
    #                 #  去除 prompt
    #                 trimmed = [
    #                     out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    #                 ]
        
    #                 results.append({
    #                     "benchmark":   ex["benchmark"],
    #                     "instruction": ex["instruction"],
    #                     "input":       ex["input"],
    #                     "gt_output":   ex["gt_output"],
    #                     "images":      ex["images"],
    #                     "generated_ids": [t.cpu().tolist() for t in trimmed],
    #                 })
        
    #             except Exception as e:
    #                 # ------------------------
    #                 # 🔥 调试信息 + 原样抛出异常
    #                 # ------------------------
    #                 logger.info_rank0(
    #                     f"\n❌ Exception during benchmark inference:\n"
    #                     f"  • rank = {dist.get_rank()}\n"
    #                     f"  • benchmark = {ex.get('benchmark')}\n"
    #                     f"  • sample_idx = {idx}\n"
    #                     f"  • instruction = {ex.get('instruction', '')[:200]!r}\n"
    #                     f"  • input = {ex.get('input', '')[:200]!r}\n"
    #                     f"  • gt_output = {ex.get('gt_output', '')[:200]!r}\n"
    #                     f"  • images = {ex.get('images', [])}\n"
    #                     f"  • text_len = {len(ex.get('text', ''))}\n"
    #                     f"  • messages_len = {len(str(ex.get('messages', '')))}\n"
    #                     f"  • ErrorType = {type(e).__name__}\n"
    #                     f"  • ErrorMsg = {str(e)}\n",
    #                     exc_info=True,  # ✅ 打印完整 traceback
    #                 )
    #                 raise  ValueError("stop")  # NOTE(review): raising a new ValueError replaces the original traceback; use a bare `raise` to actually re-raise with the original stack as intended
                

    #     # 分布式聚合
    #     world_size = self._benchmark_world_size
    #     gathered: List[Optional[List[Dict[str, Any]]]] = [None for _ in range(world_size)]
    #     dist.all_gather_object(gathered, results)
    #     dist.barrier()

    #     # rank0 写盘
    #     if self.is_world_process_zero():
    #         flat = [item for sub in gathered for item in (sub or [])]
    #         by_bench: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
    #         for item in flat:
    #             by_bench[item["benchmark"]].append(item)

    #         out_dir = os.path.join(self._benchmark_save_dir, f"eval_{step}")
    #         os.makedirs(out_dir, exist_ok=True)
    #         for bench_name, rows in by_bench.items():
    #             path = os.path.join(out_dir, f"{bench_name}.json")
    #             with open(path, "w", encoding="utf-8") as f:
    #                 json.dump(rows, f, ensure_ascii=False, indent=2)
    #             logger.info_rank0(f"Saved {len(rows)} benchmark results → {path}")

    #     # 恢复训练模式
    #     if model_was_training:
    #         model.train()

    # # -----------------------------
    # # Benchmark: 数据加载（内部）
    # # -----------------------------
    # def _load_all_benchmarks(self, base_dir: str) -> List[Dict[str, Any]]:
    #     """
    #     读取 dataset_info.json，根据 columns 映射载入所有 *_test.json；
    #     为每条样本构建 messages & chat text（不 tokenize 图片）：
    #        {
    #          "benchmark": str,
    #          "instruction": str,
    #          "input": str,
    #          "gt_output": str,
    #          "images": list[str],
    #          "messages": list[dict],   # Qwen 官方格式
    #          "text": str               # apply_chat_template(add_generation_prompt=True)
    #        }
    #     说明：
    #       - 按你当前工程约束，这里假设所有数据列已统一为 Alpaca 多模态风格；
    #       - 若文件缺失/键缺失，按你要求“自然报错”，不做危险兜底。
    #     """
    #     info_path = os.path.join(base_dir, "dataset_info.json")
    #     if not os.path.exists(info_path):
    #         raise FileNotFoundError(f"❌ dataset_info.json not found in {base_dir}")

    #     with open(info_path, "r", encoding="utf-8") as f:
    #         dataset_info = json.load(f)

    #     processor = self._benchmark_processor
    #     assert processor is not None
    #     used_dataset=[
    #         # 'med_pub_test_quality_diagnosis',
    #         'med_pub_test_compare',
    #         'med_pub_test_judge',
    #         "med_pub_test_diagnosis",
    #         "med_pub_test_choice"
    #     ]
    #     all_samples: List[Dict[str, Any]] = []
    #     for name in used_dataset:
    #         # if not name.endswith("_test"):
    #         #     continue
    #         meta = dataset_info.get(name)
    #         if meta is None:
    #             raise ValueError(f"❌ Benchmark metadata for '{name}' not found in dataset_info.json")

    #         file_path = os.path.join(base_dir, meta["file_name"])
    #         if not os.path.exists(file_path):
    #             raise FileNotFoundError(f"❌ Benchmark file not found: {file_path}")

    #         with open(file_path, "r", encoding="utf-8") as f:
    #             data = json.load(f)

    #         columns = meta["columns"]
    #         prompt_key = columns["prompt"]     # "instruction"
    #         query_key  = columns["query"]      # "input"
    #         resp_key   = columns["response"]   # "output"
    #         images_key = columns.get("images", "images")

    #         bench_name = name.replace("_test", "")
    #         logger.info_rank0(f"Loaded {len(data)} samples from {file_path} (benchmark={bench_name})")

    #         for ex in data:
    #             instruction = ex.get(prompt_key, "")
    #             in_text     = ex.get(query_key, "")
    #             gt_out      = ex.get(resp_key, "")
    #             images      = ex.get(images_key, []) or []
    #             if isinstance(images, str):
    #                 images = [images]

    #             content = []
    #             for img_path in images:
    #                 content.append({"type": "image", "image": img_path})
    #             user_text = instruction if not in_text else f"{instruction}\n{in_text}"
    #             content.append({"type": "text", "text": user_text})
    #             messages = [{"role": "user", "content": content}]

    #             text = processor.apply_chat_template(
    #                 messages, tokenize=False, add_generation_prompt=True
    #             )
    #             tokenized = processor.tokenizer(text, add_special_tokens=False)
    #             # token_len = len(tokenized["input_ids"])
    #             # if token_len > 1000:  # 你可以设为模型最大 context 2k 或 4k
    #             #     logger.warning(
    #             #         f"⚠️  Benchmark sample too long: {bench_name}, token_len={token_len}, "
    #             #         f"instruction={instruction[:60]!r}, input={in_text[:60]!r}, images={images}"
    #             #     )
    #             all_samples.append({
    #                 "benchmark": bench_name,
    #                 "instruction": instruction,
    #                 "input": in_text,
    #                 "gt_output": gt_out,
    #                 "images": images,
    #                 "messages": messages,
    #                 "text": text,
    #             })

    #     return all_samples
