import os
import json
import torch
import torch.distributed as dist
from collections import defaultdict
from transformers import TrainerCallback
from qwen_vl_utils import process_vision_info  # consistent with the official Qwen2.5-VL example


class DistributedBenchmarkCallback(TrainerCallback):
    """
    Distributed multimodal benchmark callback for Qwen2.5-VL.

    Workflow:
      - ``on_train_begin``: read the alpaca directory, collect every ``*_test``
        benchmark listed in ``dataset_info.json`` and parse its ``columns``
        mapping; build the official Qwen ``messages`` and the
        ``processor.apply_chat_template`` text for each sample; merge all
        benchmarks and shard them evenly across ranks.
      - ``on_evaluate``: each rank runs inference on its own shard; only the
        generated token ids (prompt part stripped) are kept. Rank 0 gathers
        every shard and writes one file per benchmark:
        ``save_dir/eval_<step>/<benchmark>.json``.
    """

    def __init__(
        self,
        processor=None,
        tokenizer=None,
        alpaca_data_dir=None,
        save_dir="benchmark_outputs",
        **kwargs,
    ):
        """
        Initialize the distributed multimodal evaluation callback.

        Args:
            processor: Official Qwen2.5-VL ``AutoProcessor`` used to build
                multimodal inputs. Preferred over ``tokenizer``.
            tokenizer: Fallback ``AutoTokenizer`` when no processor is given.
            alpaca_data_dir (str): Directory containing ``dataset_info.json``
                and the ``*_test`` benchmark json files.
            save_dir (str): Directory where inference results are written.
            **kwargs: Ignored; accepted so the callback can be built from
                destructured dicts such as ``**tokenizer_module``.

        Raises:
            ValueError: If neither ``processor`` nor ``tokenizer`` is given.
        """
        if processor is None and tokenizer is None:
            raise ValueError(
                "❌ Neither `processor` nor `tokenizer` provided. "
                "Please pass one of them (e.g. `DistributedBenchmarkCallback(processor=AutoProcessor.from_pretrained(...))`)."
            )

        self.processor = processor or tokenizer  # either one is sufficient
        self.alpaca_data_dir = alpaca_data_dir
        self.save_dir = save_dir

        # BUGFIX: a bare tokenizer has no `.tokenizer` attribute, so accessing
        # `self.processor.tokenizer` crashed in the tokenizer-only fallback.
        # An AutoProcessor wraps a tokenizer; a tokenizer already *is* one.
        inner_tokenizer = getattr(self.processor, "tokenizer", self.processor)

        # Generation parameters: greedy decoding for stable, reproducible
        # evaluation. `temperature` is intentionally omitted — transformers
        # ignores (and warns about) it when `do_sample=False`.
        self.gen_kwargs = dict(
            max_new_tokens=1024,
            do_sample=False,
            num_beams=1,
            pad_token_id=inner_tokenizer.pad_token_id,
            eos_token_id=inner_tokenizer.eos_token_id,
            use_cache=True,
            synced_gpus=True,  # overridden per call when not distributed
        )

        # Runtime state.
        self.samples_rank = None   # this rank's shard (prebuilt text + messages)
        self.world_size = 1
        self.trainer = None

        # Eagerly load every benchmark sample so data errors surface early.
        self.all_samples = self._load_all_benchmarks(self.alpaca_data_dir)

    # -------------------------------------------------
    def _load_all_benchmarks(self, base_dir):
        """
        Read ``dataset_info.json`` and load every ``*_test`` benchmark file
        according to its ``columns`` mapping; build the official ``messages``
        and chat-template text per sample, keeping metadata for traceability.

        Returns list[dict], each item shaped as::

            {
              "benchmark": str,
              "instruction": str,
              "input": str,
              "gt_output": str,
              "images": list[str],      # image paths
              "messages": list[dict],   # official-format messages (images + text)
              "text": str               # result of apply_chat_template
            }

        The dataset info entries are assumed to be uniformly::

            {
                "columns": {
                    "prompt": "instruction",
                    "query": "input",
                    "response": "output",
                    "images": "images"
                }
            }

        In principle each file should be validated against its declared
        schema, but since the task is restricted to multimodal SFT with
        uniformly structured Alpaca data, this is deliberately simplified.

        Raises:
            FileNotFoundError: If ``dataset_info.json`` or a listed benchmark
                file is missing.
        """
        info_path = os.path.join(base_dir, "dataset_info.json")
        if not os.path.exists(info_path):
            raise FileNotFoundError(f"❌ dataset_info.json not found in {base_dir}")

        with open(info_path, "r", encoding="utf-8") as f:
            dataset_info = json.load(f)

        all_samples = []
        for name, meta in dataset_info.items():
            # BUGFIX: the old check was `'_test_' not in name`, which silently
            # skipped the documented `*_test` naming (e.g. "mmbench_test").
            if '_test' not in name:
                continue  # only process test-set entries
            file_path = os.path.join(base_dir, meta["file_name"])
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"❌ Benchmark file not found: {file_path}")

            with open(file_path, "r", encoding="utf-8") as f:
                data = json.load(f)

            columns = meta["columns"]
            prompt_key = columns["prompt"]     # "instruction"
            query_key = columns["query"]       # "input"
            resp_key = columns["response"]     # "output"
            images_key = columns.get("images", "images")

            bench_name = name.replace("_test", "")
            print(f"📘 Loaded {len(data)} samples from {file_path} (benchmark={bench_name})")

            for ex in data:
                instruction = ex.get(prompt_key, "")
                in_text = ex.get(query_key, "")
                gt_out = ex.get(resp_key, "")
                images = ex.get(images_key, []) or []
                if isinstance(images, str):
                    images = [images]

                # Build official-format messages: all images first, then text.
                content = [{"type": "image", "image": img_path} for img_path in images]
                user_text = instruction if not in_text else f"{instruction}\n{in_text}"
                content.append({"type": "text", "text": user_text})

                messages = [{"role": "user", "content": content}]

                # Render the chat template as a plain string (no tokenization).
                text = self.processor.apply_chat_template(
                    messages, tokenize=False, add_generation_prompt=True
                )

                all_samples.append({
                    "benchmark": bench_name,
                    "instruction": instruction,
                    "input": in_text,
                    "gt_output": gt_out,
                    "images": images,
                    "messages": messages,
                    "text": text,
                })

        print(f"✅ Total {len(all_samples)} benchmark samples loaded.")
        return all_samples

    # -------------------------------------------------
    def on_train_begin(self, args, state, control, **kwargs):
        """Shard the merged sample list evenly across ranks.

        Images are NOT tensorized here (to keep memory low); inference builds
        processor inputs on the fly from ``messages``.

        BUGFIX: the official TrainerCallback hook is ``on_train_begin``; the
        previous name ``on_train_start`` was never invoked by the Trainer, so
        ``samples_rank`` stayed ``None``.
        """
        rank = dist.get_rank() if dist.is_initialized() else 0
        self.world_size = dist.get_world_size() if dist.is_initialized() else 1

        self.samples_rank = self.all_samples[rank::self.world_size]
        print(f"🔧 [Rank {rank}] Prepared {len(self.samples_rank)} samples for benchmark.")

    # Backward-compatible alias for code that called the old (misnamed) hook.
    on_train_start = on_train_begin

    # -------------------------------------------------
    def on_evaluate(self, args, state, control, **kwargs):
        """
        Run the distributed benchmark at each evaluation step:
          - each rank runs inference on its own shard;
          - shards are gathered across ranks (or used directly when running
            single-process);
          - rank 0 groups results by benchmark name and writes them to
            ``save_dir/eval_<step>/<benchmark>.json``.

        Raises:
            RuntimeError: If neither a ``trainer`` nor a ``model`` is
                available from the callback kwargs.
        """
        # The HF Trainer passes `model` (not `trainer`) in callback kwargs;
        # accept either for compatibility with custom callback handlers.
        trainer = kwargs.get("trainer", None)
        model = trainer.model if trainer is not None else kwargs.get("model")
        if model is None:
            raise RuntimeError("❌ `trainer` is not provided in on_evaluate() — this callback must be used with Trainer.evaluate().")

        distributed = dist.is_initialized()
        rank = dist.get_rank() if distributed else 0
        print(f"🚀 [Rank {rank}] Evaluation step {state.global_step}: Running distributed benchmark ...")

        # Defensive: shard lazily if on_train_begin never ran (e.g. an
        # evaluate-only invocation).
        if self.samples_rank is None:
            self.world_size = dist.get_world_size() if distributed else 1
            self.samples_rank = self.all_samples[rank::self.world_size]

        model.eval()
        # `synced_gpus` is only meaningful when every rank generates in lockstep.
        gen_kwargs = {**self.gen_kwargs, "synced_gpus": distributed}
        results = []

        with torch.no_grad():
            for ex in self.samples_rank:
                # Build multimodal inputs from the prebuilt messages/text.
                image_inputs, video_inputs = process_vision_info(ex["messages"])
                inputs = self.processor(
                    text=[ex["text"]],
                    images=image_inputs,
                    videos=video_inputs,
                    padding=True,
                    return_tensors="pt",
                )
                inputs = inputs.to(model.device)

                # Generate and strip the prompt: keep only new token ids.
                generated_ids = model.generate(**inputs, **gen_kwargs)
                trimmed = [
                    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
                ]

                # Store raw token ids plus the input metadata for later decoding.
                results.append({
                    "benchmark": ex["benchmark"],
                    "instruction": ex["instruction"],
                    "input": ex["input"],
                    "gt_output": ex["gt_output"],
                    "images": ex["images"],
                    "generated_ids": [t.cpu().tolist() for t in trimmed],
                })

        # Gather shards from every rank; single-process just wraps its own.
        if distributed:
            gathered = [None for _ in range(dist.get_world_size())]
            dist.all_gather_object(gathered, results)
        else:
            gathered = [results]

        # Rank 0 writes results grouped by benchmark:
        # save_dir/eval_<step>/<benchmark>.json
        if rank == 0:
            flat = [item for sub in gathered for item in sub]
            by_bench = defaultdict(list)
            for item in flat:
                by_bench[item["benchmark"]].append(item)

            out_dir = os.path.join(self.save_dir, f"eval_{state.global_step}")
            os.makedirs(out_dir, exist_ok=True)
            for bench_name, rows in by_bench.items():
                path = os.path.join(out_dir, f"{bench_name}.json")
                with open(path, "w", encoding="utf-8") as f:
                    json.dump(rows, f, ensure_ascii=False, indent=2)
                print(f"✅ [Rank 0] Saved {len(rows)} results → {path}")

        # Restore training mode before control returns to the Trainer.
        model.train()
        return control