"""Qwen2.5-VL 全参数微调训练脚本

本脚本实现了针对 JSONL 对话格式（含多模态图像信息）的全参数监督微调流程，核心特性：
1. 数据加载：支持包含多条消息（system / user / assistant）的对话结构，自动解析并规范化图像路径。
2. 标签构造：仅监督最后一条 assistant 消息中的“答案”内容；忽略系统提示、用户消息、图像占位 token 以及模板控制 token。
3. 多模态支持：通过 AutoProcessor 与 qwen_vl_utils 处理视觉输入（图像路径 / URL / data URI）。
4. 训练加速：使用 Accelerate 进行多 GPU + 梯度累计，支持 bf16、FlashAttention2、梯度检查点。
5. 学习率调度：线性 warmup + 线性衰减，调度严格对齐“优化器真实更新步”（update_step），避免与 micro step 混淆。
6. 精细日志：输出学习率阶段、监督 token 数、吞吐、显存占用；可选 TensorBoard 记录。
7. 掩码策略：标签张量初始全设为 -100，仅对识别出的答案 token 区间写入真实标签，保证 loss 聚焦于答案本身。

注意：
—— 不引入额外参数控制掩码策略；脚本默认只监督最终答案区间；若答案块解析失败，则回退为监督 prompt 之后的全部 token（见 Collator 的回退策略）。
—— 保持训练逻辑稳定，不包含其他实验性路径。
—— 所有函数与类均提供中文注释以便维护。
"""

import os
import json
import argparse
from pathlib import Path
from typing import List, Dict, Any
import logging
import time
import platform
from datetime import datetime
import math
import re

import torch
from torch.utils.data import Dataset, DataLoader
from transformers import (
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
    get_scheduler,
    set_seed as hf_set_seed,
)
import transformers as _transformers
from accelerate import Accelerator
from tqdm.auto import tqdm
from sklearn.metrics import f1_score

# Hard-disable TorchDynamo to avoid FA2 varlen + Dynamo FakeTensor issues
try:  # pragma: no cover
    import torch._dynamo as _dynamo

    _dynamo.config.suppress_errors = True
    # NOTE(review): torch._dynamo.disable() called with no argument returns a
    # decorator/context manager rather than flipping a global switch — confirm
    # this actually disables Dynamo on the installed torch version.
    _dynamo.disable()
except Exception:
    pass


# qwen-vl-utils is a hard dependency: fail fast with an install hint if missing.
try:
    from qwen_vl_utils import process_vision_info
except Exception as e:  # pragma: no cover
    raise RuntimeError(
        "Missing dependency qwen-vl-utils. Install: pip install qwen-vl-utils"
    ) from e


def is_url_or_data(path: str) -> bool:
    """Return True if *path* is a remote URL, data URI, or file URI (i.e. needs no local resolution)."""
    return path.startswith(("http://", "https://", "data:", "file://"))


def join_image_path(image: str, image_root: str | None) -> str:
    if is_url_or_data(image) or image_root is None:
        return image
    p = Path(image)
    if p.is_absolute():
        return str(p)
    return str((Path(image_root) / image).resolve())


class JSONLSFTDataset(Dataset):
    """基于 JSONL 的监督微调数据集读取器。

    预期每行是一个 JSON，包含字段：
    {
        "messages": [
            {"role": "system", "content": ...},
            {"role": "user", "content": [... 文本 / 图像段 ...]},
            {"role": "assistant", "content": "答案"}
        ]
    }

    功能：
    1. 解析并缓存全部样本；
    2. 将用户消息中的相对图像路径拼接为绝对路径；
    3. 为构造标签时只监督最后一条 assistant 消息，生成用于 Prompt 的 messages_prompt（移除第一条 assistant）。
    """

    def __init__(self, jsonl_path: str, image_root: str | None = None):
        self.path = Path(jsonl_path)
        if not self.path.exists():
            raise FileNotFoundError(f"Not found: {self.path}")
        self.image_root = image_root
        self.samples: List[Dict[str, Any]] = []

        # 读取 JSONL，每行一个样本
        with self.path.open("r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    item = json.loads(line)
                except json.JSONDecodeError:
                    continue  # 跳过格式错误行
                if "messages" not in item:
                    continue
                messages = item["messages"]
                # 规范化 user 消息中的图片路径
                for msg in messages:
                    if msg.get("role") == "user":
                        content = msg.get("content", [])
                        if isinstance(content, list):
                            for part in content:
                                if isinstance(part, dict) and part.get("type") == "image":
                                    img = part.get("image")
                                    if isinstance(img, str):
                                        part["image"] = join_image_path(img, self.image_root)
                self.samples.append({"messages": messages})

        if not self.samples:
            raise ValueError(f"No valid samples found in {self.path}")

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """返回一个样本及其用于构造 Prompt 的裁剪版本。

        messages_full: 完整消息（包含答案）。
        messages_prompt: 去掉第一条 assistant，用于生成只含上下文的 Prompt，以便后续区分可监督区间。
        """
        item = self.samples[idx]
        messages: List[Dict[str, Any]] = item["messages"]

        messages_prompt: List[Dict[str, Any]] = []
        assistant_found = False
        for m in messages:
            if m.get("role") == "assistant" and not assistant_found:
                assistant_found = True  # 跳过第一条 assistant（其内容将被监督）
                continue
            messages_prompt.append(m)

        return {"messages_full": messages, "messages_prompt": messages_prompt}


class Collator:
    """Batch assembly function (the DataLoader's `collate_fn`).

    Responsibilities:
    1. Use the model's chat template to build, per sample:
       (a) the full conversation text (answer included)  -> text_full
       (b) the context-only prompt text (first assistant answer removed,
           generation prompt appended)                   -> text_prompt
    2. Count prompt-region tokens (used by the fallback path and for debugging).
    3. Process vision info (the image_inputs list from process_vision_info).
    4. Build labels: supervise only the last assistant answer tokens; ignore
       template tokens, image placeholders, newlines and leading whitespace.
    5. Emit input_ids / attention_mask / pixel_values (if any) / labels.
    """

    def __init__(self, processor: AutoProcessor):
        # Processor bundles the tokenizer, chat template, and image processor.
        self.processor = processor

    def __call__(self, batch: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        texts_full: List[str] = []          # full multi-turn conversation text per sample
        prompt_lens: List[int] = []         # per-sample prompt token length (answer excluded)
        all_image_inputs: List[Any] = []    # aggregated vision inputs

        for sample in batch:
            messages_full = sample["messages_full"]
            messages_prompt = sample["messages_prompt"]

            # Full text (answer included)
            text_full = self.processor.apply_chat_template(
                messages_full, tokenize=False, add_generation_prompt=False
            )
            # Prompt text (used to locate the prompt boundary)
            text_prompt = self.processor.apply_chat_template(
                messages_prompt, tokenize=False, add_generation_prompt=True
            )
            texts_full.append(text_full)

            # Prompt token length (answer tokens excluded)
            prompt_ids = self.processor.tokenizer(
                text_prompt, add_special_tokens=False
            ).input_ids
            prompt_lens.append(len(prompt_ids))

            # Vision info parsing (returns a list of image inputs)
            image_inputs, _ = process_vision_info(messages_full)
            all_image_inputs.append(image_inputs)

        # Unified processor encoding
        proc_kwargs = dict(text=texts_full, padding=True, return_tensors="pt")
        if any(x is not None for x in all_image_inputs):  # at least one sample has images
            proc_kwargs["images"] = all_image_inputs
        inputs = self.processor(**proc_kwargs)

        # Base tensor references
        input_ids = inputs["input_ids"]
        attention_mask = inputs.get("attention_mask", None)
        # Labels start as all -100, i.e. excluded from the loss by default
        labels = torch.full_like(input_ids, -100)

        # Token parsing: locate the answer span inside the LAST assistant block.
        # NOTE(review): assumes the chat template emits literal '<|im_start|>',
        # 'assistant' and '<|im_end|>' tokens (Qwen style) — confirm if the
        # template ever changes.
        tokenizer = self.processor.tokenizer
        special_im_start = "<|im_start|>"
        special_im_end = "<|im_end|>"

        for i in range(input_ids.size(0)):
            ids_i = input_ids[i].tolist()
            # Keep only the effective length (ignore padding)
            if attention_mask is not None:
                effective_len = int(attention_mask[i].sum().item())
            else:
                effective_len = len(ids_i)
            ids_trim = ids_i[:effective_len]
            tokens = tokenizer.convert_ids_to_tokens(ids_trim)

            # Scan for: <|im_start|> + assistant + answer body + <|im_end|>
            answer_start = None
            answer_end = None
            j = 0
            while j < len(tokens):
                if tokens[j] == special_im_start:
                    window = tokens[j + 1:j + 5]
                    if any(tok == 'assistant' for tok in window):
                        # Relative position of the assistant token
                        try:
                            rel = window.index('assistant')
                            assistant_tok_index = j + 1 + rel
                        except ValueError:
                            assistant_tok_index = None
                        # A newline may follow 'assistant'; skip it
                        k = assistant_tok_index + 1 if assistant_tok_index is not None else j + 1
                        while k < len(tokens) and tokens[k] in ['\n']:
                            k += 1
                        # Advance until <|im_end|>
                        m = k
                        while m < len(tokens) and tokens[m] != special_im_end:
                            m += 1
                        answer_start = k
                        answer_end = m  # span is [start, end)
                        j = m  # keep scanning so the LAST match wins
                        continue
                j += 1

            if answer_start is not None and answer_end is not None and answer_end > answer_start:
                # Strip leading whitespace/newlines from the answer span
                trim_start = answer_start
                while trim_start < answer_end:
                    try:
                        piece = tokenizer.decode([ids_trim[trim_start]], skip_special_tokens=False)
                    except Exception:
                        piece = ""
                    if piece.strip() == "" or piece in ["\n", "\r", "\t"]:
                        trim_start += 1
                        continue
                    break
                if trim_start < answer_end:
                    labels[i, trim_start:answer_end] = input_ids[i, trim_start:answer_end]
            else:
                # Fallback: if the answer block could not be parsed, supervise
                # everything after the prompt (so training is not a no-op)
                pl = prompt_lens[i] if i < len(prompt_lens) else 0
                labels[i, pl:effective_len] = input_ids[i, pl:effective_len]

        inputs["labels"] = labels
        # Keep prompt_lens for first-step debug logging only; not used in forward
        inputs["prompt_lens"] = torch.tensor(prompt_lens, dtype=torch.long)
        return inputs


def get_optimizer(model: torch.nn.Module, lr: float, weight_decay: float) -> torch.optim.Optimizer:
    """Build AdamW with two parameter groups, separating decayed and non-decayed params.

    Rules:
    - LayerNorm / RMSNorm / bias parameters get weight_decay = 0;
    - every other trainable parameter gets the supplied weight_decay.
    """
    no_decay_markers = (
        "bias", "LayerNorm.weight", "layernorm.weight", "ln_f.weight", "norm.weight", "rmsnorm.weight",
        "norm.bias", "LayerNorm.bias", "layernorm.bias", "rmsnorm.bias",
    )
    # buckets[True] holds no-decay params, buckets[False] holds decayed ones.
    buckets: dict = {True: [], False: []}
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        is_no_decay = any(marker in name for marker in no_decay_markers)
        buckets[is_no_decay].append(param)
    return torch.optim.AdamW(
        [
            {"params": buckets[False], "weight_decay": weight_decay},
            {"params": buckets[True], "weight_decay": 0.0},
        ],
        lr=lr,
        betas=(0.9, 0.95),
        eps=1e-8,
    )

# Mapping between sleep-stage labels and class indices, and its inverse.
Stage2Idx = {"W": 0, "N1": 1, "N2": 2, "N3": 3, "R": 4}
Idx2Stage = {v: k for k, v in Stage2Idx.items()}

def parse_model_output(out_text: str):
    """Parse a model answer containing JSON.

    Returns a 4-tuple (sleep_stage, reasoning_text, applicable_rules, error_msg).
    Logic mirrors test_qwen25_vl_fullft.py:
    1. Prefer JSON inside a ```json fenced code block;
    2. otherwise grab the first {...} span containing 'sleep_stage';
    3. json.loads with light cleanup (smart quotes, trailing commas);
    4. on success error_msg is None; on any failure the first three fields are
       None and error_msg carries the reason.
    """
    text = out_text.strip()
    raw_json_str = None
    try:
        fence_match = re.search(r"```(?:json)?\s*([\s\S]*?)```", text, re.IGNORECASE)
        if fence_match:
            raw_json_str = fence_match.group(1).strip()
        else:
            brace_match = re.search(r"\{[\s\S]*?\}", text)
            if brace_match and 'sleep_stage' in brace_match.group(0):
                raw_json_str = brace_match.group(0)
        if raw_json_str is None:
            raise ValueError("未找到 JSON 结构")
        raw_json_str = raw_json_str.strip().lstrip('`').rstrip('`').strip()
        # Normalize full-width "smart" quotes that models sometimes emit.
        raw_json_str = raw_json_str.replace('“', '"').replace('”', '"').replace('’', "'")
        try:
            data = json.loads(raw_json_str)
        except json.JSONDecodeError:
            # Remove trailing commas before ANY closing brace/bracket. The
            # previous cleanup only handled a comma before the final brace
            # (",\s*}\s*$"), so nested objects with trailing commas still
            # failed to parse; the global form is a strict superset of it.
            cleaned = re.sub(r",\s*}", "}", raw_json_str)
            cleaned = re.sub(r",\s*]", "]", cleaned)
            data = json.loads(cleaned)
        sleep_stage = data.get("sleep_stage")
        if isinstance(sleep_stage, str):
            sleep_stage = sleep_stage.strip().upper()
        if sleep_stage not in Stage2Idx:
            raise ValueError(f"sleep_stage 非法: {sleep_stage}")
        return sleep_stage, data.get("reasoning_text"), data.get("applicable_rules"), None
    except Exception as e:
        return None, None, None, str(e)

def evaluate(model, val_dataset: JSONLSFTDataset, processor: AutoProcessor, accelerator: Accelerator, batch_size: int = 1) -> float:
    """Compute classification Macro-F1 by generating and parsing JSON answers.

    Procedure:
    1. Each process takes the shard index % world_size == rank (avoids duplicates);
    2. For each sample, rebuild the messages without the first assistant turn
       (i.e. system + user only);
    3. Generate via the chat template with add_generation_prompt=True;
    4. Parse the generated JSON for the predicted sleep_stage; parse the ground
       truth from the original assistant answer;
    5. Gather all predictions and labels, compute macro_f1 on rank 0, and
       broadcast it to all processes.
    Returns: macro_f1 (float); 0.0 if there are no valid samples.
    """
    # The unwrapped model is needed to call generate; the DDP wrapper object
    # may not expose generate directly
    unwrapped = accelerator.unwrap_model(model)
    unwrapped.eval()
    world_size = accelerator.num_processes
    rank = accelerator.process_index
    indices = [i for i in range(len(val_dataset)) if i % world_size == rank]

    local_preds: List[int] = []
    local_labels: List[int] = []

    with torch.no_grad():
        for start in range(0, len(indices), batch_size):
            batch_indices = indices[start:start + batch_size]
            if not batch_indices:
                continue
            texts = []
            image_inputs = []
            gt_labels = []
            for idx in batch_indices:
                sample = val_dataset[idx]
                messages_full = sample["messages_full"] if "messages_full" in sample else sample.get("messages") or sample
                # Rebuild messages_prompt: drop the first assistant turn
                messages_prompt = []
                assistant_skipped = False
                gt_stage = None
                for m in messages_full:
                    if m.get("role") == "assistant" and not assistant_skipped:
                        assistant_skipped = True
                        # Parse the ground-truth label from the reference answer
                        gt_stage, _, _, _ = parse_model_output(m.get("content", ""))
                        continue
                    messages_prompt.append(m)
                if gt_stage is None:
                    # Skip the sample if the ground truth cannot be parsed
                    continue
                gt_labels.append(Stage2Idx.get(gt_stage, -1))
                text = processor.apply_chat_template(messages_prompt, tokenize=False, add_generation_prompt=True)
                texts.append(text)
                img_in, _ = process_vision_info(messages_prompt)
                image_inputs.append(img_in)

            if not texts:
                continue
            inputs = processor(text=texts, images=image_inputs, padding=True, return_tensors="pt").to(accelerator.device)
            # NOTE(review): top_p/temperature have no effect unless do_sample=True
            # (generate defaults to greedy decoding) — confirm sampling is intended.
            generated_ids = unwrapped.generate(
                **inputs,
                max_new_tokens=512,
                top_p=0.8,
                temperature=0.7
            )
            # Strip the prompt tokens so only newly generated tokens are decoded
            generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
            output_texts = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)
            for out_text, gt_label in zip(output_texts, gt_labels):
                pred_stage, _, _, _ = parse_model_output(out_text)
                pred = Stage2Idx.get(pred_stage, -1) if pred_stage else -1
                local_preds.append(pred)
                local_labels.append(gt_label)

    # Convert to tensors for gathering
    if not local_labels:
        local_preds_tensor = torch.empty(0, dtype=torch.int32, device=accelerator.device)
        local_labels_tensor = torch.empty(0, dtype=torch.int32, device=accelerator.device)
    else:
        local_preds_tensor = torch.tensor(local_preds, dtype=torch.int32, device=accelerator.device)
        local_labels_tensor = torch.tensor(local_labels, dtype=torch.int32, device=accelerator.device)

    # NOTE(review): accelerator.gather generally expects equal-sized tensors on
    # every rank; modulo sharding plus skipped samples can make shard sizes
    # differ — verify this does not hang or drop entries (gather_for_metrics
    # pads ragged shards automatically).
    all_preds = accelerator.gather(local_preds_tensor)
    all_labels = accelerator.gather(local_labels_tensor)

    macro_f1 = 0.0
    if accelerator.is_main_process:
        try:
            y_true = all_labels.cpu().numpy()
            y_pred = all_preds.cpu().numpy()
            # -1 marks unparseable predictions/labels; exclude them from the metric
            mask = (y_true >= 0) & (y_pred >= 0)
            if mask.any():
                macro_f1 = f1_score(y_true[mask], y_pred[mask], average="macro")
            else:
                macro_f1 = 0.0
        except Exception:
            macro_f1 = 0.0
    # Broadcast the metric to every process
    macro_f1_tensor = torch.tensor([macro_f1], device=accelerator.device)
    # Use torch.distributed.broadcast to sync the metric across processes;
    # avoids relying on a non-existent accelerator.broadcast
    if accelerator.num_processes > 1:
        try:
            import torch.distributed as dist
            if dist.is_available() and dist.is_initialized():
                dist.broadcast(macro_f1_tensor, src=0)
        except Exception:
            pass  # if broadcast fails, non-main ranks keep their local value (only rank 0 saves the model)
    return float(macro_f1_tensor.item())


def _setup_logging(output_dir: Path, is_main_process: bool, process_index: int) -> logging.Logger:
    """配置日志：主进程控制台 + 全进程文件。

    日志文件： output_dir / training.log
    日志格式： 时间 | 级别 | rank | 信息
    """
    log_dir = output_dir
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / "training.log"

    logger = logging.getLogger("qwen25_vl_fullft")
    logger.setLevel(logging.INFO)
    logger.propagate = False

    # Avoid duplicate handlers if re-invoked (e.g., in notebooks)
    if not logger.handlers:
        fmt = logging.Formatter(
            fmt=f"%(asctime)s | %(levelname)s | rank={process_index} | %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )

        # File handler
        fh = logging.FileHandler(log_file, encoding="utf-8")
        fh.setLevel(logging.INFO)
        fh.setFormatter(fmt)
        logger.addHandler(fh)

        # Console handler (only on main process)
        if is_main_process:
            ch = logging.StreamHandler()
            ch.setLevel(logging.INFO)
            ch.setFormatter(fmt)
            logger.addHandler(ch)

    return logger


def _log_env_and_args(logger: logging.Logger, args: argparse.Namespace, accelerator: Accelerator) -> None:
    """Log environment and argument info so the experiment can be reproduced.

    Covers: host, OS, Python/Torch/Transformers versions, process/GPU details,
    and every hyperparameter key/value pair.
    """
    try:
        import qwen_vl_utils as _qvu
        qvu_ver = getattr(_qvu, "__version__", "unknown")
    except Exception:
        qvu_ver = "unavailable"

    cuda_visible = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    cuda_ok = torch.cuda.is_available()
    gpu_info = []
    if cuda_ok:
        try:
            for dev in range(torch.cuda.device_count()):
                props = torch.cuda.get_device_properties(dev)
                gpu_info.append({
                    "index": dev,
                    "name": torch.cuda.get_device_name(dev),
                    "capability": torch.cuda.get_device_capability(dev),
                    "total_mem_GB": round(props.total_memory / 1024**3, 2),
                })
        except Exception:
            pass

    logger.info("=========== Environment ===========")
    logger.info(f"Host: {platform.node()} | OS: {platform.platform()}")
    logger.info(f"Python: {platform.python_version()} | Torch: {torch.__version__} | Transformers: {_transformers.__version__}")
    logger.info(f"Accelerate: {accelerator.state.__class__.__module__.split('.')[0]} | qwen-vl-utils: {qvu_ver}")
    logger.info(f"CUDA available: {cuda_ok} | CUDA_VISIBLE_DEVICES: {cuda_visible}")
    logger.info(f"Accelerator processes: {accelerator.num_processes} | local_process_index: {accelerator.process_index}")
    for g in gpu_info:
        logger.info(f"GPU[{g['index']}]: {g['name']} | capability={g['capability']} | total_mem={g['total_mem_GB']} GB")
    logger.info("=========== Hyperparameters =======")
    for key, value in sorted(vars(args).items()):
        logger.info(f"{key}: {value}")
    logger.info("===================================")


def parse_args() -> argparse.Namespace:
    """Parse command-line arguments.

    Covers: model path, data files, image root, optimization/schedule
    hyperparameters, logging & evaluation frequency, and TensorBoard switches.
    """
    ap = argparse.ArgumentParser(description="Full-parameter finetuning for Qwen2.5-VL-3B-Instruct")

    # Required paths and data locations
    ap.add_argument("--model_path", type=str, required=True, help="Path or HF repo to base model")
    ap.add_argument("--train_file", type=str, required=True)
    ap.add_argument("--val_file", type=str, required=True)
    ap.add_argument("--image_root", type=str, default=None, help="Root dir to resolve relative image paths")
    ap.add_argument("--output_dir", type=str, required=True)

    # Optimization and schedule
    ap.add_argument("--epochs", type=int, default=1)
    ap.add_argument("--per_device_train_batch_size", type=int, default=1)
    ap.add_argument("--per_device_eval_batch_size", type=int, default=1)
    ap.add_argument("--gradient_accumulation_steps", type=int, default=8)
    ap.add_argument("--learning_rate", type=float, default=1e-5)
    ap.add_argument("--weight_decay", type=float, default=0.1)
    ap.add_argument("--warmup_ratio", type=float, default=0.03)
    ap.add_argument("--max_grad_norm", type=float, default=1.0)
    ap.add_argument("--eval_steps", type=int, default=500)
    ap.add_argument("--save_steps", type=int, default=500)
    ap.add_argument("--seed", type=int, default=42)
    ap.add_argument("--num_workers", type=int, default=32)
    ap.add_argument("--log_every", type=int, default=50, help="Print loss every N steps")
    ap.add_argument("--no_progress", action="store_true", help="Disable tqdm progress bar")

    # Vision resolution bounds
    ap.add_argument("--min_pixels", type=int, default=256 * 28 * 28)
    ap.add_argument("--max_pixels", type=int, default=1280 * 28 * 28)

    # TensorBoard logging
    ap.add_argument("--tensorboard", action="store_true", help="Enable TensorBoard logging")
    ap.add_argument("--tb_log_dir", type=str, default=None, help="TensorBoard log dir (default: output_dir/tensorboard)")

    return ap.parse_args()


def main():
    """主训练入口函数。

    主要步骤：
    1. 解析参数 & 初始化加速器与随机种子。
    2. 构建数据集、数据加载器与处理器。
    3. 加载预训练多模态模型并开启梯度检查点。
    4. 计算总更新步、构建优化器与调度器并对齐梯度累计策略。
    5. 进入训练循环：前向、反向、梯度裁剪、优化器更新、调度器步进、日志记录、周期性评估与保存。
    6. 训练结束进行总结与资源清理。
    """
    args = parse_args()

    # Initialize Accelerator with explicit gradient_accumulation_steps for clarity
    accelerator = Accelerator(
        mixed_precision="bf16",
        gradient_accumulation_steps=args.gradient_accumulation_steps,
    )
    hf_set_seed(args.seed)
    torch.backends.cuda.matmul.allow_tf32 = True

    output_dir = Path(args.output_dir)
    if accelerator.is_main_process:
        output_dir.mkdir(parents=True, exist_ok=True)

    # Setup logging (console for main process; all ranks write to file)
    logger = _setup_logging(output_dir, accelerator.is_main_process, accelerator.process_index)
    if accelerator.is_main_process:
        logger.info(f"Log file: {(output_dir / 'training.log').as_posix()}")
    _log_env_and_args(logger, args, accelerator)

    # TensorBoard writer (main process only)
    writer = None
    if accelerator.is_main_process and args.tensorboard:
        tb_dir = Path(args.tb_log_dir) if args.tb_log_dir else (output_dir / "tensorboard")
        tb_dir.mkdir(parents=True, exist_ok=True)
        try:
            from torch.utils.tensorboard import SummaryWriter  # type: ignore
            writer = SummaryWriter(log_dir=str(tb_dir))
            logger.info(f"TensorBoard enabled. Log dir: {tb_dir}")
            # Log hparams and environment as text
            try:
                writer.add_text("env/host", platform.node(), 0)
                writer.add_text("env/os", platform.platform(), 0)
                writer.add_text("env/python", platform.python_version(), 0)
                writer.add_text("env/torch", torch.__version__, 0)
                writer.add_text("env/transformers", _transformers.__version__, 0)
                writer.add_text("env/cuda_available", str(torch.cuda.is_available()), 0)
                writer.add_text("hparams", json.dumps(vars(args), ensure_ascii=False, indent=2)[:8000], 0)
            except Exception:
                pass
        except Exception as e:
            writer = None
            logger.warning(f"Failed to init TensorBoard SummaryWriter: {e}. Install with `pip install tensorboard`.")

    processor = AutoProcessor.from_pretrained(
        args.model_path,
        min_pixels=args.min_pixels,
        max_pixels=args.max_pixels,
        use_fast=True,
    )
    logger.info(f"Loaded processor from: {args.model_path} (min_pixels={args.min_pixels}, max_pixels={args.max_pixels})")

    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        args.model_path,
        dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
    )
    model.config.use_cache = False
    if hasattr(model, "gradient_checkpointing_enable"):
        model.gradient_checkpointing_enable()
        logger.info("Enabled gradient checkpointing on model.")

    # Model params info
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f"Model parameters (total/trainable): {total_params:,} / {trainable_params:,}")

    train_dataset = JSONLSFTDataset(args.train_file, image_root=args.image_root)
    val_dataset = JSONLSFTDataset(args.val_file, image_root=args.image_root)
    logger.info(f"Dataset sizes -> train: {len(train_dataset)} | val: {len(val_dataset)}")

    collator = Collator(processor)
    train_loader = DataLoader(
        train_dataset,
        batch_size=args.per_device_train_batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        collate_fn=collator,
        pin_memory=True,
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=args.per_device_eval_batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        collate_fn=collator,
        pin_memory=True,
    )

    optimizer = get_optimizer(model, lr=args.learning_rate, weight_decay=args.weight_decay)

    # ---------- Correct computation of optimizer update steps ----------
    # Use ceil so that any remainder mini-batches still count as an optimizer update.
    num_update_steps_per_epoch = math.ceil(len(train_loader) / args.gradient_accumulation_steps)
    total_training_steps = num_update_steps_per_epoch * args.epochs
    num_warmup_steps = int(total_training_steps * args.warmup_ratio)

    if accelerator.is_main_process:
        logger.info(
            f"[SchedulerSetup] train_batches={len(train_loader)} | grad_accum={args.gradient_accumulation_steps} | "
            f"updates/epoch={num_update_steps_per_epoch} | total_updates={total_training_steps} | warmup_updates={num_warmup_steps}"
        )
    lr_scheduler = get_scheduler(
        name="linear",
        optimizer=optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=total_training_steps,
    )

    model, optimizer, train_loader, val_loader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_loader, val_loader, lr_scheduler
    )

    # Log training plan summary
    if accelerator.is_main_process:
        logger.info(
            "Training plan: "
            f"train_batches/epoch={len(train_loader)}, val_batches={len(val_loader)}, "
            f"epochs={args.epochs}, grad_accum(args)={args.gradient_accumulation_steps}, "
            f"accelerator.grad_accum(actual)={getattr(accelerator, 'gradient_accumulation_steps', 1)}, "
            f"est_updates/epoch={num_update_steps_per_epoch}, total_updates={total_training_steps}, warmup_steps={num_warmup_steps}"
        )

    best_metric = -1.0  # macro_f1 最佳
    global_step = 0          # micro steps (raw batches)
    update_step = 0          # optimizer & scheduler real steps
    total_steps_est = len(train_loader) * args.epochs  # for progress bar (micro steps)
    if accelerator.is_main_process:
        logger.info(
            f"Train batches: {len(train_loader)}, Eval batches: {len(val_loader)}, "
            f"GradAccum(args): {args.gradient_accumulation_steps}, Epochs: {args.epochs}"
        )
    show_progress = not args.no_progress and accelerator.is_main_process
    pbar = tqdm(total=total_steps_est, disable=not show_progress, dynamic_ncols=True)

    train_start_time = time.perf_counter()
    tokens_processed = 0

    for epoch in range(args.epochs):
        model.train()
        # Ensure per-epoch reshuffle in DDP if sampler supports it
        try:
            sampler = getattr(train_loader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
        except Exception:
            pass
        for step, batch in enumerate(train_loader):  # step is 0-based micro step within epoch
            iter_start = time.perf_counter()
            with accelerator.accumulate(model):
                # Extract prompt_lens (CPU tensor) before moving other tensors to device
                prompt_lens = batch.pop("prompt_lens", None)
                batch = {k: (v.to(accelerator.device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
                # tokens in batch (for throughput estimation)
                batch_tokens = int(batch.get("attention_mask", torch.tensor([])).sum().item()) if isinstance(batch.get("attention_mask", None), torch.Tensor) else 0
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                grad_norm_logged = None
                if accelerator.sync_gradients:
                    # Only clip gradients when they are synchronized (true update step)
                    try:
                        grad_norm_logged = accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                    except Exception:
                        pass
                optimizer.step()
                # Scheduler should step ONLY on real optimizer updates to keep schedule length correct
                if accelerator.sync_gradients:
                    lr_scheduler.step()
                    update_step += 1
                optimizer.zero_grad()

            global_step += 1
            if show_progress:
                pbar.update(1)
                if (global_step % max(1, args.log_every)) == 0:
                    pbar.set_postfix({"epoch": epoch, "step": global_step, "loss": f"{loss.item():.4f}"})
            elif accelerator.is_main_process and (global_step % max(1, args.log_every) == 0):
                # keep quiet here; we log via logger below
                pass

            # Step-level logging (main process only). Everything below is observability:
            # it reads training state but never mutates model/optimizer/scheduler.
            if accelerator.is_main_process and (global_step % max(1, args.log_every) == 0):
                step_time = time.perf_counter() - iter_start
                # NOTE(review): tokens_processed is accumulated only on logged steps
                # (every log_every-th micro step) and only on the main process, so the
                # final total reported at the end of training undercounts — confirm intent.
                tokens_processed += batch_tokens
                lr = None
                try:
                    # get_last_lr returns a list when using schedulers; fall back to the
                    # raw optimizer param-group lr if the scheduler lacks the method.
                    lr_list = getattr(lr_scheduler, "get_last_lr", lambda: [optimizer.param_groups[0]["lr"]])()
                    lr = lr_list[0] if isinstance(lr_list, (list, tuple)) else float(lr_list)
                except Exception:
                    lr = optimizer.param_groups[0]["lr"]

                # CUDA memory stats in MB; left as None when CUDA is unavailable or the
                # query fails, and the log line below prints "mem(MB)=n/a" in that case.
                mem_alloc = mem_reserved = peak_mem = None
                if torch.cuda.is_available():
                    try:
                        mem_alloc = round(torch.cuda.memory_allocated() / (1024**2), 1)
                        mem_reserved = round(torch.cuda.memory_reserved() / (1024**2), 1)
                        peak_mem = round(torch.cuda.max_memory_allocated() / (1024**2), 1)
                    except Exception:
                        pass

                # Debug: supervised token count (per sample) to ensure labels are not accidentally huge/small.
                # Counts positions where labels != -100, i.e. the answer span actually
                # contributing to the loss under the masking strategy described in the header.
                try:
                    supervised_counts = None
                    if "labels" in batch and isinstance(batch["labels"], torch.Tensor):
                        lbl = batch["labels"]
                        supervised_counts = (lbl != -100).sum(dim=1).tolist()
                        # For large batches log only a min/max/avg summary instead of the full list
                        if len(supervised_counts) > 8:
                            sc_summary = f"min={min(supervised_counts)} max={max(supervised_counts)} avg={sum(supervised_counts)/len(supervised_counts):.2f}"
                        else:
                            sc_summary = str(supervised_counts)
                    else:
                        sc_summary = "n/a"
                except Exception:
                    sc_summary = "err"

                # Phase is derived from update_step (real optimizer updates), matching the
                # scheduler's own progression: warmup -> decay -> post (past planned steps).
                scheduler_phase = "warmup" if update_step < num_warmup_steps else ("decay" if update_step <= total_training_steps else "post")
                logger.info(
                    " | ".join([
                        f"epoch={epoch}",
                        f"micro_step={global_step}",
                        f"update_step={update_step}",
                        f"loss={loss.item():.6f}",
                        f"lr={lr:.6e}",
                        f"phase={scheduler_phase}",
                        f"grad_norm={(grad_norm_logged if grad_norm_logged is None else float(grad_norm_logged))}",
                        f"supervised_tok={sc_summary}",
                        f"batch_tokens={batch_tokens}",
                        f"tok/s={(batch_tokens / step_time) if step_time > 0 else 0:.1f}",
                        (f"mem(MB) alloc={mem_alloc} reserved={mem_reserved} peak={peak_mem}" if mem_alloc is not None else "mem(MB)=n/a"),
                    ])
                )

                # TensorBoard scalars. All writes are wrapped in try/except so a writer
                # failure never interrupts training; scalars are keyed by global_step
                # (micro steps), not update_step.
                if writer is not None:
                    try:
                        writer.add_scalar("train/loss", float(loss.item()), global_step)
                        writer.add_scalar("train/lr", float(lr), global_step)
                        writer.add_scalar("train/update_step", float(update_step), global_step)
                        if grad_norm_logged is not None:
                            try:
                                writer.add_scalar("train/grad_norm", float(grad_norm_logged), global_step)
                            except Exception:
                                pass
                        if supervised_counts is not None:
                            writer.add_scalar("train/avg_supervised_tokens", float(sum(supervised_counts)/len(supervised_counts)), global_step)
                        writer.add_scalar("train/batch_tokens", int(batch_tokens), global_step)
                        if step_time > 0:
                            writer.add_scalar("train/tokens_per_sec", float(batch_tokens / step_time), global_step)
                            writer.add_scalar("train/step_time_sec", float(step_time), global_step)
                        if mem_alloc is not None:
                            writer.add_scalar("cuda/mem_alloc_MB", float(mem_alloc), global_step)
                            writer.add_scalar("cuda/mem_reserved_MB", float(mem_reserved), global_step)
                            writer.add_scalar("cuda/max_mem_alloc_MB", float(peak_mem), global_step)
                        # Fractional epoch progress for plotting loss/lr against epochs.
                        writer.add_scalar("train/epoch_progress", float(epoch + (step + 1) / max(1, len(train_loader))), global_step)
                    except Exception:
                        pass

            # One-time detailed debug: show what the supervised token span actually contains
            # (first micro step only). Decodes the labels != -100 region of sample 0 so a
            # maintainer can eyeball that only the final answer is being supervised.
            # NOTE(review): prompt_lens is presumably produced by the batch collator and
            # aligned with batch order — confirm against the collate function.
            if accelerator.is_main_process and global_step == 1 and prompt_lens is not None:
                try:
                    # Decode supervised region for sample 0
                    sample_idx = 0
                    pl0 = int(prompt_lens[sample_idx]) if prompt_lens is not None else 0
                    input_ids0 = batch["input_ids"][sample_idx].detach().cpu()
                    labels0 = batch["labels"][sample_idx].detach().cpu()
                    supervised_positions = (labels0 != -100).nonzero(as_tuple=True)[0]
                    supervised_count = int(supervised_positions.numel())
                    # Reconstruct contiguous supervised text (may include template tokens);
                    # special tokens are kept so template leakage into the span is visible.
                    supervised_tokens = input_ids0[supervised_positions]
                    decoded_full = processor.tokenizer.decode(supervised_tokens.tolist(), skip_special_tokens=False)
                    logger.info(
                        f"[DebugSupervisedSpan] sample=0 prompt_len={pl0} supervised_tok_count={supervised_count} snippet={decoded_full!r}"
                    )
                except Exception as e:
                    # Debug-only path: never let a decode problem interrupt training.
                    logger.warning(f"[DebugSupervisedSpan] failed: {e}")

            # Evaluation & checkpointing triggered by update steps (NOT micro steps) to align
            # with lr progression. The sync_gradients guard ensures the trigger fires at most
            # once per optimizer update, not on every micro step within the same update_step.
            if update_step > 0 and (update_step % args.eval_steps == 0) and accelerator.sync_gradients:
                # evaluate() is called on ALL ranks (collective); only main logs/saves.
                macro_f1 = evaluate(model, val_dataset, processor, accelerator, batch_size=args.per_device_eval_batch_size)
                if accelerator.is_main_process:
                    logger.info(f"[Eval] update_step={update_step} (micro_step={global_step}) macro_f1={macro_f1:.6f}")
                    if writer is not None:
                        try:
                            writer.add_scalar("eval/macro_f1", float(macro_f1), global_step)
                        except Exception:
                            pass
                accelerator.wait_for_everyone()
                # best_metric is compared/updated on every rank; this assumes evaluate()
                # returns the same (gathered) value on all ranks — TODO confirm inside evaluate.
                if macro_f1 > best_metric:
                    best_metric = macro_f1
                    if accelerator.is_main_process:
                        # Overwrite the single "best" directory with the current best weights.
                        save_dir = output_dir / "best"
                        save_dir.mkdir(parents=True, exist_ok=True)
                        unwrapped = accelerator.unwrap_model(model)
                        unwrapped.save_pretrained(save_dir, safe_serialization=True)
                        processor.save_pretrained(save_dir)
                        logger.info(f"Saved BEST checkpoint to: {save_dir} (macro_f1={best_metric:.6f})")

            # Periodic checkpoint keyed by update_step; main process only, and again gated
            # on sync_gradients so a given update_step saves at most once.
            if update_step > 0 and (update_step % args.save_steps == 0) and accelerator.is_main_process and accelerator.sync_gradients:
                ckpt_dir = output_dir / f"checkpoint-{update_step}"
                ckpt_dir.mkdir(parents=True, exist_ok=True)
                unwrapped = accelerator.unwrap_model(model)
                unwrapped.save_pretrained(ckpt_dir, safe_serialization=True)
                processor.save_pretrained(ckpt_dir)
                logger.info(f"Saved checkpoint: {ckpt_dir} (update_step={update_step}, micro_step={global_step})")

        # End-of-epoch evaluation: always evaluate once per epoch regardless of the
        # step-based eval schedule above. evaluate() runs on all ranks (collective).
        epoch_eval_start = time.perf_counter()
        macro_f1 = evaluate(model, val_dataset, processor, accelerator, batch_size=args.per_device_eval_batch_size)
        if accelerator.is_main_process:
            logger.info(f"[Epoch {epoch}] macro_f1={macro_f1:.6f}")
            if writer is not None:
                try:
                    # Epoch-level curve is keyed by epoch index, unlike step-level scalars.
                    writer.add_scalar("eval/macro_f1_epoch", float(macro_f1), epoch)
                except Exception:
                    pass
        # Same best-checkpoint logic as the step-triggered eval; comparison happens on all
        # ranks, save on main only (assumes evaluate() returns identical values per rank).
        if macro_f1 > best_metric:
            best_metric = macro_f1
            if accelerator.is_main_process:
                save_dir = output_dir / "best"
                save_dir.mkdir(parents=True, exist_ok=True)
                unwrapped = accelerator.unwrap_model(model)
                unwrapped.save_pretrained(save_dir, safe_serialization=True)
                processor.save_pretrained(save_dir)
                logger.info(f"Saved BEST checkpoint to: {save_dir} (macro_f1={best_metric:.6f})")
        if accelerator.is_main_process:
            logger.info(f"[Epoch {epoch}] eval_time={time.perf_counter() - epoch_eval_start:.2f}s")

    if show_progress:
        pbar.close()
    # Final barrier so the summary below reflects a fully finished run on all ranks.
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        total_time = time.perf_counter() - train_start_time
        # NOTE(review): tokens_processed only accumulates on logged steps (see step-level
        # logging above), so this total may undercount — confirm intent.
        logger.info(f"Training finished. best_macro_f1={best_metric:.6f} | total_time={total_time/60:.2f} min | tokens_processed={tokens_processed}")
        if writer is not None:
            try:
                writer.add_scalar("metrics/best_macro_f1", float(best_metric), global_step)
                writer.close()
            except Exception:
                pass
    # Graceful shutdown for DDP: best-effort, so teardown issues never mask a
    # successful training run.
    try:
        accelerator.end_training()
    except Exception:
        pass


if __name__ == "__main__":
    # CLI entry point: run the full fine-tuning pipeline when executed as a script.
    main()
