import glob
import json
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from typing import Dict, List, Optional, Union, Literal, Tuple
from types import MethodType
from torchvision import transforms
from datetime import datetime

import torch
import transformers
from accelerate.utils import DistributedType
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from transformers import AutoModel, AutoTokenizer
from transformers.integrations import deepspeed
# 之前引入的 TrainerCallback 已不再需要，移除以简化依赖
# from transformers import TrainerCallback

from dataset import SupervisedDataset, data_collator
from trainer import Trainer
from ValidDataset import ValidDataset, simple_valid_collator  # 🔧 修改：导入简化的数据整理器

from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# 新增：BLEU和绘图相关
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import matplotlib.pyplot as plt
import numpy as np
import re

@dataclass
class ModelArguments:
    """Arguments selecting which pretrained model to fine-tune."""
    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )

@dataclass
class DataArguments:
    """Arguments describing training/evaluation data locations and eval behavior."""
    # FIX: these default to None, so the annotation must be Optional[str]
    # (previously annotated as plain `str` with default=None).
    data_path: Optional[str] = field(
        default=None, metadata={"help": "Path to the training data."}
    )
    eval_data_path: Optional[str] = field(
        default=None, metadata={"help": "Path to evaluation data. Supports: single file, directory, or comma-separated file list"}
    )
    valid_image_root: Optional[str] = field(
        default=None, metadata={"help": "Root directory for valid images."}
    )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={"help": "Maximum number of evaluation samples to use"}
    )
    eval_task_type: Optional[str] = field(
        default="auto", metadata={"help": "Task type for evaluation: 'auto', 'caption', 'vqa'"}
    )
    eval_file_pattern: Optional[str] = field(
        default="*.json", metadata={"help": "File pattern for auto-discovery (e.g., '*mcq*.json')"}
    )


@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """Simplified TrainingArguments -- kept close to the upstream style to avoid DeepSpeed argument-parsing conflicts."""
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    model_max_length: int = field(
        default=2048,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
    # Whether the vision tower / language model weights are trainable.
    tune_vision: Optional[bool] = field(default=True)
    tune_llm: Optional[bool] = field(default=True)
    use_lora: Optional[bool] = field(default=False)
    # Presumably the upper bound on image slices for the vision encoder -- confirm against model config.
    max_slice_nums: Optional[int] = field(default=9)
    # Fields removed because they interfered with DeepSpeed argument parsing;
    # train() sets them dynamically via parse_custom_args instead:
    # save_merged_model: Optional[bool] = field(default=True)
    # save_lora_adapter: Optional[bool] = field(default=True)
    # use_timestamp_dir: Optional[bool] = field(default=True)


@dataclass
class LoraArguments:
    """Hyper-parameters controlling LoRA / QLoRA fine-tuning."""
    lora_r: int = field(default=64)
    lora_alpha: int = field(default=64)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default=r"llm\..*layers\.\d+\.self_attn\.(q_proj|k_proj|v_proj)"
    )
    lora_weight_path: str = field(default="")
    lora_bias: str = field(default="none")
    q_lora: bool = field(default=False)
    lora_modules_to_save: str = field(default="")
    lora_layer_replication: Optional[List[Tuple[int, int]]] = field(default=None)
    lora_layers_to_transform: Optional[List[int]] = field(default=None)
    lora_layers_pattern: Optional[str] = field(default=None)

local_rank = None


def rank0_print(*args):
    """Print *args only on the rank-0 process; a no-op everywhere else."""
    if local_rank != 0:
        return
    print(*args)


def safe_save_model_for_hf_trainer(trainer, output_dir: str, bias="none"):
    """Save trained artifacts according to configuration (LoRA adapter and/or merged full model).

    Only the main process writes anything (should_save and local_rank == 0).

    Args:
        trainer: HF Trainer holding model, tokenizer and args; args is expected
            to carry use_lora / save_lora_adapter / save_merged_model flags.
        output_dir: base directory; subdirectories lora_adapter / merged_model /
            final_model are created under it depending on the flags.
        bias: unused in this implementation -- kept for signature compatibility.
    """
    if not (trainer.args.should_save and trainer.args.local_rank == 0):
        return
        
    # 1. Save the LoRA adapter (if requested).
    if getattr(trainer.args, 'save_lora_adapter', False) and trainer.args.use_lora:
        lora_output_dir = os.path.join(output_dir, "lora_adapter")
        trainer.save_model(lora_output_dir)
        rank0_print(f"LoRA adapter saved to: {lora_output_dir}")
    
    # 2. Save the merged full model (if requested and LoRA was used).
    if getattr(trainer.args, 'save_merged_model', True) and trainer.args.use_lora:
        rank0_print("Merging LoRA weights into base model...")
        
        try:
            # Fold LoRA weights back into the base model (PEFT API).
            merged_model = trainer.model.merge_and_unload()
            
            # Save the merged model, sharded for manageable file sizes.
            merged_output_dir = os.path.join(output_dir, "merged_model")
            merged_model.save_pretrained(
                merged_output_dir,
                safe_serialization=True,
                max_shard_size="2GB"
            )
            
            # Save the tokenizer and its configuration alongside the weights.
            trainer.tokenizer.save_pretrained(merged_output_dir)
            
            rank0_print(f"Merged model saved to: {merged_output_dir}")
            
        except Exception as e:
            # Best-effort: merging can fail (e.g. ZeRO-3 partitioned params);
            # the adapter saved above can still be merged manually.
            rank0_print(f"Warning: Failed to save merged model: {e}")
            rank0_print("LoRA adapter is still available for manual merging.")
    
    # 3. Save the full model directly when LoRA was not used.
    if getattr(trainer.args, 'save_merged_model', True) and not trainer.args.use_lora:
        merged_output_dir = os.path.join(output_dir, "final_model")
        trainer.save_model(merged_output_dir)
        rank0_print(f"Final model saved to: {merged_output_dir}")


def load_valid_data_flexible(eval_path, image_root=None):
    """Flexibly load validation data. Supported input modes:

    1. a single JSON file path
    2. a directory path (JSON files auto-discovered recursively)
    3. a comma-separated list of file paths

    Each file must contain a JSON list; every loaded dict is tagged with
    '_source_file' (its basename) so downstream code can trace provenance.

    Args:
        eval_path: path spec as above; relative paths are resolved against the
            current working directory.
        image_root: unused here; kept for interface compatibility (the image
            root is applied later by ValidDataset).

    Returns:
        Flat list of sample dicts (possibly empty).
    """
    all_data = []
    
    # Determine the list of JSON files from the input mode.
    if ',' in eval_path:
        # Mode 3: comma-separated file list.
        json_files = [path.strip() for path in eval_path.split(',')]
        rank0_print(f"Loading from file list: {len(json_files)} files")
    elif os.path.isdir(eval_path):
        # Mode 2: directory path.
        json_files = discover_json_files(eval_path)
        rank0_print(f"Auto-discovered {len(json_files)} JSON files in directory")
    elif os.path.isfile(eval_path):
        # Mode 1: single file.
        json_files = [eval_path]
        rank0_print(f"Loading single file: {os.path.basename(eval_path)}")
    else:
        rank0_print(f"Invalid eval_data_path: {eval_path}")
        return []
    
    # Load every JSON file.
    # FIX: paths are now used exactly as produced above. The previous rebasing
    # of relative paths against os.path.dirname(eval_path) corrupted them:
    # directory mode double-joined (data/evaldir -> data/data/evaldir/x.json),
    # and a relative single file dir/a.json became dir/dir/a.json.
    for json_file in json_files:
        if not os.path.exists(json_file):
            rank0_print(f"⚠️ File not found: {json_file}")
            continue
            
        try:
            rank0_print(f"📄 Loading: {os.path.basename(json_file)}")
            with open(json_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
            
            if not isinstance(data, list):
                rank0_print(f"⚠️ Expected list format in {json_file}")
                continue
            
            # Tag each sample with its source file for traceability.
            for item in data:
                if isinstance(item, dict):
                    item['_source_file'] = os.path.basename(json_file)
            
            all_data.extend(data)
            rank0_print(f"   ✅ Loaded {len(data)} samples")
            
        except Exception as e:
            # Best-effort: a bad file is reported and skipped, not fatal.
            rank0_print(f"❌ Error loading {json_file}: {e}")
            continue
    
    rank0_print(f"📊 Total loaded samples: {len(all_data)}")
    return all_data


def discover_json_files(directory):
    """Recursively collect JSON files under *directory*.

    Files are ordered by priority: names containing 'mcq' first, then
    'caption', then everything else (stable within each tier).
    """
    found = [
        os.path.join(root, fname)
        for root, _dirs, fnames in os.walk(directory)
        for fname in fnames
        if fname.lower().endswith('.json')
    ]

    def _rank(path):
        # Lower number = higher priority.
        base = os.path.basename(path).lower()
        if 'mcq' in base:
            return 1
        if 'caption' in base:
            return 2
        return 3

    return sorted(found, key=_rank)


def determine_task_type(data_list):
    """Infer the evaluation task type from sample content.

    Returns "vqa" when multiple-choice samples dominate the first 50 entries,
    otherwise "caption" (also the default for empty input and ties).
    """
    if not data_list:
        return "caption"

    vqa_votes, caption_votes = 0, 0

    # Inspect at most the first 50 samples; that is enough to classify a split.
    for sample in data_list[:50]:
        if not isinstance(sample, dict):
            continue

        options = sample.get('Answer choices', sample.get('Choices', []))
        qtype = sample.get('Question Type', '').lower()

        looks_like_mcq = bool(options) or 'multiple choice' in qtype or 'mcq' in qtype
        if looks_like_mcq:
            vqa_votes += 1
        elif 'caption' in qtype or not options:
            caption_votes += 1

    # Majority vote; caption wins ties.
    return "vqa" if vqa_votes > caption_votes else "caption"


def make_supervised_data_module(
    tokenizer: transformers.PreTrainedTokenizer,
    data_args,
    transform,
    data_collator=None,
    slice_config=None,
    patch_size=14,
    query_nums=64,
    batch_vision=False,
    max_length=2048,
) -> Dict:
    """Make dataset and collator for supervised fine-tuning.

    Builds the training SupervisedDataset from data_args.data_path and, when
    data_args.eval_data_path is set, a ValidDataset for evaluation (task type
    auto-detected unless overridden).

    Returns:
        dict with keys: train_dataset, eval_dataset (may be None),
        data_collator (training collator bound to max_length), and
        eval_data_collator (lightweight collator for evaluation).
    """
    dataset_cls = SupervisedDataset

    rank0_print("Loading data...")

    # FIX: close the training-data file handle (was json.load(open(...)),
    # which leaked the handle until GC).
    with open(data_args.data_path, "r", encoding="utf-8") as f:
        train_json = json.load(f)
    train_dataset = dataset_cls(
        train_json,
        transform,
        tokenizer,
        slice_config=slice_config,
        patch_size=patch_size,
        query_nums=query_nums,
        batch_vision=batch_vision,
        max_length=max_length,
    )

    eval_dataset = None
    if data_args.eval_data_path:
        # Accepts a single file, a directory, or a comma-separated file list.
        eval_data = load_valid_data_flexible(data_args.eval_data_path, data_args.valid_image_root)
        
        if eval_data:
            # Infer the task type from the data unless explicitly configured.
            task_type = determine_task_type(eval_data) if data_args.eval_task_type == "auto" else data_args.eval_task_type
            rank0_print(f"Task type: {task_type}")
            
            # Optionally cap the number of evaluation samples.
            if data_args.max_eval_samples and len(eval_data) > data_args.max_eval_samples:
                eval_data = eval_data[:data_args.max_eval_samples]
                rank0_print(f"Limited to {data_args.max_eval_samples} evaluation samples")
            
            eval_dataset = ValidDataset(
                eval_data,
                image_root=data_args.valid_image_root,
                tokenizer=tokenizer,
                transform=transform,
                task_type=task_type
            )
        else:
            rank0_print("No valid evaluation data found")

    # Training and evaluation use different collators: training needs the
    # padded/tensorized batch, evaluation uses the simple pass-through one.
    return dict(
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=partial(data_collator, max_length=max_length),
        eval_data_collator=simple_valid_collator,
    )


def build_transform():
    """Build the image preprocessing pipeline: to-tensor followed by
    Inception-style normalization (mean/std 0.5 per channel, matching
    timm.data.IMAGENET_INCEPTION_MEAN / IMAGENET_INCEPTION_STD)."""
    inception_mean = (0.5, 0.5, 0.5)
    inception_std = (0.5, 0.5, 0.5)
    normalize = transforms.Normalize(mean=inception_mean, std=inception_std)
    return transforms.Compose([transforms.ToTensor(), normalize])

def get_parameter_number(model):
    """Count total and trainable parameters of *model*.

    Under DeepSpeed ZeRO-3 a parameter can appear empty on this rank;
    in that case its full size is read from the `ds_numel` attribute.
    """
    total, trainable = 0, 0
    for param in model.parameters():
        # numel() == 0 with a ds_numel attribute means ZeRO-3 partitioned.
        count = param.numel() or getattr(param, "ds_numel", 0)
        total += count
        if param.requires_grad:
            trainable += count
    return {'Total': total, 'Trainable': trainable}


# Default to rank 0 so rank0_print produces output in single-process runs;
# train() later overwrites this with the real local rank.
local_rank = 0

# 🔧 修改：简化的评估指标计算
def compute_bleu(preds, labels):
    """计算 BLEU 分数（用于 Caption 任务）"""
    if not preds or not labels:
        return 0.0
        
    smoothing = SmoothingFunction().method4
    bleu_scores = []
    
    for pred, label in zip(preds, labels):
        if not isinstance(pred, str) or not isinstance(label, str):
            bleu_scores.append(0.0)
            continue
            
        ref = label.strip().lower().split()
        hyp = pred.strip().lower().split()
        
        if len(ref) == 0 or len(hyp) == 0:
            bleu_scores.append(0.0)
        else:
            try:
                bleu_1 = sentence_bleu([ref], hyp, weights=(1,0,0,0), smoothing_function=smoothing)
                bleu_2 = sentence_bleu([ref], hyp, weights=(0.5,0.5,0,0), smoothing_function=smoothing)
                bleu_4 = sentence_bleu([ref], hyp, weights=(0.25,0.25,0.25,0.25), smoothing_function=smoothing)
                bleu = (bleu_1 + bleu_2 + bleu_4) / 3
                bleu_scores.append(bleu)
            except:
                bleu_scores.append(0.0)
    
    return float(np.mean(bleu_scores)) if bleu_scores else 0.0

def compute_vqa_accuracy(preds, labels, choices_list=None):
    """Fraction of predictions that match the ground truth for MCQ/VQA.

    Non-string pairs count toward the total but never as correct. Returns
    0.0 when either input list is empty.
    """
    if not preds or not labels:
        return 0.0

    total = len(preds)
    correct = 0

    for idx, (prediction, truth) in enumerate(zip(preds, labels)):
        if not (isinstance(prediction, str) and isinstance(truth, str)):
            continue

        # Per-sample answer choices, when the caller supplies them.
        options = choices_list[idx] if choices_list and idx < len(choices_list) else []
        correct += int(match_mcq_answer(truth, prediction, options))

    return correct / total if total > 0 else 0.0

def match_mcq_answer(ground_truth, prediction, choices):
    """Heuristically decide whether a free-form prediction matches an MCQ key.

    Handles multi-answer keys like "AB" (all letters must appear as standalone
    A-D tokens in the prediction), single-letter keys (substring or standalone
    letter match), and finally falls back to matching the option text itself.
    """
    if not ground_truth or not prediction:
        return False

    gt = ground_truth.strip().upper()
    pred = prediction.strip().upper()

    # Multi-answer key such as "AB" / "ACD".
    if len(gt) > 1 and all(ch in 'ABCD' for ch in gt):
        wanted = set(gt)
        found = set(re.findall(r'\b[A-D]\b', pred))
        # Exact match, or prediction covers all required letters.
        return found == wanted or wanted.issubset(found)

    # Single-answer key: substring check first (intentionally permissive).
    if gt in pred:
        return True
    standalone = re.findall(r'\b[A-D]\b', pred)
    if standalone and gt in standalone:
        return True

    # Fall back to matching the option's text content against the prediction.
    for idx, option in enumerate(choices or []):
        letter = chr(ord('A') + idx)
        if letter != gt:
            continue
        text = option.strip()
        # Strip a leading "(A)"-style prefix before comparing.
        if text.startswith(f"({letter})"):
            text = text[3:].strip()
        if text.lower() in pred.lower():
            return True
    return False

# Simple chat-based evaluation: iterate eval_dataset, generate via model.chat,
# and print BLEU (captions) plus accuracy (MCQ samples).
def run_chat_eval_simple(model, tokenizer, eval_dataset, max_samples=None):
    """Evaluate by generating with model.chat over *eval_dataset*.

    Each item is expected to be a dict with 'msgs' (or 'image'+'question'),
    'ground_truth' and optional 'answer_choices'. MCQ samples feed accuracy,
    the rest feed BLEU. Only the two metric lines are printed (rank 0).
    """
    if eval_dataset is None:
        return
    model.eval()

    caption_preds, caption_refs = [], []
    acc_correct, acc_total = 0, 0

    n = len(eval_dataset)
    if max_samples is not None:
        try:
            n = min(n, int(max_samples))
        except Exception:
            pass

    for i in range(n):
        # FIX: predefine `ex` so the except-handler below can never hit an
        # unbound name when eval_dataset[i] itself raises.
        ex = None
        try:
            ex = eval_dataset[i]
            msgs = ex.get('msgs')
            if not msgs and 'image' in ex and 'question' in ex:
                msgs = [{'role': 'user', 'content': [ex['image'], ex['question']]}]
            gt = ex.get('ground_truth', '')
            choices = ex.get('answer_choices', [])

            with torch.no_grad():
                pred = model.chat(
                    image=None,
                    msgs=msgs,
                    tokenizer=tokenizer,
                    sampling=False,
                    max_new_tokens=256,
                )
            pred = str(pred)

            if choices:
                acc_total += 1
                if match_mcq_answer(gt, pred, choices):
                    acc_correct += 1
            else:
                caption_preds.append(pred)
                caption_refs.append(str(gt))
        except Exception:
            # A failed sample counts as wrong for MCQ, and is skipped for captions.
            if ex is not None and 'answer_choices' in ex and ex['answer_choices']:
                acc_total += 1

    bleu = compute_bleu(caption_preds, caption_refs) if caption_preds else 0.0
    acc = (acc_correct / acc_total) if acc_total > 0 else 0.0

    rank0_print(f"eval_bleu: {bleu:.6f}")
    rank0_print(f"eval_accuracy: {acc:.6f}")


def compute_metrics(eval_preds):
    """Compute BLEU and VQA accuracy from HuggingFace eval_preds and print only those two metrics.

    Prints are restricted to rank0 via rank0_print to avoid clutter in distributed runs.

    NOTE(review): expects ``compute_metrics.tokenizer`` to be attached as a
    function attribute elsewhere (not visible in this chunk); without it the
    function degrades to the placeholder result.
    """
    try:
        preds, labels = eval_preds
    except Exception:
        # HF sometimes passes an EvalPrediction object; try to access attributes
        try:
            preds = eval_preds.predictions
            labels = eval_preds.label_ids
        except Exception:
            rank0_print("eval_bleu: 0.0")
            rank0_print("eval_accuracy: 0.0")
            return {"eval_placeholder": 0.0}

    # Tokenizer injected by the caller as a function attribute (see docstring).
    tokenizer = getattr(compute_metrics, 'tokenizer', None)
    if tokenizer is None:
        rank0_print("eval_bleu: 0.0")
        rank0_print("eval_accuracy: 0.0")
        return {"eval_placeholder": 0.0}

    # Helper to decode token id sequences to strings
    def decode_seq(seq):
        try:
            # If seq is numpy array of ints or list of ints
            if hasattr(seq, 'dtype') and (str(seq.dtype).startswith('int') or str(seq.dtype).startswith('uint')):
                # 2D arrays -> decode each row
                if getattr(seq, 'ndim', 1) == 2:
                    return [tokenizer.decode(s, skip_special_tokens=True).strip() for s in seq]
                else:
                    return [tokenizer.decode(seq, skip_special_tokens=True).strip()]
            # object or list of strings
            if isinstance(seq, (list, tuple)):
                out = []
                for s in seq:
                    if isinstance(s, (list, tuple, np.ndarray)):
                        out.append(tokenizer.decode(np.array(s, dtype=np.int64), skip_special_tokens=True).strip())
                    else:
                        out.append(str(s).strip())
                return out
            # fallback
            return [str(seq).strip()]
        except Exception:
            # last resort: stringify element-wise, else give up
            try:
                return [str(s).strip() for s in seq]
            except Exception:
                return []

    # If preds are logits, take argmax along last dim
    try:
        if isinstance(preds, np.ndarray) and preds.dtype.kind == 'f' and preds.ndim == 3:
            preds = np.argmax(preds, axis=-1)
    except Exception:
        pass

    pred_texts = decode_seq(preds)

    # Process labels: replace -100 with pad token id before decoding
    # (-100 is the HF loss-masking sentinel and is not a valid token id).
    try:
        if isinstance(labels, np.ndarray) and labels.dtype.kind in ('i', 'u'):
            lab = labels.copy()
            lab[lab == -100] = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0
            labels_texts = decode_seq(lab)
        else:
            labels_texts = decode_seq(labels)
    except Exception:
        labels_texts = decode_seq(labels)

    # Trim whitespace
    pred_texts = [p.strip() for p in pred_texts]
    labels_texts = [l.strip() for l in labels_texts]

    # Decide task: if most labels are single letters A-D (or short), treat as VQA/mcq
    def is_mcq_label(s):
        if not s:
            return False
        s_clean = s.replace(',', '').replace(' ', '')
        return all(c in 'ABCD' for c in s_clean) and len(s_clean) <= 5

    mcq_count = sum(1 for l in labels_texts if is_mcq_label(l))
    caption_count = sum(1 for l in labels_texts if len(l.split()) > 1)

    results = {}
    # Compute BLEU when captions dominate
    if caption_count >= mcq_count and len(pred_texts) > 0:
        try:
            bleu_score = compute_bleu(pred_texts, labels_texts)
            results['eval_bleu'] = float(bleu_score)
        except Exception:
            results['eval_bleu'] = 0.0

    # Compute VQA accuracy when MCQ labels dominate
    if mcq_count > caption_count and len(pred_texts) > 0:
        try:
            acc = compute_vqa_accuracy(pred_texts, labels_texts)
            results['eval_accuracy'] = float(acc)
        except Exception:
            results['eval_accuracy'] = 0.0

    # Always include a placeholder so earlier code relying on it still works
    results.setdefault('eval_placeholder', 0.0)

    # Print only the metrics we care about.
    bleu_val = results.get('eval_bleu', None)
    acc_val = results.get('eval_accuracy', None)
    if bleu_val is not None:
        rank0_print(f"eval_bleu: {float(bleu_val):.6f}")
    else:
        rank0_print("eval_bleu: 0.000000")
    if acc_val is not None:
        rank0_print(f"eval_accuracy: {float(acc_val):.6f}")
    else:
        rank0_print("eval_accuracy: 0.000000")

    return results

# 新增：在线评测（chat）回调，仅打印 BLEU 与 ACC，不做解码与绘图
# class ValidEvalPrintCallback(TrainerCallback):
#     def __init__(self, tokenizer, max_samples=None):
#         self.tokenizer = tokenizer
#         self.max_samples = max_samples

#     def _run_eval_chat(self, trainer):
#         ds = getattr(trainer, "eval_dataset", None)
#         if ds is None:
#             return
#         # 仅主进程打印
#         if getattr(trainer.args, 'local_rank', 0) not in (-1, 0):
#             return

#         model = trainer.model
#         model.eval()

#         caption_preds, caption_refs = [], []
#         acc_correct, acc_total = 0, 0

#         n = len(ds)
#         if self.max_samples is not None:
#             try:
#                 n = min(n, int(self.max_samples))
#             except Exception:
#                 pass

#         for i in range(n):
#             try:
#                 ex = ds[i]
#                 msgs = ex.get('msgs')
#                 if not msgs and 'image' in ex and 'question' in ex:
#                     msgs = [{'role': 'user', 'content': [ex['image'], ex['question']]}]
#                 gt = ex.get('ground_truth', '')
#                 choices = ex.get('answer_choices', [])

#                 with torch.no_grad():
#                     pred = model.chat(
#                         image=None,
#                         msgs=msgs,
#                         tokenizer=self.tokenizer,
#                         sampling=False,
#                         max_new_tokens=256,
#                     )
#                 pred = str(pred)

#                 if choices:
#                     acc_total += 1
#                     if match_mcq_answer(gt, pred, choices):
#                         acc_correct += 1
#                 else:
#                     caption_preds.append(pred)
#                     caption_refs.append(str(gt))
#             except Exception:
#                 # 出错样本记为错（若为选择题）或跳过（caption）
#                 if 'answer_choices' in ex and ex['answer_choices']:
#                     acc_total += 1

#         bleu = compute_bleu(caption_preds, caption_refs) if caption_preds else 0.0
#         acc = (acc_correct / acc_total) if acc_total > 0 else 0.0

#         # 仅打印两行指标
#         rank0_print(f"eval_bleu: {bleu:.6f}")
#         rank0_print(f"eval_accuracy: {acc:.6f}")

#     def on_evaluate(self, args, state, control, model=None, **kwargs):
#         trainer = kwargs.get('trainer', None)
#         if trainer is not None:
#             self._run_eval_chat(trainer)
#         return control

#     def on_train_end(self, args, state, control, model=None, **kwargs):
#         trainer = kwargs.get('trainer', None)
#         if trainer is not None:
#             self._run_eval_chat(trainer)
#         return control


def plot_metrics(log_history, output_dir):
    """Plot training/eval curves from a HF Trainer log history.

    Produces up to four PNGs under <output_dir>/metric_figs: loss_curve.png,
    bleu_curve.png, accuracy_curve.png and a combined all_metrics.png.

    Args:
        log_history: list of dicts as recorded in trainer.state.log_history
            (entries with "loss"/"eval_bleu"/"eval_accuracy" and "step").
        output_dir: directory under which metric_figs/ is created.
    """
    steps, losses = [], []
    bleus, accuracies = [], []

    for entry in log_history:
        if "loss" in entry and "step" in entry:
            steps.append(entry["step"])
            losses.append(entry["loss"])
        if "eval_bleu" in entry and "step" in entry:
            bleus.append((entry["step"], entry["eval_bleu"]))
        if "eval_accuracy" in entry and "step" in entry:
            accuracies.append((entry["step"], entry["eval_accuracy"]))

    fig_dir = os.path.join(output_dir, "metric_figs")
    os.makedirs(fig_dir, exist_ok=True)

    # Training loss curve.
    if steps and losses:
        _save_curve(fig_dir, "loss_curve.png",
                    [(steps, losses, "Training Loss", "green")],
                    ylabel="Loss", title="Training Loss Curve")

    # Eval BLEU curve (caption task).
    if bleus:
        bleu_steps, bleu_scores = zip(*bleus)
        _save_curve(fig_dir, "bleu_curve.png",
                    [(bleu_steps, bleu_scores, "BLEU Score", "blue")],
                    ylabel="BLEU", title="Eval BLEU Curve (Caption Task)")

    # Eval accuracy curve (VQA task).
    if accuracies:
        acc_steps, acc_scores = zip(*accuracies)
        _save_curve(fig_dir, "accuracy_curve.png",
                    [(acc_steps, acc_scores, "Accuracy", "red")],
                    ylabel="Accuracy", title="Eval Accuracy Curve (VQA Task)")

    # Combined chart with every available eval metric.
    if bleus or accuracies:
        series = []
        if bleus:
            bleu_steps, bleu_scores = zip(*bleus)
            series.append((bleu_steps, bleu_scores, "BLEU Score", "blue"))
        if accuracies:
            acc_steps, acc_scores = zip(*accuracies)
            series.append((acc_steps, acc_scores, "Accuracy", "red"))
        _save_curve(fig_dir, "all_metrics.png", series,
                    ylabel="Score", title="Evaluation Metrics", figsize=(12, 6))


def _save_curve(fig_dir, filename, series, ylabel, title, figsize=(8, 6)):
    """Render one or more (xs, ys, label, color) series to a PNG in fig_dir."""
    plt.figure(figsize=figsize)
    for xs, ys, label, color in series:
        plt.plot(xs, ys, label=label, color=color)
    plt.xlabel("Steps")
    plt.ylabel(ylabel)
    plt.legend()
    plt.title(title)
    plt.grid(True)
    plt.savefig(os.path.join(fig_dir, filename))
    plt.close()

def create_timestamped_output_dir(base_output_dir: str, use_timestamp: bool = True) -> str:
    """Create the output directory, optionally suffixed with a timestamp.

    Args:
        base_output_dir: base path; with use_timestamp=True the directory
            becomes "<base>_<YYYYmmdd_HHMMSS>".
        use_timestamp: whether to append the timestamp suffix.

    Returns:
        The (created) output directory path.
    """
    if use_timestamp:
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_dir = f"{base_output_dir}_{stamp}"
    else:
        output_dir = base_output_dir

    os.makedirs(output_dir, exist_ok=True)
    return output_dir

def save_experiment_config(output_dir: str, model_args, data_args, training_args, lora_args):
    """Dump the experiment configuration to <output_dir>/experiment_config.json."""
    lora_section = None
    if training_args.use_lora:
        lora_section = {
            "lora_r": lora_args.lora_r,
            "lora_alpha": lora_args.lora_alpha,
            "lora_dropout": lora_args.lora_dropout,
            "lora_target_modules": lora_args.lora_target_modules,
        }

    config_info = {}
    config_info["experiment_info"] = {
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "output_dir": output_dir,
    }
    config_info["model_args"] = {
        "model_name_or_path": model_args.model_name_or_path,
    }
    config_info["data_args"] = {
        "data_path": data_args.data_path,
        "eval_data_path": data_args.eval_data_path,
        "valid_image_root": data_args.valid_image_root,
        "max_eval_samples": getattr(data_args, 'max_eval_samples', None),
        "eval_task_type": getattr(data_args, 'eval_task_type', 'auto'),
    }
    config_info["training_args"] = {
        "learning_rate": training_args.learning_rate,
        "per_device_train_batch_size": training_args.per_device_train_batch_size,
        "per_device_eval_batch_size": training_args.per_device_eval_batch_size,
        "num_train_epochs": training_args.num_train_epochs,
        "max_steps": training_args.max_steps,
        "eval_steps": training_args.eval_steps,
        "save_steps": training_args.save_steps,
        "model_max_length": training_args.model_max_length,
        "tune_vision": training_args.tune_vision,
        "tune_llm": training_args.tune_llm,
        "use_lora": training_args.use_lora,
        "use_timestamp_dir": getattr(training_args, 'use_timestamp_dir', True),
        "save_merged_model": getattr(training_args, 'save_merged_model', True),
        "save_lora_adapter": getattr(training_args, 'save_lora_adapter', False),
    }
    config_info["lora_args"] = lora_section

    config_file = os.path.join(output_dir, "experiment_config.json")
    with open(config_file, "w", encoding="utf-8") as f:
        json.dump(config_info, f, indent=2, ensure_ascii=False)

    rank0_print(f"Experiment config saved to: {config_file}")


def parse_custom_args(args_list):
    """Parse `--name value` / `--flag` style tokens into a dict.

    Values "true"/"false" (case-insensitive) become booleans; a `--flag`
    with no following value maps to True; non-flag tokens are skipped.
    """
    parsed = {}
    idx, total = 0, len(args_list)

    while idx < total:
        token = args_list[idx]
        if not token.startswith('--'):
            # Positional/unknown token: ignore it.
            idx += 1
            continue

        name = token[2:]  # strip the leading '--'
        has_value = idx + 1 < total and not args_list[idx + 1].startswith('--')
        if has_value:
            raw = args_list[idx + 1]
            lowered = raw.lower()
            # Coerce the two boolean literals; keep everything else as string.
            parsed[name] = (lowered == 'true') if lowered in ('true', 'false') else raw
            idx += 2
        else:
            # Bare flag with no value.
            parsed[name] = True
            idx += 1

    return parsed


def train() -> None:
    """End-to-end fine-tuning entry point.

    Parses CLI arguments (dataclass args plus dynamic extras), prepares the
    output directory, loads the model/tokenizer, optionally applies
    LoRA/QLoRA, builds the supervised data module, runs training, performs a
    final chat-style evaluation, and saves the resulting model(s).
    """
    global local_rank

    # Argument parsing restored to the original script's style.
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments, LoraArguments)
    )

    (model_args, data_args, training_args, lora_args) = parser.parse_args_into_dataclasses()

    # Dynamically pick up extra feature flags passed by the launch script
    # (these are not declared on the dataclasses above).
    import sys
    custom_args = parse_custom_args(sys.argv[1:])
    
    # Defaults for the extra flags; each can be overridden from the command line.
    training_args.use_timestamp_dir = custom_args.get('use_timestamp_dir', True)
    training_args.save_merged_model = custom_args.get('save_merged_model', True)
    training_args.save_lora_adapter = custom_args.get('save_lora_adapter', False)
    
    rank0_print(f"Dynamic settings:")
    rank0_print(f"  use_timestamp_dir: {training_args.use_timestamp_dir}")
    rank0_print(f"  save_merged_model: {training_args.save_merged_model}")
    rank0_print(f"  save_lora_adapter: {training_args.save_lora_adapter}")

    # Create a timestamped output directory so repeated runs don't clobber
    # each other; logging output goes to the same directory.
    original_output_dir = training_args.output_dir
    if training_args.use_timestamp_dir:
        timestamped_output_dir = create_timestamped_output_dir(original_output_dir, use_timestamp=True)
        training_args.output_dir = timestamped_output_dir
        training_args.logging_dir = timestamped_output_dir
        rank0_print(f"Using timestamped output directory: {timestamped_output_dir}")
    else:
        os.makedirs(training_args.output_dir, exist_ok=True)

    # Persist the experiment configuration for reproducibility.
    save_experiment_config(training_args.output_dir, model_args, data_args, training_args, lora_args)

    if getattr(training_args, "deepspeed", None):
        training_args.distributed_state.distributed_type = DistributedType.DEEPSPEED

    # Pick the compute dtype before loading so weights load directly in it.
    compute_dtype = (
        torch.float16 if training_args.fp16
        else (torch.bfloat16 if training_args.bf16 else torch.float32)
    )

    local_rank = training_args.local_rank
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1
    device_map = None

    if lora_args.q_lora:
        # QLoRA pins each rank to its local GPU under DDP.
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None
        if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled():
            # NOTE(review): message reads "not incompatible" — double negative;
            # it presumably means QLoRA IS incompatible with FSDP/ZeRO3. Confirm.
            logging.warning("FSDP or ZeRO3 are not incompatible with QLoRA.")

    model = AutoModel.from_pretrained(
        model_args.model_name_or_path,
        trust_remote_code=True,
        torch_dtype=compute_dtype,
        device_map=device_map,
    )

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=True
    )

    # Expose the tokenizer to compute_metrics via attribute injection.
    compute_metrics.tokenizer = tokenizer

    # Freeze the vision tower / LLM unless they are explicitly tuned.
    if not training_args.tune_vision:
        model.vpm.requires_grad_(False)
    if not training_args.tune_llm:
        model.llm.requires_grad_(False)

    if training_args.use_lora:
        # LoRA and full-LLM tuning are mutually exclusive.
        if training_args.use_lora and training_args.tune_llm:
            raise ValueError("The model cannot simultaneously adjust LLM parameters and apply LoRA.")

        for name, param in model.llm.named_parameters():
            param.requires_grad = False
        # These submodules are trained fully (not LoRA-adapted) and saved.
        modules_to_save = ['embed_tokens', 'resampler']
        if training_args.tune_vision:
            modules_to_save.append('vpm')
        lora_config = LoraConfig(
            r=lora_args.lora_r,
            lora_alpha=lora_args.lora_alpha,
            target_modules=lora_args.lora_target_modules,
            lora_dropout=lora_args.lora_dropout,
            bias=lora_args.lora_bias,
            layers_to_transform=lora_args.lora_layers_to_transform,
            modules_to_save=modules_to_save,
        )
        # Some remote-code models lack get_input_embeddings; delegate to the
        # inner LLM so downstream code (e.g. PEFT) can resolve embeddings.
        if not hasattr(model, 'get_input_embeddings'):
            def get_input_embeddings(self):
                return self.llm.get_input_embeddings()
            model.get_input_embeddings = MethodType(get_input_embeddings, model)
        if lora_args.q_lora:
            model = prepare_model_for_kbit_training(
                model, use_gradient_checkpointing=training_args.gradient_checkpointing
            )

        model = get_peft_model(model, lora_config)
        if training_args.gradient_checkpointing:
            model.enable_input_require_grads()

    rank0_print(get_parameter_number(model))

    # Load data: propagate image-slicing config from the model config.
    if hasattr(model.config, "slice_config"):
        model.config.slice_config.max_slice_nums = training_args.max_slice_nums
        slice_config = model.config.slice_config.to_dict()
    else:
        model.config.max_slice_nums = training_args.max_slice_nums
        slice_config = model.config.to_dict()

    if hasattr(model.config, "batch_vision_input"):
        batch_vision = model.config.batch_vision_input
    else:
        batch_vision = False

    transform_func = build_transform()
    data_module = make_supervised_data_module(
        tokenizer=tokenizer,
        data_args=data_args,
        transform=transform_func,
        data_collator=data_collator,
        slice_config=slice_config,
        patch_size=model.config.patch_size,
        query_nums=model.config.query_num,
        batch_vision=batch_vision,
        max_length=training_args.model_max_length,
    )

    training_args.gradient_checkpointing_kwargs = {"use_reentrant": False}

    # 🔧 Key change: initialize with the customized Trainer.
    trainer = Trainer(
        model=model,
        tokenizer=tokenizer,
        args=training_args,
        # No longer relying on compute_metrics for decode-based evaluation.
        # compute_metrics=compute_metrics,
        train_dataset=data_module["train_dataset"],
        eval_dataset=data_module["eval_dataset"],
        data_collator=data_module["data_collator"],
    )
    
    # No callbacks are added; a single chat-style evaluation runs once after training.
    
    # 🔧 New: attach an evaluation-specific data collator to the trainer.
    if data_module["eval_dataset"] is not None:
        trainer.eval_data_collator = data_module["eval_data_collator"]
        rank0_print("✅ Evaluation dataset and collator configured")

    # Start training.
    rank0_print("Starting training...")
    trainer.train()
    trainer.save_state()

    # After training: run a unified chat evaluation and report BLEU/ACC only.
    if data_module["eval_dataset"] is not None:
        rank0_print("Running final chat evaluation...")
        run_chat_eval_simple(
            model=trainer.model,
            tokenizer=tokenizer,
            eval_dataset=data_module["eval_dataset"],
            max_samples=getattr(data_args, 'max_eval_samples', None)
        )

    # Plotting disabled.
    # rank0_print("Plotting training curves...")
    # plot_metrics(trainer.state.log_history, training_args.output_dir)

    # Save models.
    rank0_print("Saving models...")
    safe_save_model_for_hf_trainer(
        trainer=trainer,
        output_dir=training_args.output_dir,
        bias=lora_args.lora_bias
    )

    rank0_print(f"Training completed! All outputs saved to: {training_args.output_dir}")


# Script entry point: run the full fine-tuning pipeline when executed directly.
if __name__ == "__main__":
    train()