# Copyright (c) Alibaba, Inc. and its affiliates.
import math
import os
import platform
from dataclasses import dataclass, field
from functools import wraps
from typing import List, Literal, Optional, Union

import torch
import torch.utils.checkpoint
from transformers.training_args import TrainingArguments as HfTrainingArguments
from transformers.training_args_seq2seq import Seq2SeqTrainingArguments as HfSeq2SeqTrainingArguments

from swift.utils import get_dist_setting, get_logger, is_liger_available, use_torchacc
from .optimizers.galore import GaLoreConfig

logger = get_logger()


@dataclass
class TrainArgumentsMixin:
    """Mixin that layers swift-specific defaults and setup logic on top of
    the transformers ``TrainingArguments``.

    check_model (bool): whether to check that the model is the latest revision, default True
    acc_strategy (Literal['token', 'seq']): accumulation strategy ('token' or 'seq'),
        default is token-level
    """

    # Per-device training batch size, default 1.
    per_device_train_batch_size: int = 1
    # Per-device evaluation batch size, default 1.
    per_device_eval_batch_size: int = 1
    # Gradient accumulation steps; computed automatically in __post_init__ when unset.
    gradient_accumulation_steps: Optional[int] = None

    # Enable gradient checkpointing (reduces activation memory).
    gradient_checkpointing: bool = True
    # Extra kwargs for gradient checkpointing; may be a dict or a JSON string.
    gradient_checkpointing_kwargs: Optional[Union[dict, str]] = None

    # Log the very first training step.
    logging_first_step: bool = True
    # Interval (in steps) between log records.
    logging_steps: int = 5

    # Weight decay coefficient.
    weight_decay: float = 0.1
    # beta2 parameter of the Adam optimizer.
    adam_beta2: float = 0.95
    # Learning-rate schedule, cosine by default.
    lr_scheduler_type: str = 'cosine'
    # Extra kwargs for the LR scheduler; may be a dict or a JSON string.
    lr_scheduler_kwargs: Optional[Union[dict, str]] = None

    # Logging backends; tensorboard by default.
    report_to: List[str] = field(default_factory=lambda: ['tensorboard'])

    # Number of dataloader workers; platform-dependent default set in __post_init__.
    dataloader_num_workers: Optional[int] = None
    # Dataloader prefetch factor; defaulted in __post_init__ when workers > 0.
    dataloader_prefetch_factor: Optional[int] = None

    # Use the liger custom kernels for acceleration.
    use_liger_kernel: bool = False

    # ---------------------- extra arguments ---------------------- #
    check_model: bool = True  # check that the model is the latest revision
    acc_strategy: Literal['token', 'seq'] = 'token'  # accumulation strategy
    train_dataloader_shuffle: bool = True  # shuffle the training set
    max_epochs: Optional[int] = None  # maximum number of training epochs

    # torchacc-related tuning parameters.
    metric_warmup_step: Optional[float] = 0
    fsdp_num: int = 1  # number of Fully Sharded Data Parallel shards
    acc_steps: int = 1  # manually specified accumulation steps (distinct from gradient_accumulation_steps above)

    # train-eval loop parameters
    eval_use_evalscope: bool = False  # use the evalscope framework for evaluation
    eval_datasets: List[str] = field(default_factory=list)  # list of evaluation datasets
    eval_limit: Optional[int] = None  # cap on the number of evaluation samples
    eval_datasets_args: Optional[Union[str, dict]] = None  # extra args for the evaluation datasets
    eval_generation_config: Optional[Union[str, dict]] = None  # generation settings for evaluation

    # ------------- private helper: patch gradient checkpointing ------------- #
    def _fix_gradient_checkpointing(self):
        """Patch ``torch.utils.checkpoint.checkpoint`` so the ``use_reentrant``
        flag from ``gradient_checkpointing_kwargs`` is always applied,
        regardless of what callers pass."""
        # Avoid patching twice: the sentinel attribute marks a completed patch.
        if hasattr(torch.utils.checkpoint, '_old_checkpoint'):
            return
        # Resolve use_reentrant; default True (matches the transformers default).
        use_reentrant_ = (
            self.gradient_checkpointing_kwargs.get('use_reentrant', True)
            if self.gradient_checkpointing_kwargs else True)

        _old_checkpoint = torch.utils.checkpoint.checkpoint

        # Wrap the original checkpoint function, forcing the configured use_reentrant.
        @wraps(_old_checkpoint)
        def _new_checkpoint(*args, use_reentrant=None, **kwargs):
            return _old_checkpoint(*args, use_reentrant=use_reentrant_, **kwargs)

        # Swap in the wrapper, keeping the original reachable as the sentinel.
        torch.utils.checkpoint._old_checkpoint = _old_checkpoint
        torch.utils.checkpoint.checkpoint = _new_checkpoint

        # Compatibility with older transformers versions that re-export `checkpoint`.
        try:
            import transformers.modeling_utils
            transformers.modeling_utils.checkpoint = _new_checkpoint
        except (ImportError, AttributeError):
            pass

    # ------------------ liger kernel initialization ------------------ #
    def _init_liger(self):
        """Validate that liger-kernel is installed when it is requested.

        Raises:
            ImportError: if ``use_liger_kernel`` is set but the liger-kernel
                package is not available.
        """
        if self.use_liger_kernel and not is_liger_available():
            # Explicit raise instead of `assert`: asserts are stripped under
            # `python -O`, which would silently skip this dependency check.
            raise ImportError('use_liger_kernel requires liger_kernels, try `pip install liger-kernel`')

    # ------------------ initialization hook ------------------ #
    def __post_init__(self):
        # Imported lazily to avoid a circular import at module load time.
        from swift.llm.argument.base_args.model_args import ModelArguments

        # When using torchacc, set drop_last to align the batch size.
        if use_torchacc():
            self.dataloader_drop_last = True

        # When unset, derive gradient_accumulation_steps so that the global
        # batch size (per_device * world_size * accumulation) is about 16.
        if self.gradient_accumulation_steps is None:
            world_size = get_dist_setting()[2]
            self.gradient_accumulation_steps = max(1, math.ceil(16 / self.per_device_train_batch_size / world_size))
            logger.info(f'Setting args.gradient_accumulation_steps: {self.gradient_accumulation_steps}')

        # Parse string-formatted scheduler kwargs into a dict.
        if self.lr_scheduler_kwargs:
            self.lr_scheduler_kwargs = ModelArguments.parse_to_dict(self.lr_scheduler_kwargs)

        # Same parsing for the gradient-checkpointing kwargs.
        if self.gradient_checkpointing_kwargs:
            self.gradient_checkpointing_kwargs = ModelArguments.parse_to_dict(self.gradient_checkpointing_kwargs)

        # Apply the checkpoint wrapper patch (after the kwargs were parsed above).
        self._fix_gradient_checkpointing()

        # Validate liger-kernel availability if requested.
        self._init_liger()

        # Default dataloader workers: 0 on Windows, 1 elsewhere.
        if self.dataloader_num_workers is None:
            if platform.system() == 'Windows':
                self.dataloader_num_workers = 0
            else:
                self.dataloader_num_workers = 1
            logger.info(f'Setting args.dataloader_num_workers: {self.dataloader_num_workers}')

        # Prefetching only applies when there is at least one worker.
        if self.dataloader_prefetch_factor is None and self.dataloader_num_workers > 0:
            self.dataloader_prefetch_factor = 10

        # If evalscope evaluation is enabled, check the dependency and parse configs.
        if self.eval_use_evalscope:
            try:
                import evalscope
            except ImportError:
                raise ImportError('evalscope is not installed, please install it by `pip install evalscope`')
            self.eval_datasets_args = ModelArguments.parse_to_dict(self.eval_datasets_args)
            self.eval_generation_config = ModelArguments.parse_to_dict(self.eval_generation_config)

        # Hand off to the parent __post_init__ (typically transformers' TrainingArguments).
        super().__post_init__()


@dataclass
class SwiftArgumentsMixin(TrainArgumentsMixin):
    """Swift-specific additions layered on top of ``TrainArgumentsMixin``.

    These fields mirror values defined on ``TrainArguments`` so they are
    also available on the HF ``TrainingArguments`` object.
    """

    # Values copied from TrainArguments.
    train_type: Optional[str] = None
    optimizer: Optional[str] = None
    local_repo_path: Optional[str] = None
    galore_config: Optional[GaLoreConfig] = None

    def __post_init__(self):
        # Normalize output_dir ('~' expansion, absolute path) before the
        # parent __post_init__ consumes it.
        if hasattr(self, 'output_dir'):
            expanded = os.path.expanduser(self.output_dir)
            self.output_dir = os.path.abspath(expanded)
        super().__post_init__()

    @property
    def place_model_on_device(self):
        # torchacc handles placement itself; otherwise defer to the parent.
        if use_torchacc():
            return False
        return super().place_model_on_device


@dataclass
class GRPOArgumentsMixin:
    """Arguments specific to GRPO training: rollout sampling, vLLM/LMDeploy
    inference backends, reward-function knobs, and paper-specific variants
    (DAPO, Dr. GRPO)."""

    # Clipping range; epsilon_high optionally sets an asymmetric upper bound
    # (presumably clip-higher as in the DAPO reference below — verify in trainer).
    epsilon: float = 0.2
    epsilon_high: Optional[float] = None
    # Sampling parameters, presumably used for rollout generation.
    top_k: int = 50
    top_p: float = 0.9
    repetition_penalty: float = 1.
    num_infer_workers: int = 1
    # vllm
    vllm_device: List[str] = field(default_factory=lambda: ['auto'])
    vllm_gpu_memory_utilization: float = 0.9
    vllm_max_model_len: Optional[int] = None
    vllm_max_num_seqs: int = 256
    vllm_enforce_eager: bool = False
    vllm_limit_mm_per_prompt: Optional[Union[dict, str]] = None  # e.g. '{"image": 5, "video": 2}'
    vllm_enable_prefix_caching: bool = True
    # reward function args, see details in swift/plugin/orm.py
    # cosine reward, https://arxiv.org/abs/2502.03373
    cosine_min_len_value_wrong: float = -0.5  # r^w_0 in paper, Reward for wrong answers with zero completion length.
    cosine_max_len_value_wrong: float = 0.0  # r^w_L in paper, Reward for wrong answers with max completion length.
    cosine_min_len_value_correct: float = 1.0  # r^c_0 in paper, Reward for correct answers with zero completion length.
    cosine_max_len_value_correct: float = 0.5  # r^c_L in paper, Reward for correct answers with max completion length.
    cosine_max_len: Optional[int] = None  # Lmax in paper, default equal to max_completion_length
    # repetition penalty, https://arxiv.org/abs/2502.03373
    repetition_n_grams: int = 3
    repetition_max_penalty: float = -1.0

    # LMDeploy in GRPO
    use_lmdeploy: bool = False
    lmdeploy_device: Optional[str] = 'auto'
    lmdeploy_session_len: Optional[int] = None
    lmdeploy_cache_max_entry_count: float = 0.8

    async_generate: bool = False  # asynchronous generation — NOTE(review): semantics defined by the trainer
    tensor_parallel_size: int = 1
    sleep_level: int = 0
    move_model_batches: Optional[int] = None
    # Offloading toggles — presumably to reduce peak GPU memory during rollout.
    offload_optimizer: bool = False
    offload_model: bool = False
    gc_collect_after_offload: bool = False
    multi_turn_func: Optional[str] = None  # name of a multi-turn function — see plugin registry

    # DAPO, https://arxiv.org/abs/2503.14476
    dynamic_sample: bool = False
    max_resample_times: int = 3
    overlong_filter: bool = False
    soft_max_length: Optional[int] = None
    soft_cache_length: Optional[int] = None

    # Dr. GRPO, https://arxiv.org/abs/2503.20783
    scale_rewards: bool = True

    # compatible with trl main branch(0.17.0.dev0)
    wandb_log_unique_prompts: Optional[bool] = None

    # external vllm (server mode)
    vllm_server_host: Optional[str] = None
    vllm_server_port: int = 8000
    vllm_server_timeout: float = 240.0
    # No annotation, so this is a plain class attribute and NOT a dataclass
    # field (excluded from __init__).
    vllm_client = None

    # dataset
    dataset_shuffle: Optional[bool] = True  # shuffle the dataset


@dataclass
class TrainingArguments(SwiftArgumentsMixin, HfTrainingArguments):
    """HF ``TrainingArguments`` combined with the swift argument mixins;
    all behavior comes from the bases."""
    pass


@dataclass
class Seq2SeqTrainingArguments(SwiftArgumentsMixin, HfSeq2SeqTrainingArguments):
    """HF ``Seq2SeqTrainingArguments`` combined with the swift argument
    mixins; all behavior comes from the bases."""
    pass
