# emapo_config.py

from dataclasses import dataclass
from transformers.training_args import TrainingArguments
from typing import Optional

@dataclass
class EMAPOConfig(TrainingArguments):
    """
    Configuration class for the Exponential Moving Average Policy Optimization (EMAPO) algorithm.
    Inherits from standard HuggingFace TrainingArguments and adds custom parameters specific to EMAPO.

    NOTE(review): several fields below (learning_rate, per_device_train_batch_size,
    gradient_accumulation_steps, num_train_epochs, logging_steps, save_steps,
    output_dir, evaluation_strategy, report_to) re-declare fields that already
    exist on TrainingArguments purely to change their defaults — confirm these
    re-declarations stay type-compatible with the installed transformers version.
    """

    # --- Original GRPO parameters ---
    beta: float = 0.1  # presumably the KL-penalty coefficient — TODO confirm against trainer usage
    num_generations: int = 2  # presumably completions sampled per prompt — verify in the trainer
    temperature: float = 0.7  # sampling temperature used during generation
    max_prompt_length: Optional[int] = None  # None = no truncation limit on the prompt
    max_completion_length: Optional[int] = None  # None = no cap on generated completion length
    use_vllm: bool = False  # whether to generate completions with vLLM instead of HF generate
    vllm_device: str = "auto"  # device placement for the vLLM engine
    vllm_gpu_memory_utilization: float = 0.9  # fraction of GPU memory vLLM may reserve
    model_init_kwargs: Optional[dict] = None  # extra kwargs forwarded to model construction; None = defaults

    # --- New EMAPO-specific parameters ---
    alpha: float = 0.5  # length-normalization exponent
    use_old_policy: bool = True  # whether to use the old-policy model
    tau_min: float = 0.9  # minimum value of the EMA coefficient tau
    tau_max: float = 0.995  # maximum value of the EMA coefficient tau
    learn_tau: bool = True  # whether tau is a learnable parameter
    learning_rate: float = 5e-5  # learning rate (already defined on TrainingArguments; overrides its default)
    per_device_train_batch_size: int = 8
    gradient_accumulation_steps: int = 4
    num_train_epochs: int = 3
    logging_steps: int = 10
    save_steps: int = 500
    output_dir: str = "./emapo_output"
    evaluation_strategy: str = "no"  # no evaluation by default; NOTE(review): newer transformers renamed this to eval_strategy — confirm compatibility
    report_to: str = "wandb"  # or "none"
