# src/hfco/configuration_transformer.py

from transformers import PretrainedConfig


class TransformerConfig(PretrainedConfig):
    """
    Configuration class for the custom course Transformer model.

    Inherits from `PretrainedConfig` and stores all model hyperparameters,
    including the ablation-study switches (RoPE, residual connections,
    normalization type and position).

    Args:
        d_model: Model hidden size; must be divisible by ``num_heads``.
        num_heads: Number of attention heads.
        d_ff: Feed-forward inner dimension.
        num_layers: Number of transformer layers.
        rope_theta: Base frequency for rotary position embeddings.
        max_seq_len: Maximum supported sequence length.
        vocab_size: Unified vocabulary size (shared BPE for mixed
            Chinese/English text); also mirrored to ``src_vocab_size`` and
            ``tgt_vocab_size`` for Transformer_Seq2Seq compatibility.
        share_embeddings: Whether embeddings are shared (always meaningful
            with a unified vocabulary).
        pad_token_id: Padding token id.
        bos_token_id: Beginning-of-sequence token id; also used as
            ``decoder_start_token_id`` for ``generate``.
        eos_token_id: End-of-sequence token id.
        use_rope: If ``False``, use the original 2017 attention instead of
            RoPE (ablation switch).
        use_residual: Whether residual connections are enabled (ablation
            switch).
        norm_type: Normalization type, ``"rms"`` or ``"layer"``.
        norm_position: Normalization position, ``"pre"`` or ``"post"``.
        label_smoothing: Label-smoothing factor (explicit parameter; was
            previously read back out of ``**kwargs``).

    Raises:
        ValueError: If ``d_model`` is not divisible by ``num_heads``, or
            ``norm_type`` / ``norm_position`` has an invalid value.
    """

    model_type = "llm_course_transformer"

    def __init__(
        self,
        d_model: int = 512,
        num_heads: int = 8,
        d_ff: int = 2048,
        num_layers: int = 6,
        rope_theta: float = 10000.0,
        max_seq_len: int = 512,
        vocab_size: int = 32768,
        share_embeddings: bool = False,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        # Ablation-study configuration
        use_rope: bool = True,  # False -> original 2017-style attention
        use_residual: bool = True,  # whether residual connections are used
        norm_type: str = "rms",  # "rms" or "layer"
        norm_position: str = "pre",  # "pre" or "post"
        label_smoothing: float = 0.1,
        **kwargs,
    ):
        # --- Validate parameters first, so an invalid config raises
        # before any state is stored ---
        if d_model % num_heads != 0:
            raise ValueError(
                f"d_model ({d_model}) 必须能被 num_heads ({num_heads}) 整除。"
            )

        if norm_type not in ("rms", "layer"):
            raise ValueError(f"norm_type 必须是 'rms' 或 'layer'，得到 '{norm_type}'")

        if norm_position not in ("pre", "post"):
            raise ValueError(f"norm_position 必须是 'pre' 或 'post'，得到 '{norm_position}'")

        # Pass special-token ids through the parent constructor (standard
        # HF pattern); remaining kwargs must also be forwarded to the base.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        # --- Core architecture parameters ---
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.rope_theta = rope_theta
        self.max_seq_len = max_seq_len

        # --- Ablation-study parameters ---
        self.use_rope = use_rope
        self.use_residual = use_residual
        self.norm_type = norm_type
        self.norm_position = norm_position

        # --- HF-compatible attribute aliases ---
        self.hidden_size = d_model
        self.num_attention_heads = num_heads
        self.num_hidden_layers = num_layers

        # Unified vocabulary (shared BPE tokenizer for mixed Chinese/English
        # text); src/tgt sizes mirror it for Transformer_Seq2Seq compatibility.
        self.vocab_size = vocab_size
        self.src_vocab_size = vocab_size
        self.tgt_vocab_size = vocab_size

        # With a unified vocabulary, sharing embeddings is always meaningful.
        self.share_embeddings = share_embeddings

        # --- Label smoothing (training-loss hyperparameter) ---
        self.label_smoothing = label_smoothing

        # --- HF: explicitly mark as an encoder-decoder model and provide
        # the decoding start token; `generate` uses decoder_start_token_id
        # as the first decoder input for Seq2Seq models ---
        self.is_encoder_decoder = True
        self.decoder_start_token_id = bos_token_id
