"""
LLM模型配置类
基于MiniMind2架构，支持Dense和MoE两种模式
"""

from dataclasses import dataclass
from typing import Optional, Union
import math


@dataclass
class LLMConfig:
    """Configuration for the LLM (MiniMind2-style architecture).

    Supports both a dense transformer and a Mixture-of-Experts (MoE)
    variant, selected via ``use_moe``.
    """

    # --- Core architecture ---
    vocab_size: int = 6400
    hidden_size: int = 768
    num_hidden_layers: int = 16
    num_attention_heads: int = 8
    num_key_value_heads: int = 2  # GQA: number of shared key/value heads
    intermediate_size: Optional[int] = None  # derived in __post_init__ when None
    max_position_embeddings: int = 8192

    # --- Activation and normalization ---
    hidden_act: str = "silu"  # SiLU gate, i.e. SwiGLU feed-forward
    rms_norm_eps: float = 1e-5

    # --- Rotary position embedding ---
    rope_theta: float = 1e6

    # --- Regularization ---
    dropout: float = 0.0
    attention_dropout: float = 0.0

    # --- Special tokens ---
    bos_token_id: int = 1
    eos_token_id: int = 2
    pad_token_id: int = 0

    # --- Training-related ---
    initializer_range: float = 0.02
    use_cache: bool = True
    tie_word_embeddings: bool = True  # share input embedding and LM-head weights

    # --- MoE configuration ---
    use_moe: bool = False
    num_experts_per_tok: int = 2      # top-k routed experts per token
    n_routed_experts: int = 4
    n_shared_experts: int = 1         # always-active shared experts
    scoring_func: str = "softmax"
    aux_loss_alpha: float = 0.1       # weight of the load-balancing aux loss
    seq_aux: bool = True
    norm_topk_prob: bool = True

    # --- Misc ---
    flash_attn: bool = True
    torch_dtype: str = "bfloat16"

    def __post_init__(self):
        """Derive ``intermediate_size`` when unset and validate the config.

        Raises:
            ValueError: if the head counts do not divide evenly, or the MoE
                settings are inconsistent.
        """
        if self.intermediate_size is None:
            # SwiGLU convention: ~8/3 * hidden_size, which keeps the parameter
            # count comparable to a classic 4x MLP despite the extra gate
            # projection; rounded up to a multiple of 64 for hardware efficiency.
            self.intermediate_size = int(self.hidden_size * 8 / 3)
            self.intermediate_size = 64 * ((self.intermediate_size + 63) // 64)

        # Raise explicit exceptions instead of assert: asserts are stripped
        # when Python runs with -O, silently disabling validation.
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError("hidden_size must be divisible by num_attention_heads")
        if self.num_attention_heads % self.num_key_value_heads != 0:
            raise ValueError("num_attention_heads must be divisible by num_key_value_heads")

        if self.use_moe:
            if self.n_routed_experts <= 0:
                raise ValueError("n_routed_experts must be > 0 when use_moe is enabled")
            if self.num_experts_per_tok > self.n_routed_experts:
                raise ValueError("num_experts_per_tok cannot exceed n_routed_experts")

    @property
    def head_dim(self) -> int:
        """Dimension of each attention head."""
        return self.hidden_size // self.num_attention_heads

    @property
    def num_rep(self) -> int:
        """How many times each KV head is repeated to serve the query heads (GQA)."""
        return self.num_attention_heads // self.num_key_value_heads

    def get_model_size(self) -> float:
        """Estimate the total parameter count, in millions.

        Accounts for grouped-query attention, tied word embeddings and
        (optionally) MoE feed-forward layers. With the predefined configs
        this reproduces the advertised sizes (25.83M / 104M / 145M).
        """
        # Token embedding table.
        embed_params = self.vocab_size * self.hidden_size

        # Attention projections. Under GQA, K and V project to
        # num_key_value_heads * head_dim — NOT hidden_size / num_key_value_heads
        # per head, which would wrongly expand K/V back to hidden_size.
        head_dim = self.head_dim
        q_params = self.hidden_size * (self.num_attention_heads * head_dim)
        k_params = self.hidden_size * (self.num_key_value_heads * head_dim)
        v_params = self.hidden_size * (self.num_key_value_heads * head_dim)
        o_params = (self.num_attention_heads * head_dim) * self.hidden_size
        attn_params = q_params + k_params + v_params + o_params

        # Feed-forward: gate, up and down projections per expert.
        ffn_expert = 3 * self.hidden_size * self.intermediate_size
        if self.use_moe:
            # Every routed and shared expert carries a full FFN, plus the
            # router gate (hidden_size -> n_routed_experts logits).
            ffn_params = (self.n_routed_experts + self.n_shared_experts) * ffn_expert
            ffn_params += self.hidden_size * self.n_routed_experts
        else:
            ffn_params = ffn_expert

        # Two RMSNorms per block (pre-attention and pre-FFN).
        norm_params = 2 * self.hidden_size
        layer_params = attn_params + ffn_params + norm_params

        # Final RMSNorm before the LM head.
        final_norm_params = self.hidden_size

        # The LM head adds no parameters when tied to the input embedding.
        lm_head_params = 0 if self.tie_word_embeddings else self.hidden_size * self.vocab_size

        total_params = (
            embed_params
            + self.num_hidden_layers * layer_params
            + final_norm_params
            + lm_head_params
        )
        return total_params / 1e6


# 预定义的模型配置
def get_minimind2_small_config() -> LLMConfig:
    """MiniMind2-Small configuration (25.83M parameters), matching the official release."""
    params = dict(
        vocab_size=6400,
        hidden_size=512,
        num_hidden_layers=8,
        num_attention_heads=8,
        num_key_value_heads=2,
        max_position_embeddings=32768,  # same as the official config
        rms_norm_eps=1e-5,
        rope_theta=1e6,
        use_moe=False,
        dropout=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
    )
    return LLMConfig(**params)


def get_minimind2_config() -> LLMConfig:
    """MiniMind2 base configuration (104M parameters)."""
    overrides = dict(
        vocab_size=6400,
        hidden_size=768,
        num_hidden_layers=16,
        num_attention_heads=8,
        num_key_value_heads=2,
        max_position_embeddings=8192,
        use_moe=False,
    )
    return LLMConfig(**overrides)


def get_minimind2_moe_config() -> LLMConfig:
    """MiniMind2-MoE configuration (145M parameters)."""
    # Shared backbone settings.
    backbone = dict(
        vocab_size=6400,
        hidden_size=640,
        num_hidden_layers=8,
        num_attention_heads=8,
        num_key_value_heads=2,
        max_position_embeddings=8192,
    )
    # Mixture-of-Experts routing settings.
    moe = dict(
        use_moe=True,
        n_routed_experts=4,
        n_shared_experts=1,
        num_experts_per_tok=2,
    )
    return LLMConfig(**backbone, **moe)


def get_gpt2_small_config() -> LLMConfig:
    """GPT-2 Small configuration (117M parameters)."""
    settings = dict(
        vocab_size=50257,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_key_value_heads=12,  # MHA: every query head has its own KV head
        max_position_embeddings=1024,
        hidden_act="gelu",
        use_moe=False,
    )
    return LLMConfig(**settings)