# file:D:\share\python\python_net\base_model\nlp\transformer1\UsherConfig.py
import os
from typing import Literal

from transformers import PretrainedConfig, AutoTokenizer


class UsherConfig(PretrainedConfig):
    """Configuration class for the Usher Transformer model.

    Defines model architecture hyperparameters (dimensions, layer counts,
    MoE routing, MLA low-rank attention, and YaRN rotary-embedding
    extension settings) and stores them as attributes for downstream
    model construction.

    Attributes:
        max_batch_size (int): Maximum batch size.
        max_seq_len (int): Maximum sequence length.
        vocab_size (int): Vocabulary size.
        dim (int): Model (hidden) dimension.
        inter_dim (int): Intermediate dimension of the MLP layers.
        moe_inter_dim (int): Intermediate dimension of the MoE layers.
        n_layers (int): Number of Transformer layers.
        n_dense_layers (int): Number of dense (non-MoE) layers in the model.
        n_heads (int): Number of attention heads.
        n_routed_experts (int): Number of routed experts in MoE layers.
        n_shared_experts (int): Number of shared experts in MoE layers.
        n_activated_experts (int): Number of experts activated per token in MoE layers.
        n_expert_groups (int): Number of expert groups.
        n_limited_groups (int): Number of groups a token may route to in MoE routing.
        score_func (Literal["softmax", "sigmoid"]): Scoring function for MoE routing.
        route_scale (float): Scaling factor applied to routing scores.
        q_lora_rank (int): LoRA rank for the query projection.
        kv_lora_rank (int): LoRA rank for the key/value projection.
        qk_nope_head_dim (int): Per-head dimension of the query/key projection
            without positional embedding.
        qk_rope_head_dim (int): Per-head dimension of the query/key projection
            with rotary positional embedding.
        v_head_dim (int): Per-head dimension of the value projection.
        original_seq_len (int): Original (pre-extension) sequence length.
        rope_theta (float): Base value for rotary positional encoding.
        rope_factor (float): Scaling factor for extended sequence lengths.
        beta_fast (int): Fast beta correction factor (YaRN).
        beta_slow (int): Slow beta correction factor (YaRN).
        mscale (float): Scaling factor for extended-length attention.
        path (str): Absolute output/result path derived from the ``path`` argument.
    """

    model_type = "usherTransformer"
    # Lets AutoModelForCausalLM.from_pretrained(..., trust_remote_code=True)
    # locate the custom model implementation (module.ClassName).
    auto_map = {
        "AutoModelForCausalLM": "Transformer.Transformer"
    }

    def __init__(
            self,
            max_batch_size: int = 128,
            max_seq_len: int = 128,
            vocab_size: int = 512,
            dim: int = 32,
            inter_dim: int = 32,
            moe_inter_dim: int = 64,
            n_layers: int = 2,
            n_dense_layers: int = 1,
            n_heads: int = 4,
            # moe
            n_routed_experts: int = 0,
            n_shared_experts: int = 2,
            n_activated_experts: int = 0,
            n_expert_groups: int = 1,
            n_limited_groups: int = 1,
            score_func: Literal["softmax", "sigmoid"] = "softmax",
            route_scale: float = 1.0,
            # mla
            q_lora_rank: int = 16,
            kv_lora_rank: int = 32,
            qk_nope_head_dim: int = 16,
            qk_rope_head_dim: int = 8,
            v_head_dim: int = 16,
            # yarn
            original_seq_len: int = 512,
            rope_theta: float = 10000.0,
            rope_factor: float = 40.0,
            beta_fast: int = 32,
            beta_slow: int = 1,
            mscale: float = 1.0,
            path: str = r"result",
            **kwargs,
    ):
        """Initialize the configuration; see class docstring for parameter meanings.

        Unknown keyword arguments are forwarded to ``PretrainedConfig``.
        """
        # Let the HF base class consume/store its own standard fields first.
        super().__init__(**kwargs)

        self.max_batch_size: int = max_batch_size
        self.max_seq_len: int = max_seq_len
        self.vocab_size: int = vocab_size
        self.dim: int = dim
        self.inter_dim: int = inter_dim
        self.moe_inter_dim: int = moe_inter_dim
        self.n_layers: int = n_layers
        self.n_dense_layers: int = n_dense_layers
        self.n_heads: int = n_heads
        # moe
        self.n_routed_experts: int = n_routed_experts
        self.n_shared_experts: int = n_shared_experts
        self.n_activated_experts: int = n_activated_experts
        self.n_expert_groups: int = n_expert_groups
        self.n_limited_groups: int = n_limited_groups
        self.score_func: Literal["softmax", "sigmoid"] = score_func
        self.route_scale: float = route_scale
        # mla
        self.q_lora_rank: int = q_lora_rank
        self.kv_lora_rank: int = kv_lora_rank
        self.qk_nope_head_dim: int = qk_nope_head_dim
        self.qk_rope_head_dim: int = qk_rope_head_dim
        self.v_head_dim: int = v_head_dim
        # yarn
        self.original_seq_len: int = original_seq_len
        self.rope_theta: float = rope_theta
        self.rope_factor: float = rope_factor
        self.beta_fast: int = beta_fast
        self.beta_slow: int = beta_slow
        self.mscale: float = mscale
        # Convert to an absolute path.
        # NOTE(review): storing an absolute path makes a serialized config
        # machine-specific — confirm this is intended before persisting configs.
        self.path = os.path.abspath(path)
