import torch
import math
from einops import einsum, rearrange, reduce


def softmax(x: torch.Tensor, dim: int) -> torch.Tensor:
    """Numerically stable softmax along a given dimension.

    Args:
        x: Input tensor.
        dim: Dimension over which to normalize.

    Returns:
        Tensor of the same shape as ``x`` whose values along ``dim`` sum to 1.
    """
    # Subtract the per-slice maximum before exponentiating to avoid overflow.
    shifted = x - x.amax(dim=dim, keepdim=True)
    exps = shifted.exp()
    return exps / exps.sum(dim=dim, keepdim=True)


def scaled_dot_product_attention(
    Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, mask: torch.Tensor
):
    """Scaled dot-product attention.

    Args:
        Q: Query tensor of shape (..., seq_q, d).
        K: Key tensor of shape (..., seq_k, d).
        V: Value tensor of shape (..., seq_k, d).
        mask: Boolean mask; positions where it is False receive zero weight.

    Returns:
        Attention output tensor of shape (..., seq_q, d).
    """
    # Scale by sqrt(d_k) to keep the dot products in a reasonable range.
    scale = math.sqrt(Q.shape[-1])

    # Pairwise similarity between every query and every key: Q @ K^T.
    scores = einsum(Q, K, "... seq_q d, ... seq_k d -> ... seq_q seq_k") / scale
    # Masked-out positions get -inf so softmax assigns them zero weight.
    scores = scores.masked_fill(~mask, float("-inf"))

    # Normalize into attention weights, then take the weighted sum of values.
    weights = softmax(scores, dim=-1)
    return einsum(weights, V, "... seq_q seq_k, ... seq_k d -> ... seq_q d")


class Linear(torch.nn.Module):
    """自定义线性层，使用截断正态分布初始化权重"""
    def __init__(
        self,
        in_features: int,      # 输入特征数
        out_features: int,     # 输出特征数
        device: torch.device | None = None,  # 设备
        dtype: torch.dtype | None = None,    # 数据类型
    ):
        super().__init__()

        # 计算截断正态分布的参数
        mean = 0
        std = math.sqrt(2 / (out_features + in_features))
        lower = -3 * std
        upper = 3 * std

        # 创建权重张量并使用截断正态分布初始化
        w = torch.empty((out_features, in_features), device=device, dtype=dtype)
        torch.nn.init.trunc_normal_(w, mean=mean, std=std, a=lower, b=upper)

        # 将权重注册为可学习参数
        self.weight = torch.nn.Parameter(w)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """前向传播函数
        Args:
            x: 输入张量
        Returns:
            线性变换后的输出张量
        """
        return einsum(self.weight, x, "d_out d_in, ... d_in -> ... d_out")


class RotaryPositionalEmbedding(torch.nn.Module):
    """Rotary positional embedding (RoPE) for query/key vectors."""

    def __init__(
        self,
        theta: float,          # RoPE base theta
        d_k: int,              # query/key head dimension
        max_seq_len: int,      # maximum sequence length to precompute
        device: torch.device | None = None,  # device for the tables
        dtype: torch.dtype | None = None,    # dtype of the tables
    ):
        super().__init__()

        # Position indices as a column vector: (max_seq_len, 1).
        positions = torch.arange(max_seq_len, device=device).unsqueeze(1)
        # Frequency exponents 2i/d_k for i = 0 .. d_k/2 - 1.
        exponents = torch.arange(0, d_k, 2, device=device) / d_k
        # Inverse frequencies theta^(-2i/d_k).
        inv_freq = 1.0 / (theta**exponents)
        # Rotation angle for every (position, frequency) pair.
        angles = positions * inv_freq

        # Cache cos/sin lookup tables; excluded from the state dict.
        self.register_buffer("cos", angles.cos().to(dtype), persistent=False)
        self.register_buffer("sin", angles.sin().to(dtype), persistent=False)

    def forward(self, x: torch.Tensor, token_positions: torch.Tensor) -> torch.Tensor:
        """Rotate (even, odd) feature pairs of ``x`` by position-dependent angles.

        Args:
            x: Input tensor whose last dimension is d_k.
            token_positions: Indices into the precomputed cos/sin tables.

        Returns:
            Tensor of the same shape as ``x`` with RoPE applied.
        """
        cos = self.cos[token_positions]
        sin = self.sin[token_positions]

        # Treat consecutive (even, odd) features as 2-D points...
        even = x[..., 0::2]
        odd = x[..., 1::2]

        # ...and rotate each pair by its angle.
        rot_even = even * cos - odd * sin
        rot_odd = even * sin + odd * cos

        # Re-interleave: stack to (..., d_k/2, 2), then flatten back to (..., d_k).
        return torch.stack((rot_even, rot_odd), dim=-1).flatten(-2)


class CausalMultiHeadSelfAttention(torch.nn.Module):
    """Multi-head self-attention with a causal (lower-triangular) mask."""

    def __init__(self, d_model: int, num_heads: int, device=None, dtype=None, **kwargs):
        super().__init__()

        # One fused projection produces Q, K and V in a single matmul.
        self.wqkv = Linear(d_model, 3 * d_model, device, dtype)
        # Final projection back to the model dimension.
        self.output_proj = Linear(d_model, d_model, device, dtype)

        # Store hyperparameters.
        self.num_heads = num_heads
        self.d_model = d_model
        self.d_head = d_model // num_heads

    def forward(
        self,
        x: torch.Tensor,
        rope: RotaryPositionalEmbedding | None = None,
        token_positions: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Apply causal multi-head self-attention to ``x``.

        Args:
            x: Input tensor of shape (batch, seq_len, d_model).
            rope: Optional RoPE module applied to Q and K.
            token_positions: Optional position indices; defaults to 0..seq_len-1.

        Returns:
            Tensor of shape (batch, seq_len, d_model).
        """
        batch_size, seq_len, _ = x.shape

        # Project once, then carve the result into Q, K and V.
        q, k, v = self.wqkv(x).split(self.d_model, dim=2)

        # (batch, seq, d_model) -> (batch, heads, seq, d_head).
        def to_heads(t: torch.Tensor) -> torch.Tensor:
            return t.reshape(batch_size, seq_len, self.num_heads, self.d_head).transpose(1, 2)

        q, k, v = to_heads(q), to_heads(k), to_heads(v)

        # Apply rotary position embeddings to queries and keys if requested.
        if rope is not None:
            if token_positions is None:
                token_positions = torch.arange(seq_len, device=x.device)
            q = rope(q, token_positions)
            k = rope(k, token_positions)

        # Causal mask: position i may attend only to positions <= i.
        causal_mask = torch.tril(
            torch.ones((seq_len, seq_len), device=x.device, dtype=torch.bool)
        )

        attn_out = scaled_dot_product_attention(q, k, v, causal_mask)
        # (batch, heads, seq, d_head) -> (batch, seq, d_model).
        attn_out = attn_out.transpose(1, 2).reshape(batch_size, seq_len, self.d_model)
        return self.output_proj(attn_out)


class RMSNorm(torch.nn.Module):
    """Root-mean-square layer normalization with a learned per-feature gain."""

    def __init__(self, d_model: int, eps: float = 1e-5, device=None, dtype=None):
        super().__init__()

        # Small constant added under the square root for numerical stability.
        self.eps = eps
        # Learned scale, initialized to ones.
        self.weight = torch.nn.Parameter(
            torch.ones(d_model, device=device, dtype=dtype)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize ``x`` by its RMS over the last dimension and rescale.

        Args:
            x: Input tensor of shape (..., d_model).

        Returns:
            Normalized tensor with the same shape and dtype as the input.
        """
        original_dtype = x.dtype
        # Perform the reduction in float32 regardless of input precision.
        x = x.to(torch.float32)

        # RMS over the feature dimension, kept for broadcasting.
        rms = torch.sqrt((x**2).mean(dim=-1, keepdim=True) + self.eps)
        normalized = x * self.weight / rms

        # Cast back to the caller's dtype.
        return normalized.to(original_dtype)


def silu_activation(x: torch.Tensor) -> torch.Tensor:
    """SiLU (a.k.a. Swish) activation: x * sigmoid(x).

    Args:
        x: Input tensor.

    Returns:
        Tensor of the same shape with SiLU applied elementwise.
    """
    gate = torch.sigmoid(x)
    return x * gate


class SwiGLU(torch.nn.Module):
    """SwiGLU feed-forward network: w2(SiLU(w1(x)) * w3(x))."""

    def __init__(
        self,
        d_model: int,          # model dimension
        d_ff: int,             # hidden dimension
        device: torch.device | None = None,  # device for the weights
        dtype: torch.dtype | None = None,    # dtype of the weights
    ):
        super().__init__()

        # w1 feeds the gate branch, w3 the value branch, w2 projects back.
        self.w1 = Linear(d_model, d_ff, device, dtype)
        self.w2 = Linear(d_ff, d_model, device, dtype)
        self.w3 = Linear(d_model, d_ff, device, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the gated feed-forward transform.

        Args:
            x: Input tensor of shape (..., d_model).

        Returns:
            Tensor of shape (..., d_model).
        """
        # Gate the value branch with the SiLU-activated first projection.
        gate = silu_activation(self.w1(x))
        return self.w2(gate * self.w3(x))


class SiLU(torch.nn.Module):
    """Two-layer feed-forward network with a SiLU nonlinearity."""

    def __init__(
        self,
        d_model: int,          # model dimension
        d_ff: int,             # hidden dimension
        device: torch.device | None = None,  # device for the weights
        dtype: torch.dtype | None = None,    # dtype of the weights
    ):
        super().__init__()

        # Up-projection and down-projection around the nonlinearity.
        self.w1 = Linear(d_model, d_ff, device, dtype)
        self.w2 = Linear(d_ff, d_model, device, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute w2(SiLU(w1(x))).

        Args:
            x: Input tensor of shape (..., d_model).

        Returns:
            Tensor of shape (..., d_model).
        """
        return self.w2(silu_activation(self.w1(x)))


class Expert(torch.nn.Module):
    """A single MoE expert: a two-layer SiLU feed-forward network."""

    def __init__(
        self,
        d_model: int,          # model dimension
        d_ff: int,             # hidden dimension
        device: torch.device | None = None,  # device for the weights
        dtype: torch.dtype | None = None,    # dtype of the weights
    ):
        super().__init__()

        # Up-projection and down-projection around the nonlinearity.
        self.w1 = Linear(d_model, d_ff, device, dtype)
        self.w2 = Linear(d_ff, d_model, device, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute w2(SiLU(w1(x))) for the tokens routed to this expert.

        Args:
            x: Input tensor of shape (..., d_model).

        Returns:
            Tensor of shape (..., d_model).
        """
        return self.w2(silu_activation(self.w1(x)))


class MoE(torch.nn.Module):
    """Mixture-of-Experts layer with top-k token routing.

    Each token is routed to ``num_experts_per_tok`` experts chosen by a
    learned gate; the selected experts' outputs are combined using the
    renormalized gate weights.
    """

    def __init__(
        self,
        d_model: int,          # model dimension
        d_ff: int,             # expert hidden dimension
        num_experts: int,      # total number of experts
        num_experts_per_tok: int,  # experts activated per token
        device: torch.device | None = None,  # device for the weights
        dtype: torch.dtype | None = None,    # dtype of the weights
    ):
        super().__init__()
        # Store routing hyperparameters.
        self.num_experts = num_experts
        self.num_experts_per_tok = num_experts_per_tok

        # Pool of independent feed-forward experts.
        self.experts = torch.nn.ModuleList(
            [Expert(d_model, d_ff, device, dtype) for _ in range(num_experts)]
        )

        # Gating network producing one logit per expert.
        self.gate = Linear(d_model, num_experts, device, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Route each token through its top-k experts and combine the results.

        Args:
            x: Input tensor of shape (batch, seq_len, d_model).

        Returns:
            Tensor of the same shape as ``x``.
        """
        batch_size, seq_len, d_model = x.shape

        # One row per token; reshape (unlike view) tolerates non-contiguous input.
        x_flat = x.reshape(-1, d_model)  # (batch_size * seq_len, d_model)

        # Gate probabilities, computed in float32 for numerical stability.
        gate_logits = self.gate(x_flat)  # (num_tokens, num_experts)
        gate_weights = torch.softmax(gate_logits, dim=1, dtype=torch.float).to(x.dtype)

        # Pick the top-k experts per token...
        topk_weights, topk_indices = torch.topk(
            gate_weights, self.num_experts_per_tok, dim=1
        )
        # ...and renormalize so the selected weights sum to 1 per token.
        topk_weights = topk_weights / topk_weights.sum(dim=1, keepdim=True)

        output = torch.zeros_like(x_flat)

        for i, expert in enumerate(self.experts):
            # (num_tokens, k) boolean matrix: which top-k slots chose expert i.
            slot_mask = topk_indices == i
            # Tokens that routed to this expert at all.
            token_mask = slot_mask.any(dim=1)
            if not token_mask.any():
                continue

            # Combined gate weight of expert i for every token (zero if unrouted).
            expert_weights = (topk_weights * slot_mask).sum(dim=1, keepdim=True)

            # Run the expert only on its routed tokens. The previous version
            # evaluated every expert on every token and relied on zero gate
            # weights to cancel the unused outputs, wasting compute.
            routed_tokens = x_flat[token_mask]
            output[token_mask] += expert(routed_tokens) * expert_weights[token_mask]

        # Restore the (batch, seq_len, d_model) layout.
        return output.reshape(batch_size, seq_len, d_model)


class Block(torch.nn.Module):
    """Pre-norm transformer block: attention and feed-forward with residuals."""

    def __init__(
        self,
        d_model: int,          # model dimension
        num_heads: int,        # attention head count
        d_ff: int,             # feed-forward hidden dimension
        rope: RotaryPositionalEmbedding | None = None,  # shared RoPE module
        device=None,           # device for the weights
        dtype=None,            # dtype of the weights
        **kwargs,
    ):
        super().__init__()

        # RoPE module shared across layers (None disables rotary embeddings).
        self.rope = rope

        # Attention sub-layer with its pre-normalization.
        self.ln1 = RMSNorm(d_model, device=device, dtype=dtype)
        self.attn = CausalMultiHeadSelfAttention(
            d_model, num_heads, device, dtype, **kwargs
        )

        # Feed-forward sub-layer with its pre-normalization.
        self.ln2 = RMSNorm(d_model, device=device, dtype=dtype)
        self.ffn = self._build_ffn(d_model, d_ff, device, dtype, **kwargs)

    @staticmethod
    def _build_ffn(d_model, d_ff, device, dtype, **kwargs):
        """Construct the configured feed-forward module; MoE takes precedence."""
        if kwargs.get("use_moe", False):
            return MoE(
                d_model,
                d_ff,
                kwargs.get("num_experts", 8),
                kwargs.get("num_experts_per_tok", 2),
                device,
                dtype,
            )
        ffn_type = kwargs.get("ffn_type", "swiglu")
        if ffn_type == "silu":
            return SiLU(d_model, d_ff, device, dtype)
        if ffn_type == "swiglu":
            return SwiGLU(d_model, d_ff, device, dtype)
        raise ValueError(f"Unsupported ffn_type: {ffn_type}")

    def forward(self, x: torch.Tensor):
        """Apply attention then feed-forward, each behind a residual connection.

        Args:
            x: Input tensor of shape (batch, seq_len, d_model).

        Returns:
            Tensor of the same shape as ``x``.
        """
        x = x + self.attn(self.ln1(x), self.rope)
        x = x + self.ffn(self.ln2(x))
        return x


class Embedding(torch.nn.Module):
    """嵌入层"""
    def __init__(
        self,
        num_embeddings: int,   # 词汇表大小
        embedding_dim: int,    # 嵌入维度
        device: torch.device | None = None,  # 设备
        dtype: torch.dtype | None = None,    # 数据类型
        **kwargs,
    ):
        """
        Initialize the embedding layer.

        Args:
            num_embeddings (int): Size of the vocabulary.
            embedding_dim (int): Dimension of the embedding vectors, i.e., dmodel.
            device (torch.device, optional): The device to use. Defaults to None.
            dtype (torch.dtype, optional): The data type to use. Defaults to None.
            **kwargs: Additional keyword arguments.

        Returns:
            torch.nn.Module: The embedding layer.
        """

        super().__init__()

        # 设置初始化参数
        mean = 0
        std = 1
        lower = -3
        upper = 3

        # 如果提供了嵌入标准差，则使用它
        if kwargs.get("embedding_std", None) is not None:
            std = kwargs.get("embedding_std")

        # 创建嵌入权重矩阵并使用截断正态分布初始化
        w = torch.empty((num_embeddings, embedding_dim), device=device, dtype=dtype)
        torch.nn.init.trunc_normal_(w, mean=mean, std=std, a=lower, b=upper)

        # 将权重注册为可学习参数
        self.weight = torch.nn.Parameter(w)

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        """前向传播函数
        Args:
            token_ids: token ID张量
        Returns:
            嵌入向量张量
        """
        return self.weight[token_ids]


class Transformer(torch.nn.Module):
    """Decoder-only transformer language model."""

    def __init__(
        self,
        d_model: int,          # model dimension
        num_heads: int,        # attention head count
        d_ff: int,             # feed-forward hidden dimension
        vocab_size: int,       # vocabulary size
        context_length: int,   # maximum supported sequence length
        num_layers: int,       # number of transformer blocks
        rope_theta: float = 10000.0,  # RoPE base theta
        device=None,           # device for the weights
        dtype=None,            # dtype of the weights
        **kwargs,
    ):
        super().__init__()

        # Remember the maximum sequence length for input validation.
        self.context_length = context_length
        # Token embedding table.
        self.token_embeddings = Embedding(vocab_size, d_model, device, dtype, **kwargs)

        # Heads must evenly divide the model dimension.
        if d_model % num_heads != 0:
            raise ValueError("d_model must be divisible by num_heads")

        # One RoPE module shared by every layer, sized to a single head.
        rope = RotaryPositionalEmbedding(
            rope_theta, d_model // num_heads, context_length, device=device, dtype=dtype
        )

        # Stack of transformer blocks.
        self.layers = torch.nn.ModuleList(
            Block(d_model, num_heads, d_ff, rope, device, dtype, **kwargs)
            for _ in range(num_layers)
        )

        # Final normalization and vocabulary projection.
        self.ln_final = RMSNorm(d_model, device=device, dtype=dtype)
        self.lm_head = Linear(d_model, vocab_size, device, dtype)

        # Optionally share the embedding matrix with the output head.
        if kwargs.get("weight_tying", False):
            self.lm_head.weight = self.token_embeddings.weight

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map token ids to next-token logits.

        Args:
            x: Integer tensor of token ids with shape (batch, seq_len).

        Returns:
            Logits tensor of shape (batch, seq_len, vocab_size).
        """
        _, seq_len = x.shape

        # Refuse inputs longer than the precomputed RoPE tables support.
        if seq_len > self.context_length:
            raise ValueError(
                f"Input sequence length ({seq_len}) exceeds model context length ({self.context_length})"
            )

        hidden = self.token_embeddings(x)

        for block in self.layers:
            hidden = block(hidden)

        return self.lm_head(self.ln_final(hidden))
