from regex import P
import torch
from torch import nn
from math import sqrt
from typing import List
from einops import einsum, reduce, rearrange


class Linear(nn.Module):
    """线性层"""

    def __init__(
        self,
        in_features: int,
        out_features: int,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ) -> None:
        """
        构建线性层

        参数:
            in_features (int):      输入的最后一维的维度
            out_features (int):     输出的最后一维的维度
            device (torch.device):  保存参数的设备
            dtype (torch.dtype):    参数的数据类型
        """
        super().__init__()  # 调用父类构造函数

        self.dim_in: int = in_features
        self.dim_out: int = out_features

        # 权重矩阵
        self.weights: nn.Parameter = nn.Parameter(
            torch.empty((out_features, in_features), device=device, dtype=dtype)
        )

        # 用 mean = 0, std^2 = 2/(dim_in+dim_out) ，截断区间为 [-3*std, 3*std] 的正态分布初始化权重矩阵
        std = sqrt(2.0 / (in_features + out_features))
        nn.init.trunc_normal_(self.weights, 0, std, -3 * std, 3 * std)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        前向传播

        Args:
            x: 输入张量，形状为(batch, ..., in_features)
        """
        return einsum(x, self.weights, "... input, output input -> ... output")  # type: ignore[reportReturnType]


class Embedding(nn.Module):
    """嵌入层"""

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        """
        构建嵌入层

        Args:
            num_embeddings (int):   词汇表大小
            embedding_dim (int):    嵌入向量维度
            device (torch.device):  保存参数的设备
            dtype (torch.dtype):    参数的数据类型
        """
        super().__init__()  # 调用父类构造函数

        # 嵌入矩阵，每行是一个 vocab 的嵌入向量
        self.weights: nn.Parameter = nn.Parameter(
            torch.empty((num_embeddings, embedding_dim), device=device, dtype=dtype)
        )

        # 用 mean = 0, std^2 = 1 ，截断区间为 [-3, 3] 的正态分布初始化嵌入矩阵
        nn.init.trunc_normal_(self.weights, 0, 1, -3, 3)

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        """
        前向传播

        Args:
            x: 输入张量，形状为(batch, seq_len)，每个元素是词汇表中的索引
        """
        batch_size = token_ids.shape[0]
        return torch.stack(
            [
                torch.index_select(
                    self.weights,
                    dim=0,
                    index=token_ids[i],
                )
                for i in range(batch_size)
            ]
        )


class RMSNorm(nn.Module):
    """均方根归一化"""

    def __init__(
        self,
        d_model: int,
        eps: float = 1e-5,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ) -> None:
        """
        构建 RMSNorm 层

        Args:
            d_model (int):          输入特征的维度
            eps (float):            浮点误差
            device (torch.device):  保存参数的设备
            dtype (torch.dtype):    参数的数据类型
        """
        super().__init__()

        self.d_model: int = d_model
        self.eps: float = eps

        # 增益参数
        self.weights: nn.Parameter = nn.Parameter(
            torch.ones(d_model, device=device, dtype=dtype)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        前向传播

        Args:
            x (torch.Tensor):   输入张量，形状为(batch, seq_len, d_model)
        """
        in_dtype = x.dtype
        x = x.to(torch.float32)

        # 计算均方根
        rms = torch.sqrt(reduce(x**2, "... d -> ... 1", "mean") + self.eps)

        # 归一化，同时乘以增益参数
        norm_res = x / rms * self.weights

        return norm_res.to(in_dtype)


class SwiGLUFFN(nn.Module):
    """使用 SwiGLU 激活函数的前馈神经网络"""

    def __init__(
        self,
        d_model: int,
        d_ff: int | None = None,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        """
        构建 SwiGLU FFN 层

        Args:
            d_model (int):          输入特征的维度
            d_ff (int):             内部前馈层的维度
            device (torch.device):  保存参数的设备
            dtype (torch.dtype):    参数的数据类型
        """
        super().__init__()

        self.d_model: int = d_model

        # 若 d_ff 没有赋值，则默认赋为 8/3 倍的输入特征维度
        if d_ff is None:
            d_ff = int(8 / 3 * d_model)
        # 确保内部前馈层的维度为 64 的倍数
        self.d_ff: int = (d_ff + 63) // 64 * 64

        self.w1 = Linear(d_model, self.d_ff, device, dtype)
        self.w2 = Linear(self.d_ff, d_model, device, dtype)
        self.w3 = Linear(d_model, self.d_ff, device, dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        前向传播

        Args:
            x (torch.Tensor):   输入张量，形状为(batch, ..., d_model)
        """
        w1_x = self.w1(x)
        w3_x = self.w3(x)
        swish = w1_x * torch.sigmoid(w1_x)

        return self.w2(swish * w3_x)


class RoPE(nn.Module):
    """Rotary positional embedding (RoPE)."""

    def __init__(
        self,
        theta: float,
        d_k: int,
        max_seq_len: int,
        device: torch.device | None = None,
    ):
        """
        Build the RoPE layer.

        Args:
            theta (float):          base of the rotation angles
            d_k (int):              dimension of the incoming Q / K vectors
            max_seq_len (int):      maximum sequence length
            device (torch.device):  device on which the buffer is stored
        """
        super().__init__()

        self.theta: float = theta
        self.d_k: int = d_k
        self.max_seq_len: int = max_seq_len
        self.device: torch.device | None = device

        # Precomputed complex rotations; persistent=False keeps the buffer out
        # of the state dict since it can always be recomputed.
        self.register_buffer("rope", self._precompute_freqs_cis(), persistent=False)

    def _precompute_freqs_cis(self) -> torch.Tensor:
        """
        Precompute the rotation frequencies and phases.
        Returns:
            A complex tensor of shape (max_seq_len, d_k // 2) holding the
            rotary position encodings (one complex rotation per dimension pair).
        """
        # Frequency sequence \theta_i:
        # theta_i = 1 / { theta^{2i / d_k} }
        freqs = 1.0 / (
            self.theta
            ** (
                torch.arange(0, self.d_k, 2, device=self.device)[: (self.d_k // 2)]
                / self.d_k
            )
        )

        # Position indices m = [0, 1, ..., max_seq_len-1]
        seq_idx = torch.arange(0, self.max_seq_len, device=self.device)

        # Outer product m * \theta_i:
        # freqs[m][i] = m * \theta_i
        freqs = einsum(seq_idx, freqs, "seq, d -> seq d")

        # Complexify (unit modulus):
        # freqs_cis[m][i] = 1 * e^{i * m * \theta_i}
        freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
        return freqs_cis

    def forward(self, x: torch.Tensor, token_positions: torch.Tensor) -> torch.Tensor:
        """
        Apply the rotary position encoding.

        Args:
            x (torch.Tensor):               input tensor of shape (..., seq_len, d_k)
            token_positions (torch.Tensor): position indices of shape (..., seq_len)
        Returns:
            The rotated tensor, shape (..., seq_len, d_k)
        """
        # Pair up adjacent dimensions: (..., seq, d_k) -> (..., seq, d_k/2, 2).
        x_ = rearrange(x, "... seq (d two) -> ... seq d two", two=2).float()
        # View each real pair as one complex number.
        x_ = torch.view_as_complex(x_)

        # Select the precomputed rotations for the requested positions.
        # Shape: (batch, ..., seq_len, d_k // 2)
        rope_pos = self.rope[token_positions]  # type: ignore[reportIndexIssue]

        # Rotate (complex multiply), convert back to real pairs and flatten.
        x_out = rearrange(
            torch.view_as_real(x_ * rope_pos), "... seq d two -> ... seq (d two)", two=2
        )

        return x_out.to(x.dtype)  # restore the original dtype


def softmax(
    x: torch.Tensor,
    dim: int = -1,
) -> torch.Tensor:
    """
    Numerically stable softmax.

    Args:
        x (torch.Tensor):   input tensor
        dim (int):          dimension along which to compute the softmax
    """

    # Subtract the per-slice maximum before exponentiating so that large
    # inputs cannot overflow.
    shifted = x - x.amax(dim=dim, keepdim=True)
    exps = torch.exp(shifted)

    return exps / exps.sum(dim=dim, keepdim=True)


def scaled_dot_production_attention(
    Q: torch.Tensor,
    K: torch.Tensor,
    V: torch.Tensor,
    mask: torch.Tensor | None = None,
) -> torch.Tensor:
    """
    Scaled dot-product attention.

    Args:
        Q (torch.Tensor):       queries, shape (batch, ..., seq_len_q, d_k)
        K (torch.Tensor):       keys, shape (batch, ..., seq_len_k, d_k)
        V (torch.Tensor):       values, shape (batch, ..., seq_len_k, d_v)
        mask (torch.Tensor):    mask of shape (seq_len_q, seq_len_k); positions
                                where the mask is falsy are blocked
    Returns:
        attention tensor of shape (batch, ..., seq_len_q, d_v)
    """
    # Attention scores QK^T / sqrt(d_k) as a batched matrix product.
    d_k = Q.shape[-1]
    scores = (Q @ K.transpose(-2, -1)) / sqrt(d_k)

    # Set blocked positions to -inf so softmax assigns them zero weight.
    if mask is not None:
        scores = scores.masked_fill(mask == 0, float("-inf"))

    # Weighted sum of values: softmax(scores) @ V.
    return softmax(scores, -1) @ V


class MHA(nn.Module):
    """Multi-head causal self-attention."""

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ) -> None:
        """
        Build the multi-head self-attention layer.

        Args:
            d_model (int):          dimension of the input features
            num_heads (int):        number of attention heads
            device (torch.device):  device on which parameters are stored
            dtype (torch.dtype):    parameter data type
        Raises:
            ValueError: if d_model is not divisible by num_heads
        """

        super().__init__()

        self.d_model: int = d_model
        self.num_heads: int = num_heads
        # Validate with a real exception: asserts are stripped under `python -O`.
        if d_model % num_heads != 0:
            raise ValueError("d_model must be divisible by num_heads")
        self.d_kh: int = d_model // num_heads  # per-head dimension of Q/K/V
        self.device: torch.device | None = device

        # Q, K and V projections fused into one matrix so a single matmul
        # produces all three.
        self.w_qkv = Linear(d_model, d_model * 3, device, dtype)
        self.w_o = Linear(d_model, d_model, device, dtype)

    def forward(self, x: torch.Tensor):
        """
        Run causal multi-head self-attention.

        Args:
            x (torch.Tensor):   input tensor of shape (..., seq, d_model)
        """

        QKV = self.w_qkv(x)
        Q_heads, K_heads, V_heads = rearrange(
            QKV,
            "... seq (three head d_kh) -> three ... head seq d_kh",
            three=3,
            head=self.num_heads,
        )

        # Causal mask built directly on the input's device. Using x.device
        # (rather than the device captured at construction time) stays correct
        # after the module is moved with .to()/.cuda(), and avoids the extra
        # CPU allocation + transfer the old ones(...).to(device) incurred.
        seq = x.shape[-2]
        mask = torch.tril(torch.ones((seq, seq), dtype=torch.bool, device=x.device))

        # Per-head attention.
        # (batch ... head seq_q, d_v)
        atten_heads = scaled_dot_production_attention(Q_heads, K_heads, V_heads, mask)

        # Concatenate the heads.
        # (batch ... seq_q (head*d_v))
        atten = rearrange(atten_heads, "... head seq_q d_kh -> ... seq_q (head d_kh)")

        return self.w_o(atten)


class MHAWithRoPE(MHA):
    """Multi-head causal self-attention with rotary position encoding."""

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        max_seq_len: int,
        theta: float,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ) -> None:
        """
        Build the multi-head self-attention layer with RoPE.

        Args:
            d_model (int):          dimension of the input features
            num_heads (int):        number of attention heads
            max_seq_len (int):      maximum sequence length
            theta (float):          RoPE angle base
            device (torch.device):  device on which parameters are stored
            dtype (torch.dtype):    parameter data type
        """

        super().__init__(d_model, num_heads, device, dtype)

        # One RoPE module shared by all heads (operates on the per-head dim).
        self.rope = RoPE(theta, self.d_kh, max_seq_len, device)

    def forward(self, x: torch.Tensor, token_positions: torch.Tensor):
        """
        Run causal multi-head self-attention with RoPE applied to Q and K.

        Args:
            x (torch.Tensor):               input tensor of shape (..., seq, d_model)
            token_positions (torch.Tensor): position indices of shape (..., seq)
        """

        QKV = self.w_qkv(x)
        Q_heads, K_heads, V_heads = rearrange(
            QKV,
            "... seq (three head d_kh) -> three ... head seq d_kh",
            three=3,
            head=self.num_heads,
        )

        # Inject the rotary position encoding into Q and K (V is left as-is).
        Q_heads_rope = self.rope(Q_heads, token_positions)
        K_heads_rope = self.rope(K_heads, token_positions)

        # Causal mask built directly on the input's device. Using x.device
        # (rather than the device captured at construction time) stays correct
        # after the module is moved with .to()/.cuda().
        seq = x.shape[-2]
        mask = torch.tril(torch.ones((seq, seq), dtype=torch.bool, device=x.device))

        # Per-head attention.
        # (batch ... head seq_q, d_v)
        atten_heads = scaled_dot_production_attention(
            Q_heads_rope, K_heads_rope, V_heads, mask
        )

        # Concatenate the heads.
        # (batch ... seq_q (head*d_v))
        atten = rearrange(atten_heads, "... head seq_q d_kh -> ... seq_q (head d_kh)")

        return self.w_o(atten)


class TransformerBlock(nn.Module):
    """transformer块"""

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        max_seq_len: int,
        theta: float,
        d_ff: int,
        device: torch.device | None = None,
    ) -> None:
        """
        构建 transformer 块

        Args:
           d_model (int):          输入特征的维度
           num_heads (int):        注意力头的个数
           max_seq_len (int):      序列长度的最大值
           d_ff (int):             内部前馈层的维度
           device (torch.device):  保存参数的设备
        """
        super().__init__()

        self.d_model: int = d_model
        self.num_heads: int = num_heads
        self.d_ff: int = (
            d_ff  # 注意这里的 d_ff 也许和 ffn 里的 d_ff 不一样，因为会把它转化为 64 的倍数
        )
        self.device: torch.device | None = device

        self.norm1 = RMSNorm(d_model, device=device)
        self.attn = MHAWithRoPE(d_model, num_heads, max_seq_len, theta, device)
        self.norm2 = RMSNorm(d_model, device=device)
        self.ffn = SwiGLUFFN(d_model, d_ff, device)

    def forward(
        self,
        x: torch.Tensor,
    ):
        """
        前向传播

        Args:
            x (torch.Tensor):   输入张量，形状为 (..., seq, d_model)
        """

        token_positions = torch.arange(x.shape[-2], dtype=torch.int, device=self.device)

        # attn 计算
        attn_output = self.attn(self.norm1(x), token_positions)
        # 残差连接
        x1 = x + attn_output

        # ffn 计算
        ffn = self.ffn(self.norm2(x1))
        # 残差连接
        res = x1 + ffn

        return res


class TransformerLM(nn.Module):
    """完整的 Transformer 语言模型"""

    def __init__(
        self,
        vocab_size: int,
        context_length: int,
        num_layers: int,
        d_model: int,
        num_heads: int,
        theta: float,
        d_ff: int,
        device: torch.device | None = None,
    ) -> None:
        """
        构建 Transformer 语言模型

        Args:
            vocab_size (int):       词汇表的大小
            context_length (int):   上下文的最大长度（即 max_seq_len）
            num_layers (int):       Transformer Block 的个数
            d_model (int):          输入特征的维度
            num_heads (int):        注意力头的个数
            d_ff (int):             内部前馈层的维度
            device (torch.device):  保存参数的设备
            normalized (bool):      logits 是否经过 softmax 归一化
        """
        super().__init__()

        self.embedding = Embedding(vocab_size, d_model, device)
        self.tfblocks: nn.ModuleList = nn.ModuleList(
            TransformerBlock(d_model, num_heads, context_length, theta, d_ff, device)
            for _ in range(num_layers)
        )
        self.output_norm = RMSNorm(d_model, device=device)
        self.output_linear = Linear(d_model, vocab_size, device)

    def forward(self, x: torch.Tensor):
        """
        前向传播

        Args:
            x (torch.Tensor):   输入张量，形状为 (..., seq)，每个元素是词汇表中的索引
        Returns:
            输出张量，形状为 (..., seq, vocab_size)，表示每一个词的 logits
        """

        # Embedding
        res = self.embedding(x)

        # Transformer 块
        for block in self.tfblocks:
            res = block(res)

        # Norm
        res = self.output_norm(res)

        # Linear
        res = self.output_linear(res)

        return res


if __name__ == "__main__":

    # Smoke test: build a small model and report its trainable-parameter count.
    model = TransformerLM(
        vocab_size=10000,
        context_length=256,
        num_layers=4,
        d_model=512,
        num_heads=16,
        theta=10000,
        d_ff=1344,
    )

    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(trainable_params)
