from sympy import use
import einops
from networkx import tetrahedral_graph
import numpy as np
from requests import head
import torch
from torch import Tensor, einsum
import torch.nn as nn
from torch.nn import Sequential

from jaxtyping import Float, Int


# 3.1 Transformer LM
# 3.1.1 Token Embeddings
class MyEmbedding(nn.Module):
    """Lookup-table token embedding: maps integer ids to learned vectors."""

    def __init__(
        self, num_embeddings: int, embedding_dim: int, device=None, dtype=None
    ):
        super().__init__()
        # One learned row per vocabulary entry, truncated-normal initialized
        # (mean 0, std 1, clipped to [-3, 3]).
        table = torch.empty(num_embeddings, embedding_dim, dtype=dtype)
        nn.init.trunc_normal_(table, mean=0.0, std=1.0, a=-3.0, b=3.0)
        self.weight = nn.Parameter(table)
        self.to(device)

    def forward(self, indices: Tensor) -> Tensor:
        """Gather embedding rows; supports arbitrary leading batch dims."""
        return self.weight[indices]


class MyLinear(nn.Module):
    """Bias-free linear layer.

    Note the weight is stored as (in_features, out_features), so forward is
    ``x @ W`` (no transpose), unlike torch.nn.Linear.
    """

    def __init__(self, in_features, out_features, device=None, dtype=None):
        super().__init__()
        weight = torch.empty((in_features, out_features), dtype=dtype)
        # Xavier-style std, truncated at +/-3 sigma.
        sigma = np.sqrt(2 / (in_features + out_features))
        nn.init.trunc_normal_(weight, 0, sigma, -3 * sigma, 3 * sigma)
        self.weight = nn.Parameter(weight)
        self.to(device)

    def forward(self, x: Tensor) -> Tensor:
        """Project the last dim: (..., in_features) -> (..., out_features)."""
        return torch.matmul(x, self.weight)


class RMSNorm(nn.Module):
    """Root-mean-square layer norm over the last dimension (no mean
    subtraction, no bias).

    Fix: the learnable gain is initialized to ones — the standard RMSNorm
    init, making the layer an identity transform at startup — instead of
    `torch.randn`, and the requested `dtype` is now honored.
    """

    def __init__(self, d_model: int, eps: float = 1e-5, device=None, dtype=None):
        super().__init__()
        # Identity gain at init (was torch.randn, which randomly rescales
        # every channel and destabilizes early training).
        self.weight = nn.Parameter(torch.ones(d_model, dtype=dtype))
        self.d_model = d_model
        self.eps = eps  # added inside the sqrt for numerical safety
        self.to(device)

    def forward(self, x: Tensor) -> Tensor:
        """Normalize x (..., d_model) by its RMS along the last dim, then scale."""
        in_dtype = x.dtype
        # Upcast so the mean-of-squares is computed in float32.
        x = x.to(dtype=torch.float32)
        rms = torch.sqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        result = x / rms * self.weight
        return result.to(in_dtype)  # restore the caller's dtype


class MySiLU(nn.Module):
    """SiLU (swish) activation: x * sigmoid(x).

    Fix: implemented via `torch.sigmoid` instead of `x / (1 + exp(-x))`.
    The explicit exp overflows to inf for large negative x, which produces
    NaN gradients through autograd; sigmoid saturates cleanly.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x: Tensor) -> Tensor:
        return x * torch.sigmoid(x)


class SwiGLU(nn.Module):
    """SwiGLU feed-forward network: w2( SiLU(w1 x) * (w3 x) ).

    Fix: `self.d_model = Tensor(d_model)` constructed an *uninitialized*
    tensor of length d_model instead of recording the size; it is now kept
    as a plain int. Optional device/dtype are also forwarded to the
    projections (backward-compatible new parameters).
    """

    def __init__(self, d_model: int, d_ff, device=None, dtype=None):
        super().__init__()
        self.d_model = d_model  # fix: was Tensor(d_model)
        self.w1 = MyLinear(d_model, d_ff, device=device, dtype=dtype)
        self.w2 = MyLinear(d_ff, d_model, device=device, dtype=dtype)
        self.w3 = MyLinear(d_model, d_ff, device=device, dtype=dtype)
        self.gate = MySiLU()

    def forward(self, x: Tensor):
        """(..., d_model) -> (..., d_model)."""
        gate = self.gate(self.w1(x))  # SiLU-gated branch
        up = self.w3(x)               # plain linear branch
        return self.w2(gate * up)


class MyRoPE(nn.Module):
    """Rotary positional embedding (RoPE).

    Precomputes cos/sin tables for positions [0, max_seq_len) and rotates
    consecutive (even, odd) feature pairs of the input by a position- and
    frequency-dependent angle. Requires an even d_k.
    """

    def __init__(self, theta: float, d_k: int, max_seq_len: int, device=None):
        super().__init__()
        self.theta = theta
        self.d_k = d_k
        self.max_seq_len = max_seq_len
        # Inverse frequencies 1 / theta^(2k/d_k), shape (d_k/2,)
        freqs = 1.0 / (
            theta ** (torch.arange(0, d_k, 2, dtype=torch.float32) / d_k)
        )
        t = torch.arange(max_seq_len, dtype=torch.float32)  # positions (seq,)
        angles = torch.outer(t, freqs)  # outer product -> (seq, d_k/2)

        # Non-persistent buffers: move with .to(device) but stay out of the
        # state_dict since they are cheap to recompute.
        self.register_buffer("cos", torch.cos(angles), persistent=False)
        self.register_buffer("sin", torch.sin(angles), persistent=False)
        if device is not None:  # fix: `!= None` -> idiomatic identity check
            self.to(device)

    # Annotations quoted so the class still imports where jaxtyping is absent.
    def forward(
        self,
        x: "Float[Tensor, ' ... sequence_length d_k']",
        token_positions: "Int[Tensor, ' ... sequence_length']",
    ) -> "Float[Tensor, ' ... sequence_length d_k']":
        """Rotate x pairwise; token_positions selects the angle per token."""
        d_k = x.shape[-1]
        assert d_k == self.d_k

        # Gather per-position tables: (..., seq_len, d_k//2)
        pos = token_positions.long()
        cos = self.cos[pos]  # type: ignore
        sin = self.sin[pos]  # type: ignore

        x1 = x[..., ::2]   # even-indexed features
        x2 = x[..., 1::2]  # odd-indexed features

        # 2-D rotation per pair: [x1*cos - x2*sin, x1*sin + x2*cos]
        rotated = torch.empty_like(x)
        rotated[..., ::2] = x1 * cos - x2 * sin
        rotated[..., 1::2] = x1 * sin + x2 * cos

        return rotated


class MySoftmax(nn.Module):
    """Numerically stable softmax over a fixed dimension.

    Fix: the original did `x -= max_x`, mutating the *caller's* tensor in
    place (and raising on autograd leaf tensors that require grad); the
    max-shift is now computed out of place.
    """

    def __init__(self, dim: int):
        super().__init__()
        self.dim = dim  # dimension to normalize over

    def forward(self, x: Tensor) -> Tensor:
        # Subtract the max for overflow safety without mutating the input.
        shifted = x - x.max(dim=self.dim, keepdim=True).values
        ex = shifted.exp()
        return ex / ex.sum(dim=self.dim, keepdim=True)


class MyScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V."""

    def __init__(self, d_k: int):
        super().__init__()
        self.scale = np.sqrt(d_k)  # scores are divided by sqrt(d_k)
        self.softmax = MySoftmax(dim=-1)

    def forward(
        self,
        Q: Tensor,
        K: Tensor,
        V: Tensor,
        mask: Tensor | None = None,
    ) -> Tensor:
        """
        Given key (K), query (Q), and value (V) tensors, return
        the output of scaled dot product attention.

        Args:
            Q (Float[Tensor, " ... queries d_k"]): Query tensor
            K (Float[Tensor, " ... keys d_k"]): Key tensor
            V (Float[Tensor, " ... values d_v"]): Values tensor
            mask (Float[Tensor, " ... queries keys"] | None): Mask tensor;
                True (or nonzero) entries mark positions that MAY be attended.
        Returns:
            Float[Tensor, " ... queries d_v"]: Output of SDPA
        """
        # (fix: removed unused local `d_k`; self.scale already holds sqrt(d_k))
        scores = torch.einsum("...qd,...kd->...qk", Q, K) / self.scale

        if mask is not None:
            # Accept float/int masks too by coercing to bool.
            if mask.dtype != torch.bool:
                mask = mask.bool()
            # Disallowed positions get -inf so softmax assigns them zero weight.
            scores = torch.masked_fill(scores, ~mask, -np.inf)

        attn_weights = self.softmax(scores)
        return torch.einsum("...qk,...kv->...qv", attn_weights, V)


class CausalMultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention with a causal mask and optional RoPE.

    Fixes: the Q/K/V/output projections now go through ``MyLinear.forward``
    (x @ W) instead of ``x @ W.T``, which silently used the transposed
    weight convention and bypassed the module's own definition; and
    ``token_positions=None`` now defaults to 0..seq_len-1 instead of
    crashing inside RoPE.
    """

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        max_seq_len: int,
        rope_theta: float = 10000.0,
        use_rope: bool = True,
        device=None,
        dtype=None,
    ):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_k = d_model // num_heads  # per-head query/key width
        self.d_v = self.d_k              # per-head value width
        self.max_seq_len = max_seq_len
        self.use_rope = use_rope
        self.device = device
        self.dtype = dtype
        # ----------------------------
        self.rope = MyRoPE(rope_theta, self.d_k, self.max_seq_len, device=device)
        self.q_proj, self.k_proj, self.v_proj, self.o_proj = [
            MyLinear(d_model, d_model, device=device, dtype=dtype)
            for _ in range(4)
        ]
        self.attn = MyScaledDotProductAttention(self.d_k)
        # Lower-triangular mask: position i may attend to positions <= i.
        causal_mask = torch.tril(torch.ones((max_seq_len, max_seq_len), device=device))
        self.register_buffer("causal_mask", causal_mask, persistent=False)
        self.to(device)

    def forward(
        self,
        in_features: Float[Tensor, " ... sequence_length d_in"],
        token_positions: Int[Tensor, " ... sequence_length"] | None = None,
    ) -> Float[Tensor, " ... sequence_length d_out"]:
        """
        Args:
            in_features (Float[Tensor, "... sequence_length d_in"]): Input features.
            token_positions (Int[Tensor, "... sequence_length"] | None): Positions of
                each token; defaults to 0..seq_len-1 when omitted.

        Returns:
            Float[Tensor, " ... sequence_length d_out"]: Output of batched
            causal multi-head self-attention over the input features.
        """
        seq_len = in_features.shape[-2]
        # 1. Project to Q, K, V via each MyLinear's own convention (x @ W).
        q = self.q_proj(in_features)  # [..., seq_len, d_model]
        k = self.k_proj(in_features)
        v = self.v_proj(in_features)
        # 2. Split heads: (..., seq, d_model) -> (..., heads, seq, head_dim)
        q = einops.rearrange(q, "... s (h dk) -> ... h s dk", h=self.num_heads)
        k = einops.rearrange(k, "... s (h dk) -> ... h s dk", h=self.num_heads)
        v = einops.rearrange(v, "... s (h dv) -> ... h s dv", h=self.num_heads)
        if self.use_rope:
            if token_positions is None:
                # Default to absolute positions when the caller gives none.
                token_positions = torch.arange(seq_len, device=in_features.device)
            q = self.rope(q, token_positions)
            k = self.rope(k, token_positions)
        # 3. Causal scaled dot-product attention per head.
        out = self.attn(
            q, k, v, mask=self.causal_mask[:seq_len, :seq_len].bool()  # type: ignore
        )  # (..., heads, seq_len, head_dim)
        # 4. Merge heads back and apply the output projection.
        out = einops.rearrange(out, "... h s dv -> ... s (h dv)")
        return self.o_proj(out)


class TransformerBlock(nn.Module):
    """Pre-norm Transformer block: x + Attn(LN(x)), then y + FFN(LN(y))."""

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        d_ff: int,
        max_seq_len: int,
        theta: float = 10000.0,
        device=None,
        dtype=None,
    ):
        """The pre-norm Transformer block.

        Args:
            d_model: model (residual stream) width.
            num_heads: number of attention heads.
            d_ff: hidden width of the SwiGLU feed-forward.
            max_seq_len: maximum sequence length (sizes RoPE tables / mask).
            theta: RoPE base frequency.
        """
        super().__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        self.d_ff = d_ff
        self.max_seq_len = max_seq_len
        self.theta = theta
        self.device = device
        self.dtype = dtype
        # ----------------------------
        self.attn = CausalMultiHeadSelfAttention(
            d_model,
            num_heads,
            max_seq_len=max_seq_len,
            rope_theta=theta,
            use_rope=True,
            device=device,
            dtype=dtype,
        )
        self.ffn = SwiGLU(d_model, d_ff)
        # Forward the device to the norms as well (dtype is handled by the
        # final self.to below / the submodules themselves).
        self.ln1 = RMSNorm(d_model, device=device)
        self.ln2 = RMSNorm(d_model, device=device)
        self.to(device)

    def forward(
        self,
        x: Float[Tensor, " batch sequence_length d_model"],
        token_positions: Int[Tensor, " batch sequence_length"] | None = None,
    ) -> Float[Tensor, " batch sequence_length d_model"]:
        """Two residual sub-layers, each with a pre-applied RMSNorm."""
        y = x + self.attn(self.ln1(x), token_positions)
        return y + self.ffn(self.ln2(y))


class TransformerLM(nn.Module):
    """Decoder-only Transformer language model: token embedding, a stack of
    pre-norm Transformer blocks, a final RMSNorm, and an LM head.

    Fix: the context length was stored under the misspelled attribute
    ``content_length``; it is now ``context_length``. Also removed the dead
    commented-out mean-pooling line from forward.
    """

    def __init__(
        self,
        vocab_size: int,
        context_length: int,
        d_model: int,
        num_layers: int,
        num_heads: int,
        d_ff: int,
        rope_theta: float = 10000.0,
        device=None,
        dtype=None,
    ):
        super().__init__()
        self.vocab_size = vocab_size
        self.context_length = context_length  # fix: was self.content_length
        self.d_model = d_model
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.d_ff = d_ff
        self.rope_theta = rope_theta
        self.device = device
        self.dtype = dtype

        self.token_embeddings = MyEmbedding(vocab_size, d_model, device=device, dtype=dtype)
        self.layers = nn.ModuleList(
            [
                TransformerBlock(
                    d_model, num_heads, d_ff, context_length, rope_theta, device=device, dtype=dtype
                )
                for _ in range(num_layers)
            ]
        )
        self.ln_final = RMSNorm(d_model, device=device)
        self.lm_head = MyLinear(d_model, vocab_size, device=device, dtype=dtype)
        self.to(device)

    def forward(self, x: Tensor) -> Tensor:
        """Map token ids (batch, seq) to next-token logits (batch, seq, vocab)."""
        batch, seq_len = x.shape[:2]
        h = self.token_embeddings(x)
        # Absolute positions 0..seq_len-1, broadcast to every batch row.
        pos = torch.arange(seq_len, device=h.device).expand(batch, seq_len)
        for layer in self.layers:
            h = layer(h, pos)
        h = self.ln_final(h)
        return self.lm_head(h)