import torch
import torch.nn as nn
import torch.nn.functional as F

from args import ModelArgs
from moe import MoE
from mla import MLA
from utils import RMSNorm, RotaryPositionEmbedding


class Block(nn.Module):
    """One transformer layer: pre-norm MLA attention followed by a
    pre-norm MoE feed-forward network, each wrapped in a residual
    connection.

    NOTE(review): ``layer_id`` is accepted but not used by this
    implementation; it is kept for interface compatibility.
    """

    def __init__(self, layer_id: int, args: ModelArgs):
        super().__init__()
        # Sub-layers: multi-head latent attention and mixture-of-experts FFN.
        self.attn = MLA(args)
        self.ffn = MoE(args)
        # One RMSNorm in front of each sub-layer (pre-normalization).
        self.attn_norm = RMSNorm(args.dim)
        self.ffn_norm = RMSNorm(args.dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply both sub-layers with residual connections.

        Args:
            x: activations, presumably (batch_size, seq_len, dim) — shape is
               set by the caller (see ``Model.forward``).

        Returns:
            Tensor of the same shape as ``x``.
        """
        residual = x
        x = residual + self.attn(self.attn_norm(residual))
        residual = x
        x = residual + self.ffn(self.ffn_norm(residual))
        return x
    

class Model(nn.Module):
    """Decoder-only transformer: token embedding, a stack of ``Block``
    layers, a final RMSNorm, and a linear head producing next-token
    logits for the last sequence position only.
    """

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.embed = nn.Embedding(args.vocab_size, args.dim)
        # Stack of n_layers identical transformer blocks.
        self.layers = torch.nn.ModuleList(
            Block(layer_id, args) for layer_id in range(args.n_layers)
        )
        self.norm = RMSNorm(args.dim)
        self.head = nn.Linear(args.dim, args.vocab_size)

    def forward(self, tokens: torch.Tensor) -> torch.Tensor:
        """Compute logits for the token following the last input position.

        Args:
            tokens: integer token ids of shape (batch_size, seq_len).

        Returns:
            Logits of shape (batch_size, vocab_size).
        """
        # Token embedding: (batch_size, seq_len) -> (batch_size, seq_len, dim)
        hidden = self.embed(tokens)

        # Run every transformer block; the shape is preserved throughout.
        for block in self.layers:
            hidden = block(hidden)
        hidden = self.norm(hidden)  # (batch_size, seq_len, dim)

        # Keep only the final sequence position: (batch_size, dim)
        last = hidden[:, -1, :]

        # Project to vocabulary logits: (batch_size, vocab_size)
        return self.head(last)
        

if __name__ == '__main__':
    # Smoke test: build a model from default args and run one dummy batch.
    args = ModelArgs()
    model = Model(args)

    # Dummy input: a batch of 2 sequences of 10 token ids (all zeros).
    # Allocate as long directly instead of creating a float tensor and
    # converting it with .to(dtype=...), which wastes an allocation.
    tokens = torch.zeros(2, 10, dtype=torch.long)
    logits = model(tokens)

    # Expected: torch.Size([2, vocab_size])
    print(logits.shape)