import torch
import torch.nn as nn
import torch.nn.functional as F

from args import ModelArgs
from utils import RMSNorm, RotaryPositionEmbedding


class MLA(nn.Module):
    """
    Multi-Head Latent Attention (MLA) Layer.

    Queries and keys/values are first down-projected into low-rank latent
    spaces (``q_lora_rank`` / ``kv_lora_rank``) and then up-projected per
    head, splitting each head dimension into a position-agnostic ("nope")
    part and a rotary-embedded ("rope") part. The rope part of the key is
    produced once and shared across all heads.
    """
    def __init__(self, args: ModelArgs):
        super().__init__()
        self.dim = args.dim
        self.n_heads = args.n_heads
        self.q_lora_rank = args.q_lora_rank
        self.kv_lora_rank = args.kv_lora_rank
        self.qk_nope_head_dim = args.qk_nope_head_dim
        self.qk_rope_head_dim = args.qk_rope_head_dim
        self.qk_head_dim = self.qk_nope_head_dim + self.qk_rope_head_dim
        self.v_head_dim = args.v_head_dim

        # Query path: down-project -> normalize -> up-project into
        # per-head nope/rope components.
        self.w_dq = nn.Linear(self.dim, self.q_lora_rank)
        self.norm_q = RMSNorm(self.q_lora_rank)
        self.w_uq_nope = nn.Linear(self.q_lora_rank, self.n_heads * self.qk_nope_head_dim)
        self.w_uq_rope = nn.Linear(self.q_lora_rank, self.n_heads * self.qk_rope_head_dim)
        # Use the instance's configured max_seq_len, not the class default,
        # so per-instance overrides of ModelArgs take effect.
        self.rope_emb_q = RotaryPositionEmbedding(self.qk_rope_head_dim, max_len=args.max_seq_len)

        # Shared compressed KV latent.
        self.w_dkv = nn.Linear(self.dim, self.kv_lora_rank)
        self.norm_kv = RMSNorm(self.kv_lora_rank)

        # Key path: per-head nope component plus a single shared rope
        # component (broadcast across heads in forward()).
        self.w_uk_nope = nn.Linear(self.kv_lora_rank, self.n_heads * self.qk_nope_head_dim)
        self.w_uk_rope = nn.Linear(self.kv_lora_rank, self.qk_rope_head_dim)
        self.rope_emb_k = RotaryPositionEmbedding(self.qk_rope_head_dim, max_len=args.max_seq_len)

        # Value path.
        self.w_uv = nn.Linear(self.kv_lora_rank, self.n_heads * self.v_head_dim)

        # Output projection back to model dimension.
        self.w_o = nn.Linear(self.n_heads * self.v_head_dim, self.dim)

        # 1 / sqrt(d_k): multiply scores by this factor before softmax.
        self.softmax_scale = self.qk_head_dim ** -0.5

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Apply causal multi-head latent attention.

        Args:
            x: input of shape (batch_size, seq_len, dim).

        Returns:
            Tensor of shape (batch_size, seq_len, dim).
        """
        batch_size, seq_len, _ = x.shape

        # --- Queries ---
        c_q = self.w_dq(x)  # (bs, sl, q_lora_rank)
        c_q = self.norm_q(c_q)

        q_nope = self.w_uq_nope(c_q)  # (bs, sl, n_heads * qk_nope_head_dim)
        q_nope = q_nope.view(batch_size, seq_len, self.n_heads, -1)
        q_nope = q_nope.transpose(1, 2)  # (bs, n_heads, sl, qk_nope_head_dim)

        q_rope = self.w_uq_rope(c_q)  # (bs, sl, n_heads * qk_rope_head_dim)
        q_rope = q_rope.view(batch_size, seq_len, self.n_heads, -1)
        q_rope = q_rope.transpose(1, 2)  # (bs, n_heads, sl, qk_rope_head_dim)
        q_rope = self.rope_emb_q(q_rope)

        q = torch.cat([q_nope, q_rope], dim=-1)  # (bs, n_heads, sl, qk_head_dim)

        # --- Compressed KV latent ---
        c_kv = self.w_dkv(x)  # (bs, sl, kv_lora_rank)
        c_kv = self.norm_kv(c_kv)

        # --- Keys ---
        k_nope = self.w_uk_nope(c_kv)  # (bs, sl, n_heads * qk_nope_head_dim)
        k_nope = k_nope.view(batch_size, seq_len, self.n_heads, -1)
        k_nope = k_nope.transpose(1, 2)  # (bs, n_heads, sl, qk_nope_head_dim)

        k_rope = self.w_uk_rope(c_kv)  # (bs, sl, qk_rope_head_dim)
        k_rope = k_rope.unsqueeze(1)  # (bs, 1, sl, qk_rope_head_dim)
        k_rope = self.rope_emb_k(k_rope)
        # expand() broadcasts without copying; the cat below materializes it.
        k_rope = k_rope.expand(-1, self.n_heads, -1, -1)

        k = torch.cat([k_nope, k_rope], dim=-1)  # (bs, n_heads, sl, qk_head_dim)

        # --- Values ---
        v = self.w_uv(c_kv)  # (bs, sl, n_heads * v_head_dim)
        v = v.view(batch_size, seq_len, self.n_heads, -1)
        v = v.transpose(1, 2)  # (bs, n_heads, sl, v_head_dim)

        # --- Scaled dot-product attention with causal mask ---
        attn_scores = q @ k.transpose(-1, -2)  # (bs, n_heads, sl, sl)
        # softmax_scale is already d_k^-0.5, so MULTIPLY (dividing would
        # scale scores by sqrt(d_k) instead of 1/sqrt(d_k)).
        attn_scores = attn_scores * self.softmax_scale

        # Build the mask on the input's device/dtype to avoid a CPU/GPU
        # device mismatch when x lives on an accelerator.
        mask = torch.triu(
            torch.ones(seq_len, seq_len, dtype=torch.bool, device=x.device),
            diagonal=1,
        )
        attn_scores = attn_scores.masked_fill(mask, -torch.inf)

        attn_weight = F.softmax(attn_scores, dim=-1)  # (bs, n_heads, sl, sl)

        context_vec = attn_weight @ v  # (bs, n_heads, sl, v_head_dim)
        context_vec = context_vec.transpose(1, 2)  # (bs, sl, n_heads, v_head_dim)
        context_vec = context_vec.contiguous().view(
            batch_size, seq_len, self.n_heads * self.v_head_dim
        )

        return self.w_o(context_vec)  # (bs, sl, dim)


if __name__ == '__main__':
    # Quick smoke test: run a random batch through the layer and verify
    # the output keeps the (batch, seq, dim) shape.
    config = ModelArgs()
    layer = MLA(config)

    batch, seq = 2, 10
    sample = torch.randn(batch, seq, config.dim)
    out = layer(sample)

    print(out.shape)