import math
from dataclasses import dataclass
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F

from deepseek_rope import DeepseekV2RMSNorm, DeepseekV2RotaryEmbedding, apply_rotary_pos_emb

@dataclass
class DeepseekConfig:
    """Hyper-parameters for the MLA (Multi-head Latent Attention) module."""

    hidden_size: int
    num_heads: int
    max_position_embeddings: int # RoPE-related: maximum sequence length
    rope_theta: float # RoPE base frequency; usually set quite large

    attention_dropout: float

    q_lora_rank: int # query latent rank; fairly large, e.g. 1536
    qk_rope_head_dim: int # per-head RoPE slice of q/k, e.g. 64
    kv_lora_rank: int # shared KV latent rank (Eq. 41 in the paper), e.g. 512

    v_head_dim: int # per-head value dimension, e.g. 128
    qk_nope_head_dim: int
    attention_bias: bool

class MLA(nn.Module):
    """DeepSeek-V2 style Multi-head Latent Attention.

    Three parts:
      1. Standard multi-head attention bookkeeping + output projection.
      2. Low-rank ("latent") down/up projections that compress queries and
         key/values before expanding them per head (the core of MLA).
      3. Rotary position embeddings applied only to a small per-head slice
         (``qk_rope_head_dim``) of the query/key vectors; the RoPE key slice
         is produced once and shared across all heads.
    """

    def __init__(self, config: DeepseekConfig):
        super().__init__()
        # --- Part 1: vanilla MHA bookkeeping ---
        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_heads
        # Per-head value dimension (may differ from the q/k head dimension).
        self.v_head_dim = config.v_head_dim

        self.out_proj = nn.Linear(
            self.num_heads * self.v_head_dim,
            self.hidden_size,
            bias=False,  # bias is optional here
        )

        # --- Part 2: low-rank compression (the most important part) ---
        self.qk_nope_head_dim = config.qk_nope_head_dim
        self.qk_rope_head_dim = config.qk_rope_head_dim

        self.q_lora_rank = config.q_lora_rank    # e.g. 7168 -> 1536
        self.kv_lora_rank = config.kv_lora_rank  # e.g. 512

        # Query: down-project to the latent, then RMSNorm.
        self.q_down_proj = nn.Linear(
            self.hidden_size,
            self.q_lora_rank,
            bias=config.attention_bias,
        )
        self.q_down_norm = DeepseekV2RMSNorm(self.q_lora_rank)

        # KV: one down-projection emits the shared latent AND the RoPE key
        # slice (qk_rope_head_dim is small, typically 64); split after.
        self.kv_down_proj = nn.Linear(
            self.hidden_size,
            self.kv_lora_rank + config.qk_rope_head_dim,
            bias=config.attention_bias,
        )
        self.kv_down_norm = DeepseekV2RMSNorm(self.kv_lora_rank)

        # Up-projections. q and k share the same per-head dimension:
        # a no-RoPE part plus a RoPE part.
        self.q_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
        self.q_up_proj = nn.Linear(
            self.q_lora_rank,
            self.num_heads * self.q_head_dim,
            bias=config.attention_bias,
        )  # output is split into a no-RoPE part and a RoPE part
        self.kv_up_proj = nn.Linear(
            self.kv_lora_rank,
            self.num_heads * (
                # q_head_dim - qk_rope_head_dim == qk_nope_head_dim
                self.q_head_dim - self.qk_rope_head_dim + self.v_head_dim
            ),
            bias=config.attention_bias,
        )

        # --- Part 3: rotary position embedding ---
        self.rotary_emb = DeepseekV2RotaryEmbedding(
            config.qk_rope_head_dim,
            config.max_position_embeddings,
            config.rope_theta,
        )

    def forward(
            self,
            hidden_states: torch.Tensor,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
    ):
        """Run MLA over ``hidden_states`` of shape (batch, seq_len, hidden_size).

        ``attention_mask``: positions where the mask equals 0 are masked out
        (assumed broadcastable to the attention-weight shape — TODO confirm
        against callers). ``position_ids`` is forwarded to
        ``apply_rotary_pos_emb``.

        Returns ``(attn_output, attn_weights)`` with shapes
        (batch, seq_len, hidden_size) and (batch, num_heads, seq_len, seq_len).
        """
        bsz, q_len, _ = hidden_states.size()

        # 1. Query path: compress, normalize, expand per head.
        q = self.q_down_proj(hidden_states)  # (b, q_len, q_lora_rank)
        q = self.q_down_norm(q)
        q = self.q_up_proj(q)                # (b, q_len, num_heads * q_head_dim)
        q = q.view(
            bsz, q_len, self.num_heads, self.q_head_dim
        ).transpose(1, 2)
        # (b, num_heads, q_len, q_head_dim) -> split no-RoPE / RoPE parts.
        q_nope, q_rope = torch.split(
            q,
            [self.qk_nope_head_dim, self.qk_rope_head_dim],
            dim=-1,
        )

        # 2. KV path: one projection yields the latent plus the shared RoPE key.
        compressed_kv = self.kv_down_proj(hidden_states)
        compressed_kv, k_rope = torch.split(
            compressed_kv,
            [self.kv_lora_rank, self.qk_rope_head_dim],
            dim=-1,
        )
        # k_rope: (b, q_len, qk_rope_head_dim); one head's worth, shared by all
        # heads, so give it a singleton head axis for later expansion.
        k_rope = k_rope.view(
            bsz, q_len, 1, self.qk_rope_head_dim
        ).transpose(1, 2)  # (b, 1, q_len, qk_rope_head_dim)

        kv = (
            self.kv_up_proj(self.kv_down_norm(compressed_kv))
            .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
            .transpose(1, 2)
        )
        # k_nope: (b, num_heads, q_len, qk_nope_head_dim)
        # value_states: (b, num_heads, q_len, v_head_dim)
        k_nope, value_states = torch.split(
            kv,
            [self.qk_nope_head_dim, self.v_head_dim],
            dim=-1,
        )

        # 3. Apply RoPE to the RoPE slices only.
        kv_seq_len = value_states.shape[-2]
        cos, sin = self.rotary_emb(
            value_states, seq_len=kv_seq_len
        )
        q_rope, k_rope = apply_rotary_pos_emb(
            q_rope, k_rope, cos, sin, position_ids
        )

        # 4. Reassemble full per-head query/key vectors. torch.cat preserves
        # the input dtype, unlike the previous torch.empty buffers, which
        # defaulted to float32 and silently upcast fp16/bf16 activations.
        query_states = torch.cat([q_nope, q_rope], dim=-1)
        key_states = torch.cat(
            [k_nope, k_rope.expand(-1, self.num_heads, -1, -1)],
            dim=-1,
        )
        # Both: (b, num_heads, q_len, qk_nope_head_dim + qk_rope_head_dim)

        # 5. Scaled dot-product attention.
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3))
        attn_weights = attn_weights / math.sqrt(self.q_head_dim)

        if attention_mask is not None:
            attn_weights = torch.masked_fill(
                attn_weights,
                attention_mask == 0,
                float("-inf"),
            )

        # 6. Softmax in float32 for numerical stability, then cast back; dropout.
        attn_weights = F.softmax(
            attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = F.dropout(
            attn_weights, p=self.attention_dropout, training=self.training)

        # 7. Weighted sum of values, merge heads, project out.
        # attn_weights (b, num_heads, q_len, q_len)
        # value_states (b, num_heads, q_len, v_head_dim)
        attn_output = torch.matmul(attn_weights, value_states)
        attn_output = attn_output.transpose(1, 2).reshape(bsz, q_len, -1)
        # (b, q_len, num_heads * v_head_dim)
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights


def test_mla():
    """Smoke-test MLA: one forward pass plus output-shape assertions."""
    config = DeepseekConfig(
        hidden_size=7168,
        num_heads=16,
        max_position_embeddings=1024,
        rope_theta=128000,
        attention_dropout=0.1,
        q_lora_rank=1536,
        qk_rope_head_dim=64,
        kv_lora_rank=512,

        v_head_dim=128,
        qk_nope_head_dim=128,
        attention_bias=False,
    )

    torch.manual_seed(0)  # reproducible input / init
    mla = MLA(config)
    batch, seq_len = 2, config.max_position_embeddings
    # Derive sizes from the config instead of re-hardcoding 1024/7168.
    x = torch.randn(batch, seq_len, config.hidden_size)
    position_ids = torch.arange(
        config.max_position_embeddings,
    ).unsqueeze(0).expand(
        x.size(0), -1
    )
    attn_output, attn_weights = mla(x, position_ids=position_ids, attention_mask=None)

    # A test should assert, not just print.
    assert attn_output.shape == (batch, seq_len, config.hidden_size)
    assert attn_weights.shape == (batch, config.num_heads, seq_len, seq_len)
    print(attn_output.shape)
    print(attn_weights.shape)


test_mla()