# Multi-head Latent Attention (MLA) as used in DeepSeek-V2.
import torch
import torch.nn as nn
from typing import Optional, Tuple

from DeepSeekV2_Lite_Origin.configuration_deepseek import DeepseekV2Config
from RMSNorm import RMSNorm as DeepseekV2RMSNorm
from decoupled_ROPE_DeepSeekV2 import DeepseekV2RotaryEmbedding,DeepseekV2LinearScalingRotaryEmbedding,apply_rotary_pos_emb
from transformers.cache_utils import Cache

class DeepseekV2Attention(nn.Module):
    """Multi-head Latent Attention (MLA) from DeepSeek-V2.

    Queries and keys/values are produced through low-rank down/up
    projections. Position information (RoPE) is carried by a small
    decoupled per-head slice of width ``qk_rope_head_dim``; the remaining
    ``qk_nope_head_dim`` channels are position-free. The single RoPE key
    slice is shared across all heads (MQA-style), which is what allows
    the compressed latent KV representation.
    """

    def __init__(self, config: DeepseekV2Config, layer_idx: Optional[int] = None):
        """Build the MLA projections and rotary embedding.

        Args:
            config: model hyper-parameters (see per-attribute comments).
            layer_idx: index of this layer in the model; required whenever
                a KV ``Cache`` is passed to ``forward``.
        """
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            print(
                f"Warning: Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout   # dropout prob on attention weights (e.g. 0.1)
        self.hidden_size = config.hidden_size               # input hidden-state width (e.g. 4096)
        self.num_heads = config.num_attention_heads         # number of attention heads (e.g. 32)

        self.max_position_embeddings = config.max_position_embeddings  # maximum sequence length (e.g. 2048)
        self.rope_theta = config.rope_theta                 # RoPE base theta (e.g. 10000)
        self.q_lora_rank = config.q_lora_rank               # rank of the query down-projection (e.g. 1536); None disables the low-rank query path
        self.qk_rope_head_dim = config.qk_rope_head_dim     # per-head positional (RoPE) Q/K width (e.g. 64)
        self.kv_lora_rank = config.kv_lora_rank             # rank of the key/value down-projection (e.g. 512)
        self.v_head_dim = config.v_head_dim                 # per-head value width (e.g. 128)
        self.qk_nope_head_dim = config.qk_nope_head_dim     # per-head non-positional Q/K width (e.g. 128)
        self.q_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim  # full per-head query width (e.g. 128 + 64 = 192)

        self.is_causal = True

        if self.q_lora_rank is None:
            # Conventional full-rank query projection W^{Q}.
            self.q_proj = nn.Linear(
                self.hidden_size, self.num_heads * self.q_head_dim, bias=False
            )
        else:
            # Low-rank query path: down-projection W^{DQ} ...
            self.q_a_proj = nn.Linear(
                self.hidden_size, config.q_lora_rank, bias=config.attention_bias
            )
            self.q_a_layernorm = DeepseekV2RMSNorm(config.q_lora_rank)
            # ... then up-projection covering both the non-positional part
            # W^{UQ} and the positional part W^{QR}.
            self.q_b_proj = nn.Linear(
                config.q_lora_rank, self.num_heads * self.q_head_dim, bias=False
            )

        # Joint KV down-projection: W^{DKV} (latent KV, kv_lora_rank wide)
        # concatenated with W^{KR} (one shared RoPE key slice, MQA-style).
        self.kv_a_proj_with_mqa = nn.Linear(
            self.hidden_size,
            config.kv_lora_rank + config.qk_rope_head_dim,
            bias=config.attention_bias,
        )
        self.kv_a_layernorm = DeepseekV2RMSNorm(config.kv_lora_rank)
        # KV up-projection: W^{UK} (non-positional keys) and W^{UV} (values).
        # Note q_head_dim - qk_rope_head_dim == qk_nope_head_dim.
        self.kv_b_proj = nn.Linear(
            config.kv_lora_rank,
            self.num_heads * (self.q_head_dim - self.qk_rope_head_dim + self.v_head_dim),
            bias=False,
        )

        # Output projection W^{O}.
        self.o_proj = nn.Linear(
            self.num_heads * self.v_head_dim,
            self.hidden_size,
            bias=config.attention_bias,
        )
        self._init_rope()

        # 1/sqrt(d) softmax scaling over the full per-head query width.
        # NOTE(review): the yarn-style mscale correction that the reference
        # implementation applies when `rope_scaling` is set is omitted here.
        self.softmax_scale = self.q_head_dim ** (-0.5)

    def _init_rope(self):
        """Instantiate the rotary embedding used for the decoupled RoPE slice."""
        if self.config.rope_scaling is None:
            self.rotary_emb = DeepseekV2RotaryEmbedding(
                self.qk_rope_head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = DeepseekV2LinearScalingRotaryEmbedding(
                    self.qk_rope_head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            else:
                # "dynamic" and "yarn" scaling are not implemented in this port.
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Run MLA attention over `hidden_states`.

        Args:
            hidden_states: input of shape (bsz, q_len, hidden_size).
            attention_mask: optional ADDITIVE mask of shape
                (bsz, 1, q_len, kv_seq_len) — 0 for visible positions,
                a large negative value (e.g. -inf) for masked ones.
            position_ids: positions fed to RoPE.
            past_key_value: optional HF ``Cache`` with previous K/V states.
            output_attentions: if True, also return attention probabilities.
            use_cache: accepted for interface compatibility; caching occurs
                whenever `past_key_value` is provided.

        Returns:
            Tuple of (attn_output, attn_weights or None, past_key_value).

        Raises:
            ValueError: on shape mismatches, or when a cache is used
                without `layer_idx`.
        """
        if "padding_mask" in kwargs:
            print(
                "Warning: Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.`"
            )
        bsz, q_len, _ = hidden_states.size()

        # --- queries ----------------------------------------------------
        if self.q_lora_rank is None:
            q = self.q_proj(hidden_states)
        else:
            q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
        # (bsz, num_heads, q_len, q_head_dim), then split off the RoPE slice.
        q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2)
        q_nope, q_pe = torch.split(
            q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
        )

        # --- keys / values ----------------------------------------------
        compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
        # Latent KV (kv_lora_rank wide) and the single shared RoPE key slice.
        compressed_kv, k_pe = torch.split(
            compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1
        )
        k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2)
        # Up-project the latent KV into per-head non-positional keys + values.
        kv = (
            self.kv_b_proj(self.kv_a_layernorm(compressed_kv))
            .view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
            .transpose(1, 2)
        )

        k_nope, value_states = torch.split(
            kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1
        )
        kv_seq_len = value_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        # Rotate only the decoupled positional slices.
        q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids)

        # Re-assemble full heads as [nope | rope]; the single k_pe slice is
        # broadcast to all heads via the slice assignment.
        query_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
        query_states[:, :, :, : self.qk_nope_head_dim] = q_nope
        query_states[:, :, :, self.qk_nope_head_dim :] = q_pe

        key_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
        key_states[:, :, :, : self.qk_nope_head_dim] = k_nope
        key_states[:, :, :, self.qk_nope_head_dim :] = k_pe
        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(
                key_states, value_states, self.layer_idx, cache_kwargs
            )

        # Scaled dot-product scores: (bsz, num_heads, q_len, kv_seq_len).
        attn_weights = (
            torch.matmul(query_states, key_states.transpose(2, 3)) * self.softmax_scale
        )

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )
        # The mask is optional (matches the Optional signature); when given
        # it is applied additively before the softmax.
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # Upcast the softmax to fp32 for numerical stability, then cast back.
        attn_weights = nn.functional.softmax(
            attn_weights, dim=-1, dtype=torch.float32
        ).to(query_states.dtype)
        attn_weights = nn.functional.dropout(
            attn_weights, p=self.attention_dropout, training=self.training
        )
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.v_head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.v_head_dim)}, but is"
                f" {attn_output.size()}"
            )

        # (bsz, q_len, num_heads * v_head_dim) -> output projection W^{O}.
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim)
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

if __name__ == '__main__':
    # Smoke test: one forward pass on random input with a causal mask.
    torch.manual_seed(123)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    config = DeepseekV2Config()
    model = DeepseekV2Attention(config).to(device)
    batch_size = 1
    seq_len = 5
    embedding_dim = config.hidden_size
    # Use batch_size (previously hard-coded to 1) so the demo stays
    # consistent when batch_size changes.
    hidden_states = torch.randn(batch_size, seq_len, embedding_dim).to(device)
    q_len = seq_len
    kv_seq_len = seq_len
    # The forward pass applies the mask ADDITIVELY (scores + mask), so a
    # causal mask must be 0.0 at visible positions and -inf above the
    # diagonal. The previous tril(ones) mask added +1 to visible scores
    # and masked nothing.
    causal = torch.triu(
        torch.full((q_len, kv_seq_len), float("-inf")), diagonal=1
    )
    attention_mask = (
        causal.unsqueeze(0)
        .unsqueeze(0)
        .expand(batch_size, 1, q_len, kv_seq_len)
        .to(device)
    )
    attn_output, attn_weights, past_key_value = model(
        hidden_states, attention_mask=attention_mask, output_attentions=True
    )
    print("attn_weights shape: ", attn_weights.shape)
    print("attn_output shape: ", attn_output.shape)
    print(attn_output)