import torch
from torch import nn
from typing import Optional


def apply_rope(x: torch.Tensor, *args, **kwargs):
    """Identity placeholder for rotary position embedding (RoPE).

    Accepts and ignores any extra positional/keyword arguments so call
    sites can match a real RoPE implementation's signature; the input is
    returned unchanged.
    """
    return x


def update_compressed_kv_cache(compressed_kv: torch.Tensor):
    """Stand-in for a real compressed-KV cache update.

    Simulates retrieving previously cached latent tokens by tiling the
    current latents five times along the sequence dimension (dim 1), so
    the returned "cache" is five times longer than the fresh input.
    """
    return torch.cat([compressed_kv] * 5, dim=1)

def repeat_kv(hidden_states: torch.Tensor, n_rep: int = 1) -> torch.Tensor:
    """Duplicate each key/value head ``n_rep`` times along the head axis.

    Equivalent to ``torch.repeat_interleave(hidden_states, n_rep, dim=1)``:
    (batch, num_kv_heads, seqlen, head_dim) becomes
    (batch, num_kv_heads * n_rep, seqlen, head_dim). The expand + reshape
    form defers any copying to the final ``reshape``.

    Args:
        hidden_states: Tensor of shape (batch, num_kv_heads, seqlen, head_dim).
        n_rep: Duplication factor; must be an ``int``. With ``n_rep == 1``
            the input tensor is returned as-is (no copy). The previous
            ``Optional[int]`` annotation was incorrect — ``None`` would
            crash inside ``expand`` and was never a valid value.

    Returns:
        Tensor of shape (batch, num_kv_heads * n_rep, seqlen, head_dim).
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Insert a repeat axis after the head axis, broadcast it to n_rep, then
    # fold it into the head axis.
    hidden_states = hidden_states[:, :, None, :, :].expand(
        batch, num_key_value_heads, n_rep, slen, head_dim
    )
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

class MultiHeadLatentAttention(nn.Module):
    """Multi-Head Latent Attention (MLA) in the "absorbed" formulation.

    Rather than decompressing the latent KV into per-head keys and values,
    the query up-projection maps straight into the compressed KV space
    (``qk_rope_head_dim + kv_lora_rank`` per head). Attention logits are
    computed against the shared per-token latent vector, and the non-RoPE
    part of that latent doubles as the value. The per-token cache width is
    therefore ``qk_rope_head_dim + kv_lora_rank``, independent of
    ``num_heads``.

    Args:
        hidden_size: Model (input/output) embedding width.
        num_heads: Number of attention heads.
        q_lora_rank: Rank of the low-rank query down-projection.
        qk_rope_head_dim: Per-head dim that receives rotary embeddings.
        qk_nope_head_dim: Per-head non-rotary dim; here it only enters the
            softmax scale via ``qk_head_dim``.
        kv_lora_rank: Rank of the compressed KV latent.
        use_cache: When True, ``forward`` runs the compressed-KV cache
            update (a stub in this file).
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        q_lora_rank: int,
        qk_rope_head_dim: int,
        qk_nope_head_dim: int,
        kv_lora_rank: int,
        use_cache: Optional[bool] = False,
    ):
        super().__init__()
        qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim
        self.kv_lora_rank = kv_lora_rank
        self.qk_head_dim = qk_head_dim
        # NOTE(review): the softmax scale uses the *original* head dim
        # (nope + rope), not the absorbed query width (rope + kv_lora_rank).
        # Absorption must not change the logits, so this looks intentional —
        # confirm against the reference MLA formulation.
        self.scale = self.qk_head_dim**-0.5
        self.use_cache = use_cache

        self.q_a_proj = nn.Linear(hidden_size, q_lora_rank)
        # Diff 1 vs plain MLA: the query up-projection targets the
        # compressed-KV space (qk_rope_head_dim + kv_lora_rank per head),
        # i.e. the key up-projection is absorbed into the query projection.
        self.q_b_proj = nn.Linear(
            q_lora_rank, num_heads * (qk_rope_head_dim + kv_lora_rank)
        )

        self.kv_a_proj = nn.Linear(hidden_size, qk_rope_head_dim + kv_lora_rank)
        # Diff 2: no kv_b_proj — the latent is never decompressed into
        # per-head keys/values.

        # Diff 3: the per-head "value" width is kv_lora_rank (the latent
        # itself), so the value up-projection is absorbed into o_proj.
        self.o_proj = nn.Linear(num_heads * kv_lora_rank, hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
    ) -> torch.Tensor:
        """Run absorbed-MLA self-attention over ``hidden_states``.

        Args:
            hidden_states: (bsz, q_len, hidden_size) input activations.
            position_ids: Unused here; kept for interface compatibility —
                presumably a real ``apply_rope`` would consume it (TODO
                confirm once RoPE is implemented).

        Returns:
            (bsz, q_len, hidden_size) attention output.
        """
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_b_proj(self.q_a_proj(hidden_states))
        query_states = query_states.view(
            bsz, q_len, self.num_heads, self.qk_rope_head_dim + self.kv_lora_rank
        ).transpose(1, 2)

        # q_rope: (bsz, num_heads, q_len, qk_rope_head_dim)
        # q_nope: (bsz, num_heads, q_len, kv_lora_rank)
        q_rope, q_nope = query_states.split(
            [self.qk_rope_head_dim, self.kv_lora_rank], dim=-1
        )
        q_rope = apply_rope(q_rope)
        # query_states: (bsz, num_heads, q_len, qk_rope_head_dim + kv_lora_rank)
        query_states = torch.cat([q_rope, q_nope], dim=-1)

        # compressed_kv: (bsz, q_len, qk_rope_head_dim + kv_lora_rank)
        compressed_kv = self.kv_a_proj(hidden_states)
        k_rope, kv_nope = compressed_kv.split(
            [self.qk_rope_head_dim, self.kv_lora_rank], dim=-1
        )
        # k_rope: (bsz, num_heads, q_len, qk_rope_head_dim)
        # NOTE(review): broadcasting across heads and then taking head 0
        # below is redundant when RoPE is head-independent (the stub here
        # is); kept to mirror the reference structure.
        k_rope = repeat_kv(k_rope.unsqueeze(1), self.num_heads)
        k_rope = apply_rope(k_rope)
        compressed_kv = torch.cat([k_rope[:, 0, :, :], kv_nope], dim=-1)

        if self.use_cache:
            # Stubbed cache update lengthens the sequence axis; re-split so
            # kv_nope matches the updated latents.
            compressed_kv = update_compressed_kv_cache(compressed_kv)
            k_rope, kv_nope = compressed_kv.split(
                [self.qk_rope_head_dim, self.kv_lora_rank], dim=-1
            )

        # Logits against the shared per-token latent:
        # (b, h, l, D) x (b, n, D) -> (b, h, l, n), D = rope + kv_lora_rank.
        attn_weights = (
            torch.einsum("bhlD, bnD -> bhln", query_states, compressed_kv) * self.scale
        )
        attn_weights = attn_weights.softmax(dim=-1)

        # Weighted sum of latent "values":
        # (bsz, num_heads, q_len, kv_lora_rank) -> (bsz, q_len, num_heads * kv_lora_rank)
        attn_output = (
            torch.einsum("bhln, bnr -> bhlr", attn_weights, kv_nope)
            .transpose(1, 2)
            .contiguous()
            .view(bsz, q_len, self.num_heads * self.kv_lora_rank)
        )

        # (bsz, q_len, hidden_size)
        return self.o_proj(attn_output)
