import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple


class PagedAttention(nn.Module):
    """Decode-phase PagedAttention over a block-paged KV cache.

    Keys and values for all sequences live in a shared pool of fixed-size
    blocks; ``block_tables`` maps each sequence to the block indices holding
    its KV history.  ``forward`` attends one query token per sequence (the
    decode step) over that history.
    """

    def __init__(self, num_heads: int, head_dim: int, block_size: int = 16):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = head_dim
        # Number of token positions stored in one cache block; must match
        # the second dimension of key_cache / value_cache passed to forward.
        self.block_size = block_size

    def forward(
            self,
            query: torch.Tensor,
            key_cache: torch.Tensor,
            value_cache: torch.Tensor,
            block_tables: torch.Tensor,
            seq_lens: torch.Tensor,
    ) -> torch.Tensor:
        """Compute attention of one query token per sequence over its KV cache.

        Args:
            query: [num_seqs, num_heads, head_dim] — one token per sequence.
            key_cache: [num_blocks, block_size, num_heads, head_dim]
            value_cache: [num_blocks, block_size, num_heads, head_dim]
            block_tables: [num_seqs, max_num_blocks] integer block indices.
            seq_lens: [num_seqs] valid KV length per sequence (assumed >= 1;
                a zero length would make the softmax row all -inf -> NaN).

        Returns:
            [num_seqs, num_heads, head_dim] attention output.
        """
        num_seqs = block_tables.shape[0]

        # Gather each sequence's (padded) KV history: [num_seqs, max_seq_len, H, D].
        keys, values = self._prepare_kv_cache(
            key_cache, value_cache, block_tables, seq_lens
        )
        max_seq_len = keys.shape[1]

        # Put heads ahead of the sequence axis so matmul batches over
        # (sequence, head): [num_seqs, H, max_seq_len, D].
        keys = keys.permute(0, 2, 1, 3)
        values = values.permute(0, 2, 1, 3)

        # Scaled dot-product scores: [num_seqs, H, 1, max_seq_len].
        q = query.unsqueeze(2)  # [num_seqs, H, 1, D]
        attn_scores = torch.matmul(q, keys.transpose(-2, -1))
        attn_scores = attn_scores / (self.head_dim ** 0.5)

        # Mask padding beyond each sequence's true length.  No causal mask is
        # needed: the single query token is the latest position and may attend
        # to every cached token.
        valid = self._create_attention_mask(seq_lens, max_seq_len)
        attn_scores = attn_scores.masked_fill(
            ~valid.view(num_seqs, 1, 1, max_seq_len), float("-inf")
        )

        attn_weights = F.softmax(attn_scores, dim=-1)

        # Weighted sum of values: [num_seqs, H, 1, D] -> [num_seqs, H, D].
        output = torch.matmul(attn_weights, values)
        return output.squeeze(2)

    def _prepare_kv_cache(
            self,
            key_cache: torch.Tensor,
            value_cache: torch.Tensor,
            block_tables: torch.Tensor,
            seq_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Gather per-sequence KV tensors from the block-paged cache.

        Returns:
            Two tensors of shape [num_seqs, max_num_blocks * block_size,
            num_heads, head_dim]; positions at or past ``seq_lens[i]`` are
            padding and must be masked by the caller.
        """
        num_seqs, max_num_blocks = block_tables.shape
        _, block_size, num_heads, head_dim = key_cache.shape

        # Advanced indexing gathers whole blocks per sequence:
        # [num_seqs, max_num_blocks, block_size, H, D].
        gathered_k = key_cache[block_tables]
        gathered_v = value_cache[block_tables]

        # Flatten (blocks, block_size) into one padded sequence axis.
        max_seq_len = max_num_blocks * block_size
        key_seq = gathered_k.reshape(num_seqs, max_seq_len, num_heads, head_dim)
        value_seq = gathered_v.reshape(num_seqs, max_seq_len, num_heads, head_dim)

        return key_seq, value_seq

    def _create_attention_mask(self, seq_lens: torch.Tensor, max_seq_len: int) -> torch.Tensor:
        """Boolean validity mask [num_seqs, max_seq_len]: True at real tokens.

        Built on ``seq_lens``'s device so the module also works on GPU inputs.
        """
        positions = torch.arange(max_seq_len, device=seq_lens.device)
        return positions.unsqueeze(0) < seq_lens.unsqueeze(1)