#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import torch
from torch import nn
import math
from typing import Optional, List


# GQA(group query attention)的核心是在多个注意力头中共享key value矩阵，从而减少key value矩阵的计算量。
# 
# 具体上，GQA将多个heads分成一组，一组内只计算一次key value矩阵，然后将结果广播到其他head中。当group_size=heads时，GQA被压缩成MQA，即所有heads共享一个key value矩阵。

# In[ ]:


from math import e
import re
from numpy import repeat


class GroupQueryAttention(nn.Module):
    """Grouped-query attention (GQA).

    The ``heads`` query heads are partitioned into ``heads // group_size``
    groups; each group shares a single key/value head, so K and V are
    projected to ``d_k * num_group`` features instead of ``d_model``.
    With ``group_size == 1`` this is ordinary multi-head attention; with
    ``group_size == heads`` every query head shares one K/V head (MQA).

    All tensors use the (seq_len, batch, d_model) layout.
    """

    def __init__(self, heads: int, d_model: int, group_size: int = 1, dropout_prob: float = 0.1, bias: bool = True):
        """
        Args:
            heads: number of query heads; must divide d_model.
            d_model: model (embedding) dimension.
            group_size: number of query heads sharing one K/V head; must divide heads.
            dropout_prob: dropout probability applied to the attention weights.
            bias: whether the q/k/v projections carry a bias term.
        """
        super().__init__()
        assert d_model % heads == 0
        assert heads % group_size == 0

        self.heads = heads
        self.d_model = d_model
        self.group_size = group_size
        self.d_k = d_model // heads            # per-head dimension
        self.num_group = heads // group_size   # number of distinct K/V heads

        self.q_proj = nn.Linear(d_model, d_model, bias=bias)
        # K/V project to only num_group heads — this is the GQA saving.
        self.k_proj = nn.Linear(d_model, self.d_k * self.num_group, bias=bias)
        self.v_proj = nn.Linear(d_model, self.d_k * self.num_group, bias=bias)
        self.output = nn.Linear(d_model, d_model)

        # Scores are (seq_len_q, seq_len_k, batch, heads): normalize over keys (dim=1).
        self.softmax = nn.Softmax(dim=1)
        self.dropout = nn.Dropout(dropout_prob)

        self.scale = 1 / math.sqrt(self.d_k)

        # Last attention map (detached), kept for inspection/visualization.
        self.attn = None

    def get_score(self, query: torch.Tensor, key: torch.Tensor):
        """Compute unscaled dot-product attention logits.

        Q shape: (seq_len_q, batch_size, num_heads, head_dim)
        K shape: (seq_len_k, batch_size, num_heads // group_size, head_dim)
        Returns: (seq_len_q, seq_len_k, batch_size, num_heads)
        """
        # Broadcast each shared K head to the group_size query heads it serves.
        key = torch.repeat_interleave(key, repeats=self.group_size, dim=2)
        return torch.einsum('qbhd,kbhd->qkbh', query, key)

    def validate_mask(self, mask: torch.Tensor, query_shape: List[int], key_shape: List[int]):
        """Validate the attention mask and make it broadcastable over the scores.

        Args:
            mask: attention mask, shape (seq_len_q, seq_len_k[, batch_size]);
                positions where the mask equals 0 are excluded from attention.
            query_shape: shape of the query tensor.
            key_shape: shape of the key tensor.

        Returns:
            The mask with trailing singleton dims appended so it broadcasts
            against the (seq_len_q, seq_len_k, batch, heads) score tensor.
        """
        assert mask.dim() >= 2
        assert mask.shape[0] == 1 or mask.shape[0] == query_shape[0]
        assert mask.shape[1] == key_shape[0]
        if mask.dim() >= 3:
            assert mask.shape[2] == 1 or mask.shape[2] == query_shape[1]
        # FIX: use the out-of-place unsqueeze — the original unsqueeze_()
        # mutated the caller's mask tensor as a side effect.
        while mask.dim() < len(query_shape) + 1:
            mask = mask.unsqueeze(-1)
        return mask

    def forward(self, *, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """Apply grouped-query attention.

        Args:
            query: (seq_len_q, batch_size, d_model)
            key:   (seq_len_k, batch_size, d_model)
            value: (seq_len_k, batch_size, d_model)
            mask: optional mask, shape (seq_len_q, seq_len_k[, batch_size]);
                entries equal to 0 are masked out.

        Returns:
            Tensor of shape (seq_len_q, batch_size, d_model).
        """
        seq_len_k, batch_size, _ = key.shape
        seq_len_q, batch_size, _ = query.shape

        if mask is not None:
            mask = self.validate_mask(mask, query.shape, key.shape)

        # FIX: query must be viewed with seq_len_q (the original used
        # seq_len_k, which only worked when the two lengths coincided).
        query = self.q_proj(query).view(seq_len_q, batch_size, -1, self.d_k)
        key = self.k_proj(key).view(seq_len_k, batch_size, -1, self.d_k)
        value = self.v_proj(value).view(seq_len_k, batch_size, -1, self.d_k)

        # (seq_len_q, seq_len_k, batch_size, num_heads)
        scores = self.get_score(query, key) * self.scale
        if mask is not None:
            scores = scores.masked_fill(mask == 0, float('-inf'))

        attn = self.softmax(scores)
        attn = self.dropout(attn)
        # Broadcast each shared V head to its group of query heads.
        repeated_value = torch.repeat_interleave(value, repeats=self.group_size, dim=2)

        # einsum might produce a discontiguous result, hence reshape (not view) below.
        x = torch.einsum("qkbh,kbhd->qbhd", attn, repeated_value)

        self.attn = attn.detach()
        x = x.reshape(seq_len_q, batch_size, -1)
        return self.output(x)

if __name__ == '__main__':
    # Smoke test: run grouped-query attention on random inputs and make
    # sure the output keeps the (seq_len, batch, d_model) shape.
    n_heads = 8
    dim = 64
    seq_len = 5
    batch_size = 2
    kv_group = 4

    mha = GroupQueryAttention(heads=n_heads, d_model=dim, group_size=kv_group)

    query = torch.randn((seq_len, batch_size, dim))
    key = torch.randn((seq_len, batch_size, dim))
    value = torch.randn((seq_len, batch_size, dim))
    # Lower-triangular (causal) mask: each position attends only to itself
    # and earlier positions.
    causal_mask = torch.tril(torch.ones((seq_len, seq_len)))

    result = mha(query=query, key=key, value=value, mask=causal_mask)
    assert result.shape == (seq_len, batch_size, dim)

