from dataclasses import dataclass 
import math, torch 
from typing import Optional, Tuple 
import torch.nn.functional as F 
from torch import nn

def apply_rotary_emb(xq, xk, freqs_cis):
    """Placeholder for rotary position embeddings (RoPE).

    The real implementation rotates ``xq``/``xk`` by the complex
    frequencies in ``freqs_cis``; this stub returns both unchanged.
    The parameter is named ``freqs_cis`` (was ``freq``) because the
    caller in this file passes it by keyword as ``freqs_cis=...``,
    which would otherwise raise ``TypeError``.
    """
    return xq, xk


@dataclass
class ModelArgs:
    """Hyper-parameters for a Llama-style transformer."""
    dim: int = 4096  # model / embedding dimension
    n_layers: int = 32  # number of transformer layers
    n_heads: int = 32  # number of query heads
    n_kv_heads: Optional[int] = None  # KV heads for GQA; None => same as n_heads (plain MHA)
    vocab_size: int = -1  # -1 acts as "unset"; presumably filled in from the tokenizer — TODO confirm
    multiple_of: int = 256  # presumably rounds the FFN hidden dim up to a multiple — verify against FFN code
    ffn_dim_multiplier: Optional[float] = None  # optional scale on the FFN hidden dim
    norm_eps: float = 1e-5  # epsilon for the normalization layers

    max_batch_size: int = 32  # capacity bound (e.g. for a KV cache)
    max_seq_len: int = 2048  # maximum sequence length

def repeat_kv(x, reps):
    """Repeat each KV head ``reps`` times along the head axis (for GQA).

    Maps ``(batch, seq_len, n_kv_heads, head_dim)`` to
    ``(batch, seq_len, n_kv_heads * reps, head_dim)``; each head's copies
    are laid out consecutively.  With ``reps == 1`` (plain MHA) the input
    tensor is returned as-is.
    """
    batch, seq_len, n_kv_heads, head_dim = x.shape
    if reps == 1:
        return x
    # Insert a repeat axis after the head axis, broadcast it, then fold it
    # back into the head axis.
    expanded = x.unsqueeze(3).expand(batch, seq_len, n_kv_heads, reps, head_dim)
    return expanded.reshape(batch, seq_len, n_kv_heads * reps, head_dim)

# The idea is to organize query heads in the GQA style; MHA is the special
# case of GQA (one KV head per query head), so the two switch seamlessly.
# The reference implementation's parallelism is omitted here.
class Attention(nn.Module):
    """Multi-head attention with grouped-query attention (GQA) support.

    When ``n_kv_heads < n_heads`` the key/value heads are shared across
    groups of query heads (GQA); when they are equal this reduces to plain
    MHA, so the two are interchangeable.  The tensor/model-parallel logic
    of the reference implementation is omitted.
    """

    def __init__(self, args: ModelArgs):
        super().__init__()

        # If the GQA group count is unset, fall back to MHA
        # (one KV head per query head).
        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
        # Parallelism omitted: "local" counts equal the global counts.
        self.n_local_heads = args.n_heads
        self.n_local_kv_heads = self.n_kv_heads
        # How many times each KV head must be repeated to match the query heads.
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        # Keep the total model dim constant; per-head dim follows.
        self.head_dim = args.dim // args.n_heads

        # Projections.  The original built raw ``nn.Parameter`` tensors via
        # ``torch.FloatTensor(shape_tuple, dtype=...)`` — which is not a
        # valid call (no ``dtype`` kwarg) — and then *called* them in
        # ``forward`` (a Parameter is not callable).  ``nn.Linear`` matches
        # the ``self.wq(x)`` call sites and is the idiomatic fix.
        # x:(tokens, dim) -> q:(tokens, n_heads*head_dim)
        self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
        # x:(tokens, dim) -> k:(tokens, n_kv_heads*head_dim)
        self.wk = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        # x:(tokens, dim) -> v:(tokens, n_kv_heads*head_dim)
        self.wv = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        # Output projection back to the model dim.  Its input features are
        # the concatenated *query*-head outputs, so n_heads*head_dim — the
        # original's n_kv_heads*head_dim was a shape bug under GQA.
        self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)

    def forward(self, x, start_pos, freqs_cis, mask):
        # x: (batch_size, seq_len, dim)
        bsz, seqlen, _ = x.shape
        # Project to q / k / v.
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
        # Split heads out of the feature dimension.
        xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
        xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
        # Apply rotary position embeddings to q and k.  Passed positionally
        # so it works regardless of the stub's parameter name.
        xq, xk = apply_rotary_emb(xq, xk, freqs_cis)
        # KV cache (indexed by start_pos) omitted in this simplified version.
        # GQA: repeat each KV head n_rep times to line up with the query heads.
        keys, values = repeat_kv(xk, self.n_rep), repeat_kv(xv, self.n_rep)

        # Attention proper.
        xq = xq.transpose(1, 2)          # (bsz, heads, seq, head_dim)
        keys = keys.permute(0, 2, 3, 1)  # (bsz, heads, head_dim, seq)
        values = values.transpose(1, 2)  # (bsz, heads, seq, head_dim)
        scores = torch.matmul(xq, keys) / math.sqrt(self.head_dim)
        if mask is not None:
            # Masked positions hold -inf so they receive zero softmax weight.
            scores = scores + mask
        # Softmax in float32 for numerical stability, then cast back.
        scores = F.softmax(scores.float(), dim=-1).type_as(xq)
        # Swap head/seq back, make memory contiguous so view() can merge the
        # head dims without a copy, then project back to the model dim.
        output = torch.matmul(scores, values).transpose(1, 2).contiguous().view(bsz, seqlen, -1)
        return self.wo(output)