import torch
from torch import bmm as bmm, matmul as matmul
from torch import nn
from debug import debug_print
from draw import generate_head_map
import math

class NaiveAttention(nn.Module):
    """Multi-head scaled dot-product self-attention with a causal (lower-
    triangular) mask.

    Maps [batch, seq, input_dim] -> [batch, seq, output_dim] via learned
    Q/K/V projections, per-head attention, and an output projection W_o.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_heads,
                 precision=torch.float32, dropout=0.1, require_bias=False,
                 kv_needed=True, device=None):
        """
        Args:
            input_dim:    feature size of the input tokens.
            hidden_dim:   total projection width, split evenly across heads.
            output_dim:   feature size produced by the output projection.
            num_heads:    number of attention heads; must divide hidden_dim.
            precision:    dtype of the projection weights.
            dropout:      accepted for interface compatibility; currently unused.
            require_bias: whether the Linear projections carry a bias term.
            kv_needed:    when False, no K/V projections are created and the
                          caller must pass W_k / W_v to forward().
            device:       optional device for the projection weights
                          (new, backward-compatible; default None keeps the
                          previous behavior).
        """
        super(NaiveAttention, self).__init__()

        debug_print(f"""init NaiveAttention Module with:
              input_dim={input_dim},
              hidden_dim={hidden_dim}
              output_dim={output_dim},
              num_heads={num_heads}
            """)

        assert hidden_dim % num_heads == 0, (
            f"hidden_dim must be divisible by num_heads, however we found "
            f"hidden_dim={hidden_dim}, while num_heads={num_heads}")

        # Plain tensors (torch.randn) are not registered as parameters and
        # would be invisible to the optimizer; nn.Linear registers its weight
        # automatically.
        # NOTE: chained assignment (W_k = W_q = W_v = Linear(...)) would make
        # q/k/v share a single weight matrix and must be avoided.
        if kv_needed:
            self.W_k = nn.Linear(input_dim, hidden_dim, dtype=precision, bias=require_bias, device=device)
            self.W_q = nn.Linear(input_dim, hidden_dim, dtype=precision, bias=require_bias, device=device)
            self.W_v = nn.Linear(input_dim, hidden_dim, dtype=precision, bias=require_bias, device=device)
        else:
            self.W_q = nn.Linear(input_dim, hidden_dim, dtype=precision, bias=require_bias, device=device)
            self.W_k = self.W_v = None

        self.W_o = nn.Linear(hidden_dim, output_dim, dtype=precision, bias=require_bias, device=device)
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.precision = precision
        self.output_dim = output_dim

    def divide_into_multi_heads(self, x):
        """Split a projected tensor into per-head batch entries.

        [b, n, dim] -> [b, n, head, dim//head] -> [b, head, n, dim//head]
        -> [b*head, n, dim//head]

        Reminder on reshaping ops:
          permute:  swaps dimensions (no copy, breaks contiguity).
          reshape:  may copy; returns a tensor of the new shape.
          view:     returns a no-copy view; requires a contiguous tensor.
        """
        b, n, dim = x.shape
        head_dim = dim // self.num_heads
        x = x.view(b, n, self.num_heads, head_dim)
        # contiguous() is required after permute so view() can be used below.
        x = x.permute(0, 2, 1, 3).contiguous()
        return x.view(-1, n, head_dim)

    def concat_back(self, x):
        """Inverse of divide_into_multi_heads:
        [b*head, n, dim//head] -> [b, head, n, dim//head]
        -> [b, n, head, dim//head] -> [b, n, dim]
        """
        bh, n, head_dim = x.shape
        x = x.view(-1, self.num_heads, n, head_dim)
        x = x.permute(0, 2, 1, 3).contiguous()
        return x.view(x.shape[0], n, -1)

    def forward(self, x, W_k=None, W_v=None):
        """Apply causal multi-head self-attention to x.

        Args:
            x:   input of shape [batch, seq, input_dim].
            W_k: optional key projection overriding self.W_k
                 (required when the module was built with kv_needed=False).
            W_v: optional value projection overriding self.W_v.

        Returns:
            Tensor of shape [batch, seq, output_dim].
        """
        # Use caller-supplied projections as locals instead of assigning them
        # onto self: the old code mutated module state on every call.
        W_k = W_k if W_k is not None else self.W_k
        W_v = W_v if W_v is not None else self.W_v
        assert W_k is not None, "no key projection: pass W_k or build with kv_needed=True"
        assert W_v is not None, "no value projection: pass W_v or build with kv_needed=True"

        # nn.Linear (unlike a bare matmul with a raw tensor) keeps the weights
        # registered as trainable parameters.
        query, key, value = self.W_q(x), W_k(x), W_v(x)

        # Multi-head split: [b, n, hidden] -> [b*head, n, hidden//head].
        query = self.divide_into_multi_heads(query)
        key = self.divide_into_multi_heads(key)
        value = self.divide_into_multi_heads(value)

        # Attention scores: [b*head, n, d_k] x [b*head, d_k, n] -> [b*head, n, n].
        attn = torch.bmm(query, key.transpose(1, 2))

        # Scale by sqrt(d_k), the per-head dimension ("Attention Is All You
        # Need"); the old code divided by sqrt(hidden_dim), the full width.
        attn = attn / math.sqrt(self.hidden_dim // self.num_heads)

        # Causal mask from an explicit lower-triangular boolean matrix.
        # The old tril(attn) + masked_fill(attn==0) approach also masked any
        # legitimate score that happened to be exactly zero.
        n = attn.shape[-1]
        causal = torch.ones(n, n, dtype=torch.bool, device=attn.device).tril()
        attn = attn.masked_fill(~causal, float('-inf'))

        attn = torch.softmax(attn, dim=-1)

        # Weighted sum of values: [b*head, n, n] x [b*head, n, d_k].
        out = torch.bmm(attn, value)

        # Merge heads back to [b, n, hidden], then mix with W_o.
        out = self.concat_back(out)
        out = self.W_o(out)
        return out


if __name__ == "__main__":
    # Smoke test: the output must keep the input's (batch, seq, feature) shape.
    # The old call passed device='cuda' to __init__, which accepts no such
    # keyword, so the script crashed with a TypeError before running anything.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = NaiveAttention(
        input_dim=8,
        hidden_dim=2,
        output_dim=8,
        num_heads=1,
        precision=torch.float16,
    ).to(device)
    x = torch.randn([4, 3, 8], device=device, dtype=torch.float16)

    y = model(x)
    assert y.shape == x.shape
    print("OK")
        