import torch
import torch.nn as nn
from torch.nn import Dropout
import torch.nn.functional as F
import math
from a_1_embedding import Embedding

# Compute device for the whole module: CUDA when available, otherwise CPU.
# Tensors and the MultiAttention projections below are placed here explicitly.
Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Attention(nn.Module):
    """Single-head scaled dot-product self-attention.

    Projects the input once each for query, key and value, computes
    softmax(q @ k^T / sqrt(d_model)) @ v, and applies dropout to the result.
    """

    def __init__(self, d_model, drop=0.1):
        """
        Args:
            d_model: feature dimension of the input (last axis of ``x``).
            drop: dropout probability applied to the attention output.
        """
        super(Attention, self).__init__()
        self.d_model = d_model
        # Three independent projections: index 0 -> q, 1 -> k, 2 -> v.
        self.linear_list = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
        self.drop = Dropout(drop)

    def forward(self, x, mask=None):
        """
        Args:
            x: input of shape (..., length, d_model); q, k and v are all
               derived from this same tensor (self-attention).
            mask: optional boolean tensor broadcastable to the score shape;
               positions where it is False are excluded from attention.

        Returns:
            Tensor with the same shape as ``x``.
        """
        q, k, v = [linear(x) for linear in self.linear_list]
        attn_score = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_model)
        if mask is not None:
            # Large negative fill so softmax sends masked positions to ~0.
            # Out-of-place masked_fill keeps the pre-mask scores untouched.
            attn_score = attn_score.masked_fill(mask == False, -1e9)
        attn_weight = F.softmax(attn_score, dim=-1)
        attn_output = torch.matmul(attn_weight, v)
        return self.drop(attn_output)


class MultiAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Splits ``d_model`` into ``n_head`` sub-spaces of size d_model // n_head,
    runs scaled dot-product attention per head, then concatenates the heads
    back into a (batch, length, d_model) tensor and applies dropout.

    NOTE(review): the q/k/v projections are created on the module-level
    ``Device`` global, which couples this class to import-time state —
    consider taking a ``device`` argument instead (kept as-is so existing
    callers that rely on the placement keep working).
    """

    def __init__(self, d_model, n_head, drop=0.1):
        """
        Args:
            d_model: model feature dimension.
            n_head: number of attention heads; must divide ``d_model``.
            drop: dropout probability applied to the attention output.

        Raises:
            ValueError: if ``d_model`` is not divisible by ``n_head``
                (otherwise the head split would silently drop features).
        """
        super(MultiAttention, self).__init__()
        if d_model % n_head != 0:
            raise ValueError(f'd_model ({d_model}) must be divisible by n_head ({n_head})')
        self.d_model = d_model
        self.n_head = n_head
        self.sub_model = d_model // n_head  # per-head feature dimension
        # Three independent projections: index 0 -> q, 1 -> k, 2 -> v.
        self.linear_list = nn.ModuleList([nn.Linear(d_model, d_model, device=Device) for _ in range(3)])
        self.drop = Dropout(drop)

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask=None):
        """
        Args:
            q: query tensor of shape (batch, q_len, d_model).
            k: key tensor of shape (batch, kv_len, d_model).
            v: value tensor of shape (batch, kv_len, d_model).
            mask: optional boolean tensor broadcastable to the per-head score
                shape (batch, n_head, q_len, kv_len); False positions are
                excluded from attention.

        Returns:
            Tensor of shape (batch, q_len, d_model).
        """
        batch_size, q_len, _ = q.size()
        kv_batch, kv_len, _ = k.size()

        # Project, then split into heads: (B, L, D) -> (B, H, L, D/H).
        q = self.linear_list[0](q).view(batch_size, q_len, self.n_head, self.sub_model).transpose(1, 2)
        k = self.linear_list[1](k).view(kv_batch, kv_len, self.n_head, self.sub_model).transpose(1, 2)
        v = self.linear_list[2](v).view(kv_batch, kv_len, self.n_head, self.sub_model).transpose(1, 2)

        # Scale by the per-head dimension, not the full d_model.
        attn_score = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.sub_model)
        if mask is not None:
            # Large negative fill so softmax sends masked positions to ~0.
            attn_score = attn_score.masked_fill(mask == False, -1e9)
        attn_weight = F.softmax(attn_score, dim=-1)
        attn_output = torch.matmul(attn_weight, v)

        # Merge the heads back: (B, H, L, D/H) -> (B, L, D).
        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, q_len, self.d_model)
        return self.drop(attn_output)


if __name__ == '__main__':
    # Causal mask: zero out the strict upper triangle, so row i is True for
    # columns 0..i — each query may only attend to itself and earlier keys.
    mask = torch.triu(torch.ones(10, 10), diagonal=1) == 0

    # Batch of 5 sequences, length 10, model dimension 512.
    x = torch.randn(5, 10, 512)
    x = x.to(Device)
    mask = mask.to(Device)

    # 8 heads of 64 features each; self-attention (q = k = v = x).
    multi_header = MultiAttention(512, 8)
    res = multi_header(x, x, x, mask)
    print(res.shape)
