import math
import pickle

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
with open('zh_split.pickle', 'rb') as f:
     l = pickle.load(f)

lists1 = [['你好','你好'],['dsadasd'],['金克拉就是看垃圾'],['d'],['d']]
class Attention_module(nn.Module):
    def __init__(self, d_model, num_heads):
        super(Attention_module, self).__init__()
        self.d_model = d_model#表示模型的隐藏层维度（或称为特征维度）
        self.num_heads = num_heads
        assert d_model % num_heads == 0, "d_model 必须要被 num_heads整除"
        self.head_dim = d_model // num_heads
        self.scale_factor = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32))

        self.W_q = nn.Linear(self.d_model, self.d_model)
        self.W_k = nn.Linear(self.d_model, self.d_model)
        self.W_v = nn.Linear(self.d_model, self.d_model)
        self.W_out = nn.Linear(self.d_model, self.d_model)
    def split_heads(self, x):#输入x的形状是(batch_size, seq_len, d_model)时，d_model必须等于num_heads * head_dim，否则view会报错
        #将输入分割为多头
        batch_size, seq_len  = x.size()[:2]
        return x.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2) #将X的维度由原来的三维拆分为四维，1 ，2这两个维度调换位置变成(batch_size, self.num_heads, seq_len, self.head_dim)

    def forward(self, Q, K, V, mask = None):
        """"Q , K , V: 输入矩阵[batch_size, seq_len, d_model]
             mask: 掩码矩阵[batch_size, seq_len, seq_len]"""

        batch_size = Q.size(0)

        Q = self.split_heads(self.W_q(Q))
        K = self.split_heads(self.W_k(K))
        V = self.split_heads(self.W_v(V))

        scores = torch.matmul(Q, K.transpose(-2, -1)) * self.scale_factor.reciprocal()


        #应用掩码
        if mask is not None:




            scores = scores.masked_fill(mask, -1e9)#这里表示如果给出掩码矩阵，那么掩码矩阵中为True的那个位置在scores矩阵中相同的位置的值会被转化为 -1e9
                                                                                                    #scores.masked_fill(mask, value) 是 PyTorch 中的一个函数，用于对张量进行按位替换操作。
                                                                                                   #mask 是一个布尔类型的张量，用于指示要进行替换的位置。
                                                                                                   #value 是一个标量或与被操作的张量 scores 具有相同形状的张量，用于指定替换的值。


        attention_w = F.softmax(scores, dim=-1)#对每个矩阵相同位置的数值进行softmax

        context = torch.matmul(attention_w, V)#α权重与V相乘，得到C向量。

        #合并多头
        context = context.transpose(1, 2).contiguous()
        context = context.view(batch_size, -1, self.d_model)
        output = self.W_out(context)
        return output, attention_w

s = set()
for i in lists1:
     s.update(i)
l = list(s)
print(l)
size = 4
attn_shape = (1, size, size)
subsequent_mask =1 -  np.tril(np.ones(attn_shape), ).astype('uint8')
print(torch.from_numpy(subsequent_mask) == 0)
t = torch.tensor([[[0.31, 0.326, 0.3, 0.3, 0.66, 0.17],[0.269, 0.19, 0.3, 0.3, 0.3, 0.115],[0.36, 0.3, 0.115, 0.111, 0.137, 0.29],[0.3, 0.3, 0.215, 0.2, 0.32, 0.21]],
                                               [[0.37, 0.126, 0.6, 0.3, 0.66, 0.17],[0.263, 0.19, 0.3, 0.34, 0.3, 0.115],[0.3, 0.3, 0.115, 0.111, 0.137, 0.89],[0.3, 0.34, 0.215, 0.3, 0.32, 0.21]]],dtype=torch.float32)#2, 4,6
mask = t == 0.3
mask = mask.unsqueeze(1)
scores = t.masked_fill(mask, -1e9)
print(mask.shape)
print(t.shape)
print("t = " + str(t))
if __name__ == '__main__':
    # Smoke test: split the (2, 4, 6) scratch tensor into 2 heads of dim 3.
    demo_attn = Attention_module(d_model=6, num_heads=2)
    heads = demo_attn.split_heads(t)
    print(heads.shape)
    print("split_h= " + str(heads))
    print(subsequent_mask.shape)

