# 自定义数据集类
from torch.utils.data import DataLoader, Dataset
import torch
import torch.nn as nn
import torch.nn.functional as F

# Toy dataset: 100 random integer sequences, each of a random length in
# [3, 5] with token values drawn from [1, 8].
dataset = [
    torch.randint(1, 9, size=(torch.randint(3, 6, size=(1,)).item(),))
    for _ in range(100)
]


def change(batchs):
    """Collate a list of variable-length 1-D LongTensors into padded batches.

    Builds the three tensors a seq2seq model needs:
      - encoder input:   the raw sequence,
      - decoder input:   BOS (0) prepended to the sequence,
      - decoder target:  EOS (9) appended to the sequence,
    each right-padded with PAD (11) to the longest sequence in the batch.

    Args:
        batchs: list of 1-D integer tensors of varying lengths.

    Returns:
        (enc, dec_in, dec_out) — three 2-D LongTensors of shape
        (batch, max_len), where max_len is computed per tensor group.
    """
    bos = torch.tensor([0])   # decoder start-of-sequence token
    eos = torch.tensor([9])   # decoder end-of-sequence token
    pad_value = 11            # padding token id

    enc = list(batchs)
    dec_in = [torch.cat([bos, seq], dim=0) for seq in batchs]
    dec_out = [torch.cat([seq, eos], dim=0) for seq in batchs]

    def _pad(seqs):
        # Right-pad every sequence to the group's max length, then stack
        # into one (batch, max_len) tensor.  This replaces three verbatim
        # copies of the same padding loop in the original.
        max_len = max(len(s) for s in seqs)
        return torch.stack([
            torch.cat(
                [s, torch.full((max_len - len(s),), pad_value, dtype=torch.long)],
                dim=0,
            )
            for s in seqs
        ])

    # NOTE: the original also print()ed every padded batch — leftover debug
    # output removed here.
    return _pad(enc), _pad(dec_in), _pad(dec_out)


# Batch the toy dataset through the custom collate function, then print the
# shapes of the first (encoder, decoder-in, decoder-out) batch as a sanity
# check.
d2 = DataLoader(dataset, batch_size=10, shuffle=False, collate_fn=change)
pad_enc, pad_dec_in, pad_dec_out = next(iter(d2))
for part in (pad_enc, pad_dec_in, pad_dec_out):
    print(part.size())
# Shared modules for the attention smoke test below.
# 12 token ids (data tokens 1-8 plus the 0/9/11 specials used by change())
# mapped to 512-dim vectors.
embed = nn.Embedding(12, 512)

# 8-head attention over 512-dim embeddings; batch_first=True means inputs
# are laid out as (batch, seq, dim).
attention = nn.MultiheadAttention(512, 8, batch_first=True)

# Run every batch through multi-head attention with both a padding mask and
# a causal-style mask, printing the output shape (batch, len_q, 512).
for q, k, v in d2:
    # Embed token ids -> (batch, seq_len, 512) float tensors.
    q1 = embed(q)
    k1 = embed(k)
    v1 = embed(v)

    # True where the key position is the PAD token (11) — those positions
    # are excluded from attention for every query.
    key_padding_mask = k == 11

    # BUG FIX 1: the mask shape was hard-coded to (5, 6), but sequence
    # lengths are random (3-5), so a batch whose max lengths differ would
    # crash.  Derive the shape from the actual tensors.
    len_q, len_k = q1.size(1), k1.size(1)

    # BUG FIX 2: the original filled a float mask with 1.0, but a float
    # attn_mask is *added* to the attention scores, so +1 barely perturbs
    # them and masks nothing.  A bool mask (True = position may NOT be
    # attended) masks correctly: query i only sees key positions <= i.
    attn_mask = torch.triu(torch.ones(len_q, len_k, dtype=torch.bool), diagonal=1)

    r, _ = attention(q1, k1, v1,
                     key_padding_mask=key_padding_mask,
                     attn_mask=attn_mask)
    print(r.size())