import copy
import numpy as np
import math
import torch.nn as nn
import torch
import torch.nn.functional as F


def clone_module(module, N):
    """Return an nn.ModuleList of N independent deep copies of `module`.

    Each clone has its own parameters (nothing is shared with the
    original or with the other clones).
    """
    clones = (copy.deepcopy(module) for _ in range(N))
    return nn.ModuleList(clones)


def subsequent_mask(size):
    """Return a (1, size, size) boolean causal mask.

    Entry [0, i, j] is True iff j <= i, i.e. query position i may attend
    to key position j (no peeking at future positions).
    """
    ones = torch.ones((1, size, size), dtype=torch.uint8)
    return torch.tril(ones) == 1


def make_src_mask(token_list, pad):
    """
    Build the encoder (source) attention mask that hides padding positions.

    Note: unlike the target mask, this does NOT hide future positions —
    the encoder may attend bidirectionally.

    token_list: [batch, seq_len] tensor of token ids.
    pad: id of the <pad> token.

    Returns a boolean mask of shape [batch, 1, seq_len] — True where the
    token is real, False where it is padding. The singleton middle dim
    lets the mask broadcast over the [.., seq_len, seq_len] score matrix.
    """
    mask = (token_list != pad).unsqueeze(-2)
    return mask  # [batch, 1, seq_len]


def make_tgt_mask(token_list, pad):
    """
    Build the decoder (target) attention mask that hides both padding
    and future positions.

    token_list: [batch, seq_len] tensor of token ids.
    pad: id of the <pad> token.

    Returns a mask of shape [batch, seq_len, seq_len] with the same dtype
    as token_list; nonzero entries mark positions that may be attended to.
    """
    # [batch, 1, seq_len] — True where the token is not padding.
    mask = (token_list != pad).unsqueeze(-2)
    # Combine with the causal (no-look-ahead) mask. Move it onto
    # token_list's device first so the `&` also works when token_list
    # lives on the GPU (subsequent_mask allocates on the CPU).
    causal = subsequent_mask(token_list.size(-1)).to(token_list.device)
    mask = (mask & causal).type_as(token_list.data)
    return mask  # [batch, seq_len, seq_len]


def attention(query, key, value, mask=None, dropout=None):
    """
    Scaled dot-product attention.

    query, key, value: [batch_size, num_head, seq_len, d_k]
    mask: optional tensor broadcastable against the score matrix —
        a source mask is [batch_size, 1, seq_len],
        a target mask is [batch_size, seq_len, seq_len].
    dropout: optional nn.Dropout module applied to the attention weights.

    Returns (output, p_attn): output is [batch, num_head, seq_len, d_k],
    p_attn holds the attention weights [batch, num_head, seq_len, seq_len].
    """
    # d_k is the size of query's last dim (per-head feature width).
    d_k = query.size(-1)
    # Scale by sqrt(d_k) so the dot products stay in a range where the
    # softmax keeps useful gradients.
    # scores: [batch_size, num_head, seq_len, seq_len]
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)

    if mask is not None:
        # Wherever the mask is 0, replace the score with -1e9; after the
        # softmax those positions get (near-)zero attention weight.
        scores = scores.masked_fill(mask == 0, -1e9)

    p_attn = F.softmax(scores, dim=-1)

    if dropout is not None:
        p_attn = dropout(p_attn)

    return torch.matmul(p_attn, value), p_attn


if __name__ == '__main__':
    import copy
    import matplotlib.pyplot as plt

    # Special token ids for the toy vocabulary below.
    PAD = 0  # <pad> placeholder: pads short sentences to a fixed length so they can be batched efficiently
    BOS = 1  # <bos> beginning-of-sequence marker
    EOS = 2  # <eos> end-of-sequence marker

    vocab_size = 10
    min_token = 3  # real tokens start above the special ids (PAD/BOS/EOS)
    batch_size = 2
    seq_len = 4
    num_head = 3

    # Random "sentences" made of real tokens only (no PAD yet).
    origin_tokens = torch.randint(min_token, vocab_size, (batch_size, seq_len))  # [batch,seq_len]
    scores = torch.ones((batch_size, num_head, seq_len, seq_len))  # [batch_size, num_head, seq_len, seq_len]

    # print(subsequent_mask(4))
    # plt.imshow(subsequent_mask(4)[0])


    def test_make_mask(tokens, title):
        # Build the source and target masks for one batch of tokens,
        # print them, and draw each mask as an image for inspection.
        print('tokens: ', tokens)
        src_mask = make_src_mask(tokens, PAD)
        print('src mask size: ', src_mask.size())  # [batch,1, seq_len]
        print('src mask: ', src_mask)

        tgt_mask = make_tgt_mask(tokens, PAD)
        print('tgt mask size: ', tgt_mask.size())  # [batch,seq_len, seq_len]
        print('tgt mask: ', tgt_mask)

        fig, axes = plt.subplots(2, 2)  # 2x2 grid: src masks on top, tgt masks below
        fig.suptitle(title, fontsize=14)

        axes[0, 0].imshow(src_mask[0], cmap='viridis', vmin=0, vmax=1)
        axes[0, 0].set_title('src mask 0')

        axes[0, 1].imshow(src_mask[1], cmap='viridis', vmin=0, vmax=1)
        axes[0, 1].set_title('src mask 1')

        axes[1, 0].imshow(tgt_mask[0], cmap='viridis', vmin=0, vmax=1)
        axes[1, 0].set_title('tgt mask 0')

        axes[1, 1].imshow(tgt_mask[1], cmap='viridis', vmin=0, vmax=1)
        axes[1, 1].set_title('tgt mask 1')

        return src_mask, tgt_mask

    # Case 1: no padding at all — src mask is all True, tgt mask is
    # purely the causal triangle.
    tokens1 = copy.deepcopy(origin_tokens)
    src_mask, tgt_mask = test_make_mask(tokens1, 'tokens1')

    # Case 2: inject PAD tokens so the masks also blank out padded slots.
    tokens2 = copy.deepcopy(origin_tokens)
    tokens2[0, 3] = PAD
    tokens2[1, 2:4] = PAD
    src_mask, tgt_mask = test_make_mask(tokens2, 'tokens2')

    plt.show()
