
import math
import copy

from torch import optim
from torch.autograd import Variable

import torch
import torch.nn as nn
import torch.nn.functional as F


def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*.

    Each copy has its own parameters (no weight sharing), but all copies
    start from the same initial values as the template module.
    """
    copies = []
    for _ in range(N):
        copies.append(copy.deepcopy(module))
    return nn.ModuleList(copies)


def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Computes softmax(Q K^T / sqrt(d_k)) V over the last two dimensions.

    Args:
        query, key, value: tensors whose last dimension is the head width d_k.
        mask: optional tensor broadcastable to the score matrix; positions
            where mask == 0 are suppressed.
        dropout: optional nn.Dropout module applied to the attention weights.

    Returns:
        A tuple (output, weights): the attention-weighted values and the
        attention weight matrix itself.
    """
    dim = query.size(-1)

    # Similarity of every query against every key; the sqrt(d_k) scaling
    # keeps the logits in a range where softmax gradients stay healthy.
    logits = query.matmul(key.transpose(-2, -1)) / math.sqrt(dim)

    # Masked positions get a large negative logit so softmax sends them to ~0.
    if mask is not None:
        logits = logits.masked_fill(mask == 0, -1e9)

    # Normalize over the key dimension.
    weights = logits.softmax(dim=-1)

    # Optionally drop out individual attention weights during training.
    if dropout is not None:
        weights = dropout(weights)

    return weights.matmul(value), weights


class MyMultiHeadedAttention(nn.Module):
    """Multi-head attention: h parallel scaled dot-product attention heads.

    Each head attends over a d_model // h slice of the projected inputs;
    the head outputs are concatenated and passed through a final linear
    projection.
    """

    def __init__(self, h, d_model, dropout=0.1):
        """
        Args:
            h: number of attention heads; must evenly divide d_model.
            d_model: total model (embedding) width.
            dropout: dropout probability applied to the attention weights.
        """
        super(MyMultiHeadedAttention, self).__init__()
        # The model width is split evenly across the heads.
        assert d_model % h == 0
        self.d_k = d_model // h
        self.h = h
        # Four projections: W_Q, W_K, W_V, plus the output projection that
        # maps the concatenated heads back to d_model.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        # Filled with the latest attention weight matrix on every forward.
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # The same mask is broadcast across all heads.
            mask = mask.unsqueeze(1)
        # Leading dimension is the batch size.
        batch = query.size(0)

        def split_heads(projection, tensor):
            # (batch, seq, d_model) -> (batch, h, seq, d_k)
            return projection(tensor).view(batch, -1, self.h, self.d_k).transpose(1, 2)

        # Project the inputs with W_Q / W_K / W_V and carve out the heads.
        query = split_heads(self.linears[0], query)
        key = split_heads(self.linears[1], key)
        value = split_heads(self.linears[2], value)

        # Run scaled dot-product attention on all heads at once; keep the
        # weight matrix around for inspection.
        heads, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)

        # (batch, h, seq, d_k) -> (batch, seq, h * d_k): undo the head split
        # and concatenate, then apply the final output projection.
        concat = heads.transpose(1, 2).contiguous().view(batch, -1, self.h * self.d_k)
        return self.linears[-1](concat)



class TransformerModel(nn.Module):
    """Minimal wrapper: a single self-attention layer over the input.

    Args:
        d_model: model (embedding) width.
        heads: number of attention heads; must evenly divide d_model.
    """

    def __init__(self, d_model, heads):
        super(TransformerModel, self).__init__()
        # BUG FIX: MyMultiHeadedAttention's signature is (h, d_model) — the
        # original passed (d_model, heads), relying on the caller also
        # swapping its arguments so the two mistakes cancelled out.
        self.attention = MyMultiHeadedAttention(heads, d_model)

    def forward(self, x):
        # BUG FIX: the original printed the result and returned None, which
        # crashed the training loop below. Self-attention: x attends to itself.
        return self.attention(x, x, x)


# --- toy training demo ---------------------------------------------------

# Hypothetical hyper-parameters.
vocab_size = 3  # toy vocabulary size
d_model = 3     # model width
heads = 1       # number of attention heads

# Instantiate the model (arguments now in the declared (d_model, heads) order).
model = TransformerModel(d_model, heads)

# Toy "embedded" sentence for tokens "我", "是", "谁": one one-hot vector per
# token, shape (batch=1, seq_len=3, d_model=3).
inputs = torch.eye(3).unsqueeze(0)
# BUG FIX: CrossEntropyLoss needs integer class indices, one per position —
# the original reused the float inputs as targets with a mismatched shape.
targets = torch.tensor([0, 1, 2])

# Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# One training step.
model.train()
optimizer.zero_grad()
outputs = model(inputs)  # (1, 3, d_model)
loss = criterion(outputs.view(-1, vocab_size), targets.view(-1))
loss.backward()
optimizer.step()

# Print the attention weight matrix saved during the forward pass,
# shape (batch, heads, seq, seq). BUG FIX: the original accessed
# model.layers[0].attention.in_proj_weight.weight, none of which exists.
attention_scores = model.attention.attn
print(attention_scores)

if __name__ == '__main__':
    print("end")