# v000: Multi-head self-attention demo
import torch.nn as nn
import torch
import matplotlib.pyplot as plt
import sympy as sp
import numpy as np
from scipy.misc import derivative
from sympy import symbols, exp, N


class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Projects the input into queries, keys and values, splits the hidden
    dimension into ``num_heads`` independent heads, applies scaled
    dot-product attention within each head, and concatenates the head
    outputs. With ``num_heads == 1`` this is numerically identical to
    plain single-head self-attention (the configuration used by the
    demo script below).
    """

    def __init__(self, embedding_dim, hidden_dim, num_heads):
        super(SelfAttention, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads

        assert embedding_dim % num_heads == 0
        # BUG FIX: `num_heads` was accepted but never used in forward(),
        # so the layer always ran single-head attention. The hidden
        # dimension must split evenly across heads for the reshape below.
        assert hidden_dim % num_heads == 0
        self.head_dim = hidden_dim // num_heads

        # Q/K/V projections: embedding_dim -> hidden_dim
        self.value_projection = nn.Linear(embedding_dim, hidden_dim)
        self.key_projection = nn.Linear(embedding_dim, hidden_dim)
        self.query_projection = nn.Linear(embedding_dim, hidden_dim)

    def forward(self, inputs):
        """Run attention over `inputs` of shape (batch, seq_len, embedding_dim).

        Returns a tensor of shape (batch, seq_len, hidden_dim).
        """
        batch_size, seq_len, _ = inputs.size()
        queries = self.query_projection(inputs)
        keys = self.key_projection(inputs)
        values = self.value_projection(inputs)
        print("Q q1-q6", queries.size())
        print("K k1-k6", keys.size())
        print("V v1-v6", values.size())

        def split_heads(t):
            # (batch, seq, hidden) -> (batch, heads, seq, head_dim)
            return t.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        q = split_heads(queries)
        k = split_heads(keys)
        v = split_heads(values)

        # Per-head attention scores, scaled by sqrt(head_dim) — not
        # sqrt(hidden_dim) — as in "Attention Is All You Need".
        A = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)
        A_plus = torch.softmax(A, dim=-1)
        print("分数2 a hat(i) 做 softmax ", A_plus.size())

        # Weighted sum of values, then merge heads back:
        # (batch, heads, seq, head_dim) -> (batch, seq, hidden_dim)
        output = torch.matmul(A_plus, v)
        output = output.transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_dim)

        print("b(i) 加权分数", output)
        return output


# Fixed demo batch: two sequences of 8 timesteps, one scalar feature each
# (shape: batch=2, seq=8, features=1). Batch is the leading, flexible
# dimension; the trailing dimensions are what the model consumes.
tensor = torch.tensor(
    [
        [[-1.5], [2.3], [-3.4], [-9.5], [1.3], [2.2], [-3.3], [10.0]],
        [[-1.3], [4.3], [-5.4], [10.5], [3.5], [2.5], [0.3], [-9.7]],
    ],
    dtype=torch.float32,
)


def compute(tensors):
    """Run a freshly initialized 1-d self-attention model on *tensors*.

    *tensors* is expected to be a float tensor of shape (2, seq_len, 1):
    two scalar sequences. Returns ``(x_1, x_2, y_1, y_2)`` — the two
    input sequences as numpy arrays and the corresponding per-position
    attention outputs (lists of numpy scalars). A new randomly
    initialized model is built on every call, so repeated calls give
    different outputs for the same input.
    """
    # BUG FIX: the original body ignored the `tensors` parameter and read
    # the module-level global `tensor` instead; use the argument.
    model2 = SelfAttention(1, 1, 1)
    y_s = model2(tensors)
    x_s = [x.squeeze() for x in tensors]
    x_1 = x_s[0].detach().numpy()
    x_2 = x_s[1].detach().numpy()
    y_1 = [y.squeeze().detach().numpy() for y in y_s[0]]
    y_2 = [y.squeeze().detach().numpy() for y in y_s[1]]
    return x_1, x_2, y_1, y_2


# Plot three independent runs: each row shows one freshly initialized
# model's attention output scattered against the two input sequences
# (left column: sequence 1, right column: sequence 2).
plt.style.use(['ggplot', 'grayscale'])
fig = plt.figure()
lines = 3

for row in range(lines):
    xa, xb, ya, yb = compute(tensor)
    left_axis = fig.add_subplot(lines, 2, 2 * row + 1)
    left_axis.scatter(xa, ya)
    right_axis = fig.add_subplot(lines, 2, 2 * row + 2)
    right_axis.scatter(xb, yb)

fig.tight_layout(pad=2)
plt.show()
