# Version 000: multi-head self-attention demo
import torch.nn as nn
import torch
import matplotlib.pyplot as plt
import sympy as sp
import numpy as np
from scipy.misc import derivative
from sympy import symbols, exp, N


class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    The original version projected Q/K/V and ran a single attention head,
    leaving ``num_heads`` unused after the divisibility assert; this version
    actually splits the ``hidden_dim``-wide projections into ``num_heads``
    heads, attends per head, and concatenates the per-head results back to
    ``hidden_dim``.  With ``num_heads=1`` the math reduces to the original
    single-head computation.

    Args:
        embedding_dim: feature size of the incoming Q/K/V tensors.
        hidden_dim: total projection size shared by all heads.
        num_heads: number of attention heads; must divide both
            ``embedding_dim`` and ``hidden_dim``.

    Raises:
        ValueError: if ``num_heads`` does not evenly divide
            ``embedding_dim`` or ``hidden_dim``.
    """

    def __init__(self, embedding_dim, hidden_dim, num_heads):
        super().__init__()
        # Raise instead of assert: asserts are stripped under `python -O`.
        if embedding_dim % num_heads != 0:
            raise ValueError(
                f"embedding_dim ({embedding_dim}) must be divisible by num_heads ({num_heads})"
            )
        if hidden_dim % num_heads != 0:
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by num_heads ({num_heads})"
            )
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.head_dim = hidden_dim // num_heads
        # Hoisted loop invariant: the original rebuilt this scale as a fresh
        # tensor on every forward() call.  Per-head scaling uses head_dim
        # (equals the original sqrt(hidden_dim) when num_heads == 1).
        self.scale = float(self.head_dim) ** 0.5

        self.value_projection = nn.Linear(embedding_dim, hidden_dim)
        self.key_projection = nn.Linear(embedding_dim, hidden_dim)
        self.query_projection = nn.Linear(embedding_dim, hidden_dim)

    def forward(self, Q, K, V):
        """Apply multi-head attention to already-embedded Q/K/V tensors.

        Accepts (batch, seq, embedding_dim) tensors, or (seq, embedding_dim)
        for convenience (the original single-head matmul allowed 2-D input).
        Returns a tensor with Q's leading dims and ``hidden_dim`` features.
        """
        squeeze_batch = Q.dim() == 2
        if squeeze_batch:
            Q, K, V = Q.unsqueeze(0), K.unsqueeze(0), V.unsqueeze(0)

        batch, q_len, _ = Q.shape

        def split_heads(x):
            # (batch, seq, hidden) -> (batch, heads, seq, head_dim): the head
            # axis moves ahead of seq so matmul batches over (batch, heads).
            return x.view(batch, -1, self.num_heads, self.head_dim).transpose(1, 2)

        queries = split_heads(self.query_projection(Q))
        keys = split_heads(self.key_projection(K))
        values = split_heads(self.value_projection(V))

        # Scaled dot-product scores per head: (batch, heads, q_len, k_len).
        scores = torch.matmul(queries, keys.transpose(-2, -1)) / self.scale
        weights = torch.softmax(scores, dim=-1)

        # Weighted sum over values, then merge heads back to hidden_dim.
        context = torch.matmul(weights, values)
        output = context.transpose(1, 2).reshape(batch, q_len, self.hidden_dim)
        return output.squeeze(0) if squeeze_batch else output


# Demo: push 10 three-dimensional feature vectors through the attention
# module and scatter-plot the attended output in 3-D.
model1 = SelfAttention(3, 3, 3)
print(model1)

# Shape (batch_size=1, seq_len=10, embedding_dim=3).  The trailing number on
# each row marks the rough cluster it belongs to (rows 0/2/7 -> cluster 1,
# rows 3/4 -> 2, rows 1/5/6 -> 3, row 8 -> 4, row 9 -> 5).
tensor = torch.FloatTensor([
    [[-1.5, -1.3, -1.1],  # 1
     [2.3, 4.3, 3.3],  # 3
     [-1.2, -1.56, -1.2],  # 1
     [-3.4, -3.2, 3.0],  # 2
     [-3.1, -3.5, 3.2],  # 2
     [2.3, 3.5, 3.8],  # 3
     [2.2, 3.4, 3.1],  # 3
     [-1.53, -1.31, -1.1],  # 1
     [-2.9, 0.6, 1.8],  # 4
     [14, 0.7, 1.2]  # 5
     ]
])
# Queries come from the fixed feature tensor; keys/values are random here,
# so the plotted output changes run to run.
Q = tensor
K = torch.randn([1, 10, 3])
V = torch.randn([1, 10, 3])

output = model1(Q, K, V)
# output[0] is (seq_len, 3); transposing makes each coordinate axis a row.
coords = output[0].T.detach().numpy()
X, Y, Z = coords[0], coords[1], coords[2]

from draw_3d_points import draw_point  # project-local plotting helper

draw_point(X, Y, Z)