import math
from manul_exp import Softmax, MySoftMax
import torch
from torch import nn
from d2l import torch as d2l

n_train = 200  # number of training examples
x_train, _ = torch.sort(torch.rand(n_train) * 5)  # sorted training inputs, uniform over [0, 5)
from heatmaps import show_heatmaps


def f(x):
    """Ground-truth target function: f(x) = 2*sin(x) + x**0.8."""
    return torch.sin(x) * 2 + torch.pow(x, 0.8)


# Noisy training targets: ground truth f(x) plus Gaussian noise (std 0.5).
y_train = f(x_train) + torch.normal(0.0, 0.5, (n_train,))  # training outputs
x_test = torch.arange(0, 5, 0.025)  # test inputs, evenly spaced over [0, 5)
# print(x_test,"111111111")
y_truth = f(x_test)  # ground-truth outputs at the test inputs
n_test = len(x_test)  # number of test examples (5 / 0.025 = 200)
print(x_test)
print(n_test)


def plot_kernel_reg(y_hat):
    """Plot the ground truth and one prediction curve over the test inputs,
    with the noisy training observations overlaid as dots.

    Thin wrapper: the body previously duplicated plot_kernel_reg_with_all
    line-for-line, so delegate to it with a single-element list instead.
    """
    plot_kernel_reg_with_all([y_hat])


def plot_kernel_reg_with_all(y_hats):
    """Plot the ground truth plus any number of prediction curves over the
    test inputs, then overlay the noisy training observations as dots.
    """
    curves = [y_truth] + y_hats
    d2l.plot(x_test, curves, 'x', 'y', legend=['Truth', 'Pred'],
             xlim=[0, 5], ylim=[-1, 5])
    # x_train are n random points on [0, 5); y_train are their noisy targets.
    d2l.plt.plot(x_train, y_train, 'o', alpha=0.5)
    d2l.plt.show()


# Baseline: predict the mean of y_train for every test input
# (average pooling — ignores the query entirely).
y_hat = torch.repeat_interleave(y_train.mean(), n_test)
plot_kernel_reg(y_hat)

# X_repeat has shape (n_test, n_train);
# every row repeats one test input (i.e. the same query) n_train times.
X_repeat = x_test.repeat_interleave(n_train).reshape((-1, n_train))
print("x_test", x_test)
print("X_repeat", X_repeat)
print("x_train", x_train)
print("X_repeat - x_train", X_repeat - x_train)
# x_train holds the keys. attention_weights has shape (n_test, n_train);
# each row holds the attention weights that one query distributes over
# the values (y_train).

# Gaussian kernel: softmax of -(query - key)^2 / 2 along the key axis.
attention_weights = nn.functional.softmax(-(X_repeat - x_train) ** 2 / 2, dim=1)
# NOTE(review): alternative kernel kept for experimentation.
# attention_weights = nn.functional.softmax((X_repeat - x_train) ** 2, dim=1)

# Hand-rolled softmax variant imported from manul_exp, for comparison.
my_attention_weights = MySoftMax(x_test, x_train)

print("Manul Softmax\n{}".format(Softmax(x_test, x_train)))
print("attention_weights\n{}".format(attention_weights))
"""
结果一致，也说明如果采用其他的核函数也会有其他的拟合结果。
Manul Softmax [[4.70531931e-01 4.49639965e-01 7.48723705e-02 4.95358172e-03
  2.15088508e-06]
 [3.23843788e-01 3.52719438e-01 2.69985690e-01 5.32907072e-02
  1.60375887e-04]
 [1.08281522e-01 1.34420703e-01 4.72968523e-01 2.78519826e-01
  5.80942645e-03]
 [1.40217062e-02 1.98395043e-02 3.20886608e-01 5.63752593e-01
  8.14995891e-02]
 [7.24289958e-04 1.16804767e-03 8.68432137e-02 4.55182953e-01
  4.56081496e-01]]
attention_weights tensor([[4.7053e-01, 4.4964e-01, 7.4872e-02, 4.9536e-03, 2.1509e-06],
        [3.2384e-01, 3.5272e-01, 2.6999e-01, 5.3291e-02, 1.6038e-04],
        [1.0828e-01, 1.3442e-01, 4.7297e-01, 2.7852e-01, 5.8094e-03],
        [1.4022e-02, 1.9840e-02, 3.2089e-01, 5.6375e-01, 8.1500e-02],
        [7.2429e-04, 1.1680e-03, 8.6843e-02, 4.5518e-01, 4.5608e-01]])
"""
print("y_train", y_train)

# Each element of y_hat is a weighted average of the values (y_train),
# using the attention weights as the mixing weights.
y_hat = torch.matmul(attention_weights, y_train)
print("y_hat", y_hat)
plot_kernel_reg(y_hat)

# NOTE(review): assumes MySoftMax returns something matmul-compatible with a
# torch tensor of shape (n_test, n_train) — confirm against manul_exp.
y_hat1 = torch.matmul(my_attention_weights, y_train)
plot_kernel_reg_with_all([y_hat, y_hat1])

# Visualize the attention matrix as a heatmap; the two unsqueezes add the
# (num_rows, num_cols) leading axes that show_heatmaps expects.
show_heatmaps(attention_weights.unsqueeze(0).unsqueeze(0),
              xlabel='Sorted training inputs',
              ylabel='Sorted testing inputs')
# d2l.show_heatmaps(attention_weights.unsqueeze(0).unsqueeze(0),
#                   xlabel='Sorted training inputs',
#                   ylabel='Sorted testing inputs')
