import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torch.utils.tensorboard import SummaryWriter
# 导入鸢尾花数据的库
from sklearn.datasets import load_iris
from demo import model

# Select the compute device once, globally.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device=', device)

# Load the iris dataset: 150 samples, 4 features, 3 classes.
iris = load_iris()
x = iris.data  # feature matrix, numpy.ndarray of shape (150, 4)
print(type(x))
print(x.shape)
target = iris.target  # integer class labels, numpy.ndarray of shape (150,)
print(target.shape)
# One-hot encode the labels into a (150, 3) matrix (vectorized).
y = np.zeros((x.shape[0], 3))
y[np.arange(target.shape[0]), target] = 1
# Human-readable class names.
names = iris.target_names
print(names)
# Move features and labels onto the selected device.
x = torch.tensor(x, dtype=torch.float32).to(device)
target = torch.tensor(target, dtype=torch.long).to(device)

# Split indices 80/10/10 into train/val/test with a fixed seed
# so the partition is reproducible across runs.
ids = np.arange(x.shape[0])
np.random.seed(666)
np.random.shuffle(ids)
n_samples = x.shape[0]
cut_train = int(n_samples * 0.8)
cut_val = int(n_samples * 0.9)
train_ids = ids[:cut_train]
val_ids = ids[cut_train:cut_val]
test_ids = ids[cut_val:]
x_train, target_train = x[train_ids], target[train_ids]
x_val, target_val = x[val_ids], target[val_ids]
x_test, target_test = x[test_ids], target[test_ids]


# 定义注意力机制模型
# Feature-wise attention layer.
class Atten_model(torch.nn.Module):
    """Attention-style layer mapping (N, in_dim) -> (N, out_dim).

    Projects the input with three linear maps (k, q, v), builds an
    element-wise weight ``softmax((k*q)/sqrt(in_dim), dim=1)`` and
    applies it to ``v`` before the output projection.

    NOTE(review): this is element-wise gating over features, not
    dot-product attention across a sequence axis — confirm intended.
    """

    def __init__(self, in_dim, out_dim):
        super(Atten_model, self).__init__()
        self.k = nn.Linear(in_dim, in_dim)
        self.q = nn.Linear(in_dim, in_dim)
        self.v = nn.Linear(in_dim, in_dim)
        self.proj = nn.Linear(in_dim, out_dim)
        # NOTE(review): defined but never applied in forward().
        self.proj_drop = nn.Dropout(0.1)
        self.GELU = nn.GELU()

    def forward(self, x):
        """Return the projected, attention-weighted features for x."""
        # k, q, v keep the input dimension; each feature gets a weight.
        k = self.k(x)
        q = self.q(x)
        v = self.v(x)
        # Bug fix: torch.sqrt(torch.tensor(v.shape[1])) calls sqrt on an
        # integer (Long) tensor, which raises "sqrt not implemented for
        # 'Long'"; it also allocates a CPU tensor even when x is on GPU.
        # A plain Python float scale avoids both problems.
        scale = float(v.shape[1]) ** 0.5
        atten = F.softmax((k * q) / scale, dim=1)
        # Weighted values -> GELU -> output projection.
        out = self.proj(self.GELU(atten * v))
        return out


class NeuralNet(torch.nn.Module):
    """Two stacked attention layers, a linear head, and a sigmoid.

    Maps (N, in_dim) -> (N, out_dim) sigmoid activations.

    NOTE(review): the sigmoid output is later fed to CrossEntropyLoss,
    which expects raw logits — confirm the sigmoid is intended.
    """

    def __init__(self, in_dim, out_dim, init_weights=True):
        super(NeuralNet, self).__init__()
        # init_weights is accepted for interface compatibility but unused.
        self.layer_1 = Atten_model(in_dim, 10)
        self.layer_2 = Atten_model(10, 20)
        self.linear = nn.Linear(20, out_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Run both attention layers, then the linear head + sigmoid."""
        hidden = self.layer_1(x)
        hidden = self.layer_2(hidden)
        return self.sigmoid(self.linear(hidden))


class LinearNet(torch.nn.Module):
    """Plain MLP baseline: in_dim -> 10 -> 20 -> out_dim.

    ReLU between hidden layers; sigmoid on the output, so every output
    value lies in (0, 1).
    """

    def __init__(self, in_dim, out_dim):
        super(LinearNet, self).__init__()
        self.layer_1 = nn.Linear(in_dim, 10)
        self.layer_2 = nn.Linear(10, 20)
        self.layer_3 = nn.Linear(20, out_dim)
        # Bug fix: forward() referenced self.relu, which was never
        # defined here, so any forward pass raised AttributeError.
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map (N, in_dim) -> (N, out_dim) sigmoid activations."""
        out = self.relu(self.layer_1(x))
        out = self.relu(self.layer_2(out))
        out = self.sigmoid(self.layer_3(out))
        return out


# ---- Instantiate the three models and move them to the device ----
model_atten = NeuralNet(4, 3)
model_atten = model_atten.to(device)
model_selfatten = model(4, 3)  # imported from demo; unused below (kept for parity)
model_selfatten = model_selfatten.to(device)
model_linear = LinearNet(4, 3)
model_linear = model_linear.to(device)

# ---- Train the attention model with full-batch Adam ----
Epochs = 5000
opt = torch.optim.Adam(model_atten.parameters(), lr=0.001, weight_decay=1e-3)
Loss_func = nn.CrossEntropyLoss()
# Bug fix: t_start/t_end were hard-coded to 0.0, so the reported runtime
# was always 0; record actual wall-clock time instead.
t_start = time.time()
board = SummaryWriter('./logs')
for epoch in range(Epochs):
    # Forward pass on the whole training split at once.
    pred_train_atten = model_atten(x_train)
    # CrossEntropyLoss takes (N, C) scores and (N,) integer class labels.
    loss_train_atten = Loss_func(pred_train_atten, target_train)
    # Backward pass and parameter update.
    opt.zero_grad()
    loss_train_atten.backward()
    opt.step()

    board.add_scalar("Train_loss_atten", loss_train_atten.item(), epoch)
    # Training accuracy for this epoch.
    accuracy_atten = (pred_train_atten.argmax(1) == target_train).sum() / pred_train_atten.shape[0]
    # NOTE(review): tag spelling "accuray" kept as-is for log continuity.
    board.add_scalar("Train_accuray_atten", accuracy_atten.item(), epoch)
    if epoch % 100 == 0 or epoch == Epochs - 1:
        print('epoch=', epoch, ' train_loss_atten=', loss_train_atten.item(), "accuary_atten=", accuracy_atten.item())

board.close()
t_end = time.time()
t_d = t_end - t_start
# .item() so %f receives a plain float rather than a 0-dim tensor.
print('atten: 程序运行%d个epochs的时间为%f,误差为%f' % (epoch, t_d, loss_train_atten.item()))
# Bug fix: ".model_atten.pth" saved a *hidden* file; the intent was
# clearly a relative path in the current directory.
torch.save(model_atten, "./model_atten.pth")

# Evaluate the trained attention model on all three splits, one split
# at a time (forward pass, loss, accuracy), with gradients disabled.
model_atten.eval()
with torch.no_grad():
    Y_pred_train_atten = model_atten(x_train)
    loss_train_atten = Loss_func(Y_pred_train_atten, target_train)
    accuracy_train_atten = (Y_pred_train_atten.argmax(1) == target_train).sum() / Y_pred_train_atten.shape[0]

    Y_pred_val_atten = model_atten(x_val)
    loss_val_atten = Loss_func(Y_pred_val_atten, target_val)
    accuracy_val_atten = (Y_pred_val_atten.argmax(1) == target_val).sum() / Y_pred_val_atten.shape[0]

    Y_pred_test_atten = model_atten(x_test)
    loss_test_atten = Loss_func(Y_pred_test_atten, target_test)
    accuracy_test_atten = (Y_pred_test_atten.argmax(1) == target_test).sum() / Y_pred_test_atten.shape[0]

# Evaluate the linear baseline on all three splits, one split at a time
# (forward pass, loss, accuracy), with gradients disabled.
model_linear.eval()
with torch.no_grad():
    Y_pred_train_linear = model_linear(x_train)
    loss_train_linear = Loss_func(Y_pred_train_linear, target_train)
    accuracy_train_linear = (Y_pred_train_linear.argmax(1) == target_train).sum() / Y_pred_train_linear.shape[0]

    Y_pred_val_linear = model_linear(x_val)
    loss_val_linear = Loss_func(Y_pred_val_linear, target_val)
    accuracy_val_linear = (Y_pred_val_linear.argmax(1) == target_val).sum() / Y_pred_val_linear.shape[0]

    Y_pred_test_linear = model_linear(x_test)
    loss_test_linear = Loss_func(Y_pred_test_linear, target_test)
    accuracy_test_linear = (Y_pred_test_linear.argmax(1) == target_test).sum() / Y_pred_test_linear.shape[0]

print("Atten Model: ")
print("训练数据,Loss={}, accuracy={}".format(loss_train_atten, accuracy_train_atten))
print("验证数据,Loss={}, accuracy={}".format(loss_val_atten, accuracy_val_atten))
print("测试数据,Loss={}, accuracy={}".format(loss_test_atten, accuracy_test_atten))

print("Linear Model: ")
print("训练数据,Loss={}, accuracy={}".format(loss_train_linear, accuracy_train_linear))
print("验证数据,Loss={}, accuracy={}".format(loss_val_linear, accuracy_val_linear))
print("测试数据,Loss={}, accuracy={}".format(loss_test_linear, accuracy_test_linear))
