import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from Data_preprocessing import data_split, Excel_dataset
import time
from model import Net



if __name__ == "__main__":
    # Load the dataset (unnormalized) and split it 90% train / 10% test.
    data = Excel_dataset('data.xlsx', if_normalize=False)
    data_train, data_test = data_split(data, 0.9)

    batchsize = 30
    LR = 0.001
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)

    # BUG FIX: move the model onto the selected device. The original created
    # `device` but never used it, so training silently stayed on CPU.
    net = Net().to(device)

    optimizer = torch.optim.Adam(net.parameters(), LR)

    # Classification loss: expects raw logits and integer class labels.
    loss_func = torch.nn.CrossEntropyLoss()

    # Training loop; accuracy/loss are reported every 5 epochs.
    data_loader = DataLoader(data_train, batch_size=batchsize, shuffle=True)
    # Build the evaluation loader once instead of re-creating it every
    # evaluation pass (the original rebuilt it inside the epoch loop).
    test_dataloader = DataLoader(data_test, batch_size=1, shuffle=True)
    a = time.time()

    for epoch in range(500):
        # Set training mode once per epoch (was redundantly set every step).
        net.train()
        for step, batch in enumerate(data_loader):
            # Renamed from `data` to avoid shadowing the dataset above.
            inputs, labels = batch
            # BUG FIX: batches must live on the same device as the model.
            inputs, labels = inputs.to(device), labels.to(device)
            # Forward pass
            out = net(inputs)
            # Compute the loss
            loss = loss_func(out, labels)
            # Clear gradients from the previous step
            optimizer.zero_grad()
            # Backpropagate
            loss.backward()
            # Update parameters
            optimizer.step()

        if epoch % 5 == 0:
            net.eval()
            with torch.no_grad():
                total = len(data_test)
                j = 0
                for inputs, labels in test_dataloader:
                    inputs, labels = inputs.to(device), labels.to(device)
                    outputs = net(inputs)
                    # Predicted class = argmax over the logits dimension.
                    prediction = torch.max(outputs, 1)[1]

                    # BUG FIX: compare tensors directly instead of calling
                    # .numpy(), which raises on CUDA tensors.
                    j += (prediction == labels).sum().item()

                # Guard against an empty test split (ZeroDivisionError).
                acc = j / total if total else 0.0

                print("训练次数为", epoch , "train loss:", loss.item(),"精确度:",float(acc * 100),"%")

    # Persist only the learned weights (state_dict), not the full module.
    save_path = './Model.pth'
    torch.save(net.state_dict(), save_path)
    print("训练时间：",time.time() - a)

