# Use the ResNet18 on Cifar-10
import os

import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torch

import torch.nn as nn
import numpy as np
# check gpu
from torch.utils.data import DataLoader

from ENF_deepLearning.utils.helper.Data_Helper import Data_Helper
from Resnet18.Dataset import Net_Dataset
from Resnet18.ResNet import ResNet18

# Script entry point: DataLoader with num_workers > 0 spawns worker processes,
# so on Windows/macOS the training code must live under the __main__ guard.
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE(review): double precision doubles memory/compute for CNNs; float32
    # is the usual choice — kept as-is for backward compatibility.
    torch.set_default_tensor_type(torch.DoubleTensor)

    # Hyperparameters: set before training starts, not learned from data.
    EPOCH = 10        # total number of training epochs
    pre_epoch = 0     # epoch index to resume from
    BATCH_SIZE = 256  # samples per training batch
    LR = 0.01         # SGD learning rate

    # ---------------- dataset preparation ----------------
    # Each CSV file is one sample: the first value is the label, the remaining
    # values are the features (assumed to flatten to dim x dim — TODO confirm).
    data_path = '../ENF_deepLearning/utils/data'
    file_list = os.listdir(data_path)  # renamed: don't shadow builtin `list`
    data_helper = Data_Helper()        # project helper for data operations

    Y = []          # labels
    X = []          # feature vectors
    max_len = 0     # longest raw sample seen (was misleadingly named `minn`)
    PAD = 350       # zero-pad short samples up to this length
    for item in file_list:
        path = os.path.join(data_path, item)
        # BUG FIX: np.float / np.int were removed in NumPy 1.24 — the original
        # dtype=np.float / .astype(np.float) crashes on modern NumPy.
        data = np.loadtxt(path, dtype=float, delimiter=',')
        max_len = max(max_len, data.shape[0])
        data = data.tolist()
        if len(data) < PAD:
            data.extend([0] * (PAD - len(data)))
        X.append(np.array(data[1:]).astype(float))  # features
        Y.append(np.array(data[0]).astype(int))     # label (first CSV value)
    X = np.array(X)
    Y = np.array(Y)

    dim = 32  # each sample is reshaped into a dim x dim single-channel "image"

    x_train, x_test, y_train, y_test = data_helper.train_test_split(X, Y, 0.1, True)
    # NOTE(review): resize_ silently truncates, or pads with *uninitialized*
    # memory, when the element count != dim*dim — verify that every sample has
    # exactly dim*dim + 1 values before trusting this reshape.
    x_train = torch.tensor(x_train).resize_(x_train.shape[0], dim, dim)
    x_test = torch.tensor(x_test).resize_(x_test.shape[0], dim, dim)
    x_train = x_train.reshape(x_train.shape[0], 1, dim, dim)
    x_test = x_test.reshape(x_test.shape[0], 1, dim, dim)

    # BUG FIX: x_test is already a torch.Tensor at this point, so the original
    # torch.from_numpy(x_test) raised TypeError; move it to the device directly.
    xx = x_test.to(device)
    yy = torch.from_numpy(y_test).to(device)  # y_test is still a numpy array

    # Train with the custom dataset wrapper.
    trainset = Net_Dataset(x_train, y_train)
    trainloader = DataLoader(dataset=trainset, batch_size=BATCH_SIZE,
                             shuffle=True, num_workers=2, pin_memory=True)

    # ---------------- model / loss / optimizer ----------------
    net = ResNet18().to(device)
    criterion = nn.CrossEntropyLoss()  # loss function
    optimizer = optim.SGD(net.parameters(), lr=LR,
                          momentum=0.9, weight_decay=5e-4)  # optimizer

    ###################################### train ###################################
    for epoch in range(pre_epoch, EPOCH):
        print('\nEpoch: %d' % (epoch + 1))
        net.train()
        sum_loss = 0.0
        correct = 0.0
        total = 0.0
        length = len(trainloader)  # hoisted: invariant across batches
        for i, data in enumerate(trainloader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()

            # forward & backward
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # running loss & accuracy over this epoch so far
            sum_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += predicted.eq(labels.data).cpu().sum()
            print('[epoch:%d, iter:%d]============Loss: %.03f | Acc: %.3f%% '
                  % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))

        # Test-set accuracy after each epoch. eval() makes batchnorm use its
        # running statistics; no_grad() skips building an autograd graph for
        # the whole test batch.
        net.eval()
        with torch.no_grad():
            predict = net(xx)
        prediction = torch.argmax(predict, dim=1) == yy
        acc = prediction.sum().float().cpu().numpy() / x_test.shape[0]  # test accuracy
        print('acc:', acc)
    print('Train has finished, total epoch is %d' % EPOCH)
