import os
from torchvision.transforms import transforms
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import time
from torchvision import transforms
from Final_assignment.CNN import CNN
from Final_assignment.Dataset import MyDataset
from Final_assignment.tools import plot_curve

# Runtime configuration: prefer the first GPU when CUDA is available,
# otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

batch_size = 20       # number of samples per mini-batch
learning_rate = 0.01  # SGD learning rate
act_fun = "nn.ReLU"   # activation-function name handed to CNN (alternative: nn.Sigmoid)

# Preprocessing pipeline: convert PIL images to tensors, then normalize
# every channel to mean 0.5 / std 0.5.
transforms_train = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
)


def train(model, criterion, optimizer, train_loader):
    """Train ``model`` for one pass over ``train_loader``.

    Standard SGD loop: forward, loss, backward, parameter update. Prints
    the current loss every 20 batches and finally plots the collected
    loss curve via ``plot_curve``.

    Args:
        model: network to train (already moved to ``device``).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer built over ``model.parameters()``.
        train_loader: DataLoader yielding ``(img, label)`` batches.
    """
    model.train()  # make sure dropout/BN layers are in training mode
    # NOTE(review): the original named this counter "epoch", but it counts
    # mini-batch steps within a single pass over the loader.
    step = 0
    step_list = []
    loss_list = []
    for img, label in train_loader:
        # img: [batch_size, 3, H, W], label: [batch_size] — assumed from the
        # original comment; TODO confirm against MyDataset.
        img, label = img.to(device), label.to(device)  # move batch to GPU/CPU
        out = model(img)
        loss = criterion(out, label)
        optimizer.zero_grad()  # clear gradients left over from the previous step
        loss.backward()        # backprop: compute gradients for this batch
        optimizer.step()       # apply the gradient update
        step += 1
        if step % 20 == 0:
            # .item() (not the deprecated .data.item()) extracts the scalar loss
            print('epoch: {}, loss: {:.4}'.format(step, loss.item()))
            step_list.append(step)
            loss_list.append(loss.item())

    plot_curve(step_list, loss_list, "loss_" + str(learning_rate), "Epoch", "Loss")


def test(model, criterion, test_loader):
    """Evaluate ``model`` on ``test_loader``.

    Args:
        model: trained network (already on ``device``).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        test_loader: DataLoader yielding ``(img, label)`` batches.

    Returns:
        Tuple ``(eval_loss, eval_acc)``: the loss summed per-sample over
        the whole loader, and the number of correctly classified samples.
        Callers divide both by the dataset size to get averages.
    """
    # NOTE(review): model.eval() is deliberately left to the caller, as in the
    # original code — enabling it here would change dropout/BN behavior.
    eval_loss = 0.0
    eval_acc = 0  # running count of correct predictions
    with torch.no_grad():  # inference only: skip autograd graph construction
        for img, label in test_loader:
            img, label = img.to(device), label.to(device)  # move batch to GPU/CPU
            out = model(img)
            loss = criterion(out, label)
            # weight the mean batch loss by the batch size so the total is per-sample
            eval_loss += loss.item() * label.size(0)
            _, pred = torch.max(out, 1)  # torch.max returns (values, indices)
            eval_acc += (pred == label).sum().item()  # correct predictions in this batch
    return eval_loss, eval_acc


def only_train(train_folder):
    """Build a CNN, train it on the images under ``train_folder``, return it.

    Creates the dataset and loader, instantiates ``CNN(act_fun)``, trains it
    with SGD + cross-entropy via :func:`train`, and reports the wall-clock
    training time in minutes.

    Args:
        train_folder: directory path handed to ``MyDataset``.

    Returns:
        The trained model (still on ``device``).
    """
    print("model is training ...")
    train_data = MyDataset(train_folder, transform=transforms_train)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=0)

    start = time.time()  # start the training timer

    print("learning_rate:{}".format(learning_rate))
    print("Activation fun:{}".format(act_fun))
    model = CNN(act_fun)
    model.to(device)
    # loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)

    train(model, criterion, optimizer, train_loader)

    print("train time using (minutes): ", (time.time() - start) / 60)
    return model


def only_test(model_name, test_folder, model):
    """Evaluate ``model`` on the images under ``test_folder`` and print metrics.

    Args:
        model_name: unused; kept for backward compatibility (it belonged to
            the now-removed model-loading path).
        test_folder: directory path handed to ``MyDataset``.
        model: an already-trained network on ``device``.
    """
    print("model is testing ...")
    test_data = MyDataset(test_folder, transform=transforms_train)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=0)

    criterion = nn.CrossEntropyLoss()
    eval_loss, eval_acc = test(model, criterion, test_loader)
    # BUG FIX: divide by the true dataset size. The previous denominator,
    # len(test_loader) * batch_size, over-counts when the last batch is
    # partial (DataLoader keeps it by default), understating loss and accuracy.
    n_samples = len(test_data)
    loss = eval_loss / n_samples
    acc = eval_acc / n_samples
    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(loss, acc))
