import torch
import torchvision
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torch.autograd import Variable
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import numpy as np
import cv2
from net import Net
from netAttention import NetAttention
import matplotlib.pyplot as plt
if __name__ == '__main__':
    import os

    # Hyper-parameters.
    batch_size, lr = 8, 1e-4
    epoch = 40  # index of the last epoch (loop runs epoch + 1 times, 0..epoch)

    # Training data: ./dataset/train with one sub-directory per class.
    trainset = torchvision.datasets.ImageFolder(
        './dataset/train',
        transform=transforms.Compose([
            # transforms.Resize((224, 224)),  # scale to (h, w), or keep the
            #                                 # aspect ratio and scale the
            #                                 # shortest side to the given int
            # transforms.CenterCrop(224),
            transforms.ToTensor()]))
    trainloader = DataLoader(trainset, batch_size=batch_size,
                             shuffle=True, num_workers=1)

    # Validation data: ./dataset/test, same layout as the training set.
    testset = torchvision.datasets.ImageFolder(
        './dataset/test',
        transform=transforms.Compose([
            # transforms.Resize((32, 32)),
            transforms.ToTensor()]))
    testloader = DataLoader(testset, batch_size=batch_size,
                            shuffle=True, num_workers=1)

    # Network with the attention module (plain Net kept for reference).
    # net = Net()
    net = NetAttention()
    # Fall back to CPU when CUDA is unavailable instead of crashing.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("device = ", device)
    net.to(device=device)

    # optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)
    optimizer = optim.Adam(net.parameters(), lr=lr, betas=(0.9, 0.99))

    # Multi-class classification loss; expects raw logits from the net.
    criterion = nn.CrossEntropyLoss()

    print("trainloader = ", trainloader)
    loss_list = []      # summed training loss per epoch
    val_loss_list = []  # summed validation loss per epoch

    # Named "epoch_idx" so the builtin iter() is not shadowed.
    for epoch_idx in range(epoch + 1):
        running_loss = 0.0
        val_loss_sum = 0.0

        # --- training pass ---
        net.train()
        for inputs, labels in trainloader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        # --- validation pass (no gradient tracking) ---
        net.eval()
        with torch.no_grad():
            for val_x, val_y in testloader:
                val_x = val_x.to(device)
                val_y = val_y.to(device)
                val_predict = net(val_x)
                val_loss = criterion(val_predict, val_y)
                val_loss_sum += val_loss.item()

        loss_list.append(running_loss)
        val_loss_list.append(val_loss_sum)
        print("epoch:{}, loss:{}, val_loss:{}".format(epoch_idx, running_loss, val_loss_sum))

        # Checkpoint every other epoch; create the directory on first use so
        # torch.save does not fail with FileNotFoundError.
        if epoch_idx % 2 == 0:
            os.makedirs('./model', exist_ok=True)
            torch.save(net.state_dict(), './model/net_attention' + str(epoch_idx) + '.pth')
            # torch.save(net.state_dict(), './model/net' + str(epoch_idx) + '.pth')

    print('Done!')
    # torch.save(net.state_dict(), 'net_attention.pth')
    torch.save(net.state_dict(), 'net.pth')

    # Plot summed train/val loss per epoch side by side and save the figure.
    x = range(0, len(loss_list))
    plt_title = 'BATCH_SIZE = {}; LEARNING_RATE:{}'.format(batch_size, lr)
    print(loss_list)
    f, (ax1, ax2) = plt.subplots(1, 2)
    f.suptitle(plt_title)
    ax1.set_title('train loss')
    ax1.plot(x, loss_list, '.-')
    ax2.set_title('val loss')
    ax2.plot(x, val_loss_list, '.-', color='red')
    f.savefig('result.png')
    plt.show()





