#VGG_16

import shutil
import os
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
from torchvision import models, transforms, datasets
import torch.utils.data
import xlwt


from torch.autograd import Variable

# Train the model
'''
model: the model to train
dataloader: training-set DataLoader
size: number of samples in the training set
epochs: number of training epochs
optimizer: the optimizer
'''
def train_model(device, model, dataloader, size, epochs, criterion, optimizer):
    """Train `model` for `epochs` passes over `dataloader`.

    After each epoch, saves accuracy/loss curve images to hard-coded paths
    under D:\\Program Files\\food101\\ (NOTE(review): paths should probably
    be parameters — confirm with the owner before running elsewhere).

    Args:
        device: torch.device to run the computation on.
        model: network to train (updated in place).
        dataloader: DataLoader yielding (inputs, class_labels) batches.
        size: total number of samples in the training set.
        epochs: number of training epochs.
        criterion: loss function (expects log-probabilities here, see main()).
        optimizer: optimizer stepping the trainable parameters.

    Returns:
        (loss_history, acc_history): per-epoch average loss and accuracy lists.
    """
    model.train()  # training mode: enables weight updates / dropout behavior
    loss_history = []
    acc_history = []
    for epoch in range(epochs):
        running_loss = 0.0
        running_corrects = 0
        for inputs, classes in dataloader:
            inputs = inputs.to(device)
            classes = classes.to(device)
            outputs = model(inputs)  # forward pass

            loss = criterion(outputs, classes)
            optimizer.zero_grad()  # clear gradients left over from the previous batch
            loss.backward()        # backpropagation: compute gradient of every parameter
            optimizer.step()       # one gradient-descent parameter update
            _, preds = torch.max(outputs.data, 1)  # index of the max logit = predicted class
            # criterion returns the batch-MEAN loss; weight it by the batch size so
            # that running_loss / size below is the true per-sample average.
            # (The original summed batch means and divided by the dataset size,
            # which under-scales the reported loss.)
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == classes.data)

        epoch_loss = running_loss / size
        epoch_acc = running_corrects.item() / size
        loss_history.append(epoch_loss)
        acc_history.append(epoch_acc)

        # Save the accuracy curve so far for this epoch.
        plt.plot(acc_history, c='r', ls='-', marker='o', mec='b', mfc='w')
        plt.xlabel('epoch')
        plt.ylabel('Acc')
        fig = plt.gcf()
        plt.pause(0.1)
        fig.savefig('D:\\Program Files\\food101\\Epoch_acc_img\\acc-{}.jpg'.format(epoch + 1))
        plt.close()
        # Save the loss curve so far for this epoch.
        plt.plot(loss_history, c='r', ls='-', marker='o', mec='b', mfc='w')
        plt.xlabel('epoch')
        plt.ylabel('Loss')
        fig = plt.gcf()
        plt.pause(0.1)
        fig.savefig('D:\\Program Files\\food101\\Epoch_loss_img\\loss-{}.jpg'.format(epoch + 1))
        plt.close()

        print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
    print('Finish train')
    return loss_history, acc_history



def val_model(device, model, dataloader, size, criterion):
    """Evaluate `model` on the validation set, then save two copies of it.

    Saves the full model as model1.pth, then strips the tail of the classifier
    (indices '5'-'7' — '7' is the LogSoftmax appended in main(); assumes main()
    has already installed it) and saves the truncated feature-extractor as
    model2.pth. NOTE(review): `model` is mutated by the deletions — callers
    must not reuse it for further classification afterwards.

    Args:
        device: torch.device to run on.
        model: trained network to evaluate (mutated: classifier tail removed).
        dataloader: validation DataLoader yielding (inputs, class_labels).
        size: number of samples in the validation set.
        criterion: loss function.
    """
    model.eval()  # evaluation mode: freezes dropout/batchnorm behavior
    running_loss = 0.0
    running_corrects = 0
    # No gradients are needed for evaluation; skipping them saves time and memory.
    with torch.no_grad():
        for inputs, classes in dataloader:
            inputs = inputs.to(device)
            classes = classes.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, classes)
            _, preds = torch.max(outputs.data, 1)
            # Weight the batch-mean loss by batch size so the division below
            # yields a true per-sample average (fixes the original's scaling).
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == classes.data)
    epoch_loss = running_loss / size
    epoch_acc = running_corrects.item() / size
    print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
    print('Finish validate')
    torch.save(model, 'D:\\Program Files\\food101\\model1.pth')
    # Drop the last classifier layers to keep only the feature-extraction part.
    del model.classifier._modules['5']
    del model.classifier._modules['6']
    del model.classifier._modules['7']
    torch.save(model, 'D:\\Program Files\\food101\\model2.pth')

def test_model(device, path, dataloader):
    """Load a saved model from `path` and append its predictions to result.csv.

    Args:
        device: torch.device to run inference on.
        path: filesystem path of a model saved with torch.save(model, ...).
        dataloader: test DataLoader yielding (inputs, class_labels); labels
            are ignored.
    """
    # map_location lets a model saved on GPU load on a CPU-only host.
    model = torch.load(path, map_location=device)
    model.eval()
    total_preds = []
    with torch.no_grad():  # inference only — no gradients needed
        for inputs, _classes in dataloader:
            inputs = inputs.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            # Convert to plain ints so the CSV gets "3", not "tensor(3)".
            total_preds.extend(preds.cpu().tolist())

    with open("D:\\Program Files\\food101\\result.csv", 'a+') as f:
        # Iterate over all predictions (the original hard-coded range(27),
        # which truncated or crashed for any other test-set size).
        for i, pred in enumerate(total_preds):
            f.write("{},{}\n".format(i, pred))
def main():
    """Fine-tune a pretrained VGG16 on the Food-101 image folders, then
    validate, save, and run test-set prediction.

    Expects ImageFolder-style directory trees under
    D:\\Program Files\\food101\\image\\{train,val,test}.
    """
    # Use the GPU when one is available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Dataset root directory.
    data_dir = 'D:\\Program Files\\food101\\image'

    # Per-split directories; each must contain one sub-folder per class so
    # that ImageFolder can derive labels from the directory structure.
    train_dir = os.path.join(data_dir, 'train')
    val_dir = os.path.join(data_dir, 'val')
    test_dir = os.path.join(data_dir, 'test')
    # (Removed three unused `list_dir = os.listdir(...)` assignments the
    # original made and never read.)

    # Shared preprocessing for all three splits.
    transform = transforms.Compose([
        transforms.CenterCrop(224),  # crop the central 224x224 region
        transforms.ToTensor(),       # PIL Image/ndarray -> tensor scaled to [0, 1]
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # ImageNet stats
    ])

    data_sets = {'train': datasets.ImageFolder(train_dir, transform),
                 'val': datasets.ImageFolder(val_dir, transform),
                 'test': datasets.ImageFolder(test_dir, transform)}

    # Number of samples in each split.
    data_set_sizes = {x: len(data_sets[x]) for x in ['train', 'val', 'test']}
    print(data_set_sizes['train'])
    print(data_set_sizes['val'])
    print(data_set_sizes['test'])

    # DataLoaders for the three splits. num_workers > 0 reads batches in
    # worker processes to speed up data loading.
    train_loader = torch.utils.data.DataLoader(data_sets['train'],
                                               batch_size=500,
                                               shuffle=True,  # shuffle only the training data
                                               num_workers=8)
    val_loader = torch.utils.data.DataLoader(data_sets['val'],
                                             batch_size=500,
                                             shuffle=False,
                                             num_workers=8)
    test_loader = torch.utils.data.DataLoader(data_sets['test'],
                                              batch_size=5,
                                              shuffle=False,
                                              num_workers=2)

    # Start from an ImageNet-pretrained VGG16.
    new_model = models.vgg16(pretrained=True)

    # Freeze ALL pretrained parameters; only the classifier layers replaced
    # below (which default to requires_grad=True) remain trainable.
    for param in new_model.parameters():
        param.requires_grad = False

    # Replace the classifier tail: shrink layer 3, map to the 101 Food-101
    # classes, and append LogSoftmax so NLLLoss can be used directly.
    new_model.classifier._modules['3'] = nn.Linear(4096, 2048)
    new_model.classifier._modules['6'] = nn.Linear(2048, 101)
    new_model.classifier._modules['7'] = nn.LogSoftmax(dim=1)

    new_model = new_model.to(device)
    print(new_model)

    # NLLLoss consumes log-probabilities, matching the LogSoftmax output
    # layer above (it does not apply the log itself).
    criterion = nn.NLLLoss()

    # Learning rate.
    lr = 0.001

    # Plain SGD over the final linear layer's parameters only.
    # NOTE(review): classifier[3] was also replaced but is NOT passed to the
    # optimizer, so it stays at its random initialization — confirm intent.
    optimizer = torch.optim.SGD(new_model.classifier[6].parameters(), lr=lr)

    # Train.
    train_model(device, new_model, train_loader, data_set_sizes['train'], 40, criterion, optimizer)

    # Validate (also saves model1.pth / model2.pth).
    val_model(device, new_model, val_loader, data_set_sizes['val'], criterion)

    # Predict on the test set with the saved full model.
    test_model(device, 'D:\\Program Files\\food101\\model1.pth', test_loader)