'''数据预处理部分'''
# 1 数据增强：torchvision中transforms模块自带功能，比较实用
# 2 数据预处理：torchvision中transforms也帮我们实现好了，直接调用即可
# 3 DataLoader模块直接读取batch数据
'''网络模块设置'''
# 1 加载预训练模型，torchvision中有很多经典的架构，调用十分方便，并且可以用人家训练好的权重继续训练，
# 也就是所谓的迁移学习
# 2 需要注意的是别人训练好的任务跟咱们的不完全一样，需要把最后的head层改一下，
# 一般也就是最后的全连接层，改成咱们自己的任务
# 3 训练时可以全部重头训练，也可以只训练最后咱们的任务层，因为前几层都是做特征提取的，
# 本质任务目标是一致的
'''网络模型保存与测试'''
# 1 模型保存的时候可以带有选择性，例如在验证集中如果当前效果好则保存
# 2 读取模型进行实际测试

'''下面的程序是对花进行分类的一个程序'''
import os
import matplotlib.pyplot as plt

import numpy as np
import torch
from torch import nn
import torch.optim as optim
from torchvision import transforms, models, datasets
import time
import copy
import json

'''Data loading and preprocessing'''
data_dir = './flower_data'
# BUG FIX: the original used data_dir + './train', which produced the bogus
# path './flower_data./train'; join with a plain '/' instead.
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'

'''Build the data sources'''
# data_transforms declares every image preprocessing step per split.
# ImageFolder expects one sub-folder per class; the folder name is the class label.
data_transforms = {
    'train':
        transforms.Compose([
            transforms.RandomRotation(45),  # random rotation, chosen in [-45, +45] degrees
            transforms.CenterCrop(224),  # crop from the center to the network input size
            transforms.RandomHorizontalFlip(p=0.5),  # horizontal flip with probability 0.5
            transforms.RandomVerticalFlip(p=0.5),  # vertical flip with probability 0.5
            transforms.ColorJitter(brightness=0.2, contrast=0.1, saturation=0.1, hue=0.1),  # brightness, contrast, saturation, hue
            transforms.RandomGrayscale(p=0.025),  # small chance of grayscale conversion
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # ImageNet mean, std
        ]),
    'valid':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
}

batch_size = 8
# Build the datasets: first argument is the actual path, second the preprocessing pipeline
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'valid']}
# Wrap each dataset in a DataLoader that yields shuffled mini-batches
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True) for x in ['train', 'valid']}
dataset_size = {x: len(image_datasets[x]) for x in ['train', 'valid']}
class_name = image_datasets['train'].classes

'''Map class folder names to human-readable flower names'''
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)

'''Visualize some samples'''
# Tensors must be converted to numpy and the normalization undone before display.
def im_convert(tensor):
    '''Undo preprocessing so a (C, H, W) tensor can be shown with imshow.'''
    img = tensor.to('cpu').clone().detach().numpy().squeeze()
    img = img.transpose(1, 2, 0)  # CHW -> HWC, the layout imshow expects
    # Reverse Normalize: x * std + mean, then clamp into the displayable range
    std = np.array((0.229, 0.224, 0.225))
    mean = np.array((0.485, 0.456, 0.406))
    return (img * std + mean).clip(0, 1)

# Preview one validation batch in a rows x columns grid.
fig = plt.figure(figsize=(20, 12))
columns = 4
rows = 2
dataiter = iter(dataloaders['valid'])
# BUG FIX: DataLoader iterators no longer expose a .next() method
# (removed in PyTorch 1.13); use the builtin next() instead.
inputs, classes = next(dataiter)
for idx in range(columns*rows):
    ax = fig.add_subplot(rows, columns, idx+1, xticks=[], yticks=[])
    # class_name maps a label index to its folder name (a numeric string);
    # cat_to_name maps that string to the flower's display name.
    ax.set_title(cat_to_name[str(int(class_name[classes[idx]]))])
    plt.imshow(im_convert(inputs[idx]))
# Not shown by default; uncomment when needed.
# plt.show()

'''Load a torchvision model and start from its pretrained weights'''
# Available choices: ['resnet', 'alexnet', 'vgg', 'squeezenet', 'densenet', 'inception']
model_name = 'resnet'
feature_extract = True  # reuse the pretrained features (freeze the backbone)

# Decide whether training runs on the GPU.
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
    print('CUDA is available!   Training on GPU...')
else:
    print('CUDA is not available.   Training on CPU...')
device = torch.device("cuda:0" if train_on_gpu else "cpu")

def set_parameter_requires_grad(model, feature_extracting):
    '''Freeze every parameter of *model* when *feature_extracting* is truthy.'''
    if not feature_extracting:
        return
    # Frozen parameters are skipped by autograd, so only the new head trains.
    for p in model.parameters():
        p.requires_grad = False

#model_ft = models.resnet152()
#print(model_ft) # 打印一下model模型结构


'''Based on the finetuning example on the pytorch.org website'''
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    '''Build the requested architecture and resize its head for our task.

    Args:
        model_name: one of 'resnet', 'alexnet', 'vgg', 'squeezenet',
            'densenet', 'inception' (only 'resnet' is implemented here).
        num_classes: number of output classes for the new head.
        feature_extract: when True, freeze the pretrained backbone.
        use_pretrained: load ImageNet weights when True.

    Returns:
        (model, input_size) where input_size is the expected square image
        side; the unimplemented stub branches return (None, 0).
    '''
    model_ft = None
    input_size = 0

    if model_name == 'resnet':
        '''Resnet152'''
        # torchvision >= 0.13 replaced the `pretrained=` kwarg with `weights=`
        # (and removed `pretrained` in 0.15); try the new API first and fall
        # back to the legacy keyword on older versions.
        try:
            weights = models.ResNet152_Weights.DEFAULT if use_pretrained else None
            model_ft = models.resnet152(weights=weights)
        except AttributeError:
            model_ft = models.resnet152(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)  # freeze backbone layers
        num_ftrs = model_ft.fc.in_features  # width of the original fully-connected head
        # BUG FIX: the original hard-coded 102 outputs, ignoring num_classes.
        # LogSoftmax here pairs with NLLLoss downstream (not CrossEntropyLoss).
        model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, num_classes),
                                    nn.LogSoftmax(dim=1))
        # BUG FIX: the original assigned a misspelled `inputs_size` here and
        # returned it, so every stub branch below raised NameError on return.
        input_size = 224

    elif model_name == 'alexnet':
        """Alexnet"""
        pass

    elif model_name == 'vgg':
        """VGG11_bn"""
        pass

    elif model_name == 'squeezenet':
        """squeezenet"""
        pass

    elif model_name == 'densenet':
        """densenet"""
        pass

    elif model_name == 'inception':
        """inception"""
        pass

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size

'''Decide which layers will be trained'''
model_ft, inputs_size = initialize_model(model_name, 102, feature_extract, use_pretrained=True)

# Move the model to the selected device (GPU when available).
model_ft = model_ft.to(device)

# Checkpoint file for the best model seen during training.
filename = 'checkpoint.pth'

# Gather the parameters the optimizer should update.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
    # Backbone is frozen: optimize only the parameters that still require grad.
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            print('\t', name)
else:
    # Everything trains; just report what requires gradients.
    for name, param in model_ft.named_parameters():
        if param.requires_grad:
            print("\t", name)

print(model_ft)

'''Optimizer setup'''
optimizer_ft = optim.Adam(params_to_update, lr=1e-2)
# Multiply the learning rate by 0.1 every 7 epochs.
scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# The head already ends in LogSoftmax, so use NLLLoss here:
# CrossEntropyLoss = LogSoftmax + NLLLoss and would apply LogSoftmax twice.
criterion = nn.NLLLoss()

'''Training loop'''
def train_model(model, dataloaders, criterion, optimizer, num_epoch=25, is_inception=False, filename=filename):
    '''Train *model*, checkpointing whenever validation accuracy improves.

    Relies on the module-level `device` and `scheduler` globals.

    Args:
        model: the network to train (moved to `device` internally).
        dataloaders: dict with 'train' and 'valid' DataLoaders.
        criterion: loss function (NLLLoss here, since the head is LogSoftmax).
        optimizer: optimizer over the trainable parameters.
        num_epoch: number of epochs to run.
        is_inception: True when the model returns (outputs, aux_outputs).
        filename: path for the best-model checkpoint.

    Returns:
        (best model, val acc history, train acc history,
         valid losses, train losses, learning rate per epoch)
    '''
    since = time.time()
    best_acc = 0
    model.to(device)

    val_acc_history = []
    train_acc_history = []
    train_losses = []
    valid_losses = []
    LRs = [optimizer.param_groups[0]['lr']]

    best_model_wts = copy.deepcopy(model.state_dict())

    for epoch in range(num_epoch):
        print('Epoch {}/{}'.format(epoch, num_epoch-1))
        print('-'*10)

        # One training pass and one validation pass per epoch
        for phase in ['train','valid']:
            if phase == 'train':
                model.train()   # enable dropout/batchnorm updates
            else:
                model.eval()    # evaluation mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over the whole split once
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Reset gradients from the previous step
                optimizer.zero_grad()
                # Compute gradients only during the training phase
                with torch.set_grad_enabled(phase=='train'):
                    if is_inception and phase == 'train':
                        # Inception also returns an auxiliary head while training
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4*loss2
                    else:  # resnet takes this path; the branch above never runs
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)

                    _, preds = torch.max(outputs, 1)

                    # Update weights only in the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Accumulate loss/correct counts weighted by batch size
                running_loss += loss.item()*inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            time_elapsed = time.time() - since
            print('Time elapsed {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
            print('{} Loss: {:.4f}\tACC: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # Checkpoint whenever validation accuracy improves
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())  # snapshot all weights
                state = {
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(state, filename)
            if phase == 'valid':
                val_acc_history.append(epoch_acc)
                # BUG FIX: the original appended epoch_acc here, so the
                # "valid losses" history actually held accuracies.
                valid_losses.append(epoch_loss)
                # BUG FIX: StepLR.step() takes no metric; the original passed
                # epoch_acc, which StepLR would misread as an epoch index
                # (that signature belongs to ReduceLROnPlateau).
                scheduler.step()
            if phase == 'train':
                train_acc_history.append(epoch_acc)
                train_losses.append(epoch_loss)

        print('Optimizer learning rate : {:.7f}'.format(optimizer.param_groups[0]['lr']))
        LRs.append(optimizer.param_groups[0]['lr'])
        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Reload the best validation weights before returning
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, train_acc_history, valid_losses, train_losses, LRs

'''Kick off training'''
_results = train_model(
    model_ft,
    dataloaders,
    criterion,
    optimizer_ft,
    num_epoch=20,
    is_inception=(model_name == 'inception'),
)
# Unpack the trained model and the per-epoch histories.
(model_ft, val_acc_history, train_acc_history,
 valid_losses, train_losses, LRs) = _results