#!/usr/bin/env python3
# https://www.cnblogs.com/silence-cho/p/11404817.html
# https://www.cnblogs.com/lianshuiwuyi/p/11179473.html
from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
from PIL import  Image
import matplotlib.pyplot as plt
import time
import os
import copy
# Data augmentation + normalization for training,
# normalization only for validation.

data_transforms = {
    # Training pipeline
    'train': transforms.Compose([
        # Random crop rescaled to 224x224: even a part of the object is
        # still treated as an instance of its class.
        transforms.RandomResizedCrop(224),
        # Random horizontal mirror of the PIL image (default p=0.5)
        transforms.RandomHorizontalFlip(),
        # Convert the PIL image to a Tensor
        transforms.ToTensor(),
        # Per-channel normalization with the ImageNet mean/std used by the
        # pretrained resnet weights.
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    # Validation pipeline
    'val': transforms.Compose([
        # Resize the shorter edge to 256, keeping the aspect ratio
        transforms.Resize(256),
        # Crop the central 224x224 region
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}
# Root directory of the dataset; expects `train/` and `val/` subfolders,
# one folder per class (ImageFolder layout).
data_dir = 'data/hymenoptera_data'

# Build the train/val datasets with their respective transforms
image_datasets = {x: datasets.ImageFolder(os.path.join(
    data_dir, x), data_transforms[x]) for x in ['train', 'val']}

# Batched loaders over both datasets
dataloaders = {x: torch.utils.data.DataLoader(
    image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Number of samples in each split (used to average epoch metrics)
dataset_size = {x: len(image_datasets[x]) for x in ['train', 'val']}

# Class names, taken from the subfolder names of the train split
class_name = image_datasets['train'].classes

# Select the training device: first GPU if available, else CPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


# Matplotlib visualization helper
def imshow(inp, title=None):
    """Show a (C, H, W) image tensor after undoing the ImageNet normalization.

    Args:
        inp: CPU tensor in channel-first layout, as produced by the loaders.
        title: optional plot title.
    """
    # Channel-first tensor -> HWC numpy array, as matplotlib expects
    img = inp.numpy().transpose((1, 2, 0))
    # Invert Normalize: x * std + mean (ImageNet statistics)
    img = img * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
    img = np.clip(img, 0, 1)
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(10)    # pause so the figure gets a chance to render


# Model training loop
def train_model(model, criterion, optimizer, schduler, num_epochs=25):
    """Run the train/val loop and return the model with its best-val weights.

    Args:
        model: network to train, already moved to `device`.
        criterion: loss function (e.g. nn.CrossEntropyLoss).
        optimizer: optimizer over the model parameters.
        schduler: per-epoch LR scheduler (name kept as-is for backward
            compatibility with existing callers; it is a typo of "scheduler").
        num_epochs: number of epochs to run.

    Uses the module-level globals `dataloaders`, `dataset_size` and `device`.
    """
    # Wall-clock start, for the summary printed at the end
    since = time.time()

    # Snapshot of the best weights seen on the validation set so far
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs-1))
        print('-'*10)

        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()   # training mode (dropout / batch-norm active)
            else:
                model.eval()    # evaluation mode

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Zero the parameter gradients
                optimizer.zero_grad()

                # Forward pass; track gradients only in the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    # Predicted class = argmax over class scores
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # Backward + optimize only during training
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Statistics: loss.item() is the batch mean, so re-weight by
                # batch size before averaging over the whole dataset.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            # BUGFIX: since PyTorch 1.1 the LR scheduler must be stepped
            # *after* the epoch's optimizer.step() calls. The original code
            # stepped it at the start of the training phase, which skipped
            # the initial learning rate value.
            if phase == 'train':
                schduler.step()

            epoch_loss = running_loss / dataset_size[phase]
            epoch_acc = running_corrects.double() / dataset_size[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # Deep-copy the weights whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Reload the best validation weights before returning
    model.load_state_dict(best_model_wts)
    return model


if __name__ == '__main__':
    # Fetch one batch of training data (only used by the commented-out
    # visualization below).
    inputs, classes = next(iter(dataloaders['train']))
    # Make an image grid from the batch, for display only
    # out = torchvision.utils.make_grid(inputs)
    # imshow(out, title=[class_name[x] for x in classes])

    # Load an ImageNet-pretrained resnet18 and freeze all its weights, so
    # only the newly added classifier head below will actually learn
    # (feature extraction).
    model_conv = models.resnet18(pretrained=True)
    for param in model_conv.parameters():
        param.requires_grad = False
    # Input feature count of the final fully-connected layer
    num_ftrs = model_conv.fc.in_features
    # Replace the classifier head with an 8-way output layer.
    # NOTE(review): the hymenoptera dataset referenced above has 2 classes;
    # 8 outputs (and the 'eightType' filename below) suggest a different
    # dataset is intended — confirm against the contents of data_dir.
    model_conv.fc = nn.Linear(num_ftrs, 8)
    model_conv = model_conv.to(device)

    # Cross-entropy loss over the class scores
    criterion = nn.CrossEntropyLoss()

    # Optimizer over all parameters; the frozen ones have requires_grad=False
    # and receive no gradients, so only the new fc layer is updated.
    optimizer_ft = optim.SGD(model_conv.parameters(), lr=0.001, momentum=0.9)

    # Every 7 epochs, decay the learning rate by a factor of 0.1
    exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer_ft, step_size=7, gamma=0.1)
    model_ft = train_model(model_conv, criterion, optimizer_ft,
                           exp_lr_scheduler, num_epochs=20)
    print('Training Finish')
    # Save the entire model object (pickled module, not just a state_dict)
    save_path = './eightType.pkl'
    torch.save(model_ft, save_path)
