# -*- coding: utf-8 -*-
"""
@file name      : finetune_resnet18.py
@author         : QuZhang
@date           : 2020-1-2 15:25
@brief          : 莫得finetune方法
"""
import os
from tools.common_tools import set_seed
import torch
from torchvision.transforms import transforms
from tools.my_dataset import AntsDataset
from torch.utils.data import DataLoader
import torchvision.models as models
import torch.nn as nn
import torch.optim as optim
import numpy as np
from matplotlib import pylab as plt


# Reproducibility and runtime-environment setup.
set_seed(1)
# Work around the "duplicate OpenMP runtime" crash seen on some platforms.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Prefer GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"use device ：{device}")
# Class-name -> label-index mapping used by the dataset.
label_name = {"ants": 0, "bees": 1}

if __name__ == "__main__":
    # 超参数设置
    max_epoch = 25
    batch_size = 16
    lr = 0.001
    log_interval = 10
    val_interval = 1
    classes = 2
    start_epoch = -1
    lr_decay_step = 7

    # ------------ step 1/5 数据 --------------
    data_dir = os.path.abspath(os.path.join(BASE_DIR, '..', "..", "data", "hymenoptera_data"))
    if not os.path.exists(data_dir):
        raise Exception("\n{} 不存在，请下载 07-02-数据-模型finetune.zip 放到\n{}下，并解压即可".
                        format(data_dir, os.path.dirname(data_dir)))

    train_dir = os.path.join(data_dir, "train")
    valid_dir = os.path.join(data_dir, "val")

    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])
    valid_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    train_data = AntsDataset(data_dir=train_dir, transform=train_transform)
    valid_data = AntsDataset(data_dir=valid_dir, transform=valid_transform)

    train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
    valid_loader = DataLoader(dataset=valid_data, batch_size=batch_size)

    # ------------ step 2/5: model --------------
    # 1/3 build the architecture (weights are randomly initialized here)
    resnet18_ft = models.resnet18()

    # 2/3 load pretrained ImageNet weights (transfer learning)
    flag_pre = True
    # flag_pre = False
    if flag_pre:
        path_pretrained_model = os.path.join(BASE_DIR, '..', '..', "data", "finetune_resnet18-5c106cde.pth")
        if not os.path.exists(path_pretrained_model):
            # FileNotFoundError is more precise than a bare Exception for a missing path.
            raise FileNotFoundError("\n{} 不存在，请下载 07-02-数据-模型finetune.zip 放到\n{}下，并解压即可".
                                    format(path_pretrained_model, os.path.dirname(path_pretrained_model)))
        # map_location="cpu" makes the checkpoint loadable even when it was
        # saved on a GPU and this run is CPU-only; the model is moved to the
        # target device below.
        state_dict_load = torch.load(path_pretrained_model, map_location="cpu")
        resnet18_ft.load_state_dict(state_dict_load)

    # The target dataset is small; updating the pretrained weights too
    # aggressively would destroy what was learned on ImageNet.
    # Method 1: freeze every existing layer so its parameters never update.
    # flag_m1 = True
    flag_m1 = False
    if flag_m1:
        for param in resnet18_ft.parameters():
            param.requires_grad = False
        print("conv1.weight[0, 0, ...]:\n {}".format(resnet18_ft.conv1.weight[0, 0, ...]))

    # 3/3 replace the FC head: keep the input feature count, emit `classes`
    # logits. The new Linear layer is trainable even if the backbone above
    # was frozen (its parameters are created after the freeze loop).
    num_ftrs = resnet18_ft.fc.in_features
    resnet18_ft.fc = nn.Linear(num_ftrs, classes)

    resnet18_ft.to(device)  # move model to the selected device

    # -------------- step 3/5: loss function --------------
    criterion = nn.CrossEntropyLoss()

    # -------------- step 4/5: optimizer ----------------
    # Method 2: give the pretrained conv backbone a smaller learning rate.
    flag = True
    # flag = False
    if flag:
        # Split parameters into two groups so the backbone trains at lr*0.1
        # while the freshly initialized FC head trains at the full lr.
        # Identify FC parameters by object identity; a set gives O(1)
        # membership tests (the original list made each check O(n)).
        fc_params_id = {id(p) for p in resnet18_ft.fc.parameters()}
        base_params = [p for p in resnet18_ft.parameters() if id(p) not in fc_params_id]
        optimizer = optim.SGD([
            {'params': base_params, 'lr': lr*0.1},        # backbone: small lr (use 0 to freeze)
            {'params': resnet18_ft.fc.parameters(), 'lr': lr}], momentum=0.9)
    else:
        optimizer = optim.SGD(resnet18_ft.parameters(), lr=lr, momentum=0.9)

    # Multiply every group's learning rate by 0.1 each lr_decay_step epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_decay_step, gamma=0.1)

    # ------------ step 5/5: training ----------------
    train_curve = list()   # per-iteration training loss
    valid_curve = list()   # per-epoch validation loss

    for epoch in range(start_epoch+1, max_epoch):
        loss_mean_train = 0.   # running loss over the current logging window
        correct_train = 0.
        total_train = 0

        resnet18_ft.train()  # training mode (enables dropout/batchnorm updates)
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = resnet18_ft(inputs)

            optimizer.zero_grad()
            loss = criterion(outputs, labels)
            loss.backward()

            optimizer.step()

            # accuracy bookkeeping; detach() keeps autograd out of the stats
            # (replaces the deprecated `.data` access)
            _, predicted = outputs.detach().max(1)
            total_train += labels.size(0)
            correct_train += (predicted == labels).sum().item()

            loss_mean_train += loss.item()
            train_curve.append(loss.item())
            if (i+1) % log_interval == 0:
                # BUGFIX: average over the logging window, then reset the
                # accumulator. The original divided the running sum by (i+1)
                # without resetting, so after the first window the variable
                # held a mean that then had further losses added to it —
                # every subsequent printed loss was wrong.
                loss_mean_train /= log_interval
                print("Training:Epoch[{:0>3}/{:0>3}] Iteration:[{:0>3}/{:0>3}] Loss:{:.4f} Acc:{:.2%}".format(
                    epoch+1, max_epoch, i+1, len(train_loader), loss_mean_train, correct_train/total_train,
                ))
                loss_mean_train = 0.

        lr_scheduler.step()  # per-epoch learning-rate decay

        # validate the model
        if (epoch+1) % val_interval == 0:
            resnet18_ft.eval()  # eval mode: fixed batchnorm stats, no dropout
            correct_val = 0.
            loss_val = 0.
            total_val = 0
            with torch.no_grad():  # no gradients needed for evaluation
                for j, data in enumerate(valid_loader):
                    inputs, labels = data
                    inputs, labels = inputs.to(device), labels.to(device)

                    outputs = resnet18_ft(inputs)
                    loss = criterion(outputs, labels)

                    _, predicted = torch.max(outputs, 1)
                    total_val += labels.size(0)
                    correct_val += (predicted == labels).sum().item()

                    loss_val += loss.item()

                # one full pass over the validation set finished
                loss_val_mean = loss_val / len(valid_loader)
                valid_curve.append(loss_val_mean)
                print("Valid:\tEpoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss:{:.4f} Acc:{:.2%}".format(
                    epoch+1, max_epoch, j+1, len(valid_loader), loss_val_mean, correct_val/total_val
                ))

    # Plot loss curves. Training loss is per-iteration while validation loss
    # is per-epoch, so validation x-coordinates are scaled by the number of
    # training iterations per epoch to share one x-axis.
    train_x = range(len(train_curve))
    train_y = train_curve

    train_iters = len(train_loader)
    valid_x = np.arange(1, len(valid_curve)+1) * val_interval * train_iters
    valid_y = valid_curve

    plt.plot(train_x, train_y, label='Train')
    plt.plot(valid_x, valid_y, label="Valid")

    plt.legend(loc="upper right")
    plt.ylabel("loss value")
    plt.xlabel("Iteration")
    plt.show()
