# -*- coding: utf-8 -*-
"""
@file name      : alexnet_loss_acc.py
@author         : QuZhang
@date           : 2021-1-1
@brief          : 监控alexnet训练二分类的loss和acc
"""
from tools.common_tools import set_seed
import os
from torchvision.transforms import transforms
from tools.my_dataset import RMBDataset
from torch.utils.data import DataLoader
import torchvision.models as models
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import torch
import numpy as np
import matplotlib.pyplot as plt
# Allow duplicate OpenMP runtimes to coexist — a common workaround for the
# "libiomp5 already initialized" abort when MKL-linked libs load together.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


# Resolve data paths relative to this file; pin the RNG seeds so runs repeat.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
set_seed(1)

# Folder-name -> class-index mapping: "1"-yuan notes are class 0, "100" class 1.
rmb_label = {name: idx for idx, name in enumerate(("1", "100"))}

if __name__ == "__main__":
    # ---------------- hyper-parameters ----------------
    MAX_EPOCH = 10
    BATCH_SIZE = 16
    LR = 0.01
    log_interval = 10  # print training statistics every `log_interval` iterations
    val_interval = 1   # run validation every `val_interval` epochs

    # -------------- step 1/5: data --------------
    split_dir = os.path.abspath(os.path.join(BASE_DIR, '..', "data", "rmb_split"))
    if not os.path.exists(split_dir):
        raise Exception(r"数据 {} 不存在, 回到lesson-06\1_split_dataset.py生成数据".format(split_dir))
    train_dir = os.path.join(split_dir, "train")
    valid_dir = os.path.join(split_dir, "valid")

    # ImageNet channel statistics — the conventional normalization for AlexNet.
    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]

    # AlexNet expects 224x224 RGB inputs.
    train_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(224),
        transforms.RandomGrayscale(p=0.8),
        transforms.ToTensor(),
        transforms.Normalize(mean=norm_mean, std=norm_std),
    ])

    valid_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=norm_mean, std=norm_std),
    ])

    train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
    valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

    train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
    valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

    # ---------------- step 2/5: model ----------------
    alexnet = models.alexnet(num_classes=2)

    # ---------------- step 3/5: loss function ----------------
    # FIX: the original fed the (non-differentiable) argmax predictions —
    # re-wrapped in a fresh requires_grad tensor detached from the graph —
    # into BCEWithLogitsLoss, so no gradient ever reached the network and it
    # could not learn.  CrossEntropyLoss on the raw (N, 2) logits with integer
    # class labels is the correct formulation for this two-class setup.
    loss_func = nn.CrossEntropyLoss()

    # ---------------- step 4/5: optimizer ----------------
    optimizer = optim.Adam(alexnet.parameters(), lr=LR)
    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, verbose=True)

    # ---------------- step 5/5: training ----------------
    train_curve = list()
    valid_curve = list()
    iter_count = 0
    log_dir = "./alexnet_loss_acc"
    writer = SummaryWriter(log_dir=log_dir, comment="loss_acc", filename_suffix="nb")

    for epoch in range(MAX_EPOCH):

        loss_mean = 0.       # running loss over the last `log_interval` batches
        epoch_loss_sum = 0.  # total loss over the whole epoch (for the scheduler)
        correct = 0
        total = 0
        alexnet.train()
        for i, data in enumerate(train_loader):
            iter_count += 1
            inputs, labels = data

            optimizer.zero_grad()
            outputs = alexnet(inputs)          # (N, 2) raw logits
            loss = loss_func(outputs, labels)  # gradient flows through the logits
            loss.backward()
            optimizer.step()

            # accuracy bookkeeping — detach so no graph is kept alive
            _, predictions = torch.max(outputs.detach(), 1)
            total += labels.size(0)
            correct += (predictions == labels).sum().item()  # FIX: .item(), not .numpy()

            loss_mean += loss.item()
            epoch_loss_sum += loss.item()
            train_curve.append(loss.item())  # record per-iteration loss

            if (i + 1) % log_interval == 0:
                loss_mean /= log_interval
                print("Training:Epoch[{:0>3}/{:0>3}] Iteration:[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                    epoch+1, MAX_EPOCH, iter_count, len(train_loader), loss_mean, correct/total
                ))
                loss_mean = 0.

            # log per-iteration loss and running accuracy to TensorBoard
            writer.add_scalars("Loss", {"Train": loss.item()}, iter_count)
            writer.add_scalars("Accuracy", {"Train": correct/total}, iter_count)

        # FIX: step the plateau scheduler with the epoch-mean training loss,
        # not whatever batch happened to come last in the epoch.
        lr_scheduler.step(epoch_loss_sum / len(train_loader))

        # validate once every `val_interval` epochs
        if (epoch + 1) % val_interval == 0:
            alexnet.eval()
            iter_valid = 0
            loss_val = 0.
            total_val = 0
            correct_val = 0

            with torch.no_grad():
                for j, data in enumerate(valid_loader):
                    iter_valid += 1
                    inputs, labels = data
                    outputs = alexnet(inputs)
                    loss = loss_func(outputs, labels)  # same loss as training
                    _, predictions = torch.max(outputs, 1)

                    total_val += labels.size(0)
                    correct_val += (predictions == labels).sum().item()
                    loss_val += loss.item()

            loss_val_epoch = loss_val / len(valid_loader)
            valid_curve.append(loss_val_epoch)
            print("Valid:Epoch[{:0>3}/{:0>3}] Iteration:[{:0>3}/{:0>3}] Loss:{:.4f} Acc:{:.2%}".format(
                epoch+1, MAX_EPOCH, iter_valid, len(valid_loader), loss_val_epoch, correct_val / total_val
            ))

            # FIX: log validation accuracy every validation pass (the original
            # logged it only once, after plt.show() blocked the script)
            writer.add_scalars("Loss", {"valid": loss_val_epoch}, iter_count)
            writer.add_scalars("Accuracy", {"Valid": correct_val / total_val}, iter_count)

    writer.close()  # FIX: flush pending events and release the file handle

    # ---------------- plot loss curves ----------------
    train_x = range(len(train_curve))
    train_y = train_curve

    train_iters = len(train_loader)
    # valid_curve holds one point per validation epoch; convert epoch indices
    # to iteration coordinates so both curves share the x-axis
    valid_x = np.arange(1, len(valid_curve) + 1) * train_iters * val_interval
    valid_y = valid_curve

    plt.plot(train_x, train_y, label='Train')
    plt.plot(valid_x, valid_y, label='Valid')

    plt.legend(loc='upper right')
    plt.ylabel('loss value')
    plt.xlabel('Iteration')
    plt.show()