# @Author：cnzdy
# @Email：cn_zdy@126.com
# @Time: 2021/10/6 8:42
# @File: analysis.py
import torch
import os
from utils_analysis.log import get_logger
from torch.tensor import Tensor
import numpy as np
import matplotlib.pyplot as plt

from pytorch.classifier.options import Options

# Select the first CUDA GPU when one is available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def plot_training(save_path):
    """Decorator factory: run a training function, then plot its curves.

    The decorated function must return a 4-tuple
    ``(train_loss, valid_loss, train_acc, valid_acc)`` (one value per epoch);
    the curves are plotted via :func:`plot_loss_acc` and saved under
    *save_path*.

    :param save_path: directory passed through to ``plot_loss_acc``
    :return: the decorator
    """
    from functools import wraps  # local import keeps this edit self-contained

    def wrapper(func):
        @wraps(func)  # preserve func.__name__ / __doc__ on the wrapper
        def deco(*args, **kwargs):
            print('start training...')
            results = func(*args, **kwargs)
            train_loss, valid_loss, train_acc, valid_acc = results

            print("plot loss and acc curve...")
            plot_loss_acc(train_loss, valid_loss, train_acc, valid_acc, save_path)
            # Bug fix: propagate the training metrics to the caller
            # (the original wrapper returned None).
            return results
        return deco
    return wrapper


def plot_loss_acc(train_loss, valid_loss, train_acc, valid_acc, save_path='.'):
    """Plot training/validation loss and accuracy curves over epochs.

    Draws two figures (loss and accuracy). Each is saved as a PNG under
    *save_path* unless *save_path* is blank, and shown on screen.

    :param train_loss: per-epoch training loss values
    :param valid_loss: per-epoch validation loss values
    :param train_acc: per-epoch training accuracy values
    :param valid_acc: per-epoch validation accuracy values
    :param save_path: output directory; an empty/whitespace string disables saving
    :return: None
    """
    epochs = range(len(train_loss))

    # The two figures were duplicated code; draw both through one helper.
    _plot_pair(1, 'training_loss and valid_loss',
               epochs, train_loss, valid_loss,
               'training_loss', 'valid_loss', save_path, 'loss.png')
    _plot_pair(2, 'training_acc and valid_acc',
               epochs, train_acc, valid_acc,
               'training_acc', 'valid_acc', save_path, 'acc.png')


def _plot_pair(fig_num, title, epochs, train_series, valid_series,
               train_label, valid_label, save_path, filename):
    """Draw one figure with a train/valid curve pair, optionally saving it."""
    plt.figure(fig_num)
    plt.title(title)
    plt.plot(epochs, train_series, color='blue', marker='o', label=train_label)
    plt.plot(epochs, valid_series, color='green', marker='x', label=valid_label)
    plt.legend()
    plt.xlabel('the epochs number')
    plt.ylabel('rate')
    if save_path.strip() != '':
        plt.savefig(os.path.join(save_path, filename), dpi=200)
    plt.show()


def tensor_show(input: Tensor, title=None):
    """Display a single normalized image tensor with matplotlib.

    :param input: CHW image tensor, presumably normalized with the ImageNet
        mean/std (the values below) — confirm against the data pipeline
    :param title: optional figure title
    :return: None
    """
    # Bug fix: detach from autograd and move to CPU first — .numpy() raises
    # on CUDA tensors and on tensors with requires_grad=True.
    # Then reorder CHW -> HWC, which is what plt.imshow expects.
    img = input.detach().cpu().numpy().transpose((1, 2, 0))
    # Undo the normalization so pixel values land back in a displayable range.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img = np.clip(std * img + mean, 0, 1)
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # brief pause so the figure window can refresh


def log(func):
    """Decorator: after each call to *func*, log its gradients and metrics.

    *func* must return a dict containing the keys ``'epoch'``, ``'lr'``,
    ``'loss'``, ``'acc'`` and ``'optimizer'``; the dict is forwarded to
    :func:`log_grad` and returned to the caller unchanged.
    """
    from functools import wraps  # local import keeps this edit self-contained

    @wraps(func)  # bug fix: preserve func.__name__ / __doc__ on the wrapper
    def wrapper(*args, **kwargs):
        train_value = func(*args, **kwargs)
        log_grad(train_value['epoch'], train_value['lr'], train_value['loss'],
                 train_value['acc'], train_value['optimizer'])
        return train_value

    return wrapper


def log_grad(e, lr, loss, acc, optimizer):
    """Dump learning rate, loss/accuracy and parameter gradients to the log file.

    Runs only every 4th epoch to keep the log file small. Reads the model and
    the output directory from the project-wide ``Options``.
    """
    # Skip all but every 4th epoch.
    if e % 4:
        return

    model = Options.model
    logger = get_logger(os.path.join(Options.analysis_path, "log.txt"))

    # Headline metrics for this epoch.
    logger.info('epoch:[{}]\t learn rate: {:.3f}\t loss={:.5f}\t acc={:.3f}\n'.format(e, lr, loss, acc))
    # First feature layer's weights and their gradient as a spot check
    # (assumes the model exposes a `features` sequential — TODO confirm).
    logger.info(f"features[0] weight: {model.features[0].weight}, "
                f"grad: {model.features[0].weight.grad}\n")

    # Per-parameter report: requires_grad tells whether the parameter is
    # trainable (not frozen); grad is its current gradient value.
    for name, param in model.named_parameters():
        logger.info(f"-->name: {name} -->grad_requirs: "
                    f"{param.requires_grad} -->grad_value: {param.grad}\n")

    # Gradients as seen through the optimizer's first parameter group.
    grads = [p.grad for p in optimizer.param_groups[0]['params']]
    logger.info("optimizer grad")
    logger.info(grads)


if __name__ == '__main__':
    # Smoke test with synthetic data. Bug fix: the original called both
    # helpers with no arguments, which raised TypeError immediately
    # (both require positional arguments).
    demo_train_loss = [1.0, 0.8, 0.6, 0.5]
    demo_valid_loss = [1.1, 0.9, 0.7, 0.6]
    demo_train_acc = [0.50, 0.60, 0.70, 0.80]
    demo_valid_acc = [0.45, 0.55, 0.65, 0.70]
    # Empty save_path skips plt.savefig; the figures are only shown.
    plot_loss_acc(demo_train_loss, demo_valid_loss,
                  demo_train_acc, demo_valid_acc, save_path='')
    tensor_show(torch.rand(3, 32, 32), title='random demo image')
