import matplotlib.pyplot as plt
import math
import torch
from torchvision.models import vgg16_bn
import os
from globalData import DEVICE, lr, class_num


# 显示plt
# Visualize feature maps with matplotlib
def show_plt(numpy_data, dim_num=25, mode="jet"):
    """Display the first `dim_num` channels of a (C, H, W) array as a grid of images.

    Args:
        numpy_data: array indexable as [channel, H, W] — assumes at least
            `dim_num` channels; TODO confirm against callers.
        dim_num: number of channels to plot (default 25).
        mode: matplotlib colormap name ("jet" heat map, "gray" grayscale).
    """
    # Smallest square grid that fits dim_num panels.
    # (The old int(sqrt)+1 over-allocated for perfect squares, e.g. 25 -> 6x6.)
    side = math.ceil(math.sqrt(dim_num))
    for i in range(dim_num):
        ax = plt.subplot(side, side, i + 1)
        # NOTE: the original set the title and then immediately cleared it with
        # set_title('') — keep the informative per-channel label instead.
        ax.set_title('Feature {}'.format(i))
        ax.axis('off')
        plt.imshow(numpy_data[i, :, :], cmap=mode)  # cmap=jet heat map / gray grayscale
    plt.show()


# load the model & lr ...
# load the model & lr ...
def load_checkpoint(folder, model, name=None, goon=False):
    """Create an optimizer/scheduler for `model` and optionally resume from a checkpoint.

    Args:
        folder: directory containing checkpoint files.
        model: the network to (re)load weights into.
        name: checkpoint filename; defaults to 'best_checkpoint.pt'.
        goon: if True and the checkpoint file exists, resume model, optimizer
            and scheduler state from it.

    Returns:
        (epoch, model, optimizer, lr_scheduler) — epoch is 0 for a fresh start,
        or the saved epoch + 1 when resuming.
    """
    print("init the model")
    epoch = 0
    print(f"init lr: {lr}")
    # Adam was chosen over the previously-tried SGD (see git history).
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=5, verbose=True,
        threshold=0.002, threshold_mode='rel'
    )
    checkpoint_name = os.path.join(
        folder, name if name is not None else 'best_checkpoint.pt')
    if goon and os.path.isfile(checkpoint_name):
        # map_location lets a GPU-saved checkpoint resume on the current DEVICE
        # (the old code crashed on CPU-only machines).
        checkpoint = torch.load(checkpoint_name, map_location=DEVICE)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
        epoch = checkpoint['epoch'] + 1
    return epoch, model, optimizer, lr_scheduler


# save the checkpoint
def save_checkpoint(epoch, model, optimizer, lr_scheduler, folder, best=False):
    if not os.path.exists(folder):
        os.makedirs(folder)
    if (epoch + 1) % 1 == 0 and epoch > 0:
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'lr_scheduler_state_dict': lr_scheduler.state_dict()
        }, os.path.join(folder, 'epoch_{}_time_checkpoint.pt'.format(epoch)))
    if best:
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'lr_scheduler_state_dict': lr_scheduler.state_dict()
        }, os.path.join(folder, 'best_checkpoint.pt'))
