import math
import time
import torch
import os
from math import cos, pi
import torch.nn as nn
import torch.nn.functional as F

"""
    Van训练用工具包
    Author: Dong Sun, Chuanjie Wu
"""


def save_model(model, optimizer, criterion, epoch, save_path, lr_scheduler=None, acc=None, loss=None):
    """Save a training checkpoint (model/optimizer/criterion state and epoch) to *save_path*.

    :param model: module whose ``state_dict`` is stored under ``'net'``
    :param optimizer: optimizer whose ``state_dict`` is stored under ``'optimizer'``
    :param criterion: loss module whose ``state_dict`` is stored under ``'criterion'``
    :param epoch: current epoch number, stored and used in the file name
    :param save_path: directory to write the checkpoint into (created if missing)
    :param lr_scheduler: optional scheduler; its ``state_dict`` is stored when given
    :param acc: optional validation accuracy; when given it is appended to the file name
    :param loss: optional validation loss; printed together with *acc* when both are given
    """
    # Build the common payload once; the scheduler entry is the only optional part.
    checkpoint = {
        'net': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
        'criterion': criterion.state_dict(),
    }
    if lr_scheduler is not None:
        checkpoint['lr_scheduler'] = lr_scheduler.state_dict()

    # makedirs (not mkdir): the original os.mkdir raised when save_path was a
    # nested path with a missing parent; exist_ok also avoids a TOCTOU race
    # with the isdir check it replaces.
    os.makedirs(save_path, exist_ok=True)
    if acc is not None:
        torch.save(checkpoint, save_path + f'/ckpt_{epoch}' + '_acc%.4f.pth' % acc)
    else:
        torch.save(checkpoint, save_path + f'/ckpt_{epoch}.pth')
    print(f'epoch {epoch}: checkpoint saved.')
    if acc is not None and loss is not None:
        print('model val_acc %.4f | val_loss %.6f' % (acc, loss))


class EarlyStopping:
    def __init__(self, save_path, patience=6, delta=0., mode='loss'):
        """Stop training once the monitored metric stops improving.

        :param save_path: directory the final checkpoint is written to on stop
        :param patience: how many consecutive non-improving epochs are tolerated
        :param delta: minimum change that counts as an improvement
        :param mode: monitor val_loss ('loss') or val_acc ('acc')
        Reference: https://blog.csdn.net/t18438605018/article/details/123646329
        """
        self.save_path = save_path
        self.patience = patience
        self.delta = delta
        self.epoch_count = 0
        self.mode = mode
        self.stop_flag = False
        self.top_score = None
        self.top_another = None

    def __call__(self, epoch, val_acc, val_loss, model, optimizer, criterion, lr_scheduler=None):
        # Normalize so that "higher score is better" regardless of mode; the
        # non-monitored metric is tracked alongside for the final report.
        if self.mode == 'loss':
            score, another_score = -val_loss, val_acc
        else:
            score, another_score = val_acc, -val_loss

        # First observation: just record it.
        if self.top_score is None:
            self.top_score, self.top_another = score, another_score
            return

        # Improved by at least delta: record and reset the stall counter.
        if score >= self.top_score + self.delta:
            self.top_score, self.top_another = score, another_score
            self.epoch_count = 0
            return

        # No improvement this epoch.
        self.epoch_count += 1
        if self.epoch_count <= self.patience:
            return

        # Patience exhausted: flag the stop, report the best metrics seen,
        # and save the current state as the final checkpoint.
        self.stop_flag = True
        if self.mode == 'loss':
            best_acc, best_loss = self.top_another, -self.top_score
        else:
            best_acc, best_loss = self.top_score, -self.top_another
        print('best acc: %.4f | best loss: %.6f at epoch %d' % (
            best_acc, best_loss,
            epoch - self.patience - 1) + f'. Early stopped at epoch {epoch}')

        save_model(model=model, optimizer=optimizer, criterion=criterion, epoch=epoch,
                   save_path=self.save_path, lr_scheduler=lr_scheduler,
                   acc=val_acc, loss=val_loss)

    def get_stop_flag(self):
        return self.stop_flag


#  elapsed-time helper
def timeSince(since):
    """Return the time elapsed since *since* (a ``time.time()`` stamp) as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)


# warm up + cosine LR
def cosine_warmup_lr(optimizer, current_epoch, max_epoch, warmup_epoch, lr_min=0., lr_max=5e-4, warm_flag=True):
    """Set all of *optimizer*'s param-group LRs for this epoch and return the LR.

    Linear warm-up from 0 to *lr_max* over the first *warmup_epoch* epochs
    (when *warm_flag* is True), then cosine annealing from *lr_max* down to
    *lr_min* over the remaining epochs.

    :param optimizer: optimizer whose ``param_groups`` LRs are overwritten
    :param current_epoch: current epoch index
    :param max_epoch: total number of epochs
    :param warmup_epoch: number of warm-up epochs
    :param lr_min: minimum learning rate (cosine floor)
    :param lr_max: maximum learning rate (warm-up target / cosine start)
    :param warm_flag: whether warm-up is enabled
    :return: the learning rate applied to the optimizer
    Reference: https://blog.csdn.net/qq_40268672/article/details/121145630
    """
    # Bug fix: the original computed _warmup_epoch but then used warmup_epoch
    # in the body, so warm_flag=False had no effect. With warm_flag=False the
    # warm-up phase is skipped entirely (current_epoch < 0 is never true).
    _warmup_epoch = warmup_epoch if warm_flag else 0
    if current_epoch < _warmup_epoch:
        lr = 1.0 * lr_max * current_epoch / _warmup_epoch
    else:
        lr = lr_min + (lr_max - lr_min) * (
                1 + cos(pi * (current_epoch - _warmup_epoch) / (max_epoch - _warmup_epoch))) * 0.5
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr


class LabelSmoothing(nn.Module):
    """Cross-entropy loss with label smoothing (fastai-style LabelSmoothingCrossEntropy).

    Reference 1: https://blog.csdn.net/weixin_41811314/article/details/115863126
    Reference 2: https://zhuanlan.zhihu.com/p/433048073?utm_oi=629375409599549440
    """
    def __init__(self, reduction='mean', eps=0.1):
        super(LabelSmoothing, self).__init__()
        self.eps = eps
        self.reduction = reduction

    def forward(self, output, target):
        """Compute the smoothed loss.

        :param output: raw logits of shape [batch_size, class_num]
        :param target: class indices of shape [batch_size]
        """
        num_classes = output.size()[-1]
        log_probs = F.log_softmax(output, dim=-1)
        # Uniform-smoothing term: mean (or sum) of -log p over all classes.
        if self.reduction == 'sum':
            smooth_term = -log_probs.sum()
        else:
            smooth_term = -log_probs.sum(dim=-1)
            if self.reduction == 'mean':
                smooth_term = smooth_term.mean()
        # Blend: eps mass spread uniformly, (1 - eps) on the true class (NLL).
        nll = F.nll_loss(log_probs, target, reduction=self.reduction)
        return smooth_term * self.eps / num_classes + (1 - self.eps) * nll


# EMA (exponential moving average) of model weights
class EMA:
    """Keep a decayed moving average ("shadow") of the trainable parameters.

    Typical use: register() once, update() after each optimizer step,
    apply_shadow() before evaluation and restore() afterwards.
    Reference: https://zhuanlan.zhihu.com/p/68748778
    """
    def __init__(self, model, decay=0.99):
        self.model = model
        self.decay = decay
        self.shadow = {}   # name -> averaged parameter tensor
        self.backup = {}   # name -> live parameter saved while shadow is applied

    def _trainable_params(self):
        # All four public methods only ever touch parameters with gradients.
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                yield name, param

    def register(self):
        for name, param in self._trainable_params():
            self.shadow[name] = param.data.clone()

    def update(self):
        for name, param in self._trainable_params():
            assert name in self.shadow
            blended = self.decay * self.shadow[name] + (1.0 - self.decay) * param.data
            self.shadow[name] = blended.clone()

    def apply_shadow(self):
        for name, param in self._trainable_params():
            assert name in self.shadow
            self.backup[name] = param.data
            param.data = self.shadow[name]

    def restore(self):
        for name, param in self._trainable_params():
            assert name in self.backup
            param.data = self.backup[name]
        self.backup = {}

