import torch.optim as optim
import matplotlib.pyplot as plt
import importlib
import numpy as np
import math

class LambdaLR(object):
    """Wrap torch's LambdaLR: set each param group's lr to the initial lr
    multiplied by a per-epoch function (here 0.95 ** epoch)."""

    def __init__(self, optimizer):
        self.optimizer = optimizer
        # Multiplicative factor applied to the *initial* lr at each epoch;
        # one function per param group may be supplied as a list.
        self.lambda2 = lambda epoch: 0.95 ** epoch

    def __call__(self):
        return optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=[self.lambda2])

class MultiplicativeLR(object):
    """Wrap torch's MultiplicativeLR: multiply each param group's current lr
    by a constant factor (0.95) every scheduler step."""

    def __init__(self, optimizer):
        self.optimizer = optimizer
        # Factor the *current* lr is multiplied by on every step.
        self.lmbda = lambda epoch: 0.95

    def __call__(self):
        return optim.lr_scheduler.MultiplicativeLR(self.optimizer, lr_lambda=self.lmbda)

class StepLR(object):
    """Wrap torch's StepLR: decay every param group's lr by `gamma` once
    every `step_size` epochs. Note this decay can combine with other
    external changes to the lr."""

    def __init__(self, optimizer):
        self.optimizer = optimizer
        self.step_size = 30  # decay period: lr drops once every 30 epochs
        self.gamma = 0.1     # multiplicative decay factor (torch default: 0.1)

    def __call__(self):
        # With lr0 = 0.05 the schedule is:
        #   0.05   for epoch < 30
        #   0.005  for 30 <= epoch < 60
        #   0.0005 for 60 <= epoch < 90
        return optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.step_size, gamma=self.gamma
        )

class MultiStepLR(object):
    """Wrap torch's MultiStepLR: decay each param group's lr by `gamma`
    each time the epoch count reaches one of the `milestones`. Note this
    decay can combine with other external changes to the lr."""

    def __init__(self, optimizer):
        self.milestones = [30, 80]  # epoch indices; must be increasing
        self.gamma = 0.1            # multiplicative decay factor (torch default: 0.1)
        self.optimizer = optimizer

    def __call__(self):
        # With lr0 = 0.05 and the default milestones the schedule is:
        #   0.05   for epoch < 30
        #   0.005  for 30 <= epoch < 80
        #   0.0005 for epoch >= 80
        # BUG FIX: previously hardcoded milestones=[30, 80] and gamma=0.1 here,
        # silently ignoring the values configured in __init__.
        return optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=self.milestones, gamma=self.gamma
        )

class ExponentialLR(object):
    """Wrap torch's ExponentialLR: decay each param group's lr by `gamma`
    every epoch (lr = lr0 * gamma ** epoch)."""

    def __init__(self, optimizer):
        self.gamma = 0.1  # multiplicative decay factor applied each epoch
        self.optimizer = optimizer

    def __call__(self):
        # BUG FIX: previously hardcoded gamma=0.1 here, silently ignoring
        # the value configured in __init__.
        return optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=self.gamma)

class CosineAnnealingLR(object):
    """Wrap torch's CosineAnnealingLR: anneal each param group's lr along a
    cosine curve from the initial lr down to `eta_min` over `T_max` steps."""

    def __init__(self, optimizer, T_max=100, eta_min=1e-5):
        self.optimizer = optimizer
        self.T_max = T_max      # half-period of the cosine (maximum iterations)
        self.eta_min = eta_min  # lower bound on the learning rate

    def __call__(self):
        return optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=self.T_max, eta_min=self.eta_min
        )

class ReduceLROnPlateau(object):
    '''
    当指标停止改善时，降低学习率。一旦学习停滞，模型通常会受益于将学习率降低2-10倍。该调度程序读取度量标准数量，如果对于“耐心”的时期没有看到改善，则学习率会降低。
    Examples:
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        scheduler = ReduceLROnPlateau(optimizer, 'min')
        for epoch in range(10):
             train(...)
             val_loss = validate(...)
             # Note that step should be called after validate()
             scheduler.step(val_loss)

    这个要特别一些，要有val_loss才好进行更新
    '''
    def __init__(self, optimizer):
        self.optimizer = optimizer
        self.mode = 'min'    # 'min'/'max' 在最小模式下，当监视的数量停止减少时，lr将减小；在最大模式下，当监视的数量停止增加时，它将减少
        self.factor = 0.1   # 学习率降低的因数。new_lr = lr *因子。默认值：0.1
        self.patience = 10      # 没有改善的时期数，此后学习率将降低。例如，如果 耐心= 2，那么我们将忽略前两个时期而没有任何改善，并且如果损失仍然没有改善，则只会在第三个时期之后降低LR。默认值：10
        self.threshold = 1e-4   # –用于测量新的最佳阈值，仅关注重大变化。默认值：1e-4。
        self.threshold_mode = 'rel'     #  rel，abs之一。在rel模式下，“ max”模式下的dynamic_threshold = best *（1 + threshold），在min模式下，dynamic_threshold = best *（1-threshold）。在绝对模式下，dynamic_threshold =最佳+ 最大模式下的阈值或最佳-最小模式下的阈值。默认值：“ rel”。
        self.cooldown = 0       # 减少lr后恢复正常运行之前要等待的时期数。默认值：0
        self.min_lr = 0     # 标量或标量列表。所有参数组或每个组的学习率的下限。默认值：0
        self.eps = 1e-8     # 最小衰减应用于lr。如果新旧lr之间的差异小于eps，则忽略更新。默认值：1e-8。

    def __call__(self, ):
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode= self.mode)
        return scheduler

class CosineDecayLR(object):
    """A cosine decay scheduler driven by *steps*, not epochs, with an
    optional linear warmup phase at the start of training."""

    def __init__(self, optimizer, T_max=100, lr_min=1e-4, lr_max=1e-3, warmup=20):
        self.optimizer = optimizer
        self._t_max = T_max    # total steps (steps = epochs * batches)
        self._lr_min = lr_min  # floor of the schedule
        self._lr_max = lr_max  # lr_max is the initial (peak) lr
        # Warmup: lr ramps linearly from 0 to lr_max over the first
        # `warmup` steps; 0 disables warmup entirely.
        self._warmup = warmup

    def step(self, t):
        """Write the lr for global step `t` into every param group."""
        if self._warmup and t < self._warmup:
            new_lr = self._lr_max / self._warmup * t
        else:
            decay_span = self._t_max - self._warmup
            phase = (t - self._warmup) / decay_span * np.pi
            new_lr = self._lr_min + 0.5 * (self._lr_max - self._lr_min) * (1 + np.cos(phase))
        for group in self.optimizer.param_groups:
            group["lr"] = new_lr

class Burnin(object):
    """LambdaLR-based schedule: a polynomial warmup ("burn-in") ramp followed
    by fixed step decays at epoch milestones."""

    def __init__(self, optimizer):
        # BUG FIX: the optimizer was never stored; __call__ previously
        # referenced a global `optimizer`, so this class only worked inside
        # the demo script and raised NameError anywhere else.
        self.optimizer = optimizer
        self.burn_in = 20      # warmup length: factor ramps as (i / burn_in) ** 4
        self.steps = [40, 80]  # decay milestones: x0.1 after steps[0], x0.01 after steps[1]

    def burnin_schedule(self, i):
        """Return the lr multiplier for epoch/step index `i`."""
        if i < self.burn_in:
            factor = pow(i / self.burn_in, 4)
        elif i < self.steps[0]:
            factor = 1.0
        elif i < self.steps[1]:
            factor = 0.1
        else:
            factor = 0.01
        return factor

    def __call__(self):
        return optim.lr_scheduler.LambdaLR(self.optimizer, self.burnin_schedule)

class yolov5s(object):
    """Ramp the lr from 0 up toward 0.9x the initial lr following
    0.9 * (1 - exp(-x / 20)).

    NOTE(review): the original comment labelled this "cosine", but the
    curve is an exponential saturation, not a cosine.
    """

    def __init__(self, optimizer, lrf=0.2):
        self.optimizer = optimizer
        self.lrf = lrf  # kept for interface compatibility; unused by the lambda below
        self.lf = lambda x: 0.9 * (1 - math.exp(-x / 20))

    def __call__(self):
        return optim.lr_scheduler.LambdaLR(self.optimizer, self.lf)

if __name__ == '__main__':
    from model.yolo4 import YoloBody

    net = YoloBody()
    optimizer = optim.Adam(net.parameters(), 1e-3, weight_decay=5e-4)

    # Wrappers around torch.optim.lr_scheduler vs. hand-rolled schedulers.
    torch_names = ['LambdaLR', 'MultiplicativeLR', 'StepLR', 'MultiStepLR',
                   'ExponentialLR', 'CosineAnnealingLR', 'ReduceLROnPlateau']
    # BUG FIX: the class is named `Burnin`, not `burnin`.
    myself_names = ['CosineDecayLR', 'Burnin']

    # Pick one scheduler to visualize:
    # step = len(train_loader)
    # scheduler = LambdaLR(optimizer)()
    # scheduler = MultiplicativeLR(optimizer)()
    # scheduler = StepLR(optimizer)()
    # scheduler = MultiStepLR(optimizer)()
    # scheduler = ExponentialLR(optimizer)()
    # scheduler = CosineAnnealingLR(optimizer)()
    # scheduler = ReduceLROnPlateau(optimizer)()
    # scheduler = CosineDecayLR(optimizer)        # step-driven: call scheduler.step(t)
    # scheduler = Burnin(optimizer)()             # step-driven
    name = 'yolov5s'
    scheduler = yolov5s(optimizer)()

    lrs = []
    for epoch in range(300):
        optimizer.step()
        # BUG FIX: `scheduler.step(epoch)` is deprecated for torch schedulers;
        # calling step() once per epoch yields the same lr sequence.
        scheduler.step()
        lrs.append(optimizer.param_groups[0]['lr'])

    plt.figure()
    # BUG FIX: the label was hardcoded to 'LambdaLR' regardless of the
    # scheduler actually being plotted.
    plt.plot(lrs, label=name)
    plt.xlabel('steps')
    plt.ylabel('LR')
    plt.title(name)
    plt.tight_layout()
    plt.show()
    # plt.savefig(f"lr_photo/{name}.jpg", dpi=300)