import math
import torch

import torch.optim as optim

import argparse

import matplotlib.pyplot as plt

from torchvision.models import resnet50

# Ref: https://blog.csdn.net/qq_36560894/article/details/114004799
# LambdaLR semantics: new_lr = lr_lambda(last_epoch) * base_lr
# When last_epoch == -1, base_lr is the lr configured on the optimizer.
# Each call to scheduler.step() advances last_epoch by 1.
def set_lr_policy(args):
    """Build an ``lr_lambda`` callable for ``torch.optim.lr_scheduler.LambdaLR``.

    LambdaLR computes ``new_lr = lr_lambda(last_epoch) * base_lr``, so every
    policy below returns a multiplicative factor relative to ``args.lr``.

    Args:
        args: namespace providing ``lr`` (base learning rate),
            ``warmup_epochs``, ``warmup_init_lr``, ``steps`` (list of decay
            milestones) and ``lr_policy`` ('warmup', 'step' or 'cos').

    Returns:
        A function mapping epoch (int) -> lr multiplier (float).

    Raises:
        ValueError: if ``args.lr_policy`` is not a known policy name.
    """
    # Capture settings as locals so the returned closure is immune to later
    # calls of set_lr_policy. The original stored these in mutable globals,
    # which would silently retarget previously-returned lambdas.
    base_lr = args.lr
    warmup_epochs = args.warmup_epochs
    warmup_init_lr = args.warmup_init_lr
    steps = args.steps

    def _warmup_factor(epoch):
        # Linear ramp from warmup_init_lr up to base_lr over warmup_epochs.
        return (base_lr * epoch / warmup_epochs + warmup_init_lr) / base_lr

    def warmup(epoch):
        # Warmup ramp, then hold the base lr forever.
        return _warmup_factor(epoch) if epoch < warmup_epochs else 1

    def step(epoch):
        # Warmup ramp, then decay by 10x at each milestone past warmup.
        if epoch < warmup_epochs:
            return _warmup_factor(epoch)
        passed = sum(1 for s in steps if epoch >= s and s > warmup_epochs)
        return 0.1 ** passed

    def cos(epoch):
        # Warmup ramp, then a cosine oscillation around 1.
        if epoch < warmup_epochs:
            return _warmup_factor(epoch)
        # NOTE(review): this is not standard cosine annealing — the factor
        # oscillates in [0.5, 1.5] with a very long period. Behavior kept
        # exactly as originally written; confirm intent before changing.
        return 0.5 * math.cos(epoch / 300) + 1

    policies = {'warmup': warmup, 'step': step, 'cos': cos}
    try:
        return policies[args.lr_policy]
    except KeyError:
        # Fail loudly instead of returning None, which the original did and
        # which only surfaces later as an opaque error inside LambdaLR.
        raise ValueError(f'unknown lr_policy: {args.lr_policy!r}') from None
    
if __name__ == "__main__":
    # Parse schedule hyper-parameters, build a ResNet-50 + optimizer pair,
    # then sweep 500 epochs of scheduler.step() and plot the resulting lr.
    parser = argparse.ArgumentParser()
    parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer: sgd, adam')
    parser.add_argument('--lr', type=float, default=0.01, help='Init learning rate')
    parser.add_argument('--warmup_epochs', type=int, default=10)
    # BUG FIX: was type=int with a float default — any CLI-supplied value
    # such as 0.001 would be rejected/truncated. The warmup start lr is a float.
    parser.add_argument('--warmup_init_lr', type=float, default=0.001)
    # BUG FIX: was type=list, which splits the argument string into single
    # characters ('200' -> ['2', '0', '0']). nargs='+' with type=int parses
    # a space-separated list of integer milestones correctly.
    parser.add_argument('--steps', type=int, nargs='+', default=[200, 250, 300])
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--lr_policy', type=str, default='step', help='warmup, cos, step')

    args = parser.parse_args()

    model = resnet50(pretrained=True)

    # Both optimizers are constructed eagerly; that is cheap and keeps the
    # selection a simple dict lookup, matching the original behavior.
    optimizer = {
        'adam': optim.Adam(model.parameters(), args.lr,
                           betas=(args.momentum, 0.999),
                           weight_decay=args.weight_decay),
        'sgd': optim.SGD(model.parameters(), args.lr,
                         momentum=args.momentum, nesterov=True,
                         weight_decay=args.weight_decay),
    }[args.optimizer]

    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=set_lr_policy(args), last_epoch=-1)

    epochs = []
    lrs = []
    for epoch in range(500):
        # optimizer.step() before scheduler.step(), per the PyTorch-required
        # ordering (calling the scheduler first triggers a warning and skips
        # the first lr value).
        optimizer.step()
        scheduler.step()
        # BUG FIX: get_lr() is deprecated and returns a distorted value when
        # called outside scheduler.step(); get_last_lr() is the public API
        # for the most recently computed learning rate.
        lr = scheduler.get_last_lr()
        print(lr[0])
        lrs.append(lr[0])
        epochs.append(epoch)

    plt.plot(epochs, lrs, 'r')
    plt.title('learning rate')
    plt.xlabel('epoch')
    plt.ylabel('lr')
    # BUG FIX: savefig must run before show() — show() blocks and destroys
    # the figure on close, so the original saved a blank image afterwards.
    plt.savefig('./LearningRate.jpg')
    plt.show()