import os
from collections import OrderedDict
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

#----------------------------------------#
#   获取学习率
#----------------------------------------#
def get_optim_lr(optimizer):
    """Return the learning rate of the optimizer's first param group.

    Returns None when the optimizer has no param groups (matches the
    original fall-through behavior).
    """
    return next((group['lr'] for group in optimizer.param_groups), None)


def set_optim_lr(optimizer, lr):
    """Set the learning rate of every param group in the optimizer to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr


#----------------------------------------#
#   获取scheduler
#----------------------------------------#
def get_scheduler(optimizer, step_size, gamma):
    """Build a StepLR schedule: multiply the lr by *gamma* every *step_size* epochs."""
    scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=-1)
    return scheduler


#----------------------------------------#
#   获取优化器
#----------------------------------------#
def get_optim(name):
    """Look up a torch optimizer class by name (case-insensitive).

    Args:
        name: one of 'sgd', 'adamw', 'adam', 'asgd', 'rmsprop'.

    Returns:
        The corresponding ``torch.optim`` optimizer *class* (not an instance).

    Raises:
        NotImplementedError: if *name* is not a supported optimizer.
    """
    # Dispatch table beats the if/elif ladder; previously an unsupported
    # name raised a bare NotImplementedError with no message.
    optimizers = {
        'sgd': optim.SGD,
        'adamw': optim.AdamW,
        'adam': optim.Adam,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
    }
    try:
        return optimizers[name.lower()]
    except KeyError:
        raise NotImplementedError(
            "unsupported optimizer: {!r} (choose from {})".format(
                name, sorted(optimizers))) from None
    

#----------------------------------------#
#   模型保存与加载
#----------------------------------------#
def save_whole_model(path, model):
    """Serialize the entire model object (architecture + weights) to *path*."""
    torch.save(model, path)


def load_whole_model(path, map_location=None):
    """Load a model saved with ``save_whole_model``.

    Args:
        path: checkpoint file path.
        map_location: optional device remapping forwarded to ``torch.load``
            (e.g. ``"cpu"`` to load a GPU-saved model on a CPU-only machine).
            Default ``None`` keeps the original behavior.

    Returns:
        The deserialized model object.
    """
    # NOTE(review): un-pickling a whole model executes arbitrary code —
    # only load checkpoints from a trusted source.
    model = torch.load(path, map_location=map_location)
    return model


def save_state_dict(path,
                    model,
                    cfgs=None,
                    max_acc=None,
                    min_loss=None,
                    epoch=None,
                    optim=None,
                    scheduler=None,
                    is_distributed=False):
    """Save a training checkpoint (weights + bookkeeping) to *path*.

    Args:
        path: destination file path.
        model: the model whose ``state_dict`` is saved.
        cfgs: optional training config object, stored under the "args" key.
        max_acc: optional best accuracy so far.
        min_loss: optional best loss so far.
        epoch: optional epoch index.
        optim: optional optimizer whose state is saved alongside the weights.
        scheduler: optional lr scheduler whose state is saved.
        is_distributed: True when *model* is wrapped (e.g. by
            DistributedDataParallel), in which case the inner
            ``model.module`` weights are saved instead.
    """
    if not is_distributed:
        state_dict = model.state_dict()
    else:
        # DDP wraps the real model in `.module`; save the unwrapped weights.
        state_dict = model.module.state_dict()
    state = {
        "module": state_dict,
        "max_acc": max_acc,
        "min_loss": min_loss,
        "epoch": epoch,
        # Bug fix: optim defaults to None but its state_dict() was called
        # unconditionally, crashing with AttributeError when omitted.
        "optim": optim.state_dict() if optim is not None else None,
        "scheduler": scheduler.state_dict() if scheduler is not None else None,
        "args": cfgs,
    }
    torch.save(state, path)


def load_state_dict(path, model,
                    optim=None,
                    scheduler=None):
    """Restore a checkpoint written by ``save_state_dict``.

    Args:
        path: checkpoint file path.
        model: model instance to load the saved weights into (in place).
        optim: optional optimizer to restore; skipped when None or when the
            checkpoint carries no optimizer state.
        scheduler: optional lr scheduler to restore; same skip rules.

    Returns:
        Tuple of ``(model, cfgs, epoch, min_loss, optim, scheduler)`` where
        ``cfgs``/``epoch``/``min_loss`` come from the checkpoint.
    """
    # Always materialize on CPU; caller moves the model to its device.
    state = torch.load(path, map_location="cpu")
    model.load_state_dict(state['module'])
    min_loss = state['min_loss']
    epoch = state['epoch']
    cfgs = state['args']
    # Guard against checkpoints saved without optimizer/scheduler state:
    # calling load_state_dict(None) would raise.
    if optim is not None and state['optim'] is not None:
        optim.load_state_dict(state['optim'])
    if scheduler is not None and state['scheduler'] is not None:
        scheduler.load_state_dict(state['scheduler'])
    return model, cfgs, epoch, min_loss, optim, scheduler


#----------------------------------------#
#   新建保存路径
#----------------------------------------#
def build_save_dir(save_dir='weights'):
    """Compute the next run directory path under *save_dir*.

    Scans *save_dir* for existing ``run_<N>`` entries and returns
    ``<save_dir>/run_<max(N)+1>`` (``run_1`` when none exist). The parent
    *save_dir* is created if missing; the returned run directory itself is
    NOT created — the caller is expected to do that.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    run_ids = []
    for entry in os.listdir(save_dir):
        prefix, _, suffix = entry.rpartition('_')
        # Only count well-formed "run_<int>" names: the original matched any
        # name containing "run" and crashed on non-numeric suffixes.
        if prefix == 'run' and suffix.isdigit():
            run_ids.append(int(suffix))
    # Bug fix: len(run_ids)+1 collides with an existing run_<N> once any
    # earlier run directory has been deleted; max()+1 is always fresh.
    next_id = max(run_ids, default=0) + 1
    return os.path.join(save_dir, 'run_{}'.format(next_id))