'''
Function:
    Build the optimizer
Author:
    Zhenchao Jin
'''
import copy
# import torch.nn as nn
# import torch.optim as optim
import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import nn, ops, Parameter, Tensor

from .paramsconstructor import DefaultParamsConstructor, LayerDecayParamsConstructor


'''BuildOptimizer'''
def BuildOptimizer(model, optimizer_cfg):
    '''Build a LuoJiaNet optimizer over the model's trainable parameters.

    Args:
        model: a network exposing ``trainable_params()`` (luojianet Module).
        optimizer_cfg (dict): must contain ``'type'`` — one of ``'sgd'``,
            ``'adam'``, ``'adamw'``, ``'adadelta'``. ``'lr'`` is accepted as
            an alias for ``'learning_rate'``. All remaining keys (e.g.
            ``momentum``, ``weight_decay``) are forwarded to the optimizer
            constructor, so each optimizer type receives only the arguments
            the config actually specifies.

    Returns:
        An instantiated luojianet optimizer.

    Raises:
        KeyError: if ``optimizer_cfg['type']`` names an unsupported optimizer.
    '''
    # define the supported optimizers
    supported_optimizers = {
        'sgd': nn.SGD,
        'adam': nn.Adam,
        'adamw': nn.AdamWeightDecay,
        'adadelta': nn.Adadelta,
    }
    # deep-copy so the caller's config dict is not mutated by the pops below
    optimizer_cfg = copy.deepcopy(optimizer_cfg)
    optimizer_type = optimizer_cfg.pop('type')
    # 'params_rules'/'filter_params' belong to the params-constructor
    # machinery (see paramsconstructor.py) which is not wired up yet —
    # strip them so they are not forwarded to the optimizer constructor.
    optimizer_cfg.pop('params_rules', None)
    optimizer_cfg.pop('filter_params', None)
    # configs use 'lr'; luojianet optimizers expect 'learning_rate'
    if 'lr' in optimizer_cfg:
        optimizer_cfg['learning_rate'] = optimizer_cfg.pop('lr')
    # TODO(review): plug in DefaultParamsConstructor /
    # LayerDecayParamsConstructor to build per-group params once verified.
    params = [{'params': model.trainable_params()}]
    # Forward the remaining config keys generically — previously 'momentum'
    # was always passed, which broke every optimizer except SGD.
    return supported_optimizers[optimizer_type](params=params, **optimizer_cfg)
