import autograd


class Optimizer(object):
    r"""Base class for all optimizers.

    Holds one or more *parameter groups*; each group is a dict with a
    ``'params'`` list plus per-group hyperparameters, falling back to the
    defaults passed at construction.

    Usage::

        optimizer = SGD(model.parameters(), lr=learning_rate)

        optimizer = SGD([
                    {'params': model.base.parameters()},
                    # use a different learning rate for the fc parameters
                    {'params': model.fc.parameters(), 'lr': 1e-3}
                ], lr=1e-2, momentum=0.9)
    """

    def __init__(self, params, defaults):
        """
        Args:
            params: an iterable of Tensors, or an iterable of dicts each
                containing a 'params' key (a parameter group).
            defaults: dict of default hyperparameter values applied to every
                group that does not override them.

        Raises:
            TypeError: if ``params`` is a single Tensor instead of an iterable.
            ValueError: if ``params`` is empty.
        """
        if isinstance(params, autograd.Tensor):
            # type(...) must be converted to str before concatenation,
            # otherwise this line itself raises an unrelated TypeError.
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Tensors or dicts, but got " +
                            type(params).__name__)

        # Defaults must be set BEFORE add_param_group runs, because
        # add_param_group reads self.defaults to fill in missing options.
        self.defaults = defaults
        # Backward-compatible alias: earlier versions exposed `self.default`.
        self.default = defaults

        self.param_groups = []
        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            # A bare iterable of Tensors becomes a single group.
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group)

    def zero_grad(self):
        """Clear the gradients of all parameters in every group."""
        for group in self.param_groups:
            for p in group['params']:
                p.zero_grad()

    def step(self):
        """Perform a single optimization step; implemented by subclasses."""
        raise NotImplementedError

    def add_param_group(self, param_group: dict):
        """Add a parameter group to this optimizer.

        Args:
            param_group: dict with a 'params' key holding a Tensor or an
                iterable of Tensors; any hyperparameter not present is filled
                from ``self.defaults``.

        Raises:
            TypeError: if ``param_group`` is not a dict.
            KeyError: if ``param_group`` lacks a 'params' key.
        """
        # raise (not assert) so validation survives `python -O`
        if not isinstance(param_group, dict):
            raise TypeError("param group must be a dict")
        params = param_group['params']

        # Normalize to a list of Tensors.
        if isinstance(params, autograd.Tensor):
            param_group['params'] = [params]
        else:
            param_group['params'] = list(params)

        # Fill any option the group did not specify from the defaults.
        for name, default in self.defaults.items():
            param_group.setdefault(name, default)

        self.param_groups.append(param_group)

