import torch
import warnings
import numpy as np

from typing import Optional, List
from .valid_tools import list2dict

def make_optimizer(model, cfg):
    """Build a torch optimizer with per-module learning-rate / weight-decay groups.

    Each trainable parameter gets its own param group. Parameters whose name
    contains 'backbone', 'fuser' or 'watcher' use the matching module-specific
    lr / weight_decay from ``cfg.optim`` (falling back to the base values when
    the module-specific setting is <= 0); 'bias' parameters are additionally
    scaled by the bias factors. ``initial_lr`` is recorded on every group so a
    scheduler can later be resumed with ``last_epoch != -1``.

    Args:
        model: module providing ``named_parameters()``.
        cfg: configuration object; reads ``cfg.optim.*`` (base/backbone/asff/pam
            lr and weight decay, bias factors, optimizer name and kwargs).

    Returns:
        A constructed ``torch.optim`` optimizer.

    Raises:
        ValueError: for non-positive base lr / weight decay, or an optimizer
            name that does not exist in ``torch.optim``.
    """
    if cfg.optim.base_lr <= 0:
        raise ValueError("argument 'base_lr' must be greater than zero")
    if cfg.optim.base_weight_decay <= 0:
        raise ValueError("argument 'base_weight_decay' must be greater than zero")

    # (substring of the parameter name, cfg.optim attribute prefix) — order
    # matters: first match wins, mirroring the original if/elif chain
    overrides = (("backbone", "backbone"), ("fuser", "asff"), ("watcher", "pam"))

    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        # when a module-specific lr or weight_decay is <= 0, fall back to the
        # base_lr / base_weight_decay
        lr = cfg.optim.base_lr
        decay = cfg.optim.base_weight_decay
        for substring, prefix in overrides:
            if substring in key:
                module_lr = getattr(cfg.optim, prefix + "_lr")
                module_decay = getattr(cfg.optim, prefix + "_weight_decay")
                if module_lr > 0:
                    lr = module_lr
                if module_decay > 0:
                    decay = module_decay
                break
        if "bias" in key:
            lr = lr * cfg.optim.bias_lr_factor
            decay = decay * cfg.optim.bias_decay_factor
        params.append({"params": [value], "lr": lr, "weight_decay": decay})

    kwargs = list2dict(cfg.optim.optim_kwargs)
    optimizer_cls = getattr(torch.optim, cfg.optim.name, None)
    if optimizer_cls is None:
        # note: optimizers live in 'torch.optim' (the old message pointed at a
        # non-existent 'torch.optimizer' module)
        raise ValueError(f"cannot find specified optimizer {cfg.optim.name} "
                         f"in module 'torch.optim'")
    optimizer = optimizer_cls(params=params, **kwargs)
    # record the starting lr so schedulers can resume (last_epoch != -1)
    for group in optimizer.param_groups:
        group.setdefault('initial_lr', group['lr'])
    return optimizer


class WarmupDecayLR(torch.optim.lr_scheduler._LRScheduler):
    """Warmup -> constant "standup" plateau -> optional decay lr schedule.

    Phases, measured in scheduler steps (``last_epoch``):
      1. ``last_epoch < warmup_steps``: lr ramps linearly from
         ``base_lr * warmup_factor`` up to ``base_lr`` ('linear'), stays at
         ``base_lr * warmup_factor`` ('constant'), or is effectively disabled
         ('None': full base lr from the start).
      2. ``warmup_steps <= last_epoch <= warmup_steps + standup_steps``:
         lr held at ``base_lr``.
      3. afterwards: lr follows the wrapped ``decay_method`` scheduler from
         ``torch.optim.lr_scheduler`` ('None' keeps ``base_lr`` forever).
    """

    def __init__(self, optimizer: torch.optim.Optimizer,
                 warmup_steps: int = 10,
                 warmup_factor: float = 0.,
                 warmup_method: str = 'linear',
                 standup_steps: int = 0,
                 decay_method: str = 'None',
                 decay_kwargs: Optional[List] = None,
                 last_epoch: int = -1):
        """
        Args:
            optimizer: wrapped optimizer. When resuming (``last_epoch != -1``)
                every param group must already contain an 'initial_lr' entry
                (``make_optimizer`` sets it).
            warmup_steps: number of warmup steps, >= 0.
            warmup_factor: starting lr multiplier for warmup, >= 0.
            warmup_method: 'linear', 'constant', or 'None' to skip warmup.
            standup_steps: steps to hold the base lr after warmup, >= 0.
            decay_method: scheduler class name from ``torch.optim.lr_scheduler``
                (the trailing 'LR' may be omitted, e.g. 'CosineAnnealing'),
                or 'None' for no decay.
            decay_kwargs: flat ``[key, value, ...]`` list converted by
                ``list2dict`` and forwarded to the decay scheduler.
            last_epoch: last finished step when resuming; -1 starts fresh.

        Raises:
            ValueError: on an unknown warmup/decay method or negative steps/factor.
        """
        if warmup_method not in ("constant", "linear", "None"):
            raise ValueError(f"expected 'warmup_method' to be 'linear' or 'constant', "
                             f"got {warmup_method}, "
                             f"if you do not want to execute warmup, "
                             f"pass 'None' in configuration file")
        if warmup_steps < 0:
            raise ValueError("argument 'warmup_steps' must be non-negative")
        if warmup_factor < 0:
            raise ValueError("argument 'warmup_factor' must be non-negative")
        if standup_steps < 0:
            raise ValueError("argument 'standup_steps' must be non-negative")
        if decay_kwargs is None:
            # replaces the original shared mutable default argument ([])
            decay_kwargs = []

        self.warmup_steps = warmup_steps
        self.warmup_factor = warmup_factor
        self.warmup_method = warmup_method

        self.standup_steps = standup_steps
        self.decay_method = decay_method
        self.learning_rate_decay = None
        # The base-class __init__ performs one implicit step() (which calls our
        # overridden step/get_lr — hence the attributes above must already be
        # set); passing last_epoch-1 compensates for that implicit step.
        super(WarmupDecayLR, self).__init__(optimizer, last_epoch-1)
        if decay_method == 'None':
            self.learning_rate_decay = None
        elif getattr(torch.optim.lr_scheduler, decay_method, None) is None:
            # allow the 'LR' suffix to be omitted in config files
            if getattr(torch.optim.lr_scheduler, decay_method+'LR', None) is None:
                raise ValueError(f"cannot find specified learning rate decay method {decay_method} "
                                 f"in module 'torch.optim.lr_scheduler'")
            else:
                decay_method += 'LR'
        if decay_method != 'None':
            kwargs = list2dict(decay_kwargs)
            self.learning_rate_decay = getattr(torch.optim.lr_scheduler, decay_method)(
                                                    self.optimizer, last_epoch=-1, **kwargs)

        # resume steps: rewind the step counters to the resumed position, then
        # fast-forward the inner decay scheduler past warmup + standup
        self._step_count = last_epoch
        self.optimizer._step_count = last_epoch + 1
        if self.last_epoch > self.warmup_steps + self.standup_steps and \
                self.learning_rate_decay is not None:
            for i in range(self.last_epoch - self.warmup_steps - self.standup_steps):
                self.learning_rate_decay.step()

        # set default self._last_lr (and push it into the param groups)
        self._get_lr_called_within_step = True
        values = self.get_lr()
        self._get_lr_called_within_step = False
        for i, data in enumerate(zip(self.optimizer.param_groups, values)):
            param_group, lr = data
            param_group['lr'] = lr
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]

    def get_lr(self):
        """Return the lr for every param group at the current ``last_epoch``."""
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", UserWarning)

        if self.last_epoch < self.warmup_steps:
            # warmup phase; 'None' leaves the factor at 1 (full base lr)
            warmup_factor = 1
            if self.warmup_method == 'constant':
                warmup_factor = self.warmup_factor
            elif self.warmup_method == 'linear':
                # max(..., 1) guards against ZeroDivisionError when
                # warmup_steps == 0 (this branch then only runs at init)
                alpha = self.last_epoch / max(self.warmup_steps, 1)
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
            return [ base_lr * warmup_factor for base_lr in self.base_lrs ]

        elif self.last_epoch <= self.warmup_steps + self.standup_steps or self.learning_rate_decay is None:
            # standup plateau (or no decay configured): hold the base lr
            return [ base_lr for base_lr in self.base_lrs]

        else:
            # decay phase: delegate to the wrapped scheduler
            return self.learning_rate_decay.get_last_lr()

    def get_last_lr_factor(self):
        """Return current lr of group 0 as a fraction of its initial lr."""
        return self.get_last_lr()[0] / self.optimizer.param_groups[0]['initial_lr']

    def step(self):
        """Advance the schedule by one step and update all param-group lrs."""
        # Warn (like the stock torch schedulers) when step ordering w.r.t.
        # optimizer.step() looks wrong; only checked on the first real step.
        if self._step_count == 1:
            if not hasattr(self.optimizer.step, "_with_counter"):
                warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
                              "initialization. Please, make sure to call `optimizer.step()` before "
                              "`lr_scheduler.step()`. See more details at "
                              "https://pytorch.org/docs/1.7.1/optim.html#how-to-adjust-learning-rate", UserWarning)
            elif self.optimizer._step_count < 1:
                warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
                              "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
                              "`optimizer.step()` before `lr_scheduler.step()`.  Failure to do this "
                              "will result in PyTorch skipping the first value of the learning rate schedule. "
                              "See more details at "
                              "https://pytorch.org/docs/1.7.1/optim.html#how-to-adjust-learning-rate", UserWarning)
        self._step_count += 1

        self._get_lr_called_within_step = True
        self.last_epoch += 1
        # once past warmup + standup, advance the inner decay scheduler too
        if self.last_epoch > self.warmup_steps + self.standup_steps and\
                self.learning_rate_decay is not None:
            self.learning_rate_decay.step()
        values = self.get_lr()
        self._get_lr_called_within_step = False

        for i, data in enumerate(zip(self.optimizer.param_groups, values)):
            param_group, lr = data
            param_group['lr'] = lr

        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]


def make_scheduler(**kwargs):
    """Construct a ``WarmupDecayLR`` scheduler from keyword configuration.

    All keywords are forwarded verbatim to ``WarmupDecayLR``.

    Raises:
        ValueError: when the mandatory 'optimizer' keyword is missing.
    """
    if 'optimizer' not in kwargs:
        raise ValueError(f"missing essential argument: 'optimizer'")
    return WarmupDecayLR(**kwargs)


def make_grid(grid_config_dict):
    """Build a list of grid values from a (possibly partial) grid config.

    The config is first normalized via ``complete_grid_config_dict``. An
    explicit 'range' entry is returned as-is; otherwise values are generated
    from 'start'/'stop'/'step' linearly ('constant') or geometrically
    ('exp'/'exponential': equal steps in log-space, i.e. powers of 'base').

    Returns:
        list of floats ('stop' is exclusive, np.arange semantics).

    Raises:
        ValueError: for an unknown 'method'.
    """
    grid_config_dict = complete_grid_config_dict(grid_config_dict)
    if 'range' in grid_config_dict:
        return grid_config_dict['range']

    method = grid_config_dict['method']
    if method == 'constant':
        return np.arange(grid_config_dict['start'], grid_config_dict['stop'],
                         grid_config_dict['step'], dtype=float).tolist()
    # bug fix: the original `method == 'exp' or 'exponential'` was always
    # truthy, making the invalid-method ValueError below unreachable
    if method in ('exp', 'exponential'):
        base = grid_config_dict['base']
        exponents = np.arange(np.log(grid_config_dict['start']) / np.log(base),
                              np.log(grid_config_dict['stop']) / np.log(base),
                              grid_config_dict['step'], dtype=float).tolist()
        return [ base**x for x in exponents ]
    raise ValueError(f"invalid step method '{method}'")

def complete_grid_config_dict(grid_config_dict: dict):
    """Normalize a grid config dict, filling defaults and coercing floats.

    Two accepted forms:
      * explicit: ``{'range': [...]}`` — list entries are float-coerced where
        possible and every other key is discarded;
      * generative: ``{'start', 'stop'[, 'step', 'method', 'base']}`` —
        'step' defaults to 1.0, 'method' to 'constant'; 'exp' is canonicalized
        to 'exponential' (with a default 'base' of 10.0); all values that
        parse as floats are converted.

    Returns:
        A shallow copy of the normalized config.

    Raises:
        ValueError: when 'start'/'stop' is missing, or 'range' is not a list.
    """
    if 'range' not in grid_config_dict:
        required_arguments = {'start', 'stop'}
        default_grid_configs = {'step': 1., 'method': 'constant'}
        for key in required_arguments:
            if key not in grid_config_dict:
                raise ValueError(f"missing essential key '{key}' in grid config")
        for key, value in default_grid_configs.items():
            if key not in grid_config_dict:
                grid_config_dict[key] = value
        if grid_config_dict['method'] == 'exp':
            grid_config_dict['method'] = 'exponential'
        if grid_config_dict['method'] == 'exponential':
            if 'base' not in grid_config_dict:
                grid_config_dict['base'] = 10.
        # bug fix: iterating a dict yields keys only — the original
        # `for key, value in grid_config_dict` raised an unpacking error.
        # Only existing keys are reassigned, so in-place iteration is safe.
        for key, value in grid_config_dict.items():
            try:
                grid_config_dict[key] = float(value)
            except (TypeError, ValueError):
                # non-numeric values (e.g. 'method') are kept unchanged
                pass
    else:
        if not isinstance(grid_config_dict['range'], list):
            raise ValueError(f"expected input 'range' as a list, "
                             f"got {type(grid_config_dict['range'])}")
        r_list = []
        for value in grid_config_dict['range']:
            try:
                r_list.append(float(value))
            except (TypeError, ValueError):
                r_list.append(value)
        # drop everything except the normalized range
        grid_config_dict = {'range': r_list}
    return grid_config_dict.copy()



# DEBUG only: eyeball the lr curves of two WarmupDecayLR configurations.
if __name__ == '__main__':
    from ..blocks import ConvolutionBatchnormActivationModule

    # Tiny model; give 'weight' and non-weight parameters distinct base lrs
    # so the per-group behavior of the scheduler is visible.
    model = ConvolutionBatchnormActivationModule(32, 4, 2, 1)
    param_groups = []
    for name, param in model.named_parameters():
        base_lr = 0.01 if 'weight' in name else 0.1
        param_groups.append({'params': [param], 'lr': base_lr})
    optim = torch.optim.Adadelta(param_groups)

    # warmup 10, standup 30, then cosine decay over T_max=100
    testLR1 = WarmupDecayLR(optim, 10, 0., 'linear', 30, 'CosineAnnealing', ['T_max', 100])
    for step_idx in range(90):
        print(step_idx, testLR1.get_lr())
        testLR1.step()

    # warmup 5 from factor 0.5, standup 10, no decay
    testLR2 = WarmupDecayLR(optim, 5, 0.5, 'linear', 10, 'None')
    for step_idx in range(90):
        print(step_idx, testLR2.get_lr())
        testLR2.step()