# encoding: utf-8
"""
@author:  liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from bisect import bisect_right
import time
import warnings

import cv2
import numpy as np
import torch
from logzero import logger

import asranger as ranger

# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it

class WarmupDecayLR(torch.optim.lr_scheduler._LRScheduler):
    """Three-phase learning-rate schedule: warmup -> standup -> decay.

    Phase 1 (first ``warmup_steps`` epochs): lr ramps linearly from
    ``base_lr * warmup_factor`` up to ``base_lr`` ('linear'), or is held at
    ``base_lr * warmup_factor`` ('constant').
    Phase 2 (next ``standup_steps`` epochs): lr is held at ``base_lr``.
    Phase 3: lr follows ``self.learning_rate_decay`` when one is set.

    NOTE(review): ``decay_method`` is only *validated* against
    ``torch.optim.lr_scheduler`` — the code that would instantiate it is
    commented out below, so ``learning_rate_decay`` stays ``None`` and the
    lr remains at ``base_lr`` after warmup unless a caller assigns a
    scheduler to it externally.
    """

    def __init__(self, optimizer: torch.optim,
                 warmup_steps: int = 10,
                 warmup_factor: float = 0.,
                 warmup_method: str = 'linear',
                 standup_steps: int = 0,
                 decay_method: str = 'None',
                #  decay_kwargs: List = [],
                 last_epoch: int = -1):
        """
        :param optimizer: wrapped optimizer
        :param warmup_steps: number of warmup epochs (>= 0)
        :param warmup_factor: starting lr multiplier for warmup (>= 0)
        :param warmup_method: 'linear', 'constant' or 'None' (no warmup)
        :param standup_steps: epochs to hold the base lr after warmup (>= 0)
        :param decay_method: name of a class in ``torch.optim.lr_scheduler``
            (with or without the 'LR' suffix), or 'None' for no decay
        :param last_epoch: epoch to resume from
        :raises ValueError: on an unknown method name or a negative argument
        """
        if warmup_method not in ("constant", "linear", "None"):
            raise ValueError(f"expected 'warmup_method' to be 'linear' or 'constant', "
                             f"got {warmup_method}, "
                             f"if you do not want to execute warmup, "
                             f"pass 'None' in configuration file")
        if warmup_steps < 0:
            raise ValueError("expected argument 'warmup_steps' to be non-negative")
        if warmup_factor < 0:
            raise ValueError("expected argument 'warmup_factor' to be non-negative")
        if standup_steps < 0:
            raise ValueError("expected argument 'standup_steps' to be non-negative")

        self.warmup_steps = warmup_steps
        self.warmup_factor = warmup_factor
        self.warmup_method = warmup_method

        self.standup_steps = standup_steps
        self.decay_method = decay_method
        # Must be set before super().__init__(), which calls self.step().
        self.learning_rate_decay = None
        # NOTE(review): passing ``last_epoch - 1`` means the default -1
        # becomes -2; recent torch versions then require 'initial_lr' to be
        # present in every param group — confirm callers always resume with
        # last_epoch >= 0 or pre-populate 'initial_lr'.
        super(WarmupDecayLR, self).__init__(optimizer, last_epoch-1)
        if decay_method == 'None':
            self.learning_rate_decay = None
        elif getattr(torch.optim.lr_scheduler, decay_method, None) is None:
            # Accept e.g. 'Exponential' as shorthand for 'ExponentialLR'.
            if getattr(torch.optim.lr_scheduler, decay_method+'LR', None) is None:
                raise ValueError(f"cannot find specified learning rate decay method {decay_method} "
                                 f"in module 'torch.optim.lr_scheduler'")
            else:
                decay_method += 'LR'
        # if decay_method != 'None':
            # kwargs = list2dict(decay_kwargs)
            # self.learning_rate_decay = getattr(torch.optim.lr_scheduler, decay_method)(
            #                                         self.optimizer, last_epoch=-1, **kwargs)

        # Resume bookkeeping: rewind the step counters to the resumed epoch
        # and fast-forward the decay delegate past warmup/standup.
        self._step_count = last_epoch
        self.optimizer._step_count = last_epoch + 1
        if self.last_epoch > self.warmup_steps + self.standup_steps and \
                self.learning_rate_decay is not None:
            for i in range(self.last_epoch - self.warmup_steps - self.standup_steps):
                self.learning_rate_decay.step()

        # Apply the lr for the resumed epoch and record self._last_lr.
        self._get_lr_called_within_step = True
        values = self.get_lr()
        self._get_lr_called_within_step = False
        for i, data in enumerate(zip(self.optimizer.param_groups, values)):
            param_group, lr = data
            param_group['lr'] = lr
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]

    def get_lr(self):
        """Return the lr for each param group at ``self.last_epoch``."""
        # if not self._get_lr_called_within_step:
        #     warnings.warn("To get the last learning rate computed by the scheduler, "
        #                   "please use `get_last_lr()`.", UserWarning)

        if self.last_epoch < self.warmup_steps:
            warmup_factor = 1
            if self.warmup_method == 'constant':
                warmup_factor = self.warmup_factor
            elif self.warmup_method == 'linear':
                # Linear ramp: warmup_factor -> 1 over warmup_steps epochs.
                alpha = self.last_epoch / self.warmup_steps
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
            return [ base_lr * warmup_factor for base_lr in self.base_lrs ]

        elif self.last_epoch <= self.warmup_steps + self.standup_steps or self.learning_rate_decay is None:
            # Standup phase (or no decay configured): hold the base lr.
            return [ base_lr for base_lr in self.base_lrs]

        else:
            # Decay phase: delegate entirely to the wrapped scheduler.
            return self.learning_rate_decay.get_last_lr()

    def get_last_lr_factor(self):
        """Return current lr of the first param group relative to its initial lr."""
        return self.get_last_lr()[0] / self.optimizer.param_groups[0]['initial_lr']

    def step(self):
        """Advance one epoch and write the new lr into every param group."""
        if self._step_count == 1:
            # Mirrors torch's ordering check: optimizer.step() must be
            # called before lr_scheduler.step() (PyTorch >= 1.1.0).
            if not hasattr(self.optimizer.step, "_with_counter"):
                warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
                              "initialization. Please, make sure to call `optimizer.step()` before "
                              "`lr_scheduler.step()`. See more details at "
                              "https://pytorch.org/docs/1.7.1/optim.html#how-to-adjust-learning-rate", UserWarning)
            elif self.optimizer._step_count < 1:
                warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
                              "In PyTorch 1.1.0 and later, you should call them in the opposite order: "
                              "`optimizer.step()` before `lr_scheduler.step()`.  Failure to do this "
                              "will result in PyTorch skipping the first value of the learning rate schedule. "
                              "See more details at "
                              "https://pytorch.org/docs/1.7.1/optim.html#how-to-adjust-learning-rate", UserWarning)
        self._step_count += 1

        self._get_lr_called_within_step = True
        self.last_epoch += 1
        # Keep the decay delegate in lock-step once past warmup + standup.
        if self.last_epoch > self.warmup_steps + self.standup_steps and\
                self.learning_rate_decay is not None:
            self.learning_rate_decay.step()
        values = self.get_lr()
        self._get_lr_called_within_step = False

        for i, data in enumerate(zip(self.optimizer.param_groups, values)):
            param_group, lr = data
            param_group['lr'] = lr

        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]

class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step lr schedule (lr *= gamma at each milestone) with warmup.

    During the first ``warmup_iters`` epochs the base lr is scaled by a
    warmup factor that either stays at ``warmup_factor`` ('constant') or
    ramps linearly from ``warmup_factor`` to 1 ('linear').
    """

    def __init__(
            self,
            optimizer,
            milestones,
            gamma=0.1,
            warmup_factor=1.0 / 3,
            warmup_iters=500,
            warmup_method="linear",
            last_epoch=-1,
    ):
        """
        :param optimizer: wrapped optimizer
        :param milestones: increasing list of epochs at which lr is decayed
        :param gamma: multiplicative decay applied at each milestone
        :param warmup_factor: starting lr multiplier for warmup
        :param warmup_iters: number of warmup epochs
        :param warmup_method: 'constant' or 'linear'
        :param last_epoch: epoch to resume from; -1 starts from scratch
        :raises ValueError: on unsorted milestones or an unknown warmup method
        """
        if not list(milestones) == sorted(milestones):
            # Fix: the original passed `milestones` as a second exception
            # argument, so the '{}' placeholder was never interpolated.
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )

        if warmup_method not in ("constant", "linear"):
            # Fix: the original concatenated "accepted" + "got" with no
            # separator, producing "acceptedgot ...".
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return warmup-scaled, milestone-decayed lr for each param group."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Linear ramp: warmup_factor -> 1 over warmup_iters epochs.
                alpha = self.last_epoch / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        # bisect_right counts how many milestones have been passed.
        return [
            base_lr
            * warmup_factor
            * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]


def make_optimizer(optim_name, model, base_lr, weight_decay, bias_lr_factor, momentum):
    """Build an optimizer with a separate lr for bias parameters.

    Every trainable parameter gets its own param group; parameters whose
    name contains "bias" use ``base_lr * bias_lr_factor`` instead of
    ``base_lr``, and all groups share ``weight_decay``.

    :param optim_name: optimizer class name, looked up in ``torch.optim``
        (or in the local ``ranger`` module when it contains 'Ranger')
    :param model: module whose trainable parameters are optimized
    :param base_lr: learning rate for non-bias parameters
    :param weight_decay: weight decay applied to every group
    :param bias_lr_factor: multiplier on ``base_lr`` for bias parameters
    :param momentum: momentum value (only used for SGD)
    :return: the constructed optimizer
    """
    param_groups = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        group_lr = base_lr * bias_lr_factor if "bias" in name else base_lr
        param_groups.append({"params": [param], "lr": group_lr, "weight_decay": weight_decay})
    if optim_name == 'SGD':
        return getattr(torch.optim, optim_name)(param_groups, momentum=momentum)
    if 'Ranger' in optim_name:
        return getattr(ranger, optim_name)(param_groups)
    return getattr(torch.optim, optim_name)(param_groups)


def make_warmup_scheduler(optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3, warmup_iters=500,
                          warmup_method="linear",
                          last_epoch=-1):
    """Construct a :class:`WarmupMultiStepLR` for *optimizer*.

    A ``last_epoch`` of 0 is normalized to -1 because the scheduler's
    ``__init__`` advances the epoch once, so 0 would otherwise be
    misinterpreted as a resume and mis-initialize the schedule.
    """
    epoch = -1 if last_epoch == 0 else last_epoch
    return WarmupMultiStepLR(optimizer, milestones, gamma, warmup_factor,
                             warmup_iters, warmup_method, last_epoch=epoch)


def featuremap_perspective_transform(featuremap: torch.Tensor, bpts: torch.Tensor, btarget_pts: torch.Tensor,
                                     output_size):
    """Apply a per-sample perspective transform to a batch of feature maps.

    Arguments:
        featuremap {torch.Tensor} -- [B, C, H, W]
        bpts {torch.Tensor} -- [B, 4, 2] source points, xy order
        btarget_pts {torch.Tensor} -- [B, 4, 2] target points, xy order
        output_size -- (w, h) of the output maps

    Returns:
        torch.Tensor -- [B, C, h, w] warped feature maps
    """
    device = featuremap.device
    B, C, H, W = featuremap.shape
    w, h = output_size

    # Solve one 3x3 projection matrix per sample (OpenCV needs numpy input).
    bpts_np = bpts.cpu().float().numpy()
    btarget_pts_np = btarget_pts.cpu().float().numpy()

    trans_mats = []

    for pts_np, target_pts_np in zip(bpts_np, btarget_pts_np):
        trans_mat = cv2.getPerspectiveTransform(pts_np, target_pts_np)
        # Degenerate point sets yield a singular matrix; fall back to the
        # identity so the batched inverse below cannot fail.
        # Fix: np.float was removed in NumPy 1.24; it was an alias of the
        # builtin float (float64), so np.float64 is behavior-identical.
        if np.linalg.matrix_rank(trans_mat) < 3:
            trans_mat = np.identity(3, dtype=np.float64)
        trans_mats.append(torch.from_numpy(trans_mat))
    inv_trans_mats = torch.stack(trans_mats).float().inverse().to(device)

    # Back-project every output pixel into the input map.
    # NOTE(review): meshgrid uses the default 'ij' indexing, so x indexes
    # rows (h) and y indexes columns (w) — confirm against the view() below.
    x, y = torch.meshgrid(torch.arange(h), torch.arange(w))
    z = torch.ones_like(x)
    cors = torch.stack([x, y, z]).view(1, 3, -1).to(device).float()
    cors = cors.repeat(B, 1, 1)

    reversed_cors = torch.bmm(inv_trans_mats, cors)
    reversed_cors = reversed_cors[:, :2, :] / \
                    reversed_cors[:, 2, :].view(B, 1, -1)  # [B, 2, wh]
    reversed_cors = reversed_cors.view(-1, 2, h, w).permute(0, 2, 3, 1)
    # Normalize to grid_sample's [-1, 1] coordinate range.
    norm_cors = ((reversed_cors / reversed_cors.new_tensor([W, H])) - 0.5) * 2

    # Bilinear sampling; border padding clamps out-of-range coordinates.
    output = torch.nn.functional.grid_sample(featuremap, norm_cors, padding_mode='border')
    assert not torch.any(torch.isnan(output)), "Found NaN"
    # Fix: removed dead statement `tmp = output + 1` (result was unused).
    return output


