import numpy as np
import math
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler

class ExpUp(_LRScheduler):
    """Exponential warm-up schedule.

    Each parameter group's LR ramps from roughly ``exp(exponent)`` times
    its configured value up to the full value over ``warm_steps`` steps,
    following ``exp(exponent * (1 - t/warm_steps)**2)``; after warm-up
    (or when ``warm_steps`` is 0) the factor is 1.0.
    """

    def __init__(
            self,
            optimizer: Optimizer,
            warm_steps,
            exponent=-5.0
        ):
        self.rampup_len = warm_steps
        # Snapshot the configured LRs; these are the warm-up targets.
        self.max_lrs = [group["lr"] for group in optimizer.param_groups]
        self.exponent = exponent
        super(ExpUp, self).__init__(optimizer)

    def _get_scaling_factor(self):
        """Return the warm-up multiplier in (0, 1] for the current step."""
        if self.rampup_len == 0 or self._step_count >= self.rampup_len:
            return 1.0
        # Clamp the step into [0, rampup_len] before computing progress.
        progress = min(max(self._step_count, 0), self.rampup_len)
        remaining = 1.0 - progress / self.rampup_len
        return math.exp(self.exponent * remaining * remaining)

    def get_lr(self):
        """Per-group LRs: the snapshotted maxima scaled by the ramp factor."""
        factor = self._get_scaling_factor()
        return [base_lr * factor for base_lr in self.max_lrs]
    
class ExpUpCosDown(_LRScheduler):
    """Exponential warm-up followed by cosine decay.

    During the first ``warm_steps`` steps each group's LR ramps from
    roughly ``exp(exponent)`` times its configured value up to the full
    value.  Afterwards the LR follows half a cosine period down to
    ``min_weight`` times the configured value, reaching that floor at
    ``total_steps`` and holding it thereafter.
    """

    def __init__(
            self,
            optimizer: Optimizer,
            warm_steps,
            total_steps,
            exponent=-5.0,
            min_lr_ratio=0.1,
            init_weight=1.0,
            min_weight=0.1
        ):
        self.rampup_len = warm_steps
        self.tot_steps = total_steps
        # Snapshot the configured LRs; these are the warm-up targets.
        self.max_lrs = [group["lr"] for group in optimizer.param_groups]
        self.exponent = exponent
        # NOTE(review): min_lr_ratio and init_weight are never read by this
        # scheduler; they are stored only so existing call sites keep working.
        self.min_lr_ratio = min_lr_ratio
        self.init_weight = init_weight
        self.min_weight = min_weight
        super(ExpUpCosDown, self).__init__(optimizer)

    def _get_lr_factor(self):
        """Return the multiplicative factor for the current step.

        Warm-up phase: exponential ramp (same curve as
        ``_get_scaling_factor``).  Decay phase: cosine annealing from 1
        down to 0, clamped so the factor stays 0 once ``tot_steps`` is
        reached.
        """
        if self.rampup_len > 0 and self._step_count < self.rampup_len:
            return self._get_scaling_factor()
        # Bug fix: the original clipped _step_count with itself as the upper
        # bound (a no-op), so past tot_steps the cosine phase exceeded pi and
        # the factor climbed back up; it also returned a constant 1.0 when
        # warm_steps == 0, skipping the decay entirely.
        decay_len = max(self.tot_steps - self.rampup_len, 1)  # avoid /0
        current = min(max(self._step_count, self.rampup_len), self.tot_steps)
        phase = (current - self.rampup_len) / decay_len * math.pi
        return (math.cos(phase) + 1) / 2

    def _get_scaling_factor(self):
        """Exponential warm-up factor in (0, 1]; 1.0 once warm-up is over."""
        if self.rampup_len == 0:
            return 1.0
        current = min(max(self._step_count, 0), self.rampup_len)
        remaining = 1.0 - current / self.rampup_len
        return math.exp(self.exponent * remaining * remaining)

    def get_lr(self):
        """Per-group LRs: ramped during warm-up, cosine-decayed after,
        never dropping below ``min_weight`` of the configured LR."""
        if self._step_count < self.rampup_len:
            factor = self._get_scaling_factor()
            return [lr * factor for lr in self.max_lrs]
        factor = self._get_lr_factor()
        floor = self.min_weight
        return [(lr - lr * floor) * factor + lr * floor for lr in self.max_lrs]