# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# PyTorch (https://pytorch.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Learning rate schedule for optimizer
"""

import torch
from torch.optim.lr_scheduler import _LRScheduler
from typing import Optional
from ..utils import get_arguments, get_tensor, GLOBAL_DEVICE

__all__ = [
    "TransformerLR",
]


class TransformerLR(_LRScheduler):
    r"""A transformer type dynamic learning rate schedule.

    Implements the schedule from "Attention Is All You Need":
    ``lr = learning_rate * dimension^{-0.5} *
    min(step^{-0.5}, step * warmup_steps^{-1.5})``.
    The rate ramps up linearly for ``warmup_steps`` steps, then decays as the
    inverse square root of the step number. At step 0 the computed rate is 0.

    Args:
        optimizer (torch.optim.Optimizer): Optimizer instance
        learning_rate (float):  Reference learning rate. Default: 1.0
        warmup_steps (int):     Warm up steps. Default: 4000
        dimension (int):        Dimension of output Tensor. Default: 1
        last_epoch (int):       The index of last epoch. Default: -1
        device (torch.device):  Device to use. Default: None (falls back to
                                the package-global ``GLOBAL_DEVICE()``)

    Raises:
        TypeError:  If `learning_rate` is not a float.
        ValueError: If `learning_rate` is negative, `warmup_steps` is not a
                    positive integer, or `dimension` is not positive.
    """
    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 learning_rate: float = 1.0,
                 warmup_steps: int = 4000,
                 dimension: int = 1,
                 last_epoch: int = -1,
                 device: Optional[torch.device] = None,
                 ):

        # Validate arguments and set all attributes BEFORE calling
        # super().__init__(): _LRScheduler.__init__ invokes self.step(),
        # which calls self.get_lr() and reads these attributes. The original
        # call order raised AttributeError on every construction.
        if not isinstance(learning_rate, float):
            raise TypeError("learning_rate must be float.")
        if learning_rate < 0:
            raise ValueError("learning_rate must be non-negative.")
        if not isinstance(warmup_steps, int) or warmup_steps <= 0:
            raise ValueError("warmup_steps must be a positive integer.")
        if dimension <= 0:
            raise ValueError("dimension must be positive.")

        self.learning_rate = learning_rate
        # Honor an explicit `device` argument; the original ignored it and
        # always used the package-global device.
        self.device = device if device is not None else GLOBAL_DEVICE()

        self.warmup_steps = torch.tensor(warmup_steps, dtype=torch.float32,
                                         device=self.device)
        # d_model^{-0.5} scaling factor, precomputed once.
        dim = torch.tensor(dimension, dtype=torch.float32, device=self.device)
        self.dim_scale = torch.pow(dim, -0.5)

        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        """Calculate the learning rate at the current step.

        Returns:
            lr (list[float]): Current learning rate, one entry per
                optimizer parameter group.
        """
        step_num = torch.tensor(self.last_epoch, dtype=torch.float32,
                                device=self.device)
        # Warm-up branch: linear ramp. Decay branch: inverse square root.
        warmup_scale = torch.pow(self.warmup_steps, -1.5)
        lr1 = torch.pow(step_num, -0.5)  # inf at step 0 -> min() yields the ramp (0.0)
        lr2 = step_num * warmup_scale
        lr_percent = self.dim_scale * torch.minimum(lr1, lr2)
        return [self.learning_rate * lr_percent.item() for _ in self.base_lrs]
