# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# PyTorch (https://pytorch.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Modules for normalization
"""
from typing import Union, Optional, Dict, Any
from numpy import ndarray

import torch
from torch import nn, Tensor

from .utils import get_tensor, get_arguments, GLOBAL_DEVICE
from .units import Units, get_energy_unit


class ScaleShift(nn.Module):
    r"""A network to scale and shift the label of dataset or prediction.

    The forward transform is ``output * scale + ref + shift`` (with the
    per-atom-type reference ``ref`` gathered from `type_ref` and summed over
    atoms), and :meth:`normalize` is its exact inverse.

    Args:
        scale (Union[float, Tensor, ndarray]): Scale value. Default: 1
        shift (Union[float, Tensor, ndarray]): Shift value. Default: 0
        type_ref (Union[Tensor, ndarray]): Tensor of shape (T, Y). Data type is float.
            Reference values of label for each atom type. Default: None
        shift_by_atoms (bool): Whether to multiply the shift by the number of
            real (non-padding) atoms of each system. Default: False
        unit (str): Energy unit of the output. Default: None

    Symbols:
        B:  Batch size
        A:  Number of atoms
        T:  Number of total atom types
        Y:  Number of labels
    """

    def __init__(self,
                 scale: Union[float, Tensor, ndarray] = 1,
                 shift: Union[float, Tensor, ndarray] = 0,
                 type_ref: Union[Tensor, ndarray] = None,
                 shift_by_atoms: bool = False,
                 unit: str = None,
                 **kwargs):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)
        self.device = GLOBAL_DEVICE()

        self.output_unit = get_energy_unit(unit)
        self.units = Units(energy_unit=self.output_unit)

        # Buffers (not parameters): they follow the module across devices and
        # are saved in the state dict, but receive no gradients.
        self.register_buffer('_scale', get_tensor(scale, device=self.device))
        self.register_buffer('_shift', get_tensor(shift, device=self.device))

        if type_ref is None:
            # Scalar placeholder: `ndim == 0` marks "no reference table set".
            self.register_buffer('_type_ref', torch.tensor(0, dtype=torch.float32, device=self.device))
        else:
            self.register_buffer('_type_ref', get_tensor(type_ref, device=self.device))

        # Whether the shift scales with the number of atoms in each system.
        self.shift_by_atoms = shift_by_atoms

    @property
    def scale(self) -> torch.Tensor:
        """Scale factor."""
        return self._scale

    @scale.setter
    def scale(self, scale_: Union[float, torch.Tensor, ndarray]):
        # Reassigning a registered buffer replaces it inside the module, which
        # also works when the new value has a different shape than the old one
        # (Tensor.copy_ would raise on a non-broadcastable shape mismatch).
        self._scale = get_tensor(scale_, device=self.device)

    @property
    def shift(self) -> torch.Tensor:
        """Shift value."""
        return self._shift

    @shift.setter
    def shift(self, shift_: Union[float, torch.Tensor, ndarray]):
        # Replace the buffer so a shape change is allowed (see `scale` setter).
        self._shift = get_tensor(shift_, device=self.device)

    @property
    def type_ref(self) -> torch.Tensor:
        """Reference values of label for each atom type, shape (T, Y),
        or a scalar 0 tensor when no reference table is set."""
        return self._type_ref

    @type_ref.setter
    def type_ref(self, type_ref_: Union[float, torch.Tensor, ndarray]):
        if type_ref_ is None:
            # None resets to the scalar "no table" placeholder.
            type_ref_ = 0
        # Replace the buffer: the new table may have a different shape than
        # the previous one (e.g. scalar placeholder -> (T, Y) table).
        self._type_ref = get_tensor(type_ref_, device=self.device)

    def set_scaleshift(self,
                       scale: Union[float, torch.Tensor, ndarray],
                       shift: Union[float, torch.Tensor, ndarray],
                       type_ref: Union[torch.Tensor, ndarray] = None):
        """Set scale, shift and (optionally) type reference in one call.

        Args:
            scale: New scale value.
            shift: New shift value.
            type_ref: New reference table; ignored when None (the previous
                table is kept, matching the original behavior).

        Returns:
            self, to allow call chaining.
        """
        # Route through the property setters so shape changes are handled.
        self.scale = scale
        self.shift = shift
        if type_ref is not None:
            self.type_ref = type_ref
        return self

    def set_unit(self, unit: str):
        """Set output energy unit.

        Returns:
            self, to allow call chaining.
        """
        self.output_unit = get_energy_unit(unit)
        self.units.set_energy_unit(self.output_unit)
        # Keep the recorded constructor arguments in sync for serialization.
        self._kwargs['unit'] = self.output_unit
        return self

    def convert_energy_from(self, unit) -> float:
        """returns a scale factor that converts the energy from a specified unit."""
        return self.units.convert_energy_from(unit)

    def convert_energy_to(self, unit) -> float:
        """returns a scale factor that converts the energy to a specified unit."""
        return self.units.convert_energy_to(unit)

    def _print_info(self,
                    num_retraction: int = 0,
                    num_gap: int = 3,
                    char: str = '-'
                    ) -> None:
        """print the information of readout"""
        ret = char * num_retraction
        gap = char * num_gap
        print(ret+gap+f" Scale: {self.scale.cpu().numpy()}")
        print(ret+gap+f" Shift: {self.shift.cpu().numpy()}")
        if self.type_ref.ndim > 1:
            print(ret+gap+" Reference value for atom types:")
            for i, ref in enumerate(self.type_ref):
                print(ret+gap+gap+' No.{: <5}'.format(f'{i}: {ref.cpu().numpy()}'))
        else:
            print(ret+gap+f" Reference value for atom types: {self.type_ref.cpu().numpy()}")
        print(ret+gap+f" Scale the shift by the number of atoms: {self.shift_by_atoms}")
        print('-'*80)

    def scale_force(self, force: Tensor) -> Tensor:
        """Scale force"""
        # Forces are gradients of the energy, so they scale but do not shift.
        return force * self._scale

    def normalize_force(self, force: Tensor) -> Tensor:
        """Normalize force"""
        return force / self._scale

    def _type_ref_sum(self, atom_type: Tensor) -> Union[Tensor, int]:
        """Sum the per-atom-type reference values over the atom axis.

        Args:
            atom_type (Tensor): Tensor with shape (B, A). Data type is int.

        Returns:
            Tensor with shape (B, ...), or the integer 0 when no reference
            table is set (scalar placeholder buffer).
        """
        if self._type_ref.ndim == 0:
            return 0
        # (B, A, ...) <- (T, ...). Advanced indexing is used because
        # torch.index_select only accepts a 1-D index tensor, while
        # atom_type is 2-D here.
        ref = self._type_ref[atom_type]
        # (B, ...) <- (B, A, ...)
        return ref.sum(dim=1)

    def _broadcast_shift(self, atom_type: Tensor, num_atoms: Optional[Tensor], ndim: int) -> Tensor:
        """Return the shift, multiplied by the atom count if `shift_by_atoms`.

        Args:
            atom_type (Tensor): Tensor with shape (B, A). Entries <= 0 are
                treated as padding (not counted).
            num_atoms (Tensor, optional): Tensor with shape (B, 1). Counted
                from `atom_type` when None.
            ndim: Number of dimensions of the tensor the shift is applied to,
                used to broadcast `num_atoms` against it.
        """
        shift = self._shift
        if self.shift_by_atoms:
            if num_atoms is None:
                # torch.count_nonzero has no `keepdim` argument, so count the
                # non-padding atoms with a boolean sum instead.
                # (B, 1) <- (B, A)
                num_atoms = (atom_type > 0).sum(dim=-1, keepdim=True)
            if ndim > 2:
                # (B, 1, ..., 1) <- (B, 1): insert axes for broadcasting.
                num_atoms = num_atoms.reshape((num_atoms.shape[0],) + (1,) * (ndim - 1))
            # (B, ...) = (...) * (B, ...)
            shift = shift * num_atoms
        return shift

    def normalize(self,
                  label: Tensor,
                  atom_type: Tensor,
                  num_atoms: Optional[Tensor] = None
                  ) -> Tensor:
        """Normalize outputs: subtract reference and shift, divide by scale.

        Exact inverse of :meth:`forward`.

        Args:
            label (Tensor): Tensor with shape (B, ...). Data type is float.
            atom_type (Tensor): Tensor with shape (B, A). Data type is int.
            num_atoms (Tensor, optional): Tensor with shape (B, 1). Data type is int.

        Returns:
            outputs (Tensor): Tensor with shape (B, ...). Data type is float.
        """
        # (B, ...) - (B, ...)
        label = label - self._type_ref_sum(atom_type)
        shift = self._broadcast_shift(atom_type, num_atoms, label.ndim)
        return (label - shift) / self._scale

    def forward(self,
                output: Tensor,
                atom_type: Tensor,
                num_atoms: Optional[Tensor] = None
                ) -> Tensor:
        """Scale and shift output: multiply by scale, add reference and shift.

        Args:
            output (Tensor): Tensor with shape (B, ...). Data type is float.
            atom_type (Tensor): Tensor with shape (B, A). Data type is int.
            num_atoms (Tensor, optional): Tensor with shape (B, 1). Data type is int.

        Returns:
            outputs (Tensor): Tensor with shape (B, ...). Data type is float.
        """
        # (B, ...) * (...) + (B, ...)
        output = output * self._scale + self._type_ref_sum(atom_type)
        # (B, ...) = (B, ...) + (...) or (B, ...)
        return output + self._broadcast_shift(atom_type, num_atoms, output.ndim)
