# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# PyTorch (https://pytorch.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Loss functions
"""

from typing import Union, Optional
from numpy import ndarray

import torch
from torch import nn, Tensor 
from torch.nn.modules.loss import _Loss

from ..utils import get_tensor, get_arguments, GLOBAL_DEVICE


__all__ = [
    'MolecularLoss',
    'MAELoss',
    'MSELoss',
    'CrossEntropyLoss',
]


class MolecularLoss(_Loss):
    r"""Base loss function for molecular properties such as energy and force.

    Subclasses implement :meth:`_calc_loss`, which maps an element-wise
    difference to an element-wise loss (e.g. absolute or squared error).

    Args:
        atomwise (bool): Whether to average over each atom when calculating
            the loss function. Default: None
        force_dis (float): An average norm value of force, which is used to
            scale the force. Default: 1
        reduction (str): Type of reduction to be applied to loss. The optional
            values are "mean", "sum", and "none". Default: "mean"
    """
    def __init__(self,
                 atomwise: bool = None,
                 force_dis: Union[float, Tensor, ndarray] = 1,
                 reduction: str = 'mean',
                 **kwargs
                 ):
        super().__init__()
        self._kwargs = kwargs

        self._atomwise = atomwise
        self.reduction = reduction
        self.device = GLOBAL_DEVICE()
        self._force_dis = get_tensor(force_dis, dtype=torch.float32, device=self.device)

    def set_atomwise(self, atomwise: bool = True):
        """Set whether to use atomwise loss.

        Only takes effect when ``atomwise`` was not fixed at construction
        time (i.e. it is still None); otherwise the constructor value wins.
        """
        if self._atomwise is None:
            self._atomwise = atomwise
        return self

    def get_loss(self,
                 loss: Tensor,
                 weights: Optional[Tensor] = None
                 ) -> Tensor:
        """Apply optional per-sample weights and the configured reduction."""
        if weights is not None:
            loss = loss * weights
        if self.reduction == 'mean':
            return torch.mean(loss)
        if self.reduction == 'sum':
            return torch.sum(loss)
        # reduction == 'none' (or any other value): return unreduced loss
        return loss

    def forward(self,
                predict: Tensor,
                label: Tensor,
                num_atoms: Union[int, Tensor] = 1,
                atom_mask: Optional[Tensor] = None,
                **kwargs,
                ):
        """Calculate the loss between prediction and label.

        Args:
            predict (Tensor):   Tensor with shape (B, E) for per-molecule
                                labels (e.g. energy) or (B, A, D) for
                                per-atom labels (e.g. force). Data type is
                                float. Predicted value.
            label (Tensor):     Tensor with the same shape as `predict`.
                                Data type is float. Label value.
            num_atoms (Tensor): Tensor with shape (B, 1). Data type is int.
                                Number of atoms in each molecule.
                                Default: 1
            atom_mask (Tensor): Tensor with shape (B, A). Data type is bool.
                                Mask of atoms in each molecule.
                                Default: None

        Symbols:
            B:  Batch size
            A:  Number of atoms
            D:  Dimension of position coordinate. Usually is 3.
            E:  Number of labels

        Returns:
            loss (Tensor):  Loss value. Scalar for "mean"/"sum" reduction,
                            otherwise an unreduced per-sample tensor.
        """
        # Non-atomwise mode (or a rank that the atomwise logic does not
        # support): plain element-wise loss with no atom-number weighting.
        if (not self._atomwise) or predict.ndim > 3 or predict.ndim < 2:
            loss = self._calc_loss(predict - label)
            return self.get_loss(loss)

        # BUGFIX: `num_atoms` may be a plain Python int (default 1), which
        # has no `.to` method. `as_tensor` accepts both ints and tensors and
        # preserves the device of tensor inputs.
        num_atoms = torch.as_tensor(num_atoms, dtype=torch.float32)

        if predict.ndim == 3:
            # Per-atom labels (e.g. force), scaled by the force scale factor.
            # (B, A, X)
            diff = (predict - label) * self._force_dis
            diff = self._calc_loss(diff)
            # (B, A)
            diff = torch.sum(diff, -1)

            if atom_mask is None:
                # (B, 1) <- (B, A)
                loss = torch.mean(diff, -1, keepdim=True)
            else:
                # Only average over real atoms: mask out padding, then
                # divide by the true atom count.
                # (B, A) * (B, A)
                diff = diff * atom_mask
                # (B, 1) <- (B, A)
                loss = torch.sum(diff, -1, keepdim=True)
                # (B, 1) / (B, 1)
                loss = loss / num_atoms
        else:
            # Per-molecule labels (e.g. energy), normalized per atom.
            # (B, Y)
            diff = (predict - label) / num_atoms
            loss = self._calc_loss(diff)

        # Weight each molecule by its relative size in the batch.
        # (B, 1)
        weights = num_atoms / torch.mean(num_atoms)

        return self.get_loss(loss, weights)

    def _calc_loss(self, diff: Tensor) -> Tensor:
        """Map an element-wise difference to an element-wise loss.

        Must be implemented by subclasses (e.g. abs for MAE, square for MSE).
        """
        raise NotImplementedError


class MAELoss(MolecularLoss):
    r"""Mean-absolute-error loss function for energy and force.

    Args:
        atomwise (bool): Whether to average over each atom when calculating
            the loss function. Default: None
        force_dis (float): An average norm value of force, which is used to
            scale the force. Default: 1
        reduction (str): Method to reduce the output Tensor. Default: 'mean'
    """
    def __init__(self,
                 atomwise: bool = None,
                 force_dis: Union[float, torch.Tensor, ndarray] = 1,
                 reduction: str = 'mean',
                 **kwargs
                 ):
        super().__init__(
            atomwise=atomwise,
            force_dis=force_dis,
            reduction=reduction,
        )
        self._kwargs = get_arguments(locals(), kwargs)

    def _calc_loss(self, diff: Tensor) -> Tensor:
        # Element-wise absolute error.
        return diff.abs()


class MSELoss(MolecularLoss):
    r"""Mean-square-error loss function for energy and force.

    Args:
        atomwise (bool): Whether to average over each atom when calculating
            the loss function. Default: None
        force_dis (float): An average norm value of force, which is used to
            scale the force. Default: 1
        reduction (str): Method to reduce the output Tensor. Default: 'mean'
    """
    def __init__(self,
                 atomwise: bool = None,
                 force_dis: Union[float, Tensor, ndarray] = 1,
                 reduction: str = 'mean',
                 **kwargs
                 ):
        super().__init__(
            atomwise=atomwise,
            force_dis=force_dis,
            reduction=reduction,
        )
        self._kwargs = get_arguments(locals(), kwargs)

    def _calc_loss(self, diff: Tensor) -> Tensor:
        # Element-wise squared error.
        return diff * diff


class CrossEntropyLoss(MolecularLoss):
    r"""Binary cross entropy loss function.

    Args:
        reduction (str): Type of reduction to be applied to loss. The optional
            values are "mean", "sum", and "none". Default: "mean"
        use_sigmoid (bool): If True, `predict` is taken as raw logits and a
            sigmoid is applied inside the loss (``BCEWithLogitsLoss``);
            if False, `predict` must already be probabilities in [0, 1]
            (``BCELoss``). Default: True
    """
    def __init__(self,
                 reduction: str = 'mean',
                 use_sigmoid: bool = True
                 ):
        super().__init__(
            atomwise=False,
            reduction=reduction,
        )
        self.use_sigmoid = use_sigmoid
        # BCEWithLogitsLoss fuses the sigmoid with the loss for numerical
        # stability; BCELoss expects probabilities as input.
        if self.use_sigmoid:
            self.loss_fn = nn.BCEWithLogitsLoss(reduction=reduction)
        else:
            self.loss_fn = nn.BCELoss(reduction=reduction)

    def forward(self,
                predict: Tensor,
                label: Tensor,
                num_atoms: Tensor = 1,
                atom_mask: Optional[Tensor] = None,
                **kwargs,
                ):
        """Calculate binary cross entropy between prediction and label.

        Args:
            predict (Tensor): Predicted values — logits if ``use_sigmoid``
                is True, otherwise probabilities in [0, 1].
            label (Tensor): Target values in [0, 1], same shape as `predict`.
            num_atoms (Tensor): Unused; kept for interface compatibility
                with `MolecularLoss`. Default: 1
            atom_mask (Tensor): Unused; kept for interface compatibility
                with `MolecularLoss`. Default: None

        Returns:
            Tensor: Loss value, reduced according to ``reduction``.
        """
        loss = self.loss_fn(predict, label)
        return loss
