# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# PyTorch (https://pytorch.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Metric functions
"""

from typing import Tuple, Optional, Union, List
import numpy as np
from numpy import ndarray

import torch
from torch import nn, Tensor
from abc import ABCMeta, abstractmethod

from ..utils import get_tensor, get_arguments, GLOBAL_DEVICE, get_integer


# Public API of this module.
# Fix: 'BinaryAccuracy' is defined below but was missing from the export list.
__all__ = [
    'Metric',
    'MaxError',
    'Error',
    'MAE',
    'MSE',
    'MNE',
    'RMSE',
    'Loss',
    'BinaryAccuracy',
]

class Metric(metaclass=ABCMeta):
    """
    Base class of metric, which is used to evaluate metrics.

    The `clear`, `update`, and `eval` methods should be called when evaluating
    a metric, and they must be overridden by subclasses: `update` accumulates
    intermediate results during the evaluation process, `eval` computes the
    final result, and `clear` reinitializes the intermediate results.

    Never use this class directly, but instantiate one of its subclasses
    instead, for example :class:`MAE` or :class:`RMSE`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    """
    def __init__(self,
                 name: Optional[str] = None,
                 mode: str = "min",
                 index: int = 0,
                 per_atom: bool = False,
                 reduction: Optional[str] = 'mean',
                 aggregate: Optional[str] = 'mean'):
        """
        Args:
            name: Name of the metric.
            mode: 'min' or 'max'; 'min' means a smaller value is better.
            index: For multi-output cases, which output to calculate the
                metric for.
            per_atom: Whether to average the metric by the number of atoms.
            reduction: Reduction method from (B, Y) to (B, 1); one of
                'mean', 'sum', 'none' (or None).
            aggregate: Aggregation method from (B, A, ...) to (B, A); one of
                'mean', 'sum', 'none' (or None).
        """
        self.name = name
        self.mode = mode
        self.index = get_integer(index)
        self.per_atom = per_atom

        # 'none' and None both disable the corresponding operation.
        self.reduction = self._validate_method('reduction', reduction)
        self.aggregate = self._validate_method('aggregate', aggregate)

        # clear() is abstract here; every concrete subclass must implement it,
        # because it is invoked during construction to initialize the state.
        self.clear()
        self.best_value = float('inf') if self.mode == "min" else float('-inf')

    @staticmethod
    def _validate_method(arg_name: str, method: Optional[str]) -> Optional[str]:
        """Validate a 'mean'/'sum'/'none' option; map 'none' and None to None."""
        if method is None or method == 'none':
            return None
        if method not in ('mean', 'sum'):
            raise ValueError(f"{arg_name} must be one of 'mean','sum','none', got {method}")
        return method

    def _convert_data(self, data):
        """
        Convert input data to a numpy array.

        Args:
            data (Union[Tensor, list, ndarray]): Input data.

        Returns:
            ndarray, data with `np.ndarray` type.

        Raises:
            TypeError: If `data` is not a tensor, list or numpy array.
        """
        if isinstance(data, Tensor):
            # detach() first: .numpy() raises a RuntimeError on tensors that
            # require grad; a metric value never needs the autograd graph.
            data = data.detach().cpu().numpy()
        elif isinstance(data, list):
            data = np.array(data)
        elif not isinstance(data, np.ndarray):
            raise TypeError(f"For 'Metric' and its derived classes, the input data type must be tensor, list or "
                            f"numpy.ndarray, but got {type(data)}.")
        return data

    def __call__(self, *inputs):
        """
        Evaluate input data once.

        Args:
            inputs (tuple): The first item is a predict array, the second item
                is a target array.

        Returns:
            Float, compute result.
        """
        self.update(*inputs)
        return self.eval()

    @abstractmethod
    def clear(self):
        """
        An interface describes the behavior of clearing the internal evaluation result.
        """
        raise NotImplementedError('Must define clear function to use this base class')

    @abstractmethod
    def eval(self):
        """
        An interface describes the behavior of computing the evaluation result.
        """
        raise NotImplementedError('Must define eval function to use this base class')

    @abstractmethod
    def update(self, *inputs):
        """
        An interface describes the behavior of updating the internal evaluation result.

        Args:
            inputs: A variable-length input argument list, usually the logits
                and the corresponding labels.
        """
        raise NotImplementedError('Must define update function to use this base class')

    def is_better(self, new_value) -> bool:
        """Return True if `new_value` improves on `self.best_value` under `self.mode`."""
        if new_value is None:
            return False
        if self.mode == "min":
            return new_value < self.best_value
        return new_value > self.best_value

    def update_best(self) -> bool:
        """Evaluate the metric and record it if it is the best value so far."""
        new_value = self.eval()
        if self.is_better(new_value):
            self.best_value = new_value
            return True
        return False

    def _select_output(self, outputs, labels):
        """Select the output and label at `self.index` when given as tuples."""
        if isinstance(outputs, tuple):
            outputs = outputs[self.index]
        if isinstance(labels, tuple):
            labels = labels[self.index]
        return outputs, labels

    def _calc_error(self, predict, label):
        """Calculate the error between prediction and label; subclasses override."""
        raise NotImplementedError
    
class Error(Metric):
    r"""Base metric to accumulate an error between predictions and labels.

    Subclasses implement `_calc_error` (e.g. absolute or squared error); this
    class handles aggregation over extra dimensions, atom masking, reduction,
    and the running sum that `eval` normalizes.

    Args:

        index (int): Index of the output for which error to be calculated. Default: 0

        per_atom (bool): Calculate the error per atom. Default: False

        reduction (str): The way to reduce the shape of the output tensor from `(B, Y)` to `(B, 1)`.
            The optional values are "mean", "sum", and "none". Default: "mean".

        aggregate (str): The way of aggregating the extra dimensions of the output tensor,
            i.e. from `(B, A, ...)` to `(B, A)`. The optional values are "mean", "sum", and "none".
            Default: "mean".

    """
    def __init__(self,
                 index=0,
                 per_atom=False,
                 reduction='mean',
                 aggregate='mean',
                 **kwargs
                 ):

        # NOTE(review): `name` and `mode` are fixed here; extra keyword
        # arguments passed by subclasses (e.g. name=...) land in **kwargs
        # and are only stored in `self._kwargs`, never interpreted.
        super().__init__(
                 name='Error',
                 mode="min",
                 index=index,
                 per_atom=per_atom,
                 reduction=reduction,
                 aggregate=aggregate,
                 )

        self._kwargs = kwargs

        if not isinstance(index, int):
            raise TypeError(f'The type of index should be int but got: {type(index)}')

        # Running sum of batch errors and the matching normalization weight.
        self._error_sum = 0
        self._samples_num = 0

        self.clear()
        self.best_value = float('inf') if self.mode == "min" else float('-inf')

    def clear(self) -> None:
        """Reset the accumulated error sum and sample count."""
        self._error_sum = 0
        self._samples_num = 0

    def update(self,
               loss: Tensor,
               predicts: Tuple[Tensor],
               labels: Tuple[Tensor],
               atom_mask: Tensor,
               ):
        """Accumulate the error for one batch.

        Args:
            loss: Batch loss (unused; kept for a uniform metric interface).
            predicts: Predicted outputs; `self.index` selects one when a tuple.
            labels: Ground-truth labels; `self.index` selects one when a tuple.
            atom_mask: Mask of valid atoms — assumed shape (B, A) or (1, A),
                TODO confirm against caller — or None when no masking applies.
        """
        #pylint: disable=unused-argument

        predict, label = self._select_output(predicts, labels)
        error = self._calc_error(predict, label)
        batch_size = error.shape[0]

        if len(error.shape) > 2 and self.aggregate is not None:
            axis = tuple(range(2, len(error.shape)))
            # (B, A) <- (B, A, ...)
            if self.aggregate == 'mean':
                error = torch.mean(error, axis=axis)
            else:
                error = torch.sum(error, axis=axis)

        # Count valid atoms so per-atom errors can be normalized correctly.
        num_atoms = 1
        total_num = batch_size
        if atom_mask is not None:
            # pylint: disable=unexpected-keyword-arg
            # (B, 1) <- (B, A) OR (1, 1) <- (1, A)
            num_atoms = torch.count_nonzero(atom_mask, -1).unsqueeze(-1)
            total_num = torch.sum(num_atoms).item()
            if num_atoms.shape[0] == 1:
                # A single shared mask row applies to every sample in the batch.
                total_num = total_num * batch_size

        # The error is "atomic" when its second axis matches the atom axis;
        # padded (masked-out) atoms are then zeroed before summing.
        atomic = False
        if atom_mask is not None and error.shape[1] == atom_mask.shape[1]:
            atomic = True
            atom_mask_ = atom_mask
            if error.ndim != atom_mask.ndim:
                # (B, A, ...) <- (B, A)
                newshape = atom_mask.shape + (1,) * (error.ndim - atom_mask.ndim)
                atom_mask_ = torch.reshape(atom_mask, newshape)
            # (B, A) * (B, A)
            error *= atom_mask_

        weight = batch_size
        if self.reduction is not None:
            error_shape1 = error.shape[1]
            # (B,) <- (B, ...)
            axis = tuple(range(1, len(error.shape)))
            error = torch.sum(error, dim=axis)
            if self.reduction == 'mean':
                weight = batch_size * error_shape1
                if atomic or self.per_atom:
                    # Normalize by the number of real atoms instead of
                    # batch_size * width when the error is per-atom.
                    weight = total_num

        self._error_sum += self._convert_data(torch.sum(error, axis=0))
        self._samples_num += weight

    def eval(self) -> float:
        """Return the accumulated error normalized by the accumulated weight."""
        if self._samples_num == 0:
            raise RuntimeError('Total samples num must not be 0.')
        return self._error_sum / self._samples_num

    def _calc_error(self, predict: Tensor, label: Tensor) -> Tensor:
        """Calculate the elementwise error; must be overridden by subclasses."""
        raise NotImplementedError
    
class MAE(Error):
    """Mean Absolute Error (MAE) metric.

    Accumulates the elementwise absolute error |label - predict| of the
    selected output; `eval` returns the accumulated mean.
    """
    def __init__(self,
                 index=0,
                 per_atom=False,
                 reduction='mean',
                 aggregate='mean',
                 **kwargs):
        # Bug fix: the previous call passed "mae" positionally, which landed
        # in `Error.__init__`'s first parameter (`index`) and collided with
        # the explicit `index=` keyword, raising
        # "TypeError: got multiple values for argument 'index'".
        super().__init__(
            index=index,
            per_atom=per_atom,
            reduction=reduction,
            aggregate=aggregate)
        # `Error.__init__` hard-codes its own name, so set the real one here.
        self.name = 'mae'
        self._kwargs = get_arguments(locals(), kwargs)

    def _calc_error(self, predict: Tensor, label: Tensor) -> Tensor:
        """Elementwise absolute error; label is reshaped to match predict."""
        return torch.abs(label.reshape(predict.shape) - predict)

class MSE(Error):
    """Mean Squared Error (MSE) metric.

    Accumulates the elementwise squared error (label - predict)**2 of the
    selected output; `eval` returns the accumulated mean.
    """
    def __init__(self,
                 index=0,
                 per_atom=False,
                 reduction='mean',
                 aggregate='mean',
                 **kwargs):
        # Bug fix: the previous call passed "mse" positionally, which landed
        # in `Error.__init__`'s first parameter (`index`) and collided with
        # the explicit `index=` keyword, raising
        # "TypeError: got multiple values for argument 'index'".
        super().__init__(
            index=index,
            per_atom=per_atom,
            reduction=reduction,
            aggregate=aggregate)
        # `Error.__init__` hard-codes its own name, so set the real one here.
        self.name = 'mse'
        self._kwargs = get_arguments(locals(), kwargs)

    def _calc_error(self, predict: Tensor, label: Tensor) -> Tensor:
        """Elementwise squared error; label is reshaped to match predict."""
        return torch.square(label.reshape(predict.shape) - predict)

class RMSE(Error):
    """Root Mean Squared Error (RMSE) metric.

    Accumulates squared errors like :class:`MSE`; `eval` returns the square
    root of the accumulated mean.
    """
    def __init__(self,
                 index=0,
                 per_atom=False,
                 reduction='mean',
                 aggregate='mean',
                 **kwargs):
        # Fix: `name=`/`mode=` were previously swallowed by `Error.__init__`'s
        # **kwargs and ignored, so the metric was reported as 'Error'.
        super().__init__(
            index=index,
            per_atom=per_atom,
            reduction=reduction,
            aggregate=aggregate)
        # `Error.__init__` hard-codes its own name, so set the real one here.
        self.name = 'rmse'
        self._kwargs = get_arguments(locals(), kwargs)

    def eval(self) -> float:
        """Return the square root of the accumulated mean squared error."""
        if self._samples_num == 0:
            raise RuntimeError('Total samples num must not be 0.')
        return np.sqrt(self._error_sum / self._samples_num)

    def _calc_error(self, predict: Tensor, label: Tensor) -> Tensor:
        """Elementwise squared error; label is reshaped to match predict."""
        return torch.square(label.reshape(predict.shape) - predict)

class Loss(Metric):
    r"""Metric that averages the loss value over all `update` calls.

    NOTE(review): a second ``Loss`` class is defined later in this module and
    shadows this one, so this definition is effectively dead code.

    Args:

        indexes (int):            Index for loss function. Default: 0

    """
    def __init__(self, **kwargs):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.clear()

    def clear(self):
        """Reset the accumulated loss and the update counter."""
        self._sum_loss = 0
        self._total_num = 0

    def update(self,
               loss: Tensor,
               predicts: Tuple[Tensor],
               labels: Tuple[Tensor],
               num_atoms: Tensor,
               *args):
        #pylint: disable=unused-argument
        """Accumulate the mean of one batch loss.

        Only `loss` is used; the remaining arguments keep the metric
        interface uniform.
        """

        value = loss
        if value.ndim == 0:
            # Promote a scalar loss to a length-one vector.
            value = value.reshape(1)

        if value.ndim != 1:
            raise ValueError(
                "Dimensions of loss must be 1, but got {}".format(value.ndim))

        self._sum_loss += self._convert_data(value.mean(-1))
        self._total_num += 1

    def eval(self):
        """Return the mean loss over all accumulated updates."""
        if not self._total_num:
            raise RuntimeError('Total number can not be 0.')
        return self._sum_loss / self._total_num
    
class MNE(Error):
    r"""Metric to calculate the mean norm error.

    The per-sample error is the Euclidean norm of (label - predict) taken
    along the last axis, e.g. for force vectors.

    Args:

        index (int): Index of the output for which error to be calculated. Default: 0

        per_atom (bool): Calculate the error per atom. Default: False

        reduction (str): The way to reduce the shape of the output tensor from `(B, Y)` to `(B, 1)`.
            The optional values are "mean", "sum", and "none". Default: "mean".

        aggregate (str): The way of aggregating the extra dimensions of the output tensor,
            i.e. from `(B, A, ...)` to `(B, A)`. The optional values are "mean", "sum", and "none".
            Default: "mean".

    """
    def __init__(self,
                 index: int = 0,
                 per_atom: bool = False,
                 reduction: str = 'mean',
                 aggregate: str = 'mean',
                 **kwargs
                 ):

        # Fix: `name=`/`mode=` were previously swallowed by `Error.__init__`'s
        # **kwargs and ignored, so the metric was reported as 'Error'.
        super().__init__(
            index=index,
            per_atom=per_atom,
            reduction=reduction,
            aggregate=aggregate,
        )
        # `Error.__init__` hard-codes its own name, so set the real one here.
        self.name = 'mne'
        self._kwargs = get_arguments(locals(), kwargs)

    def _calc_error(self, predict: Tensor, label: Tensor) -> Tensor:
        """Norm of (label - predict) over the last axis: (...,) <- (..., D)."""
        diff = label.reshape(predict.shape) - predict
        # Bug fix: `torch.norm` has no `axis` keyword (the previous call
        # raised TypeError at runtime); the dimension argument is `dim`.
        return torch.norm(diff, dim=-1)
    
class MaxError(Metric):
    r"""Metric to track the maximum spread of the error.

    For each batch the tracked value is ``diff.max() - diff.min()`` of the
    difference between label and prediction (peak-to-peak error); `eval`
    returns the largest spread seen since the last `clear`.

    Args:

        index (int): Index of the output for which the error is calculated.
            Default: 0

    """
    def __init__(self, index: int = 0, **kwargs):
        # Bug fix: `index` was stored only in `self._indexes` and never
        # forwarded to `Metric.__init__`, so `_select_output` always used
        # output 0 regardless of the requested index.
        super().__init__(name='max_error', mode='min', index=index)
        self._kwargs = get_arguments(locals(), kwargs)

        self.clear()
        # Kept for backward compatibility with any code reading `_indexes`.
        self._indexes = get_integer(index)

    def clear(self):
        """Reset the tracked maximum error."""
        self._max_error = 0

    def update(self,
               loss: Tensor,
               predicts: Tuple[Tensor],
               labels: Tuple[Tensor],
               num_atoms: Tensor,
               *args):
        """Update the maximum error with one batch.

        Only `predicts` and `labels` are used; the remaining arguments keep
        the metric interface uniform.
        """
        #pylint: disable=unused-argument

        predict, label = self._select_output(predicts, labels)
        diff = label.reshape(predict.shape) - predict
        # Peak-to-peak spread of the error within this batch.
        max_error = diff.max() - diff.min()
        if max_error > self._max_error:
            self._max_error = self._convert_data(max_error)

    def eval(self):
        """Return the largest error spread seen so far."""
        return self._max_error

class Loss(Metric):
    """Loss value metric: reports the running mean of the loss."""
    def __init__(self):
        super().__init__("loss", mode="min", index=None,
                        per_atom=False, reduction=None, aggregate=None)
        self.clear()

    def clear(self):
        """Reset the accumulated loss, the sample counter and the best value."""
        self.loss_sum = 0
        self.samples_num = 0
        self.best_value = float('inf')

    def eval(self):
        """Return the mean loss, or 0 when nothing has been accumulated."""
        if self.samples_num > 0:
            return self.loss_sum / self.samples_num
        return 0

    def update(self, outputs, labels=None, atom_mask=None):
        """Accumulate one loss value; `outputs` is the loss itself."""
        #pylint: disable=unused-argument
        value = outputs
        # A tensor loss is converted to a plain Python number first.
        if isinstance(value, torch.Tensor):
            value = value.item()

        self.loss_sum += value
        self.samples_num += 1

class BinaryAccuracy(Metric):
    r"""Metric to calculate the binary accuracy.

    Predictions (optionally passed through a sigmoid) are thresholded and
    compared against the labels; `eval` returns the fraction of correct
    predictions.

    Args:
        index (int): Index of the output for which accuracy to be calculated. Default: 0
        per_atom (bool): Calculate the accuracy per atom. Default: False
        reduction (str): The way to reduce the shape of the output tensor from `(B, Y)` to `(B, 1)`.
            The optional values are "mean", "sum", and "none". Default: "mean".
        aggregate (str): The way of aggregating the extra dimensions of the output tensor,
            i.e. from `(B, A, ...)` to `(B, A)`. The optional values are "mean", "sum", and "none".
            Default: "mean".
        threshold (float): Threshold for binary classification. Predictions greater than or equal to this
            threshold are considered positive. Default: 0.5
        use_sigmoid (bool): Whether to apply a sigmoid to the predictions
            before thresholding. Default: True
    """
    def __init__(self,
                 index=0,
                 per_atom=False,
                 reduction='mean',
                 aggregate='mean',
                 threshold=0.5,
                 use_sigmoid=True,
                 **kwargs):
        super().__init__(
            name='binary_accuracy',
            mode="max",  # Higher accuracy is better
            index=index,
            per_atom=per_atom,
            reduction=reduction,
            aggregate=aggregate,
        )

        self._kwargs = get_arguments(locals(), kwargs)

        # Running count of correct predictions and of weighted samples.
        self._correct_count = 0
        self._total_count = 0
        self.threshold = threshold
        self.use_sigmoid = use_sigmoid

    def clear(self):
        """Clear the internal evaluation result"""
        self._correct_count = 0
        self._total_count = 0

    def update(self,
               loss: Tensor,
               predicts: Tuple[Tensor],
               labels: Tuple[Tensor],
               atom_mask: Tensor):
        """
        Update the internal evaluation result.

        Args:
            loss: Loss tensor (not used for accuracy calculation)
            predicts: Predicted values
            labels: Ground truth labels
            atom_mask: Mask indicating valid atoms.
                NOTE(review): unlike `Error.update`, the mask (and `per_atom`)
                is currently not applied here — confirm this is intended.
        """
        #pylint: disable=unused-argument

        predict, label = self._select_output(predicts, labels)

        if self.use_sigmoid:
            # Map raw logits to probabilities before thresholding.
            predict = torch.sigmoid(predict)

        # Convert data to numpy arrays; the rest of the update runs in numpy.
        predict = self._convert_data(predict)
        label = self._convert_data(label)

        # (B, 1): per-sample fraction of correct binary predictions.
        correct = self._calculate_correct(predict, label)
        batch_size = correct.shape[0]

        # Handle reduction: (B,) <- (B, ...), weighted like `Error.update`.
        weight = batch_size
        if self.reduction is not None:
            correct_shape1 = correct.shape[1]
            axis = tuple(range(1, len(correct.shape)))
            correct = np.sum(correct, axis=axis)
            if self.reduction == 'mean':
                weight = batch_size * correct_shape1

        # Update running counts.
        self._correct_count += np.sum(correct, axis=0)
        self._total_count += weight

    def eval(self) -> float:
        """
        Compute the final accuracy result.

        Returns:
            float: The calculated accuracy value.

        Raises:
            RuntimeError: If no samples have been accumulated.
        """
        if self._total_count == 0:
            raise RuntimeError('Total samples num must not be 0.')

        return self._correct_count / self._total_count

    def _calculate_correct(self, predict: ndarray, label: ndarray) -> ndarray:
        """Per-sample accuracy for binary classification: (B, 1) <- (B, Y)."""
        # Bug fix: reshape the label to the prediction's shape, as the other
        # metrics do. Relying on implicit broadcasting could silently turn a
        # (B, 1) vs (B,) comparison into a (B, B) array of bogus results.
        label = label.reshape(predict.shape).astype(np.float32)
        # Apply threshold to get binary predictions.
        binary_predictions = (predict >= self.threshold).astype(np.float32)
        # 1.0 for correct, 0.0 for incorrect.
        correct = (binary_predictions == label).astype(np.float32)

        return np.mean(correct, axis=1, keepdims=True)