# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Basic functions
"""

from collections import OrderedDict
from typing import Optional, List, Tuple, Dict, Any, Union

import torch
from torch.nn import Parameter
from torch import nn, Tensor

from .utils import get_integer, get_arguments, get_initializer, get_tensor, GLOBAL_DEVICE
from .layer import Dense, Residual
from .cutoff import SmoothCutoff

__all__ = [
    "GraphNorm",
    "Aggregate",
    "SmoothReciprocal",
    "SoftmaxWithMask",
    "PositionalEmbedding",
    "MultiheadAttention",
    "FeedForward",
    "Pondering",
    "ACTWeight",
]

class GraphNorm(nn.Module):
    r"""Normalization over a chosen axis of graph node features.

    Computes ``y = gamma * (x - alpha * mu) / sigma + beta`` where ``mu``
    and ``sigma`` are the mean and the alpha-corrected standard deviation
    taken along `axis`.

    Args:
        dim_features: Number of feature channels per node.
        axis: Axis over which the statistics are computed. Default: -2.
        alpha_init: Initializer name for the learnable shift factor alpha.
        beta_init: Initializer name for the learnable offset beta.
        gamma_init: Initializer name for the learnable scale gamma.
    """
    def __init__(self, 
                 dim_features: int, 
                 axis: int = -2, 
                 alpha_init: str = 'one', 
                 beta_init: str = 'zero', 
                 gamma_init: str = 'one',
                 **kwargs,
                 ):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.dim_feature = get_integer(dim_features)
        self.device = GLOBAL_DEVICE()
        self.axis = get_integer(axis)

        # The three affine parameters share shape, dtype and device.
        def _make_param(init_name: str) -> Parameter:
            data = get_initializer(init_name, self.dim_feature,
                                   dtype=torch.float32, device=self.device)
            return Parameter(data, requires_grad=True)

        self.alpha = _make_param(alpha_init)
        self.beta = _make_param(beta_init)
        self.gamma = _make_param(gamma_init)

    def forward(self, nodes: Tensor) -> Tensor:
        """Normalize node features along the configured axis.

        Args:
            nodes: Node features of shape (batch_size, num_nodes, dim_features).

        Returns:
            Tensor of the same shape with normalized features.
        """
        alpha = self.alpha
        mean = nodes.mean(dim=self.axis, keepdim=True)
        mean_sq = (nodes ** 2).mean(dim=self.axis, keepdim=True)
        # Alpha-corrected variance; the epsilon guards sqrt at zero variance.
        var = mean_sq + (alpha * alpha - 2 * alpha) * mean * mean
        std = torch.sqrt(var + 1e-12)
        return self.gamma * (nodes - alpha * mean) / std + self.beta

class Aggregate(nn.Module):
    """Aggregation layer for summing or averaging over a specified axis.

    Args:
        axis: Axis of the *input* tensor along which to aggregate. Default: -2.
        mean: If True, return the (masked) mean instead of the sum.
    """
    def __init__(self,
                 axis: int = -2,
                 mean: bool = False,
                 **kwargs,
                 ):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.axis = get_integer(axis)
        self.mean = mean

    def forward(self, inputs: Tensor, mask: Tensor = None) -> Tensor:
        """To aggregate the representation of each nodes.

        Args:
            inputs (Tensor):    Tensor with shape (B, A, N, F). Data type is float.
            mask (Tensor):      Tensor with shape (B, A, N). Data type is bool.

        Returns:
            y (Tensor):         Tensor with shape (B, A, F). Data type is float.
        """
        if mask is not None:
            # Expand the mask to the inputs' rank so that the same `axis`
            # refers to the same dimension in both tensors.
            mask = mask.unsqueeze(-1)
            inputs = inputs * mask
        y = inputs.sum(dim=self.axis)
        if self.mean:
            if mask is not None:
                # BUGFIX: previously the rank-3 mask was reduced with the
                # axis defined for the rank-4 inputs (axis=-2 summed over A
                # instead of N), yielding a count that could not broadcast
                # against y. Reduce the expanded mask instead.
                num = mask.sum(dim=self.axis)
                # Avoid division by zero for fully masked rows.
                num = torch.max(num, torch.ones_like(num))
            else:
                num = inputs.size(self.axis)
            y = y / num
        return y

class SmoothReciprocal(nn.Module):
    """Smooth reciprocal function with cutoff for distance-based interactions.

    Blends a softened 1/r (finite at r = 0) with the exact 1/r using the
    cutoff function as the switching weight.

    Args:
        dmax: Maximum distance passed to the cutoff.
        cutoff_network: Optional cutoff network *class*; instantiated with
            `dmax`. Defaults to ``SmoothCutoff``.
    """
    def __init__(self, 
                 dmax: float, 
                 cutoff_network: Optional[nn.Module] = None,
                 **kwargs,
                 ):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        # Fall back to SmoothCutoff when no cutoff class is supplied.
        if cutoff_network is None:
            self.cutoff_network = SmoothCutoff(dmax)
        else:
            self.cutoff_network = cutoff_network(dmax)

    def forward(self, rij: Tensor, mask: Tensor) -> Tensor:
        """calculate smooth reciprocal of Tensor

        Args:
            rij (Tensor):   Tensor with shape (..., X, ...). Data type is float.
            mask (Tensor):  Tensor with shape (..., X, ...). Data type is bool.

        Returns:
            output (Tensor):    Tensor with shape (..., X, ...). Data type is float.

        """
        phi2rij, _ = self.cutoff_network(rij * 2, mask)
        # Near regime: softened reciprocal, finite at rij = 0.
        near = phi2rij / torch.sqrt(rij * rij + 1.0)
        # Far regime: exact 1/rij, guarded against division by zero.
        far = torch.where(rij > 0, (1.0 - phi2rij) / rij, 0)
        out = near + far
        if mask is not None:
            out = out * mask
        return out

class SoftmaxWithMask(nn.Module):
    """Softmax that assigns zero probability to masked-out positions.

    Args:
        axis: Axis along which the softmax is taken. Default: -1.
    """
    def __init__(self, axis: int = -1, **kwargs):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.axis = axis
        self.softmax = nn.Softmax(dim=axis)

    def forward(self, x: Tensor, mask: Tensor) -> Tensor:
        """Apply a masked softmax to `x`.

        Args:
            x (Tensor): Input logits of shape (..., X, ...).
            mask (Tensor): Boolean mask of shape (..., X, ...); False marks
                positions to exclude.

        Returns:
            output (Tensor): Probabilities of shape (..., X, ...).
        """
        # -inf logits vanish under softmax, so masked slots get weight 0.
        return self.softmax(x.masked_fill(~mask, float('-inf')))

class PositionalEmbedding(nn.Module):
    """Positional embedding layer for graph neural networks.

    Combines node features with pairwise positional encodings, normalizes
    them, and projects the result into query/key/value tensors.

    Args:
        dim_features: Dimensionality of input features.
        use_public_layer_norm: If True, the x and g paths share one
            LayerNorm; otherwise each path gets its own.
    """
    def __init__(self,
                 dim_features: int,
                 use_public_layer_norm: bool = True,
                 **kwargs,
                 ):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.dim = get_integer(dim_features)
        self.device = GLOBAL_DEVICE()

        if use_public_layer_norm:
            # One LayerNorm instance shared by both paths.
            self.norm = nn.LayerNorm((self.dim,), device=self.device)
            self.x_norm = self.norm
            self.g_norm = self.norm
        else:
            self.x_norm = nn.LayerNorm((self.dim,), device=self.device)
            self.g_norm = nn.LayerNorm((self.dim,), device=self.device)

        self.x2q = Dense(self.dim, self.dim, has_bias=False)
        self.x2k = Dense(self.dim, self.dim, has_bias=False)
        self.x2v = Dense(self.dim, self.dim, has_bias=False)

    def forward(self, 
                x_i: Tensor,
                g_ij: Tensor,
                t: float = 0,
                neigh_idx: Optional[Tensor] = None
                ) -> Tuple[Tensor, Tensor, Tensor]:
        """Compute positional embeddings for attention mechanisms.

        Args:
            x_i: Node feature tensor.
            g_ij: Pairwise positional encoding tensor.
            t: Optional scalar offset added before normalization. Default: 0.
            neigh_idx: Optional neighbor indices for sparse graphs; when
                None the dense all-pairs layout of ``g_ij`` is assumed.

        Returns:
            Tuple of query, key, value tensors.
        """
        if neigh_idx is None:
            # Dense layout: g_ij holds encodings for every node pair; the
            # self-term g_i is taken from the (0, 0) pair entry.
            g_i = g_ij[..., 0, 0, :].unsqueeze(-2)
            xgij = x_i.unsqueeze(1) * g_ij
        else:
            # Sparse layout: gather each node's neighbor features through
            # neigh_idx; the self-term is the first neighbor slot.
            g_i = g_ij[:, :, 0]
            xgij = torch.take_along_dim(x_i.unsqueeze(1), neigh_idx[..., None], 2) * g_ij

        xgii = (x_i * g_i).unsqueeze(-2)
        xgii = self.x_norm(xgii + t)
        xgij = self.g_norm(xgij + t)

        return self.x2q(xgii), self.x2k(xgij), self.x2v(xgij)
    
class MultiheadAttention(nn.Module):
    """Multi-head scaled dot-product attention for graph neural networks.

    The query attends over the neighbor axis of the key/value tensors;
    the result is projected by a bias-free output layer.

    Args:
        dim_features: Dimensionality of input features. Assumed divisible
            by ``n_heads``.
        n_heads: Number of attention heads. Default: 8.
    """
    def __init__(self,
                 dim_features: int,
                 n_heads: int = 8,
                 **kwargs,
                 ):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.device = GLOBAL_DEVICE()
        self.n_heads = get_integer(n_heads)
        self.dim_feature = get_integer(dim_features)

        # Per-head width and the 1/sqrt(d_head) score scaling factor.
        self.size_per_head = self.dim_feature // self.n_heads
        self.scores_mul = torch.rsqrt(get_tensor(self.size_per_head, dtype=torch.float32, device=self.device))
        
        self.output = Dense(self.dim_feature, self.dim_feature, has_bias=False)

    def _reshape_qkv(self,
                     x: Tensor,
                     batch_size: int,
                     seq_len: int,
                     nb_len: int
                     ) -> Tensor:
        # Split the feature axis into heads:
        # (B, A, n, F) -> (B, A, h, n, F/h)
        return x.view(batch_size, seq_len, nb_len, self.n_heads, self.size_per_head).transpose(2, 3)

    def _compute_attention(self, 
                           q: Tensor,
                           k: Tensor,
                           v: Tensor, 
                           mask: Optional[Tensor] = None,
                           cutoff: Optional[Tensor] = None
                           ) -> Tensor:
        # Scaled dot-product attention scores over the neighbor axis.
        attention_scores = torch.matmul(q, k.transpose(-2, -1)) * self.scores_mul
        # Masked positions get -inf so they vanish under softmax.
        if mask is not None:
            attention_scores = attention_scores.masked_fill(~mask.unsqueeze(-2).unsqueeze(-2), float('-inf'))
        
        attention_weights = torch.softmax(attention_scores, dim=-1)
        
        # Apply the cutoff after the softmax to preserve its numerics.
        if cutoff is not None:
            attention_weights = attention_weights * cutoff.unsqueeze(-2).unsqueeze(-2)
        return torch.matmul(attention_weights, v)

    def forward(self, 
                query: Tensor,
                key: Tensor,
                value: Tensor,
                mask: Optional[Tensor] = None,
                cutoff: Optional[Tensor] = None
                ) -> Tensor:
        """
        Args:
            query: Query tensor, shape (B, A, 1, F).
            key: Key tensor, shape (B, A, n, F).
            value: Value tensor, shape (B, A, n, F).
            mask: Optional boolean attention mask over the neighbor axis.
            cutoff: Optional cutoff weights over the neighbor axis.

        Returns:
            Output tensor after attention and projection, shape (B, A, 1, F).
        """
        batch_size, seq_len, nb, __ = key.shape
        
        if self.n_heads > 1:
            # Split q, k, v into heads.
            q = self._reshape_qkv(query, batch_size, seq_len, 1)
            k = self._reshape_qkv(key, batch_size, seq_len, nb)
            v = self._reshape_qkv(value, batch_size, seq_len, nb)
            
            context = self._compute_attention(q, k, v, mask, cutoff)
            # Merge heads back: (B, A, h, 1, F/h) -> (B, A, 1, F)
            context = context.transpose(-2, -3).contiguous().view(batch_size, seq_len, 1, self.dim_feature)
        else:
            # Single-head path.
            # BUGFIX: these tensors are 4-D, but torch.bmm only accepts 3-D
            # inputs and raised a RuntimeError here; torch.matmul batches
            # over all leading dimensions.
            attention_scores = torch.matmul(query, key.transpose(-2, -1)) * self.scores_mul
            
            if mask is not None:
                attention_scores = attention_scores.masked_fill(~mask.unsqueeze(-2), float('-inf'))
            
            attention_weights = torch.softmax(attention_scores, dim=-1)
            
            if cutoff is not None:
                attention_weights = attention_weights * cutoff.unsqueeze(-2)
            
            context = torch.matmul(attention_weights, value)
        
        return self.output(context)

class FeedForward(nn.Module):
    """Position-wise feed-forward block built on a residual MLP.

    Args:
        dim_features: Width of the input/output features.
        activation: Activation used inside the residual block. Default: 'relu'.
        n_hidden: Number of hidden layers in the residual block. Default: 1.
    """
    def __init__(self,
                 dim_features: int,
                 activation: str = 'relu',
                 n_hidden: int = 1,
                 **kwargs,
                 ):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.device = GLOBAL_DEVICE()
        self.dim = get_integer(dim_features)
        # The residual block performs the whole transform.
        self.residual = Residual(self.dim, activation=activation,
                                 n_hidden=get_integer(n_hidden),
                                 device=self.device)

    def forward(self, x: Tensor) -> Tensor:
        """Apply the residual feed-forward transform to `x`."""
        return self.residual(x)

class Pondering(nn.Module):
    """Halting-probability network for adaptive computation time.

    Maps features to a scalar in (0, 1) per node via a sigmoid-terminated
    stack of Dense layers.

    Args:
        in_features: Input feature dimensionality.
        n_hidden: Number of hidden Dense layers. Default: 0.
        bias_const: Initial bias constant for the output layer.
            NOTE(review): captured into kwargs but not applied here — the
            output bias initializer is hard-coded to 'one'; confirm intent.
    """
    def __init__(self,
                 in_features: int,
                 n_hidden: int = 0,
                 bias_const: float = 1.,
                 **kwargs,
                 ):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.device = GLOBAL_DEVICE()
        self.dim = in_features

        if n_hidden < 0:
            raise ValueError("n_hidden cannot be negative!")

        # Sigmoid output head shared by both configurations.
        if n_hidden == 0:
            self.dense = Dense(self.dim, 1, has_bias=True, weight_init='xavier_uniform', bias_init='one', activation='sigmoid', device=self.device)
        else:
            layers = OrderedDict()
            for idx in range(n_hidden):
                layers['Dense{}'.format(idx)] = Dense(self.dim, self.dim, activation='relu', device=self.device)
            layers['Dense{}'.format(n_hidden)] = Dense(self.dim, 1, has_bias=True, weight_init='xavier_uniform', bias_init='one', activation='sigmoid', device=self.device)
            self.dense = nn.Sequential(layers)

    def forward(self, x: Tensor) -> Tensor:
        """Return the halting probability with the trailing singleton dim removed."""
        return self.dense(x).squeeze(-1)

class ACTWeight(nn.Module):
    """Adaptive Computation Time (ACT) halting-weight module.

    Args:
        threshold: Halting threshold on the accumulated probability.
            Default: 0.9.
    """
    def __init__(self, threshold: float = 0.9, **kwargs,):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)
        
        self.threshold = threshold

    def forward(self, prob: Tensor, halting_prob: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        """Calculate adaptive computation time weights.

        BUGFIX: the return annotation previously said ``Tensor`` although a
        3-tuple is returned.

        Args:
            prob (Tensor):          Halting probability of the current step,
                                    shape (B, A, 1). Data type is float.
            halting_prob (Tensor):  Accumulated halting probability,
                                    shape (B, A, 1). Data type is float.

        Returns:
            w (Tensor):     Update weights; note this is ``prob``'s shape
                            with one extra trailing dimension added by the
                            final unsqueeze (e.g. (B, A, 1, 1)).
            dp (Tensor):    Halting-probability increment (including the ACT
                            remainder for newly halted samples), same shape
                            as ``prob``.
            dn (Tensor):    Step-count increment (1 for running samples,
                            0 otherwise), same shape as ``prob``.

        """
        # Samples still running: accumulated probability has not reached 1.
        running = (halting_prob < 1.0).to(prob.dtype)

        add_prob = prob * running
        new_prob = halting_prob + add_prob

        # Partition running samples into "keep running" vs "halt now".
        mask_run = (new_prob <= self.threshold).to(prob.dtype)
        mask_halt = (new_prob > self.threshold).to(prob.dtype)

        still_running = mask_run * running
        running_prob = halting_prob + prob * still_running
        new_halted = mask_halt * running
        # ACT remainder term assigned to newly halted samples.
        remainders = new_halted * (1.0 - running_prob)

        dp = add_prob + remainders
        dn = running
        update_weights = prob * still_running + new_halted * remainders
        w = update_weights.unsqueeze(-1)

        return w, dp, dn