# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Interaction layers
"""

from typing import Union, Tuple, Optional, List

import torch
from torch import nn, Tensor

from ..utils import get_integer, get_tensor, get_arguments
from ..base import PositionalEmbedding, MultiheadAttention, FeedForward, Pondering, ACTWeight
from .interaction import Interaction, _interaction_register

@_interaction_register('niu')
class NeuralInteractionUnit(Interaction):
    """
    Neural Interaction Unit (NIU) for MolCT.

    Args:
        dim_feature (int): Dimension of the feature vectors.
        n_heads (int, optional): Number of attention heads. Must evenly divide
            ``dim_feature``. Defaults to 8.
        max_cycles (int, optional): Maximum number of computation cycles for
            adaptive computation time (ACT). ACT is enabled when this value is
            greater than 1. Defaults to 10.
        activation (str, optional): Activation function name. Defaults to 'silu'.
        fixed_cycles (bool, optional): Whether to always run ``max_cycles``
            cycles instead of halting early via ACT. Defaults to False.
        use_feed_forward (bool, optional): Whether to apply a feed-forward
            network after attention. Defaults to False.
        act_threshold (float, optional): Threshold for the cumulative ACT
            halting probability. Defaults to 0.9.

    Raises:
        ValueError: If ``dim_feature`` is not divisible by ``n_heads``.
    """
    def __init__(self,
                 dim_feature: int,
                 n_heads: int = 8,
                 max_cycles: int = 10,
                 activation: str = 'silu',
                 fixed_cycles: bool = False,
                 use_feed_forward: bool = False,
                 act_threshold: float = 0.9,
                 **kwargs
                 ):
        super().__init__(
            dim_node_rep=dim_feature,
            dim_edge_rep=dim_feature,
            dim_node_emb=dim_feature,
            dim_edge_emb=dim_feature,
            activation=activation
        )
        self._kwargs = get_arguments(locals(), kwargs)

        self.n_heads = get_integer(n_heads)
        self.dim_feature = get_integer(dim_feature)

        # Multi-head attention splits the feature dimension evenly across heads.
        if self.dim_feature % self.n_heads != 0:
            raise ValueError(
                'The term "dim_feature" must be divisible by the term "n_heads" in NeuralInteractionUnit!'
                )

        self.max_cycles = get_integer(max_cycles)

        self.fixed_cycles = fixed_cycles

        if self.fixed_cycles:
            # Fixed-cycle mode uses a zero time signal for every cycle.
            self.time_embedding = [0 for _ in range(self.max_cycles)]
        else:
            # Sinusoidal time signal, one row per cycle: shape [max_cycles, dim_feature].
            # NOTE(review): this is a plain attribute, not a registered buffer,
            # so it will NOT follow the module across `.to(device)` — confirm
            # whether that is intended.
            self.time_embedding = self.get_time_signal(
                self.max_cycles,
                dim_feature,
                device=self.device
                )

        self.positional_embedding = PositionalEmbedding(dim_feature)
        self.multi_head_attention = MultiheadAttention(dim_feature, self.n_heads)
        self.norm = nn.LayerNorm(
            [dim_feature],
            dtype=torch.float32,
            device=self.device
            )

        self.use_feed_forward = use_feed_forward
        self.feed_forward = None
        if self.use_feed_forward:
            self.feed_forward = FeedForward(dim_feature, self.activation)

        self.act_threshold = act_threshold
        # Probability mass still allowed after the halting threshold is reached.
        self.act_epsilon = 1.0 - act_threshold

        # ACT machinery is only built when more than one cycle is possible.
        self.pondering = None
        self.act_weight = None
        self.open_act = False
        if self.max_cycles > 1:
            self.open_act = True
            # Pondering input is [node_vec | node_emb | time], hence 3x features.
            self.pondering = Pondering(dim_feature * 3, bias_const=3)
            self.act_weight = ACTWeight(self.act_threshold)

    @staticmethod
    def get_time_signal(length: int,
                        channels: int,
                        min_timescale: float = 1.0,
                        max_timescale: float = 1.0e4,
                        device: str = 'cpu'
                        ) -> Tensor:
        """
        Generate a ``[length, channels]`` timing signal consisting of sinusoids.

        Adapted from:
        https://github.com/andreamad8/Universal-Transformer-Pytorch/blob/master/models/common_layer.py
        """
        position = torch.arange(length, dtype=torch.float32, device=device)
        num_timescales = channels // 2
        # max(..., 1) guards against a zero divisor when channels < 4
        # (num_timescales == 1 would otherwise produce an infinite increment
        # and a NaN signal).
        log_timescale_increment = torch.log(
            get_tensor(max_timescale / min_timescale, dtype=torch.float32, device=device)
            ) / max(num_timescales - 1, 1)
        inv_timescales = min_timescale * torch.exp(
            torch.arange(num_timescales, dtype=torch.float32, device=device)
            * -log_timescale_increment
            )
        scaled_time = position.unsqueeze(1) * inv_timescales.unsqueeze(0)

        # Concatenate sin/cos halves along the channel axis.
        signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
        # When channels is odd, pad one zero COLUMN so the result is
        # [length, channels]. F.pad's tuple starts at the LAST dimension, so
        # the pad spec must be (left, right) for the channel axis; the previous
        # (0, 0, 0, channels % 2) padded the length axis instead, yielding
        # [length + 1, channels - 1] for odd channel counts.
        signal = torch.nn.functional.pad(
            signal,
            (0, channels % 2),
            'constant',
            0.0
            )

        return signal

    def _print_info(self,
                    num_retraction: int = 6,
                    num_gap: int = 3,
                    char: str = '-'
                    ) -> None:
        """Print a human-readable summary of this unit's configuration."""
        ret = char * num_retraction
        gap = char * num_gap
        print(ret + gap + ' Feature dimension: ' + str(self.dim_node_rep))
        print(ret + gap + ' Activation function: ' + self.activation._get_name())
        print(ret + gap + ' Number of heads in multi-head attention: ' + str(self.n_heads))
        print(ret + gap + ' Use feed forward network: ' + ('Yes' if self.use_feed_forward else 'No'))
        if self.max_cycles > 1:
            print(ret + gap + ' Adaptive computation time (ACT) with maximum cycles: ' + str(self.max_cycles))
            print(ret + gap + ' Cycle mode: ' + ('Fixed' if self.fixed_cycles else 'Flexible'))
            print(ret + gap + ' Threshold for ACT: ' + str(self.act_threshold))


    def forward(self,
                node_vec: Tensor,
                node_emb: Tensor,
                node_mask: Tensor,
                edge_vec: Tensor,
                edge_emb: Tensor,
                edge_mask: Optional[Tensor] = None,
                edge_cutoff: Optional[Tensor] = None,
                neigh_idx: Optional[Tensor] = None,
                **kwargs
                ) -> Tuple[Tensor, Tensor]:
        """
        Update node representations with (optionally ACT-controlled) attention.

        Returns:
            Tuple[Tensor, Tensor]: Updated node vectors and the (unchanged)
            edge vectors.
        """

        def _encoder(
                node_vec: Tensor,
                edge_vec: Union[int, Tensor] = 1,
                edge_mask: Optional[Tensor] = None,
                edge_cutoff: Optional[Tensor] = None,
                time_signal: Union[int, Tensor] = 0,
                neigh_idx: Optional[Tensor] = None
                ) -> Tensor:
            """One attention step: positional embedding, attention, residual, norm."""
            query, key, value = self.positional_embedding(
                node_vec,
                edge_vec,
                time_signal,
                neigh_idx
            )
            dv = self.multi_head_attention(
                query,
                key,
                value,
                mask=edge_mask,
                cutoff=edge_cutoff
            )
            dv = dv.squeeze(-2)
            # Residual connection followed by layer normalization.
            node_new = node_vec + dv
            node_new = self.norm(node_new)

            if self.use_feed_forward:
                node_new = self.feed_forward(node_new)

            return node_new

        if self.open_act:
            def _act_encoder(
                    node_new: Tensor,
                    node_vec: Tensor,
                    node_emb: Tensor,
                    edge_vec: Tensor,
                    edge_mask: Optional[Tensor],
                    edge_cutoff: Optional[Tensor],
                    halting_prob: Tensor,
                    n_updates: Tensor,
                    cycle: int,
                    neigh_idx: Optional[Tensor] = None
                    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
                """One ACT cycle: ponder, update halting stats, attend, blend."""
                time_signal = self.time_embedding[cycle]
                # Broadcast the per-cycle time signal to the node embedding shape.
                vt = torch.zeros_like(node_emb, device=self.device) + time_signal

                # Pondering network decides how much this cycle contributes.
                xp = torch.cat((node_vec, node_emb, vt), dim=-1)
                p = self.pondering(xp)
                w, dp, dn = self.act_weight(p, halting_prob)
                halting_prob = halting_prob + dp
                n_updates = n_updates + dn

                node_vec = _encoder(
                    node_vec=node_vec,
                    edge_vec=edge_vec,
                    edge_mask=edge_mask,
                    edge_cutoff=edge_cutoff,
                    time_signal=time_signal,
                    neigh_idx=neigh_idx
                )

                # Weighted running average of per-cycle outputs.
                node_new = node_vec * w + node_new * (1.0 - w)

                return node_new, node_vec, halting_prob, n_updates

            node_new = torch.zeros_like(node_vec, device=self.device)
            # Per-node halting probability and update counters: [batch, nodes].
            halting_prob = torch.zeros(
                (node_vec.size(0), node_vec.size(1)),
                dtype=torch.float32,
                device=self.device
            )
            n_updates = torch.zeros(
                (node_vec.size(0), node_vec.size(1)),
                dtype=torch.float32,
                device=self.device
            )

            if self.fixed_cycles:
                # Always run exactly max_cycles cycles.
                for cycle in range(self.max_cycles):
                    node_new, node_vec, halting_prob, n_updates = _act_encoder(
                        node_new=node_new,
                        node_vec=node_vec,
                        node_emb=node_emb,
                        edge_vec=edge_vec,
                        edge_mask=edge_mask,
                        edge_cutoff=edge_cutoff,
                        halting_prob=halting_prob,
                        n_updates=n_updates,
                        cycle=cycle,
                        neigh_idx=neigh_idx
                    )
            else:
                # Halt once every node's cumulative probability passes the
                # threshold, or after max_cycles cycles.
                cycle = 0
                while (halting_prob < self.act_threshold).any() and (cycle < self.max_cycles):
                    node_new, node_vec, halting_prob, n_updates = _act_encoder(
                        node_new=node_new,
                        node_vec=node_vec,
                        node_emb=node_emb,
                        edge_vec=edge_vec,
                        edge_mask=edge_mask,
                        edge_cutoff=edge_cutoff,
                        halting_prob=halting_prob,
                        n_updates=n_updates,
                        cycle=cycle,
                        neigh_idx=neigh_idx
                    )
                    cycle += 1
        else:
            # ACT disabled (max_cycles <= 1): a single attention step.
            time_signal = self.time_embedding[0]
            node_new = _encoder(
                node_vec=node_vec,
                edge_vec=edge_vec,
                edge_mask=edge_mask,
                edge_cutoff=edge_cutoff,
                time_signal=time_signal,
                neigh_idx=neigh_idx
            )

        return node_new, edge_vec
