import numpy as np
from mindspore import nn
from mindspore import Tensor
from mindspore import Parameter
import mindspore.ops as ops
import mindspore.numpy as mnp
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
# from mindspore.common.initializer import TruncatedNormal, XavierUniform, HeUniform
from mindspore.common.initializer import initializer, TruncatedNormal, Normal
import mindspore.common.dtype as mstype

from .utils import lecun_init, glorot_uniform
from .. import base as B

import sys
sys.path.append("..") 
from .global_config import ModelConfig

global_config = ModelConfig() ### Set Hyper-parameters here
msint = global_config.msint
msfp = global_config.msfp
ms_small = global_config.ms_small # In case of log(0) or divide by 0;

class FeatureTransformer(nn.Cell):
    r"""Embed raw atom and bond (pair) features into model activations.

    Projects raw per-atom features (``num_atom_types`` channels) to atom
    activations of width ``atom_act_dim`` and raw per-pair features
    (``num_bond_types`` channels) to pair activations of width
    ``pair_act_dim``. Left/right single-atom projections are broadcast-added
    into the pair activation. When ``use_cls`` is enabled, learned [CLS]
    embeddings overwrite the first atom slot and the first row/column of the
    pair tensor (the last slot is dropped to keep shapes unchanged).
    """
    def __init__(self, config):
        super(FeatureTransformer, self).__init__()
        self.config = config.feat_generator ### Pass config.model here.
        self.flag_use_cls = self.config.use_cls

        ### Raw feature channel counts (inputs):
        self.atom_types = config.num_atom_types
        self.pair_types = config.num_bond_types

        ### Embedding widths (outputs):
        self.atom_act_dim = config.atom_act_dim
        self.pair_act_dim = config.pair_act_dim

        ### Shared primitive ops (weights stored (out,in), hence transpose_b):
        self.matmul = P.MatMul(transpose_b=True)
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()
        self.one = Tensor(1, msfp)

        self._init_parameter()

    def _init_parameter(self):
        # All projection weights are (out_dim, in_dim) to pair with
        # P.MatMul(transpose_b=True); biases start at zero.
        self.preprocess_1d = Parameter(initializer(lecun_init(self.atom_types, initializer_name='relu'), [self.atom_act_dim, self.atom_types], mstype.float32))
        self.preprocess_1d_biases = Parameter(Tensor(np.zeros([self.atom_act_dim]), mstype.float32))
        self.left_single = Parameter(initializer(lecun_init(self.atom_types, initializer_name='relu'), [self.pair_act_dim, self.atom_types], mstype.float32))
        self.left_single_biases = Parameter(Tensor(np.zeros([self.pair_act_dim]), mstype.float32))
        self.right_single = Parameter(initializer(lecun_init(self.atom_types, initializer_name='relu'), [self.pair_act_dim, self.atom_types], mstype.float32))
        self.right_single_biases = Parameter(Tensor(np.zeros([self.pair_act_dim]), mstype.float32))
        self.pair_activations = Parameter(initializer(lecun_init(self.pair_types, initializer_name='relu'), [self.pair_act_dim, self.pair_types], mstype.float32))
        self.pair_activations_biases = Parameter(Tensor(np.zeros([self.pair_act_dim]), mstype.float32))
        # Learned [CLS] embeddings, standard-normal init:
        self.cls_atom_embedding = Parameter(Tensor(np.random.normal(0.,1.,[self.atom_act_dim]), mstype.float32))
        self.cls_pair_embedding = Parameter(Tensor(np.random.normal(0.,1.,[self.pair_act_dim]), mstype.float32))

    def construct(self, atom_raw_feat, pair_raw_feat):
        r"""Embed raw features.

        atom_raw_feat: (B,A,num_atom_types) raw per-atom features.
        pair_raw_feat: (B,A,A,num_bond_types) raw per-pair features.
        Returns (atom_act, pair_act) with shapes (B,A,Cm) and (B,A,A,Cz).
        """

        ### Cast fp32 master weights to the working precision msfp:
        preprocess_1d = P.Cast()(self.preprocess_1d, msfp)
        preprocess_1d_biases = P.Cast()(self.preprocess_1d_biases, msfp)
        left_single = P.Cast()(self.left_single, msfp)
        left_single_biases = P.Cast()(self.left_single_biases, msfp)
        right_single = P.Cast()(self.right_single, msfp)
        right_single_biases = P.Cast()(self.right_single_biases, msfp)
        pair_activations = P.Cast()(self.pair_activations, msfp)
        pair_activations_biases = P.Cast()(self.pair_activations_biases, msfp)
        cls_atom_embedding = P.Cast()(self.cls_atom_embedding, msfp)
        cls_pair_embedding = P.Cast()(self.cls_pair_embedding, msfp)

        ### Flatten leading dims so plain MatMul can be used:
        # (B*A,Cin):
        _atom_act = P.Reshape()(atom_raw_feat,(-1, self.atom_types))
        # (B*A*A,Cin'):
        _pair_act = P.Reshape()(pair_raw_feat,(-1, self.pair_types))

        # Atom embedding. (B*A,Cm):
        atom_act = self.matmul(_atom_act, preprocess_1d)
        atom_act = P.BiasAdd()(atom_act, preprocess_1d_biases)
        # (B,A,Cm):
        atom_act = P.Reshape()(atom_act, atom_raw_feat.shape[:-1]+(-1,))
        
        # Pair embedding. (B*A*A,Cz):
        pair_act = self.matmul(_pair_act, pair_activations)
        pair_act = P.BiasAdd()(pair_act, pair_activations_biases)
        # (B,A,A,Cz):
        pair_act = P.Reshape()(pair_act, pair_raw_feat.shape[:-1]+(-1,))

        # Left single-atom projection into pair space. (B*A,Cz):
        left_act = self.matmul(_atom_act, left_single)
        left_act = P.BiasAdd()(left_act, left_single_biases)
        # (B,A,Cz):
        left_act = P.Reshape()(left_act, atom_raw_feat.shape[:-1]+(-1,))
        
        # Right single-atom projection into pair space. (B*A,Cz):
        right_act = self.matmul(_atom_act, right_single)
        right_act = P.BiasAdd()(right_act, right_single_biases)
        # (B,A,Cz):
        right_act = P.Reshape()(right_act, atom_raw_feat.shape[:-1]+(-1,))

        # Outer-sum of the single projections into the pair rep. (B,A,A,Cz):
        pair_act += P.ExpandDims()(left_act, 2) + P.ExpandDims()(right_act, 1)

        if self.flag_use_cls:
            # Prepend the [CLS] atom embedding, dropping the last atom slot
            # so the sequence length stays A. (B,A,Cm):
            cls_atom_act = mnp.broadcast_to(cls_atom_embedding, atom_act.shape)
            atom_act = mnp.concatenate((cls_atom_act[:,:1,:],atom_act[:,:-1,:]),1) # type: ignore
            # Same along both pair axes. (B,A,A,Cz):
            cls_pair_act = mnp.broadcast_to(cls_pair_embedding,pair_act.shape)
            pair_act = mnp.concatenate((cls_pair_act[:,:1,:,:],pair_act[:,:-1,:,:]),1) # type: ignore
            pair_act = mnp.concatenate((cls_pair_act[:,:,:1,:],pair_act[:,:,:-1,:]),2) # type: ignore

        return atom_act, pair_act


class Attention(nn.Cell):
    r"""Multi-head attention with optional sigmoid gating (AlphaFold-style).

    Master weights are stored in fp32 and cast to the working precision
    ``msfp`` inside ``construct``. Softmax logits are computed in fp32
    because the large additive mask bias would overflow fp16.
    """
    def __init__(self, config, q_data_dim, m_data_dim, output_dim):
        super(Attention, self).__init__()
        self.config = config ### Pass config.model.NIU.hyperformer here.
        
        self.q_data_dim = q_data_dim
        self.m_data_dim = m_data_dim
        self.output_dim = output_dim ### Change this if value&output needs downscale/upscale.
        self.num_head = self.config.num_head
        self.gating = self.config.gating
        
        # Total key/value widths default to the input dims when the config
        # does not override them, then are split per head.
        self.key_dim = self.config.get('key_dim', int(q_data_dim)) 
        self.value_dim = self.config.get('value_dim', int(m_data_dim)) ### Add this to Config if value needs downscale/upscale.
        self.key_dim = self.key_dim // self.num_head
        self.value_dim = self.value_dim // self.num_head
        
        self.matmul = P.MatMul(transpose_b=True)
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()
        self.one = Tensor(1, msfp)

        self._init_parameter()

    def _init_parameter(self):
        # Q/K/V/output projections use Glorot init. The gate starts with zero
        # weights and unit biases, i.e. sigmoid(1) ~ 0.73, so it is initially
        # near-open and learnable.
        self.linear_q_weights = Parameter(Tensor(glorot_uniform(self.q_data_dim, self.key_dim, [self.num_head * self.key_dim, self.q_data_dim]), mstype.float32))
        self.linear_k_weights = Parameter(Tensor(glorot_uniform(self.m_data_dim, self.key_dim, [self.num_head * self.key_dim, self.m_data_dim]), mstype.float32))
        self.linear_v_weights = Parameter(Tensor(glorot_uniform(self.m_data_dim, self.value_dim, [self.num_head * self.value_dim, self.m_data_dim]), mstype.float32))
        self.linear_output_weights = Parameter(Tensor(glorot_uniform(self.num_head*self.value_dim, self.output_dim, [self.output_dim, self.num_head*self.value_dim]), mstype.float32))
        self.o_biases = Parameter(Tensor(np.zeros([self.output_dim]), mstype.float32))
        if self.gating:
            self.linear_gating_weights = Parameter(Tensor(np.zeros([self.num_head*self.value_dim, self.q_data_dim]), mstype.float32))
            self.gating_biases = Parameter(Tensor(np.ones((self.num_head, self.value_dim)), mstype.float32), name="gating_b")
    
    def construct(self, q_data, m_data, bias, pair_bias=None):
        r"""Multi-head attention.

        q_data: (B,A,C) query activations.
        m_data: (B,A',C) key/value activations.
        bias: mask bias, (B,1,1,A') or scalar float (large negatives mask out).
        pair_bias: optional per-head additive bias, (B,h,A,A').
        Returns: (B,A,output_dim).
        """
        ### Cast fp32 master weights to the working precision msfp:
        linear_q_weight = P.Cast()(self.linear_q_weights, msfp)
        linear_k_weight = P.Cast()(self.linear_k_weights, msfp)
        linear_v_weight = P.Cast()(self.linear_v_weights, msfp)
        linear_output_weight = P.Cast()(self.linear_output_weights, msfp)
        o_bias = P.Cast()(self.o_biases, msfp)
        
        linear_gating_weight = 0 ### Placeholder for GraphMode compatibility.
        gating_bias = 0
        if self.gating:
            linear_gating_weight = P.Cast()(self.linear_gating_weights, msfp)
            gating_bias = P.Cast()(self.gating_biases, msfp)

        _b, _q, _a = q_data.shape
        _, _k, _C = m_data.shape
        _h = self.num_head

        # (B*A,C):
        q_data = P.Reshape()(q_data, (-1, _a))
        # (B*A',C):
        m_data = P.Reshape()(m_data, (-1, _C))

        # Scaled query; 1/sqrt(key_dim) folded in here so logits need no
        # further scaling.
        q = self.matmul(q_data, linear_q_weight) * self.key_dim ** (-0.5)
        k = self.matmul(m_data, linear_k_weight)
        v = self.matmul(m_data, linear_v_weight)

        # (B,A,h,c):
        q = P.Reshape()(q, (_b, _q, _h, -1))
        # (B,A',h,c):
        k = P.Reshape()(k, (_b, _k, _h, -1))
        v = P.Reshape()(v, (_b, _k, _h, -1))

        # (B,h,A,c)->(B*h,A,c):
        tmp_q = P.Reshape()(P.Transpose()(q, (0, 2, 1, 3)), (_b * _h, _q, -1))
        # (B,h,A',c)->(B*h,A',c):
        tmp_k = P.Reshape()(P.Transpose()(k, (0, 2, 1, 3)), (_b * _h, _k, -1))
        
        ### Large mask biases require fp32 here:
        bias = P.Cast()(bias, mstype.float32)
        # (B,h,A,A'):
        logits = P.Add()(P.Cast()(P.Reshape()(self.batch_matmul_trans_b(tmp_q, tmp_k), (_b, _h, _q, _k)), mstype.float32), bias)

        if pair_bias is not None:
            # (B,h,A,A'):
            bias_ = P.Cast()(pair_bias, mstype.float32)
            logits = P.Add()(logits, bias_)
        # Softmax over the key axis in fp32, then back to msfp. (B,h,A,A'):
        probs = self.softmax(logits)
        probs = P.Cast()(probs, msfp)
        
        # (B,h,c,A')->(B*h,c,A'):
        tmp_v = P.Reshape()(P.Transpose()(v, (0, 2, 3, 1)), (_b * _h, -1, _k))
        # (B*h,A,A'):
        tmp_probs = P.Reshape()(probs, (_b * _h, _q, _k))
        
        # (B*h,A,c) -> (B,A,h,c):
        weighted_avg = P.Transpose()(P.Reshape()(self.batch_matmul_trans_b(tmp_probs, tmp_v), (_b, _h, _q, -1)), (0, 2, 1, 3))

        if self.gating:
            # (1,1,h,c): broadcastable gating bias.
            # FIX: use the msfp-cast ``gating_bias`` computed above; previously
            # this re-read the raw fp32 parameter ``self.gating_biases``,
            # silently discarding the precision cast and mixing dtypes.
            gating_bias = P.ExpandDims()(P.ExpandDims()(gating_bias, 0), 0)
            # (B,A,h,c):
            gate_values = P.Add()(P.Reshape()(self.matmul(q_data, linear_gating_weight), (_b, _q, _h, -1)), gating_bias)
            ### Sigmoid in fp32 to avoid underflow:
            gate_values = P.Cast()(gate_values, mstype.float32)
            gate_values = self.sigmoid(gate_values)
            gate_values = P.Cast()(gate_values, msfp)
            # (B,A,h,c):
            weighted_avg = weighted_avg * gate_values
        
        # Merge heads and project out. (B*A,h*c):
        weighted_avg = P.Reshape()(weighted_avg, (_b * _q, -1))
        # (B,A,C):
        output = P.Add()(P.Reshape()(self.matmul(weighted_avg, linear_output_weight), (_b, _q, -1)), P.ExpandDims()(o_bias, 0))
        return output


class HyperAttention(nn.Cell):
    r"""Adapted from MSARowAttentionWithPairBias.
        Similar to HyperNets:
        att_scores = Qi@W(i,j)@Kj.T + b(i,j), where W(i,j) and b(i,j) can also be generated by neural networks.

        The pair activation contributes a per-head additive bias to the
        attention logits; with ``use_hyper_attention`` it additionally
        modulates the query/key activations via mask-averaged projections.
    """
    def __init__(self, config, atom_act_dim, pair_act_dim):
        super(HyperAttention, self).__init__()
        self.config = config ### Pass config.model.NIU.hyperformer here.

        self.flag_hyper_att = self.config.use_hyper_attention
        self.num_head = self.config.num_head

        # LayerNorm primitive shared by all normalizations in this cell.
        self.norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5)
        
        self.matmul = P.MatMul(transpose_b=True)
        self.attn_mod = Attention(self.config, atom_act_dim, atom_act_dim, atom_act_dim)
        self.atom_act_dim = atom_act_dim
        self.pair_act_dim = pair_act_dim

        self.one = Tensor(1, msfp)
        self.zero = 1. - self.one

        self._init_parameter()

    def _init_parameter(self):
        # LayerNorm affine params (unit gamma / zero beta) and the per-head
        # pair-bias projection; hyper-attention adds a pair->atom projection
        # with its own LayerNorm.
        self.query_norm_gammas = Parameter(Tensor(np.ones([self.atom_act_dim,]), mstype.float32))
        self.query_norm_betas = Parameter(Tensor(np.zeros([self.atom_act_dim,]), mstype.float32))
        self.feat_2d_norm_gammas = Parameter(Tensor(np.ones([self.pair_act_dim,]), mstype.float32))
        self.feat_2d_norm_betas = Parameter(Tensor(np.zeros([self.pair_act_dim,]), mstype.float32))
        self.feat_2d_weights = Parameter(Tensor(np.random.normal(scale=1/np.sqrt(self.pair_act_dim), size=[self.num_head, self.pair_act_dim]), mstype.float32))
        if self.flag_hyper_att:
            self.pair_mat_weights = Parameter(Tensor(np.random.normal(scale=1/np.sqrt(self.pair_act_dim), size=[self.atom_act_dim, self.pair_act_dim]), mstype.float32))
            self.pair_mat_biases = Parameter(Tensor(np.zeros([self.atom_act_dim,]), mstype.float32))
            self.mat_norm_gammas = Parameter(Tensor(np.ones([self.atom_act_dim,]), mstype.float32))
            self.mat_norm_betas = Parameter(Tensor(np.zeros([self.atom_act_dim,]), mstype.float32))

    def construct(self, atom_act, pair_act, atom_mask, pair_mask):
        r"""Pair-biased attention over atom activations.

        atom_act: (B,A,Cm).
        pair_act: (B,A,A,Cz).
        atom_mask: (B,A).
        pair_mask: (B,A,A).
        Returns updated atom_act, (B,A,Cm).
        """
        # Norm affine params are consumed in fp32 directly (no msfp cast):
        query_norm_gamma = self.query_norm_gammas
        query_norm_beta = self.query_norm_betas
        feat_2d_norm_gamma = self.feat_2d_norm_gammas
        feat_2d_norm_beta = self.feat_2d_norm_betas
        feat_2d_weight = P.Cast()(self.feat_2d_weights, msfp)
        

        # (B,A,A',Cz):
        b, q, k, _ = pair_act.shape
        
        ### Absorb the MASK into the bias term:
        # Large negative number at fp32 masks padded atoms out of the softmax.
        # (B,A):
        atom_mask = P.Cast()(atom_mask, mstype.float32)  
        bias = 1e9 * (atom_mask - 1.0)
        # (B,1,1,A):
        bias = P.ExpandDims()(P.ExpandDims()(bias, 1), 2)

        # LayerNorm atom activations in fp32, then back to msfp. (B,A,Cm):
        atom_act = P.Cast()(atom_act, mstype.float32)
        atom_act, _, _ = self.norm(atom_act, query_norm_gamma, query_norm_beta)
        atom_act = P.Cast()(atom_act, msfp)

        # LayerNorm pair activations likewise. (B,A,A',Cz):
        pair_act = P.Cast()(pair_act, mstype.float32)
        pair_act, _, _ = self.norm(pair_act, feat_2d_norm_gamma, feat_2d_norm_beta) 
        pair_act = P.Cast()(pair_act, msfp)

        # NOTE(review): these two casts are redundant (both tensors were just
        # cast to msfp above); kept as-is.
        atom_act = P.Cast()(atom_act, msfp)
        pair_act = P.Cast()(pair_act, msfp)

        # (B*A*A',Cz):
        pair_act = P.Reshape()(pair_act, (-1, pair_act.shape[-1]))
        # (B*A*A',C)@(h,C).T -> (B*A*A',h) -> (B,A,A',h) -> (B,h,A,A'):
        pair_bias = P.Transpose()(P.Reshape()(self.matmul(pair_act, feat_2d_weight), (b,q,k,self.num_head)), (0,3,1,2))

        ### Optional hyper-attention: modulate queries/keys with the pair rep.
        query_act = atom_act
        key_act = atom_act
        if self.flag_hyper_att:
            pair_mat_weight = P.Cast()(self.pair_mat_weights, msfp)
            pair_mat_bias = P.Cast()(self.pair_mat_biases, msfp)
            mat_norm_gamma = self.mat_norm_gammas
            mat_norm_beta = self.mat_norm_betas

            # Project pair features to atom width. (B*A*A',Cz)->(B*A*A',Cm):
            pair_matrix = self.matmul(pair_act, pair_mat_weight)
            pair_matrix = P.BiasAdd()(pair_matrix, pair_mat_bias)
            # LayerNorm in fp32. (B*A*A',Cm):
            # NOTE(review): pair_matrix is NOT cast back to msfp after this
            # norm, so the additions below mix fp32 with msfp — confirm this
            # is intended (or relies on implicit promotion).
            pair_matrix = P.Cast()(pair_matrix, mstype.float32)
            pair_matrix, _, _ = self.norm(pair_matrix, mat_norm_gamma, mat_norm_beta)
            # (B,A,A',Cm):
            pair_matrix = P.Reshape()(pair_matrix, (b,q,k,-1))

            pair_mask = P.ExpandDims()(pair_mask,-1)
            # Masked mean over the key axis feeds the queries. (B,A,Cm):
            query_act += P.ReduceSum()(pair_matrix*pair_mask,2)/P.ReduceSum()(pair_mask,2)
            # Masked mean over the query axis feeds the keys. (B,A',Cm):
            key_act += P.ReduceSum()(pair_matrix*pair_mask,1)/P.ReduceSum()(pair_mask,1)

        atom_act = self.attn_mod(query_act, key_act, bias, pair_bias=pair_bias)

        return atom_act


class Transition(nn.Cell):
    r"""Position-wise feed-forward block: LayerNorm -> Linear -> act -> Linear.

    The hidden width is ``layer_norm_dim * num_intermediate_factor``. The
    second projection is zero-initialized so the block's initial output is
    zero (identity under a residual connection).
    """
    def __init__(self,
                 layer_norm_dim,
                 act_fn: str = 'relu',
                 num_intermediate_factor: int = 4):
        super(Transition, self).__init__()

        self.layer_norm = B.LayerNorm(epsilon=1e-5, dim_feature=layer_norm_dim,)
        self.matmul = P.MatMul(transpose_b=True)
        self.layer_norm_dim = layer_norm_dim

        self.num_intermediate_factor = num_intermediate_factor
        self.num_intermediate = int(layer_norm_dim * self.num_intermediate_factor)

        # GELU only when explicitly requested; any other value means ReLU.
        self.act_fn = nn.GELU() if act_fn == 'gelu' else nn.ReLU()

        self._init_parameter()

    def _init_parameter(self):
        # First projection: LeCun init for the ReLU family; second projection
        # and both biases start at zero.
        self.transition1_weights = Parameter(initializer(lecun_init(self.layer_norm_dim, initializer_name='relu'), [self.num_intermediate, self.layer_norm_dim]))
        self.transition1_biases = Parameter(Tensor(np.zeros((self.num_intermediate)), mstype.float32))
        self.transition2_weights = Parameter(Tensor(np.zeros((self.layer_norm_dim, self.num_intermediate)), mstype.float32))
        self.transition2_biases = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32))

    def construct(self, act, mask):
        r"""Apply LayerNorm + 2-layer MLP to ``act``; ``mask`` is accepted but unused."""
        # Cast fp32 master weights to the working precision msfp:
        w1 = P.Cast()(self.transition1_weights, msfp)
        b1 = P.Cast()(self.transition1_biases, msfp)
        w2 = P.Cast()(self.transition2_weights, msfp)
        b2 = P.Cast()(self.transition2_biases, msfp)

        # LayerNorm in fp32 for stability, then back to msfp.
        normed = P.Cast()(self.layer_norm(P.Cast()(act, mstype.float32)), msfp)

        # Flatten leading dims so plain MatMul applies, then restore the shape.
        full_shape = P.Shape()(normed)
        flat = normed if len(full_shape) == 2 else P.Reshape()(normed, (-1, full_shape[-1]))
        hidden = self.act_fn(P.BiasAdd()(self.matmul(flat, w1), b1))
        projected = P.BiasAdd()(self.matmul(hidden, w2), b2)
        return P.Reshape()(projected, full_shape)


class OuterProduct(nn.Cell):
    r"""Outer-product update: atom activations -> pair-activation update.

    Projects atom activations to two low-rank views (``dim_outer_product``
    channels each), forms their per-pair outer product, and projects the
    flattened product to ``num_output_channel`` pair channels. The output
    projection is zero-initialized, so the block initially contributes
    nothing under a residual connection.
    """
    def __init__(self, act_dim, num_output_channel, dim_outer_product: int = 32):
        super(OuterProduct, self).__init__()

        self.num_output_channel = num_output_channel
        self.layer_norm = B.LayerNorm(dim_feature=act_dim)
        self.matmul_trans_b = P.MatMul(transpose_b=True)
        # NOTE: matmul / batch_matmul_trans_b are currently unused but kept
        # for attribute compatibility.
        self.matmul = P.MatMul()
        self.batch_matmul= P.BatchMatMul()
        self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.act_dim = act_dim
        self.dim_outer_product = dim_outer_product
        
        self._init_parameter()

    def _init_parameter(self):
        # Left/right projections: LeCun init; output projection and all
        # biases start at zero (see class docstring).
        self.left_projection_weights = Parameter(initializer(lecun_init(self.act_dim), [self.dim_outer_product, self.act_dim]))
        self.left_projection_biases = Parameter(Tensor(np.zeros((self.dim_outer_product)), mstype.float32))
        self.right_projection_weights = Parameter(initializer(lecun_init(self.act_dim), [self.dim_outer_product, self.act_dim]))
        self.right_projection_biases = Parameter(Tensor(np.zeros((self.dim_outer_product)), mstype.float32))
        self.linear_output_weights = Parameter(Tensor(np.zeros((self.num_output_channel, self.dim_outer_product * self.dim_outer_product)), mstype.float32))
        self.o_biases = Parameter(Tensor(np.zeros((self.num_output_channel)), mstype.float32))

    def construct(self, act, atom_mask, mask_norm):
        r"""Compute the pair update.

        act: (B,A,Cm) atom activations.
        atom_mask: (B,A) validity mask.
        mask_norm: (B,A,A,1); currently UNUSED — the normalization that
            consumed it is disabled (see the note at the end).
        Returns: (B,A,A,num_output_channel).
        """
        # (B,A):
        mask = P.Cast()(atom_mask, msfp)

        # Cast fp32 master weights to the working precision msfp:
        left_projection_weight = P.Cast()(self.left_projection_weights, msfp)
        left_projection_bias = P.Cast()(self.left_projection_biases, msfp)
        right_projection_weight = P.Cast()(self.right_projection_weights, msfp)
        right_projection_bias = P.Cast()(self.right_projection_biases, msfp)
        linear_output_weight = P.Cast()(self.linear_output_weights, msfp)
        linear_output_bias = P.Cast()(self.o_biases, msfp)
        
        # LayerNorm in fp32, then back to msfp. (B,A,C):
        act = P.Cast()(act, mstype.float32)
        act = self.layer_norm(act)
        act = P.Cast()(act, msfp)

        # (B,A,1):
        mask = P.ExpandDims()(mask, -1)
        
        # Flatten leading dims so plain MatMul applies. (B,A,C):
        act_shape = P.Shape()(act)
        if len(act_shape) != 2:
            # (B*A,C):
            act = P.Reshape()(act, (-1, act_shape[-1]))
        # (B,A,-1):
        out_shape = act_shape[:-1] + (-1,)

        # Masked left projection. (B,A,C'==32):
        left_act = mask * P.Reshape()(P.BiasAdd()(self.matmul_trans_b(act, left_projection_weight),
                                                  left_projection_bias),
                                      out_shape)
        # Masked right projection. (B,A,C'):
        right_act = mask * P.Reshape()(P.BiasAdd()(self.matmul_trans_b(act,
                                                                       right_projection_weight),
                                                   right_projection_bias),
                                       out_shape)
        B, b, c = left_act.shape
        _, d, e = right_act.shape
        a = 1

        # (B,1,A,C'==32):
        left_act = P.ExpandDims()(left_act,1)
        # (B,1,A,C'==32):
        right_act = P.ExpandDims()(right_act,1)

        # ->(B,C'1,A1,1) -> (B,C'1*A1,1):
        left_act = P.Reshape()(P.Transpose()(left_act, (0, 3, 2, 1)), (B, -1, a))
        # (B,1,A2*C'2):
        right_act = P.Reshape()(right_act, (B, a, -1))

        # Outer product via rank-1 batch matmul:
        # (B,C'1*A1,1)@(B,1,A2*C'2)->(B,C'1*A1,A2*C'2)
        # ->(B,C'1,A1,A2,C'2)->(B,A1,A2,C'1,C'2)->(B,A1,A2,C'1*C'2):
        act = P.Reshape()(P.Transpose()(P.Reshape()(self.batch_matmul(left_act, right_act),
                                                    (B, c, b, d, e)), (0, 2, 3, 1, 4)), (B, b, d, c * e))                                            
        
        # Flatten for the output projection. (B,A1,A2,C'1*C'2):
        act_shape = P.Shape()(act)
        if len(act_shape) != 2:
            # (B*A1*A2,C'1*C'2):
            act = P.Reshape()(act, (-1, act_shape[-1]))
        # (B,A1,A2,C):
        act = P.Reshape()(P.BiasAdd()(self.matmul_trans_b(act, linear_output_weight),
                                      linear_output_bias), (B, b, d, -1))
        
        # NOTE(review): normalization by mask_norm is currently disabled
        # (the dead local `epsilon = 1e-3` that supported it was removed).
        # If re-enabled, guard the division with a small epsilon:
        # act = P.RealDiv()(act, 1e-3 + mask_norm)

        return act


class HyperformerAtomBlock(nn.Cell):
    r"""Atom-representation update: pair-biased hyper-attention + transition.

    Both sub-updates are applied residually to ``atom_act``.
    """
    def __init__(self, config, atom_act_dim, pair_act_dim):
        super(HyperformerAtomBlock, self).__init__()
        self.config = config ### Pass config.model.NIU here
        
        self.hyper_attention = HyperAttention(
            self.config.hyperformer, atom_act_dim, pair_act_dim)
        # FIX: Transition's signature is (layer_norm_dim, act_fn,
        # num_intermediate_factor). The previous call passed
        # ``self.config.atom_transition`` as layer_norm_dim and the int
        # ``atom_act_dim`` as act_fn (silently comparing an int to 'gelu').
        # Pass the feature dim where it belongs. If config.atom_transition
        # carries transition hyper-parameters, thread them through explicitly.
        self.atom_transition = Transition(atom_act_dim)
    
    def construct(self, atom_act, pair_act, atom_mask, pair_mask):
        r"""atom_act: (B,A,Cm); pair_act: (B,A,A,Cz); masks: (B,A)/(B,A,A).
        Returns updated atom_act, (B,A,Cm)."""
        atom_act = P.Cast()(atom_act, msfp)
        pair_act = P.Cast()(pair_act, msfp)

        ### 1. Residual pair-biased attention over atoms:
        atom_act = P.Add()(atom_act, self.hyper_attention(atom_act, pair_act, atom_mask, pair_mask))
        ### 2. Residual feed-forward transition:
        atom_act = P.Add()(atom_act, self.atom_transition(atom_act, atom_mask))
        
        return atom_act

class HyperformerPairBlock(nn.Cell):
    r"""Pair-representation update: outer product of atom acts + transition.

    Both sub-updates are applied residually to ``pair_act``.
    """
    def __init__(self, atom_act_dim, pair_act_dim,
                 dim_outer_product: int = 32, num_intermediate_factor: int = 4,):
        super(HyperformerPairBlock, self).__init__()

        self.outer_product = OuterProduct(atom_act_dim, pair_act_dim, dim_outer_product)
        self.pair_transition = Transition(pair_act_dim, num_intermediate_factor=num_intermediate_factor)

    def construct(self, atom_act, pair_act, atom_mask, pair_mask, mask_norm):
        r"""atom_act: (B,A,Cm); pair_act: (B,A,A,Cz); masks: (B,A)/(B,A,A);
        mask_norm: (B,A,A,1). Returns updated pair_act, (B,A,A,Cz)."""
        atom_act = P.Cast()(atom_act, msfp)
        pair_act = P.Cast()(pair_act, msfp)

        # Residual outer-product update from the atom representation:
        outer_update = self.outer_product(atom_act, atom_mask, mask_norm)
        pair_act = P.Add()(pair_act, outer_update)

        # Residual feed-forward transition on the pair representation:
        transition_update = self.pair_transition(pair_act, pair_mask)
        pair_act = P.Add()(pair_act, transition_update)

        return pair_act

class NeuralInteractionUnit(nn.Cell):
    r"""One interaction round: refresh the pair rep once, then run
    ``cycles`` atom-block updates against the refreshed pair rep.
    """
    def __init__(self, config,):
        super(NeuralInteractionUnit, self).__init__()
        self.atom_act_dim = config.atom_act_dim
        self.pair_act_dim = config.pair_act_dim

        self.config = config.NIU ### Pass config.model here
        self.cycles = self.config.cycles

        self.niu_atom_block = HyperformerAtomBlock(self.config, self.atom_act_dim, self.pair_act_dim)
        if global_config.recompute:
            self.niu_atom_block.recompute()
        
        # FIX: HyperformerPairBlock's signature is
        # (atom_act_dim, pair_act_dim, dim_outer_product, num_intermediate_factor);
        # previously ``self.config`` was passed as the first positional
        # argument, shifting every argument by one (config -> atom_act_dim,
        # atom_act_dim -> pair_act_dim, pair_act_dim -> dim_outer_product).
        self.niu_pair_block = HyperformerPairBlock(self.atom_act_dim, self.pair_act_dim)
        if global_config.recompute:
            self.niu_pair_block.recompute()
    
    def construct(self, atom_act, pair_act, atom_mask, pair_mask, mask_norm):
        r"""atom_act: (B,A,Cm); pair_act: (B,A,A,Cz); masks: (B,A)/(B,A,A);
        mask_norm: (B,A,A,1). Returns (atom_act, pair_act)."""
        atom_act_ = P.Cast()(atom_act, msfp)
        pair_act_ = P.Cast()(pair_act, msfp)

        ### 1. Update Pair Rep.:
        pair_act = self.niu_pair_block(atom_act_, pair_act_, atom_mask, pair_mask, mask_norm)

        ### 2. Update Atom Rep.; F.depend forces the pair update to execute
        ### before the atom cycles read it:
        atom_act = F.depend(atom_act_, pair_act) ### Is this right?
        for _i_cycle in range(self.cycles):
            atom_act = self.niu_atom_block(atom_act, pair_act, atom_mask, pair_mask)

        ### 3. Identity adds to dodge known recompute graph bugs:
        atom_act = P.Add()(atom_act, P.ZerosLike()(atom_act))
        pair_act = P.Add()(pair_act, P.ZerosLike()(pair_act))
            
        return atom_act, pair_act
