# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# PyTorch (https://pytorch.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Main program of Cybertron
"""
import os
from inspect import signature
from typing import Union, List, Tuple, Optional, cast
import numpy as np
from numpy import ndarray

import torch
from torch import nn, Tensor
from torch.nn import ModuleList

from .units import Units, GLOBAL_UNITS
from .utils import get_integer, get_tensor, GetVector, write_yaml, GLOBAL_DEVICE, get_arguments
from .embedding import GraphEmbedding, get_embedding
from .readout import Readout, get_readout
from .model import MolecularGNN, get_molecular_model
from .normalize import ScaleShift
from torchsponge.potential import ForceCell

class Cybertron(nn.Module):
    """
    Main class for Cybertron model.

    Args:
        embedding : Union[GraphEmbedding, dict, str], optional
            Graph embedding layer or its configuration. If None, the default
            embedding of `model` is used.
        model : Union[MolecularGNN, dict, str]
            Molecular graph neural network model or its configuration.
        readout : Union[Readout, dict, str, List[Readout]], optional
            Readout function or a list of readout functions. Default: 'atomwise'.
        num_atoms : int, optional
            Maximum number of atoms in the system. Required when `atom_type` is None.
        atom_type : Union[Tensor, ndarray, List[int]], optional
            Atom type indices.
        bond_types : Union[Tensor, ndarray, List[int]], optional
            Bond type indices.
        pbc_box : Union[Tensor, ndarray, List[float]], optional
            Periodic boundary condition box.
        use_pbc : bool, optional
            Whether to use periodic boundary conditions.
        scale : Union[float, Tensor, List[Union[float, Tensor]]], optional
            Scaling factor for output. Default: 1.
        shift : Union[float, Tensor, List[Union[float, Tensor]]], optional
            Shift value for output. Default: 0.
        type_ref : Union[Tensor, ndarray, List[Union[Tensor, ndarray]]], optional
            Reference values for each atom type.
        length_unit : Union[str, Units], optional
            Length unit for input coordinates.
        energy_unit : Union[str, Units], optional
            Energy unit for output predictions.
    """
    def __init__(self,
                 embedding: Optional[Union[GraphEmbedding, dict, str]],
                 model: Union[MolecularGNN, dict, str],
                 readout: Union[Readout, dict, str, List[Readout]] = 'atomwise',
                 num_atoms: Optional[int] = None,
                 atom_type: Optional[Union[Tensor, ndarray, List[int]]] = None,
                 bond_types: Optional[Union[Tensor, ndarray, List[int]]] = None,
                 pbc_box: Optional[Union[Tensor, ndarray, List[float]]] = None,
                 use_pbc: Optional[bool] = None,
                 scale: Union[float, Tensor, List[Union[float, Tensor]]] = 1,
                 shift: Union[float, Tensor, List[Union[float, Tensor]]] = 0,
                 type_ref: Optional[Union[Tensor, ndarray, List[Union[Tensor, ndarray]]]] = None,
                 length_unit: Optional[Union[str, Units]] = None,
                 energy_unit: Optional[Union[str, Units]] = None,
                 **kwargs
                 ):
        super().__init__()
        self._kwargs = get_arguments(locals(), kwargs)

        self.device = GLOBAL_DEVICE()
        if length_unit is None:
            length_unit = GLOBAL_UNITS.length_unit
        self._units = Units(length_unit, energy_unit)

        if atom_type is None:
            # Variable atom types: number of atoms must be given explicitly.
            self.atom_type = None
            self.atom_mask = None
            if num_atoms is None:
                raise ValueError('"num_atoms" must be assigned when "atom_type" is None')
            self.num_atoms = num_atoms
        else:
            # (1, A)
            self.atom_type = get_tensor(atom_type, dtype=torch.int32, device=self.device).reshape(1, -1)
            self.atom_mask = self.atom_type > 0
            natoms = self.atom_type.shape[-1]
            if self.atom_mask.all():
                self.num_atoms = natoms
            else:
                # FIX: count valid atoms from the tensor mask. The raw
                # `atom_type` argument may be a plain list, which does not
                # support the `> 0` comparison used previously.
                self.num_atoms = torch.sum(self.atom_mask.to(torch.int32), dim=-1, keepdim=True)

        self.bonds = None
        self.bond_mask = None
        if bond_types is not None:
            # FIX: `natoms` was previously undefined (NameError) when
            # `atom_type` is None; fall back to `num_atoms` in that case.
            natoms = self.atom_type.shape[-1] if self.atom_type is not None else num_atoms
            self.bonds = get_tensor(bond_types, dtype=torch.int32, device=self.device).reshape(1, natoms, -1)
            # FIX: build the mask from the tensor form, not the raw argument.
            self.bond_mask = self.bonds > 0

        model = get_molecular_model(model)
        dim_node_emb = model.dim_node_emb
        dim_edge_emb = model.dim_edge_emb
        self.activation = model.activation

        if embedding is None:
            embedding = model.default_embedding

        self.embedding = get_embedding(embedding,
                                       dim_node=dim_node_emb,
                                       dim_edge=dim_edge_emb,
                                       activation=self.activation,
                                       length_unit=length_unit
                                       )

        self.dim_node_emb = self.embedding.dim_node
        self.dim_edge_emb = self.embedding.dim_edge

        model.set_dimension(self.dim_node_emb, self.dim_edge_emb)
        self.model = model

        self.dim_node_rep = self.model.dim_node_rep
        self.dim_edge_rep = self.model.dim_edge_rep

        # Whether the embedding works on inter-atomic distances.
        self.calc_distance = self.embedding.emb_dis

        self.neighbours = None
        self.neighbour_mask = None
        self.get_neigh_list = None
        self.pbc_box = None
        self.use_pbc = use_pbc
        self.cutoff = None
        # Large placeholder distance for masked atom pairs (see forward()).
        self.large_dis = 5e4

        if self.calc_distance:
            self.cutoff = self.embedding.cutoff
            if pbc_box is not None:
                # (1, D)
                self.pbc_box = get_tensor(pbc_box, dtype=torch.float32, device=self.device).reshape(1, -1)
                self.use_pbc = True

            self.get_vector = GetVector(self.use_pbc)
            self.large_dis = self.cutoff * 10

        self.activation = self.model.activation

        # Defaults for the no-readout case: raw node and edge representations.
        self.num_readouts = 0
        self.num_outputs = 2
        self.output_ndim = (2, 3)
        # [(A, F), (A, N, F)]
        self.output_shape = ((self.num_atoms, self.dim_node_rep),
                             (self.num_atoms, self.num_atoms, self.dim_edge_rep))

        self.readout: List[Readout] = None
        self.scaleshift: List[ScaleShift] = None
        self.output_unit_scale = None
        if readout is not None:
            if isinstance(readout, (Readout, str, dict)):
                self.num_readouts = 1
                self.num_outputs = 1
                readout = [readout]
            if isinstance(readout, (list, tuple)):
                self.num_readouts = len(readout)
                self.num_outputs = len(readout)
                readout = [get_readout(cls_name=r,
                                       dim_node_rep=self.dim_node_rep,
                                       dim_edge_rep=self.dim_edge_rep,
                                       activation=self.activation,
                                       ) for r in readout]
            else:
                # FIX: report the offending type; previously `readout` was set
                # to None before formatting, so the message always said NoneType.
                raise TypeError(f'Unsupported `readout` type: {type(readout)}')

            self.output_ndim = []
            self.output_shape = []
            for i in range(self.num_outputs):
                readout[i].set_dimension(self.dim_node_rep, self.dim_edge_rep)
                self.output_ndim.append(readout[i].ndim)
                self.output_shape.append(readout[i].shape)

            self.readout = nn.ModuleList(readout)

            self.set_scaleshift(scale, shift, type_ref)

        self.input_unit_scale = self.embedding.convert_length_from(self._units)
        self.use_scaleshift = True

    @property
    def units(self) -> Units:
        return self._units

    @units.setter
    def units(self, units_: Units):
        self._units = units_
        self.input_unit_scale = self.embedding.convert_length_from(self._units)
        if self.readout is not None:
            # FIX: build a reusable list of tensors. The previous code created
            # a one-shot generator and misused the Tensor constructor.
            self.output_unit_scale = [
                get_tensor(self.scaleshift[i].convert_energy_to(self._units),
                           dtype=torch.float32, device=self.device)
                for i in range(self.num_readouts)]

    @property
    def length_unit(self) -> str:
        return self._units.length_unit

    @length_unit.setter
    def length_unit(self, length_unit_: Union[str, Units]):
        self.set_length_unit(length_unit_)

    @property
    def energy_unit(self) -> str:
        return self._units.energy_unit

    @energy_unit.setter
    def energy_unit(self, energy_unit_: Union[str, Units]):
        self.set_energy_unit(energy_unit_)

    @property
    def model_name(self) -> str:
        return self.model._get_name()

    def train(self, mode: bool = True):
        """Set training mode; scale/shift is only applied in evaluation mode."""
        super().train(mode)
        self.use_scaleshift = not mode
        return self

    def set_scaleshift(self,
                       scale: Union[float, Tensor, List[Union[float, Tensor]]] = 1,
                       shift: Union[float, Tensor, List[Union[float, Tensor]]] = 0,
                       type_ref: Union[Tensor, ndarray, List[Union[Tensor, ndarray]]] = None,
                       ):
        """set scale, shift and type_ref"""
        if self.readout is None:
            return self

        def _check_data(value, name: str):
            # Normalize to a list with one entry per readout, broadcasting a
            # single value when necessary.
            if not isinstance(value, (list, tuple)):
                value = [value]
            if len(value) == self.num_readouts:
                return value
            if len(value) == 1:
                print(f'Warning: The number of {name} {len(value)} is equal to 1, '
                      f'which will be broadcasted to {self.num_readouts}')
                return value * self.num_readouts
            raise ValueError(f'The number of {name} {len(value)} must be equal to '
                             f'the number of readout functions {self.num_readouts}')

        def _check_scaleshift(value, shape: Tuple[int], name: str) -> Tensor:
            # Validate a single scale/shift entry against the readout shape.
            if value is None:
                return None
            if not isinstance(value, (float, int, Tensor, nn.Parameter, ndarray)):
                raise TypeError(f'The type of {name} must be float, Tensor or ndarray, '
                                f'but got: {type(value)}')

            value = get_tensor(value, dtype=torch.float32, device=self.device)
            if value.ndim == 0:
                value = torch.reshape(value, (-1,))
            if value.shape == shape:
                return value

            # FIX: `Tensor.size()` returns a torch.Size and is never equal to
            # the integer 1; `numel()` is the element count (MindSpore port bug).
            if value.numel() == 1:
                # (1, ..., 1) <- (1)
                return torch.reshape(value, (1,) * len(shape) + (-1,))

            raise ValueError(f'The shape of {name} ({value.shape}) does not match '
                             f'the shape of readout function: {shape}')

        def _check_type_ref(ref, shape: Tuple[int]) -> Tensor:
            # Validate per-atom-type reference values against the readout shape.
            if ref is None:
                return None
            if not isinstance(ref, (Tensor, nn.Parameter, ndarray)):
                raise TypeError(f'The type of type_ref must be Tensor, Parameter or ndarray, '
                                f'but got: {type(ref)}')

            ref = get_tensor(ref, dtype=torch.float32, device=self.device)
            if ref.ndim < 2:
                raise ValueError(f'The rank (ndim) of type_ref should be at least 2, '
                                 f'but got : {ref.ndim}')
            if ref.shape[1:] != shape:
                raise ValueError(f'The shape of type_ref {ref.shape} does not match '
                                 f'the shape of readout function: {shape}')
            return ref

        scale = _check_data(scale, 'scale')
        shift = _check_data(shift, 'shift')

        scale = [_check_scaleshift(scale[i], self.readout[i].shape, 'scale')
                 for i in range(self.num_readouts)]
        shift = [_check_scaleshift(shift[i], self.readout[i].shape, 'shift')
                 for i in range(self.num_readouts)]

        if not isinstance(type_ref, (list, tuple)):
            type_ref = [type_ref]
        if len(type_ref) != self.num_readouts:
            if len(type_ref) == 1:
                type_ref *= self.num_readouts
            else:
                raise ValueError(f'The number of type_ref {len(type_ref)} must be equal to '
                                 f'the number of readout functions {self.num_readouts}')

        type_ref = [_check_type_ref(type_ref[i], self.readout[i].shape)
                    for i in range(self.num_readouts)]

        if self.scaleshift is None:
            self.scaleshift = nn.ModuleList([
                ScaleShift(scale=scale[i],
                           shift=shift[i],
                           type_ref=type_ref[i],
                           shift_by_atoms=self.readout[i].shift_by_atoms,
                           )
                for i in range(self.num_readouts)
            ])
        else:
            for i in range(self.num_readouts):
                self.scaleshift[i].set_scaleshift(scale=scale[i], shift=shift[i], type_ref=type_ref[i])
        return self

    def readout_ndim(self, readout_idx: int) -> int:
        """returns the rank (ndim) of a specific readout function"""
        if self.readout is None:
            return None
        self._check_readout_index(readout_idx)
        return self.readout[readout_idx].ndim

    def readout_shape(self, readout_idx: int) -> Tuple[int]:
        """returns the shape of a specific readout function"""
        if self.readout is None:
            return None
        self._check_readout_index(readout_idx)
        return self.readout[readout_idx].shape

    def scale(self, readout_idx: int = None) -> Union[Tensor, List[Tensor]]:
        """returns the scale of one readout, or of all readouts if no index is given"""
        if self.scaleshift is None:
            return None
        # FIX: the branch condition previously tested `self.readout is None`
        # (and then indexed the equally-None scaleshift); the list form is
        # meant for the no-index call.
        if readout_idx is None:
            return [self.scaleshift[i].scale for i in range(self.num_readouts)]
        self._check_readout_index(readout_idx)
        return self.scaleshift[readout_idx].scale

    def shift(self, readout_idx: int = None) -> Union[Tensor, List[Tensor]]:
        """returns the shift of one readout, or of all readouts if no index is given"""
        if self.scaleshift is None:
            return None
        # FIX: same inverted condition as `scale` (see above).
        if readout_idx is None:
            return [self.scaleshift[i].shift for i in range(self.num_readouts)]
        self._check_readout_index(readout_idx)
        return self.scaleshift[readout_idx].shift

    def type_ref(self, readout_idx: int = None) -> Union[Tensor, List[Tensor]]:
        """returns the type_ref of one readout, or of all readouts if no index is given"""
        if self.scaleshift is None:
            return None
        # FIX: same inverted condition as `scale` (see above).
        if readout_idx is None:
            return [self.scaleshift[i].type_ref for i in range(self.num_readouts)]
        self._check_readout_index(readout_idx)
        return self.scaleshift[readout_idx].type_ref

    def set_units(self, length_unit: str = None, energy_unit: str = None):
        """set units"""
        if length_unit is not None:
            self.set_length_unit(length_unit)
        if energy_unit is not None:
            self.set_energy_unit(energy_unit)
        return self

    def set_length_unit(self, length_unit: str):
        """set length unit"""
        self._units = self._units.set_length_unit(length_unit)
        self.input_unit_scale = self.embedding.convert_length_from(self._units)
        return self

    def set_energy_unit(self, energy_units: str):
        """set energy unit"""
        self._units.set_energy_unit(energy_units)
        if self.readout is not None:
            # FIX: use a list instead of a one-shot generator expression, so
            # the scales can be read more than once.
            self.output_unit_scale = [
                get_tensor(self.scaleshift[i].convert_energy_to(self._units), dtype=torch.float32, device=self.device)
                for i in range(self.num_readouts)]
        return self

    def save_configure(self, filename: str, directory: str = None):
        """save configure to file"""
        write_yaml(self._kwargs, filename, directory)
        return self

    def print_info(self,
                   num_retraction: int = 3,
                   num_gap: int = 3,
                   char: str = ' '
                   ) -> None:
        """print the information of Cybertron"""
        ret = char * num_retraction
        gap = char * num_gap
        print("================================================================================")
        print("Cybertron Engine, Ride-on!")
        print('-'*80)
        if self.atom_type is None:
            print(f'{ret} Using variable atom types with maximum number of atoms: {self.num_atoms}')
        else:
            print(f'{ret} Using fixed atom type index:')
            for i, atom in enumerate(self.atom_type[0]):
                print(ret+gap+' Atom {: <7}'.format(str(i))+f': {atom.cpu().numpy()}')
        if self.bonds is not None:
            print(ret+' Using fixed bond connection:')
            for b in self.bonds[0]:
                print(ret+gap+' '+str(b.cpu().numpy()))
            print(ret+' Fixed bond mask:')
            for m in self.bond_mask[0]:
                print(ret+gap+' '+str(m.cpu().numpy()))
        print('-'*80)
        self.embedding._print_info(num_retraction=num_retraction,
                                   num_gap=num_gap, char=char)
        self.model._print_info(num_retraction=num_retraction,
                               num_gap=num_gap, char=char)

        print(ret+" With "+str(self.num_readouts)+" readout networks: ")
        print('-'*80)
        for i in range(self.num_readouts):
            print(ret+" "+str(i)+(". "+self.readout[i]._get_name()))
            self.readout[i]._print_info(
                num_retraction=num_retraction, num_gap=num_gap, char=char)
            self.scaleshift[i]._print_info(
                num_retraction=num_retraction, num_gap=num_gap, char=char)

        print(f'{ret} Input unit: {self._units.length_unit_name}')
        print(f'{ret} Output unit: {self._units.energy_unit_name}')
        print(f'{ret} Input unit scale: {self.input_unit_scale}')
        print("================================================================================")
        print(flush=True)

    def forward(self,
                coordinate: Tensor = None,
                atom_type: Optional[Tensor] = None,
                pbc_box: Optional[Tensor] = None,
                bonds: Optional[Tensor] = None,
                bond_mask: Optional[Tensor] = None,
                ) -> Union[Tensor, Tuple[Tensor]]:
        """Compute the properties of the molecules.

        Args:
            coordinate (Tensor): Tensor of shape (B, A, D). Data type is float.
                Cartesian coordinates for each atom.
            atom_type (Tensor): Tensor of shape (B, A). Data type is int.
                Type index (atomic number) of atom types.
            pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
                Box size of periodic boundary condition
            bonds (Tensor): Tensor of shape (B, A, A). Data type is int.
                Types index of bond connected with two atoms
            bond_mask (Tensor): Tensor of shape (B, A, A). Data type is bool.
                Mask for bonds

        Returns:
            outputs (Tensor):    Tensor of shape (B, A, O). Data type is float.
                If no readout is set, the raw representations and intermediate
                tensors are returned instead.

        """
        if self.atom_type is None:
            # (B, A)
            atom_mask = atom_type > 0
        else:
            # (1, A) -- fixed atom types given at construction take precedence.
            atom_type = self.atom_type
            atom_mask = self.atom_mask

        num_atoms = atom_mask.shape[-1]

        if self.pbc_box is not None:
            pbc_box = self.pbc_box

        # FIX: make sure these are always bound; previously a NameError was
        # raised at the embedding call when `calc_distance` is False.
        vectors = None
        distance = None
        dis_mask = None

        if self.calc_distance:

            coordinate = coordinate * self.input_unit_scale
            # (B, A, A, D) = (B, 1, A, D) - (B, A, 1, D)
            vectors = self.get_vector(coordinate.unsqueeze(-2), coordinate.unsqueeze(-3), pbc_box)

            # (B, A, A) = (B, A, 1) & (B, 1, A)
            dis_mask = torch.logical_and(atom_mask.unsqueeze(-1), atom_mask.unsqueeze(-2))
            # (A, A)
            diagonal = torch.logical_not(torch.eye(num_atoms, num_atoms, dtype=torch.bool, device=self.device))
            # (B, A, A) & (A, A)
            dis_mask = torch.logical_and(dis_mask, diagonal)

            # Add a non-zero value to the neighbour_vector whose mask value is False
            # to prevent them from becoming zero values after Norm operation,
            # which could lead to auto-differentiation errors
            # (B, A, A)
            large_dis = torch.full(dis_mask.shape, self.large_dis, device=self.device)
            large_dis = torch.where(dis_mask, torch.zeros_like(large_dis), large_dis)
            # (B, A, A, D) = (B, A, A, D) + (B, A, A, 1)
            vectors = vectors + large_dis.unsqueeze(-1)
            distance = torch.norm(vectors, dim=-1)

        if self.bonds is not None:
            bonds = self.bonds
            bond_mask = self.bond_mask

        node_emb, node_mask, edge_emb, edge_mask, edge_cutoff = self.embedding(atom_type=atom_type,
                                                                               atom_mask=atom_mask,
                                                                               distance=distance,
                                                                               dis_mask=dis_mask,
                                                                               bond=bonds,
                                                                               bond_mask=bond_mask,
                                                                               )

        node_rep, edge_rep = self.model(node_emb=node_emb,
                                        node_mask=node_mask,
                                        edge_emb=edge_emb,
                                        edge_mask=edge_mask,
                                        edge_cutoff=edge_cutoff,
                                        )

        if self.readout is None:
            return node_rep, edge_rep, atom_mask, dis_mask, edge_cutoff, vectors, node_emb, edge_emb, node_mask, edge_mask

        if atom_mask is not None:
            # (B, 1): number of real (non-padding) atoms per sample.
            num_atoms = torch.count_nonzero(atom_mask.to(torch.int32), dim=-1).unsqueeze(-1)

        outputs = []
        for i in range(self.num_readouts):
            output = self.readout[i](node_rep=node_rep,
                                     edge_rep=edge_rep,
                                     node_emb=node_emb,
                                     edge_emb=edge_emb,
                                     edge_cutoff=edge_cutoff,
                                     atom_type=atom_type,
                                     atom_mask=atom_mask,
                                     distance=distance,
                                     dis_mask=dis_mask,
                                     dis_vec=vectors,
                                     bond=bonds,
                                     bond_mask=bond_mask,
                                     )

            # Scale/shift is only applied in evaluation mode (see train()).
            if self.use_scaleshift and self.scaleshift is not None:
                output = self.scaleshift[i](output, atom_type, num_atoms)

            outputs.append(output)

        if self.num_readouts == 1:
            return outputs[0]

        return outputs

    def _check_readout_index(self, readout_idx: int):
        """raise if `readout_idx` is not a valid readout index"""
        # FIX: also reject negative indices, which would silently wrap around.
        if readout_idx < 0 or readout_idx >= self.num_readouts:
            raise ValueError(f'The index ({readout_idx}) is exceed '
                             f'the number of readout ({self.num_readouts})')
        return self

    
# class CybertronFF(ForceCell):    
#     """
#     Main class for Cybertron model.

#     Args:
#         model : Union[MolecularGNN, dict, str]
#             Molecular graph neural network model or its configuration.
#         embedding : Union[GraphEmbedding, dict, str], optional
#             Graph embedding layer or its configuration.
#         readout : Union[Readout, dict, str, List[Readout]], optional
#             Readout function or a list of readout functions. Default: 'atomwise'.
#         num_atoms : int, optional
#             Maximum number of atoms in the system.
#         atom_type : Union[Tensor, ndarray, List[int]], optional
#             Atom type indices.
#         bond_types : Union[Tensor, ndarray, List[int]], optional
#             Bond type indices.
#         pbc_box : Union[Tensor, ndarray, List[float]], optional
#             Periodic boundary condition box.
#         use_pbc : bool, optional
#             Whether to use periodic boundary conditions.
#         scale : Union[float, Tensor, List[Union[float, Tensor]]], optional
#             Scaling factor for output. Default: 1.
#         shift : Union[float, Tensor, List[Union[float, Tensor]]], optional
#             Shift value for output. Default: 0.
#         type_ref : Union[Tensor, ndarray, List[Union[Tensor, ndarray]]], optional
#             Reference values for each atom type.
#         length_unit : Union[str, Units], optional
#             Length unit for input coordinates.
#         energy_unit : Union[str, Units], optional
#             Energy unit for output predictions.
#     """
#     def __init__(self,
#                  atom_type: Optional[Union[Tensor, ndarray, List[int]]],
#                  embedding: Optional[Union[GraphEmbedding, dict, str]],
#                  model: Union[MolecularGNN, dict, str],
#                  readout: Union[Readout, dict, str, List[Readout]] = 'atomwise',
#                  num_atoms: Optional[int] = None,
#                  bond_types: Optional[Union[Tensor, ndarray, List[int]]] = None,
#                  pbc_box: Optional[Union[Tensor, ndarray, List[float]]] = None,
#                  use_pbc: Optional[bool] = None,
#                  scale: Union[float, Tensor, List[Union[float, Tensor]]] = 1,
#                  shift: Union[float, Tensor, List[Union[float, Tensor]]] = 0,
#                  type_ref: Optional[Union[Tensor, ndarray, List[Union[Tensor, ndarray]]]] = None,
#                  num_walker: int = 1,
#                  length_unit: Optional[Union[str, Units]] = None,
#                  energy_unit: Optional[Union[str, Units]] = None,
#                  **kwargs
#                  ):
#         super().__init__()
#         self._kwargs = get_arguments(locals(), kwargs)

#         if readout is None:
#             raise ValueError('The readout function in CybertronFF cannot be None!')

#         # (1,A)
        
#         self.atom_type: Tensor = get_tensor(atom_type, torch.int32, device=self.device).reshape(1, -1)
#         self.atom_mask: Tensor = self.atom_type > 0
#         max_atoms = self.atom_type.shape[-1]
#         if self.atom_mask.all():
#             self.num_atoms = max_atoms
#         else:
#             self.num_atoms = torch.sum(atom_type > 0, -1, keepdim=True)

#         # (B, A, A) = (B, A, 1) & (B, 1, A)
#         dis_mask = torch.logical_and(self.atom_mask.unsqueeze(-1), self.atom_mask.unsqueeze(-2))
#         num_atoms = self.atom_type.shape[-1]
#         self.dis_mask = torch.logical_and(dis_mask, torch.logical_not(torch.eye(num_atoms, num_atoms, dtype=torch.bool, device=self.device)))

#         self.num_walker = get_integer(num_walker)

#         self.bonds = None
#         self.bond_mask = None
#         if bond_types is not None:
#             self.bonds = torch.tensor(bond_types, dtype=torch.int32).reshape(1, max_atoms, -1)
#             self.bond_mask = bond_types > 0

#         model = get_molecular_model(model)
#         dim_node_emb = model.dim_node_emb
#         dim_edge_emb = model.dim_edge_emb
#         self.activation = model.activation

#         if embedding is None:
#             embedding = model.default_embedding

#         self.embedding = get_embedding(embedding,
#                                        dim_node=dim_node_emb,
#                                        dim_edge=dim_edge_emb,
#                                        activation=self.activation,
#                                        )

#         self.dim_node_emb = self.embedding.dim_node
#         self.dim_edge_emb = self.embedding.dim_edge

#         model.set_dimension(self.dim_node_emb, self.dim_edge_emb)
#         self.model = model

#         self.dim_node_rep = self.model.dim_node_rep
#         self.dim_edge_rep = self.model.dim_edge_rep

#         self.cutoff = self.embedding.cutoff
#         self.large_dis = self.cutoff * 10

#         if pbc_box is not None:
#             # (1,D)
#             self.pbc_box = get_tensor(pbc_box, torch.float32, device=self.device).reshape(1, -1)
#             self.use_pbc = True

#         self.activation = self.model.activation

#         self.num_readouts = 1
#         self.num_outputs = 1
#         if isinstance(readout, (list, tuple)):
#             if len(readout) > 1:
#                 raise ValueError(f'The number of readout for CybertronFF must be 1 but got: {len(readout)}')
#             readout = readout[0]
#         elif not isinstance(readout, (Readout, str, dict)):
#             raise TypeError(f'The type of `readout` must be Readout, dict or str but got: {type(readout)}')

#         readout = get_readout(cls_name=readout,
#                               dim_node_rep=self.dim_node_rep,
#                               dim_edge_rep=self.dim_edge_rep,
#                               activation=self.activation,
#                               )
#         self.readout: List[Readout] = ModuleList([readout])

#         self.output_ndim = self.readout[0].ndim
#         self.output_shape = self.readout[0].shape

#         self.scaleshift: List[ScaleShift] = None
#         self.set_scaleshift(scale, shift, type_ref)

#         self.input_unit_scale = self.embedding.convert_length_from(self.length_unit)

#     @property
#     def model_name(self) -> str:
#         return self.model._get_name()

#     @property
#     def scale(self) -> Union[Tensor, List[Tensor]]:
#         """returns the scale"""
#         return self.scaleshift[0].scale

#     @property
#     def shift(self) -> Union[Tensor, List[Tensor]]:
#         """returns the shift"""
#         return self.scaleshift[0].shift

#     @property
#     def type_ref(self) -> Union[Tensor, List[Tensor]]:
#         """returns the type_ref"""
#         return self.scaleshift[0].type_ref

#     def calc_vector(self,
#                     coordinate: Tensor,
#                     ) -> Tensor:
#         """calculate inter-atomic distances"""
#         vectors = coordinate.unsqueeze(-3) - coordinate.unsqueeze(-2)
#         return vectors
    
#     def calc_distance(self, 
#                       vectors: Tensor,
#                       ):
#         distance = torch.norm(vectors, dim=-1)
#         return distance

#     def set_scaleshift(self,
#                        scale: Union[float, Tensor, List[Union[float, Tensor]]] = 1,
#                        shift: Union[float, Tensor, List[Union[float, Tensor]]] = 0,
#                        type_ref: Union[Tensor, ndarray, List[Union[Tensor, ndarray]]] = None,
#                        ):
#         """set scale, shift and type_ref"""

#         def _check_data(value, name: str):
#             if isinstance(value, (list, tuple)):
#                 if len(value) != 1:
#                     raise ValueError(f'The number of {name} must be equal to 1, '
#                                      f'but got: {len(value)}.')
#                 value = value[0]
#             value = get_tensor(value, torch.float32)
#             # NOTE(review): Tensor.size() returns a torch.Size (a tuple), so
#             # `value.size() != 1` compares a tuple to an int and is always True,
#             # rejecting every input. numel() gives the element count intended here.
#             if value.numel() != 1:
#                 raise ValueError(f'The size of {name} must be 1, but got: {value.numel()}')
#             return value
        
#         def _check_type_ref(ref) -> Tensor:
#             if ref is None:
#                 return None
#             ref = get_tensor(ref, torch.float32)
#             if ref.ndim != 2:
#                 raise ValueError(f'The rank (ndim) of type_ref must be 2, but got : {ref.ndim}')
#             if ref.shape[-1] != 1:
#                 raise ValueError(f'The last dimension of type_ref {ref.shape} must be 1, '
#                                  f'but got: {ref.shape[-1]}')
#             return ref

#         scale = _check_data(scale, 'scale')
#         shift = _check_data(shift, 'shift')
#         type_ref = _check_type_ref(type_ref)

#         if self.scaleshift is None:
#             self.scaleshift = nn.ModuleList([ScaleShift(scale=scale,
#                                          shift=shift,
#                                          type_ref=type_ref,
#                                          shift_by_atoms=self.readout[0].shift_by_atoms,
#                                         )])
#         else:
#             self.scaleshift[0].set_scaleshift(scale, shift, type_ref)
#         return self

#     def set_units(self, length_unit: str = None, energy_unit: str = None):
#         """set units"""
#         if length_unit is not None:
#             self.set_length_unit(length_unit)
#         if energy_unit is not None:
#             self.set_energy_unit(energy_unit)
#         return self

#     def set_length_unit(self, length_unit: str):
#         """set length unit"""
#         self.units.set_length_unit(length_unit)
#         self.input_unit_scale = self.embedding.convert_length_from(self.units)
#         return self

#     def set_energy_unit(self, energy_units: str):
#         """set energy unit"""
#         self.units.set_energy_unit(energy_units)
#         self.output_unit_scale = self.scaleshift[0].convert_energy_to(self.units)
#         return self
    
#     def print_info(self, num_retraction: int = 3, num_gap: int = 3, char: str = ' '):
#         """print the information of Cybertron"""
#         ret = char * num_retraction
#         gap = char * num_gap
#         print("================================================================================")
#         print("Cybertron Engine, Ride-on!")
#         print('-'*80)
#         if self.atom_type is None:
#             print(f'{ret} Using variable atom types with maximum number of atoms: {self.num_atoms}')
#         else:
#             # Fixed atom types branch (self.atom_type is not None); the message
#             # previously said 'variable atom types' — a copy-paste of line above.
#             print(f'{ret} Using fixed atom types with number of atoms: {self.atom_type.shape[1]}')
#         if self.bonds is not None:
#             print(ret+' Using fixed bond connection:')
#             for b in self.bonds[0]:
#                 print(ret+gap+' '+str(b.cpu().numpy()))
#             print(ret+' Fixed bond mask:')
#             for m in self.bond_mask[0]:
#                 print(ret+gap+' '+str(m.cpu().numpy()))
#         print('-'*80)
#         self.embedding._print_info(num_retraction=num_retraction,
#                                   num_gap=num_gap, char=char)
#         self.model._print_info(num_retraction=num_retraction,
#                               num_gap=num_gap, char=char)

#         print(ret+" With "+str(self.num_readouts)+" readout networks: ")
#         print('-'*80)
#         print(ret+" "+str(0)+(". "+self.readout[0]._get_name()))
#         self.readout[0]._print_info(
#             num_retraction=num_retraction, num_gap=num_gap, char=char)
#         self.scaleshift[0]._print_info(
#             num_retraction=num_retraction, num_gap=num_gap, char=char)

#         print(f'{ret} Input unit: {self.units.length_unit_name}')
#         print(f'{ret} Output unit: {self.units.energy_unit_name}')
#         print(f'{ret} Input unit scale: {self.input_unit_scale}')
#         print("================================================================================")
#         print(flush=True)
    
#     def forward(self,
#                 coordinate: Tensor,
#                 neighbour_index: Tensor = None,
#                 neighbour_mask: Tensor = None,
#                 neighbour_vector: Tensor = None,
#                 neighbour_distance: Tensor = None,
#                 pbc_box: Tensor = None
#                 ):
        
#         #pylint: disable=unused-argument

#         atom_type = self.atom_type
#         atom_mask = atom_type > 0

#         neighbour_distance = torch.concatenate((torch.zeros(*neighbour_distance.shape[:-1],1,device=neighbour_distance.device),neighbour_distance),axis=-1)
#         neighbour_mask = torch.concatenate((torch.zeros(*neighbour_mask.shape[:-1],1,dtype=bool,device=neighbour_mask.device),neighbour_mask),axis=-1)
#         neighbour_index = torch.concatenate((torch.arange(0,atom_type.shape[-1],device=atom_type.device).broadcast_to(neighbour_index.shape[:-1]).unsqueeze(-1),neighbour_index),axis=-1)
#         neighbour_vector = torch.concatenate((torch.zeros(*neighbour_vector.shape[:-2],1,3,device=neighbour_vector.device),neighbour_vector),axis=-2)

#         with torch.no_grad():
#             node_emb, node_mask, edge_emb, edge_mask, edge_cutoff = self.embedding(atom_type=atom_type,
#                                                                                 atom_mask=atom_mask,
#                                                                                 distance=neighbour_distance,
#                                                                                 dis_mask=neighbour_mask,
#                                                                                 neigh_idx=neighbour_index,
#                                                                                 )

#             node_rep, edge_rep = self.model(node_emb=node_emb,
#                                             node_mask=node_mask,
#                                             edge_emb=edge_emb,
#                                             edge_mask=edge_mask,
#                                             edge_cutoff=edge_cutoff,
#                                             neigh_idx=neighbour_index,
#                                             )

#             output = self.readout[0](node_rep=node_rep,
#                                         edge_rep=edge_rep,
#                                         node_emb=node_emb,
#                                         edge_emb=edge_emb,
#                                         edge_cutoff=edge_cutoff,
#                                         atom_type=atom_type,
#                                         atom_mask=atom_mask,
#                                         distance=neighbour_distance,
#                                         dis_mask=neighbour_mask,
#                                         dis_vec=neighbour_vector,
#                                         neigh_idx=neighbour_index,
#                                         )
            
#             num_atoms = atom_type.shape[-1]
#             if atom_mask is not None:
#                 num_atoms = torch.count_nonzero(atom_mask.to(torch.int16), dim=-1).unsqueeze(-1)

#             # Default to the raw readout so `force` is always bound; previously,
#             # when self.scaleshift was None, the `return` below raised NameError.
#             force = output
#             if self.scaleshift is not None:
#                 force = self.scaleshift[0](output, atom_type, num_atoms)
            
#             energy = torch.zeros(coordinate.shape[0], 1, dtype=torch.float32, device=coordinate.device)
#             virial = torch.zeros(coordinate.shape[0], 3, dtype=torch.float32, device=coordinate.device)
            
#             return energy, force, virial