# ============================================================================
# Copyright 2021 The AIMM team at Shenzhen Bay Laboratory & Peking University
#
# People: Yi Isaac Yang, Jun Zhang, Diqing Chen, Yaqiang Zhou, Huiyang Zhang,
#         Yupeng Huang, Yijie Xia, Yao-Kun Lei, Lijiang Yang, Yi Qin Gao
# 
# This code is a part of Cybertron-Code package.
#
# The Cybertron-Code is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import scipy
import scipy.special

import mindspore as ms
import mindspore.numpy as msnp
from mindspore import nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F

from cybertroncode.units import units

__all__ = [
    "GaussianSmearing",
    "LogGaussianDistribution",
    "LogLegendreBasisExpansion"
]

# radial_filter in RadialDistribution
class GaussianSmearing(nn.Cell):
    r"""Smear layer expanding distances over a set of Gaussian functions.

    The distances are expanded onto ``num_rbf`` Gaussians whose centers are
    evenly spaced on :math:`[d_{min}, d_{max}]`.

    Args:
        d_min (float, optional): center of the first Gaussian function,
            :math:`\mu_0`. Default: 0.
        d_max (float, optional): center of the last Gaussian function,
            :math:`\mu_{N_g-1}`. Default: 1 nm in the global length unit.
        num_rbf (int, optional): total number of Gaussian functions,
            :math:`N_g`. Default: 32.
        sigma (float, optional): width shared by all Gaussian functions.
            If None, the spacing between adjacent centers,
            ``(d_max - d_min) / (num_rbf - 1)``, is used. Default: None.
        centered (bool, optional): if True, Gaussians are centered at the
            origin and the offsets are used as their widths (used e.g. for
            angular functions). Default: False.
        trainable (bool, optional): if True, widths and offsets of the
            Gaussian functions are adjusted during training. Default: False.

    Raises:
        ValueError: if ``d_max <= d_min``, or if ``sigma`` is None and
            ``num_rbf < 2`` (the default width would divide by zero).

    """

    def __init__(
        self, d_min=0, d_max=units.length(1,'nm'), num_rbf=32, sigma=None, centered=False, trainable=False
    ):
        super().__init__()
        # validate inputs, consistent with the other basis classes below
        if d_max <= d_min:
            raise ValueError('The argument "d_max" must be larger '
                'than the argument "d_min" in GaussianSmearing!')
        if sigma is None:
            if num_rbf < 2:
                raise ValueError('The argument "num_rbf" must be at least 2 '
                    'when "sigma" is None in GaussianSmearing!')
            # default width: the spacing between adjacent centers
            sigma = (d_max - d_min) / (num_rbf - 1)

        # evenly spaced Gaussian centers on [d_min, d_max]
        offset = msnp.linspace(d_min, d_max, num_rbf, dtype=ms.float32)
        width = sigma * F.ones_like(offset)

        self.width = width
        self.offset = offset
        self.centered = centered

        if trainable:
            # register as Parameters so the optimizer can update them
            self.width = ms.Parameter(width, "widths")
            self.offset = ms.Parameter(offset, "offset")

        self.exp = P.Exp()

    def construct(self, distances):
        """Compute the Gaussian expansion of distance values.

        Args:
            distances (Tensor): interatomic distance values; presumably of
                shape (N_b, N_at, N_nbh) — TODO confirm against callers.

        Returns:
            Tensor: expanded distances with a trailing axis of size num_rbf.

        """
        # add a trailing axis so the subtraction broadcasts over the centers
        ex_dis = F.expand_dims(distances, -1)
        if not self.centered:
            # width of the Gaussian functions (an overlap of 1 STDDEV)
            coeff = -0.5 / F.square(self.width)
            diff = ex_dis - self.offset
        else:
            # centered mode: offsets act as widths, no center is subtracted
            coeff = -0.5 / F.square(self.offset)
            diff = ex_dis
        # smeared distance values
        gauss = self.exp(coeff * F.square(diff))
        return gauss
        
class LogGaussianDistribution(nn.Cell):
    r"""Expand distances over Gaussians evenly spaced in logarithmic space.

    Distances are normalized by ``d_max``, optionally clipped, and their
    logarithm is compared with ``num_rbf`` centers spaced evenly on
    :math:`[\log(d_{min}/d_{max}), 0]`.

    Args:
        d_min (float, optional): smallest resolved distance; must be > 0.
            Default: 0.05 nm in the global length unit.
        d_max (float, optional): largest resolved distance; must be larger
            than ``d_min``. Default: 1 nm in the global length unit.
        num_rbf (int, optional): number of basis functions. Default: 32.
        sigma (float, optional): width of the log-space Gaussians. If None,
            the spacing between adjacent log-space centers is used.
        trainable (bool, optional): accepted for interface compatibility.
            NOTE(review): currently unused in this class.
        min_cutoff (bool, optional): if True, clip normalized distances from
            below at ``d_min / d_max``. Default: False.
        max_cutoff (bool, optional): if True, zero the output where the
            distance is >= ``d_max``. Default: False.

    Raises:
        ValueError: if ``d_max <= d_min`` or ``d_min <= 0``.

    """
    def __init__(
        self,
        d_min=units.length(0.05,'nm'),
        d_max=units.length(1,'nm'),
        num_rbf=32,
        sigma=None,
        trainable=False,
        min_cutoff=False,
        max_cutoff=False,
    ):
        super().__init__()
        if d_max <= d_min:
            raise ValueError('The argument "d_max" must be larger '
                'than the argument "d_min" in LogGaussianDistribution!')

        if d_min <= 0:
            raise ValueError('The argument "d_min" must be '
                'larger than 0 in LogGaussianDistribution!')

        self.d_max = d_max
        # store d_min relative to d_max: all math below uses normalized distances
        self.d_min = d_min / d_max
        self.min_cutoff = min_cutoff
        self.max_cutoff = max_cutoff

        log_dmin = msnp.log(self.d_min, dtype=ms.float32)
        # Gaussian centers evenly spaced on [log(d_min/d_max), 0]
        self.centers = msnp.linspace(log_dmin, 0, num_rbf, dtype=ms.float32)
        self.ones = F.ones_like(self.centers)

        if sigma is None:
            # default width: the spacing between adjacent log-space centers
            sigma = -log_dmin / (num_rbf - 1)
        self.rescale = -0.5 / (sigma * sigma)

        self.log = P.Log()
        self.exp = P.Exp()
        self.max = P.Maximum()
        self.min = P.Minimum()

    def construct(self, distance):
        """Compute the log-Gaussian expansion of distance values.

        Args:
            distance (Tensor): interatomic distance values.

        Returns:
            Tensor: expanded distances with a trailing axis of size num_rbf.

        """
        dis = distance / self.d_max

        if self.min_cutoff:
            # clip from below so the logarithm stays finite
            dis = self.max(dis, self.d_min)

        exdis = F.expand_dims(dis, -1)
        rbfdis = exdis * self.ones

        log_dis = self.log(rbfdis)
        log_diff = log_dis - self.centers
        log_diff2 = F.square(log_diff)
        log_gauss = self.exp(self.rescale * log_diff2)

        if self.max_cutoff:
            # hard cutoff: zero all basis values beyond d_max
            ones = F.ones_like(exdis)
            zeros = F.zeros_like(exdis)
            cuts = F.select(exdis < 1.0, ones, zeros)
            log_gauss = log_gauss * cuts

        return log_gauss


class LogLegendreBasisExpansion(nn.Cell):
    r"""Expand distances over Legendre polynomials of the log-scaled distance.

    The normalized log-distance is mapped linearly onto :math:`[-1, 1]`
    (``d_min`` -> -1, ``d_max`` -> 1) and evaluated with the Legendre
    polynomials :math:`P_1 \dots P_K` via their power-series coefficients.

    Args:
        d_min (float, optional): smallest resolved distance; must be > 0.
            Default: 0.05 nm (the previous default of 0 always raised
            ValueError, making the default constructor unusable).
        d_max (float, optional): largest resolved distance; must be larger
            than ``d_min``. Default: 1 nm in the global length unit.
        num_rbf (int, optional): number of basis functions, K. Default: 8.
        min_cutoff (bool, optional): accepted for interface compatibility.
            NOTE(review): currently unused in this class.
        max_cutoff (bool, optional): accepted for interface compatibility.
            NOTE(review): currently unused in this class.
        sigma (float, optional): accepted for interface compatibility; unused.
        trainable (bool, optional): accepted for interface compatibility; unused.

    Raises:
        ValueError: if ``d_max <= d_min`` or ``d_min <= 0``.

    """
    def __init__(
        self,
        d_min=units.length(0.05,'nm'),
        d_max=units.length(1,'nm'),
        num_rbf=8,
        min_cutoff=True,
        max_cutoff=True,
        ###
        sigma=None,
        trainable=False,
    ):
        super().__init__()
        if d_max <= d_min:
            raise ValueError('The argument "d_max" must be larger '
                'than the argument "d_min" in LogLegendreBasisExpansion!')

        if d_min <= 0:
            raise ValueError('The argument "d_min" must be '
                'larger than 0 in LogLegendreBasisExpansion!')

        self.d_max = d_max
        self.d_min = d_min / d_max

        # maps log(d/d_max) from [log(d_min/d_max), 0] onto [-2, 0];
        # adding 1 in construct() yields the Legendre domain [-1, 1]
        self.scale = -2.0 / msnp.log(self.d_min)

        # K: number of basis functions (Legendre polynomials P_1..P_K)
        self.num_rbf = num_rbf
        # K+1: coefficient count per polynomial (degree up to K)
        self.nbasis = num_rbf + 1

        ### Create Legendre coefficient table:
        c_pad_list = []
        for i in range(1, self.nbasis):
            # scipy returns the i+1 coefficients of P_i, highest power first
            constant = Tensor(scipy.special.legendre(i).c, ms.float32)
            # left-pad with zeros up to the common length K+1
            zero_pad = msnp.zeros([num_rbf - i], dtype=ms.float32)
            # [1, K+1]
            c_pad = msnp.concatenate([zero_pad, constant], axis=0).reshape(1, -1)
            c_pad_list.append(c_pad)

        # [K, K+1]: one row per polynomial. Stack on axis=0; the original
        # concatenated on axis=1 and relied on the reshape in construct()
        # to recover this exact layout.
        self.c_matrix = msnp.concatenate(c_pad_list, axis=0)

        # exponents (K, K-1, ..., 1, 0) matching the coefficient order
        power_num = msnp.arange(num_rbf, -1, -1, dtype=ms.int32)
        self.power_num = F.reshape(power_num, (1, 1, 1, -1))

        self.log = P.Log()
        self.pow = P.Pow()
        self.mmt = nn.MatMul(transpose_x2=True)

    def construct(self, distance):
        """Compute the log-Legendre expansion of distance values.

        Args:
            distance (Tensor): interatomic distance values; presumably of
                shape (B, A, N) — TODO confirm against callers.

        Returns:
            Tensor: expanded distances with a trailing axis of size num_rbf.

        """
        # [B,A,N]: map distances onto the Legendre domain [-1, 1]
        r = self.log(distance / self.d_max) * self.scale + 1

        # [B,A,N,K+1]: powers r^K, r^(K-1), ..., r^0
        new_shape = r.shape + (self.nbasis,)
        r_power = msnp.broadcast_to(F.expand_dims(r, -1), new_shape)
        r_power = self.pow(r_power, self.power_num)

        # [K,K+1] -> [1,1,K,K+1] so it broadcasts against the batch axes
        c_matrix = F.reshape(self.c_matrix, (1, 1, self.num_rbf, self.nbasis))

        # [B,A,N,K+1] @ [1,1,K,K+1]^T -> [B,A,N,K]
        rbf = self.mmt(r_power, c_matrix)

        return rbf