# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of MindSPONGE:
# MindSpore Simulation Package tOwards Next Generation molecular modelling.
#
# MindSPONGE is open-source software based on the AI-framework:
# MindSpore (https://www.mindspore.cn/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dynamic ITS"""

from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

from .wrapper import EnergyWrapper
from ... import function as func
from ...function import get_tensor, get_integer


class DynamicITS(EnergyWrapper):
    r"""Energy wrapper for dynamic integrated tempering sampling (ITS).

    Math:

    .. math::

        U_{eff}(R) = -\frac{1}{\beta_0}
                     \log{\sum_k^T {n_k e^{-\beta_k U(R)}}}

    Args:

        num_walker (int):       Number of multiple walkers (batch size `B`).

        ref_temp (float):       Reference (simulation) temperature :math:`T_0`.

        max_temp (float):       Maximum temperature for integration.

        temp_bin (int):         Number of temperatures for integration.
                                Default: 50

        update_pace (int):      Frequency for updating the ITS weighting
                                factors. A value of 0 disables the iteration.
                                Default: 0

        energy_shift (float):   Initial shift value for the potential energy.
                                Default: 0

        threshold (float):      Threshold used by the dynamic temperature
                                schedule. Stored for later use. Default: 0.1

        temp_increase (float):  Relative temperature increment used when
                                activating new temperature bins. Stored for
                                later use. Default: 0.1

        memory_time (float):    Memory (decay) factor of the iteration.
                                Stored for later use. Default: 0.8

        peshift_scale (float):  Scale factor applied when enlarging the
                                potential-energy shift. Default: 1.5

        length_unit (str):      Length unit. If None is given, it will be
                                assigned with the global length unit.
                                Default: None

        energy_unit (str):      Energy unit. If None is given, it will be
                                assigned with the global energy unit.
                                Default: None

    Supported Platforms:

        ``Ascend`` ``GPU``

    Symbols:

        B:  Batchsize, i.e. number of walkers in simulation.

        T:  Number of integration temperatures.

        U:  Dimension of potential energy.

    """

    # Log-space representation of a zero weight: logaddexp(_LOG_ZERO, x) ~= x.
    # NOTE(review): chosen as a large negative constant instead of -inf to
    # keep the log-space arithmetic finite -- confirm against the reference
    # MindSPONGE implementation.
    _LOG_ZERO = -5e4

    def __init__(self,
                 num_walker: int,
                 ref_temp: float,
                 max_temp: float,
                 temp_bin: int = 50,
                 update_pace: int = 0,
                 energy_shift: float = 0,
                 threshold: float = 0.1,
                 temp_increase: float = 0.1,
                 memory_time: float = 0.8,
                 peshift_scale: float = 1.5,
                 length_unit: str = None,
                 energy_unit: str = None,
                 ):

        super().__init__(
            update_pace=update_pace,
            length_unit=length_unit,
            energy_unit=energy_unit,
            )

        self.num_walker = get_integer(num_walker)
        self.temp_bin = get_integer(temp_bin)
        # Number of independent parameter sets. Without a `share_parameter`
        # option every walker owns its own set of ITS parameters.
        # NOTE(review): `_check_temp`/`_check_ratio` validate against this
        # attribute, which the original code never defined.
        self.num_parameter = self.num_walker

        self.boltzmann = get_tensor(self.units.boltzmann, torch.float32)

        # () scalar reference temperature
        self.ref_temp = get_tensor(ref_temp, torch.float32).reshape(())

        self.ref_kbt = self.boltzmann * self.ref_temp
        self.ref_beta = 1.0 / self.ref_kbt

        self.max_temp = get_tensor(max_temp, torch.float32).reshape(())
        # clone() so that set_sim_temp() cannot silently mutate ref_temp:
        # nn.Parameter shares storage with the tensor it wraps.
        self.sim_temp = nn.Parameter(self.ref_temp.clone(), requires_grad=False)

        # (T) integration temperatures; all bins start at the reference
        # temperature except the last one, which is pinned to max_temp.
        temperatures = torch.full((self.temp_bin,), self.ref_temp.item(),
                                  dtype=torch.float32)
        temperatures[-1] = self.max_temp
        self.temperatures = nn.Parameter(temperatures, requires_grad=False)

        # (T) mask of temperature bins currently in use
        mask = torch.zeros((self.temp_bin,), dtype=torch.bool)
        mask[-1] = True
        self.temp_mask = nn.Parameter(mask, requires_grad=False)

        # (B, T)
        # self.weight_factors: \log{n_k}
        # Allocated per walker because update() copies a (B, T) tensor into it.
        self.weight_factors = nn.Parameter(
            torch.zeros((self.num_walker, self.temp_bin), dtype=torch.float32),
            requires_grad=False)

        # (B, T) reset value for the accumulated log partitions (log-space zero)
        self.register_buffer(
            'zero_rbfb',
            torch.full((self.num_walker, self.temp_bin), self._LOG_ZERO,
                       dtype=torch.float32))
        # (B, T) accumulated \log{P_k(t)} (see forward())
        self.partitions = nn.Parameter(self.zero_rbfb.clone(),
                                       requires_grad=False)
        # (B, T) log normalization \log{W_k(t)}; W_k(0) = 0.
        self.normal = nn.Parameter(self.zero_rbfb.clone(), requires_grad=False)

        # (B, 1) last potential energy and effective energy
        self.energy = nn.Parameter(
            torch.zeros((self.num_walker, 1), dtype=torch.float32),
            requires_grad=False)
        self.eff_energy = nn.Parameter(
            torch.zeros((self.num_walker, 1), dtype=torch.float32),
            requires_grad=False)

        # (B, 1) running minimum of the shifted potential energy.
        # NOTE(review): update()/forward() read `self.min_energy`; the
        # original code defined `minimum_energy` (never referenced) with a
        # scalar shape that forward() could not copy a (B, 1) tensor into.
        self.min_energy = nn.Parameter(
            torch.zeros((self.num_walker, 1), dtype=torch.float32),
            requires_grad=False)

        # (B, 1) energy shift, per walker so that change_energy_shift() can
        # store walker-specific shifts computed from -min_energy.
        energy_shift = torch.full((self.num_walker, 1), float(energy_shift),
                                  dtype=torch.float32)
        self.energy_shift = nn.Parameter(energy_shift, requires_grad=False)

        max_index = get_tensor(self.temp_bin-1, dtype=torch.int32)
        # index of current minimum temperature
        # clone() so min_index and max_index do not alias the same storage.
        self.min_index = nn.Parameter(max_index.clone(), requires_grad=False)
        # index of current maximum temperature
        self.max_index = nn.Parameter(max_index.clone(), requires_grad=False)

        # number of completed update iterations
        self.step = nn.Parameter(get_tensor(0, dtype=torch.int32),
                                 requires_grad=False)

        # hyper-parameters of the dynamic iteration schedule
        self.threshold = threshold
        self.memory_time = memory_time
        self.temp_increase = temp_increase
        self.peshift_scale = peshift_scale

    @property
    def sim_kbt(self) -> Tensor:
        """k_B T at the current simulation temperature."""
        return self.get_sim_kbt()

    @property
    def sim_beta(self) -> Tensor:
        """Inverse temperature 1/(k_B T) at the simulation temperature."""
        return self.get_sim_beta()

    @property
    def kbt_array(self) -> Tensor:
        """(T) array of k_B T_k for all integration temperatures."""
        return self.get_kbt_array()

    @property
    def beta_array(self) -> Tensor:
        """(T) array of beta_k = 1/(k_B T_k) for all integration temperatures."""
        return self.get_beta_array()

    def get_kbt(self, temperature: Tensor) -> Tensor:
        """Return k_B T for the given temperature(s)."""
        return self.boltzmann * temperature

    def get_beta(self, temperature: Tensor) -> Tensor:
        """Return beta = 1/(k_B T) for the given temperature(s)."""
        return 1.0 / (self.boltzmann * temperature)

    def get_sim_kbt(self) -> Tensor:
        """Return k_B T at the current simulation temperature."""
        return self.boltzmann * self.sim_temp

    def get_sim_beta(self) -> Tensor:
        """Return beta at the current simulation temperature."""
        return 1.0 / (self.boltzmann * self.sim_temp)

    def get_kbt_array(self) -> Tensor:
        """Return the (T) array of k_B T_k."""
        return self.boltzmann * self.temperatures

    def get_beta_array(self) -> Tensor:
        """Return the (T) array of beta_k."""
        return 1.0 / (self.boltzmann * self.temperatures)

    def set_sim_temp(self, temperature: Tensor) -> Tensor:
        """Set the simulation temperature in place.

        Args:
            temperature (Tensor): New simulation temperature.

        Returns:
            Tensor, the temperature that was set.
        """
        self.sim_temp.copy_(temperature)
        return temperature

    def change_energy_shift(self, peshift: Tensor) -> Tensor:
        """Change the energy shift and compensate the weighting factors.

        Args:
            peshift (Tensor): New energy shift. The shape of tensor is
                `(B, 1)`, the data type is float.

        Returns:
            Tensor, the scaled energy shift actually applied.
        """
        # (B, 1) enlarge the shift so it is not immediately exceeded again
        peshift = peshift * self.peshift_scale
        # (B, T) = (T) * (B, 1)
        fb0 = self.beta_array * self.energy_shift
        # (B, T) = (T) * (B, 1) - (B, T)
        fb_add = self.beta_array * peshift - fb0
        # compensate \log{n_k} so the enhanced distribution is unchanged
        self.weight_factors.add_(fb_add)
        self.energy_shift.copy_(peshift)
        return peshift

    def update(self):
        """Update the ITS weighting factors from the accumulated partitions.

        Performs one step of the self-consistent iteration of the weighting
        factors :math:`n_k`, then resets the accumulated partitions.

        Returns:
            self
        """
        if self.update_pace == 0:
            # iteration disabled
            return self

        # (B, 1)
        min_energy = self.min_energy
        # (B, T)
        partitions = self.partitions

        # NOTE(review): `min_energy` already contains `energy_shift` (see
        # forward()), so this condition tests the doubly-shifted energy --
        # confirm against the reference implementation.
        if (min_energy + self.energy_shift < 0).any():
            self.change_energy_shift(-min_energy)

        self.step.copy_(self.step + 1)

        # (B, T)
        # p_k(t) = n_k e ^ {-\beta_k E[R(t)]}
        rb = partitions - self.normal

        # (B, T)
        # normal: \log{W_k(t)}
        # W_k(t) = \sum_{\tau}^{t}{w_k(\tau)} = W_k(t-1) + w_k(t)
        normal = torch.logaddexp(self.normal, rb)

        # (B, T)
        # weights: \log{w'_k(t)}
        # w'_k(t) = w_k(t) * r_k(t)
        weights = rb + self.weight_factors

        # (B, T)
        # weights_norm: \log{W'_k(t)}
        # W'_k(t) = W_k(t-1) + w'_k(t)
        weights_norm = torch.logaddexp(self.normal, weights)

        # (B, T-1)
        # fb_ratio0: \log{r_k(t)}
        # r_k(t) = \frac{m_k(t)}{m_{k+1}(t)}
        fb_ratio0 = self.weight_factors[..., 1:] - self.weight_factors[..., :-1]

        # (B, T-1)
        # rbfb_ratio0: \log{p_k(t) r_k(t)}
        rbfb_ratio0 = rb[..., :-1] + fb_ratio0

        # (B, T-1)
        # fb_ratio1: \log{r_k(t+1)}
        # r_k(t+1) = \frac{m_k(t+1)}{m_{k+1}(t+1)}
        #          = r_k(t) \frac{W'_k(t)}{W_{k+1}(t)} \frac{W_{k+1}(t-1)}{W'_{k+1}(t)}
        #
        # At the first iteration step, W_k(0) = 0, then m_k(1) = m'_k(1) = m_k(0) r_k(0)
        fb_ratio1 = fb_ratio0 + torch.logaddexp(self.normal[..., :-1], rbfb_ratio0 + weights[..., :-1]) - weights_norm[..., 1:]

        # (B, T-1)
        # fb_new: \log{n_k(t+1)}
        # n_k(t+1) = n_1(t+1) \prod_{i=1}^{k-1}{\frac{n_{i+1}(t+1)}{n_i(t+1)}}
        #         = n_1(t+1) \prod_{i=1}^{k-1}{\frac{1}{r_i(t+1)}}
        #         = n_1(t+1) e ^ {-\sum_{i=1}^{k-1}{\log{r_i(t+1)}}}
        fb_new = torch.cumsum(-fb_ratio1, -1)

        # (B, T)
        # prepend \log{n_1(t+1)} = 0, i.e. normalize to n_1(t+1) = 1
        fb_new = torch.cat([torch.zeros_like(fb_new[..., :1]), fb_new], dim=-1)

        # (B, T)
        self.weight_factors.copy_(fb_new)
        # reset the accumulated partitions to log-space zero
        self.partitions.copy_(self.zero_rbfb)
        # (B, T)
        self.normal.copy_(normal)

        return self

    def forward(self, potentials: Tensor, biases: Tensor = None) -> Tuple[Tensor, Tensor]:
        """Calculate the ITS effective energy and the corresponding bias.

        Args:
            potentials (Tensor): Potential energies. The shape of tensor is
                `(B, U)`, the data type is float. Only the first term
                (index 0) is enhanced.
            biases (Tensor): Bias potentials. Currently unused.
                Default: None

        Returns:
            energy (Tensor): Effective energy with shape `(B, 1)`.
            bias (Tensor): Bias `energy - potential` with shape `(B, 1)`.
        """
        # (B, 1)
        potential = potentials[..., 0:1]

        # (B, 1)
        enhanced_energy = potential + self.energy_shift

        if self.update_pace > 0:
            # (B, 1) track the running minimum of the shifted energy
            min_energy = enhanced_energy.detach()
            min_energy = torch.where(min_energy < self.min_energy, min_energy, self.min_energy)
            self.min_energy.copy_(min_energy)

        # (B, T)
        # gf: \log{n_k e ^ {-\beta_k E[R(t)]}}
        # The Boltzmann exponent must be negative, matching the effective
        # energy formula below.
        gf = self.weight_factors - self.beta_array * enhanced_energy

        if self.update_pace > 0:
            # (B, T)
            # P_k(t) = \sum_{\tau}^{t}{p_k(\tau)}
            #        = \sum_{\tau}^{t}{n_k e ^ {-\beta_k E[R(\tau)]}}
            rbfb = torch.logaddexp(self.partitions, gf.detach())
            self.partitions.copy_(rbfb)

        # (B, 1)
        # \log{\sum_k^N {n_k e ^ {-\beta_k U(R)}}}
        gfsum = torch.logsumexp(gf, -1, keepdim=True)

        # (B, 1)
        # U_{eff}(R) = -\frac{1}{\beta_0} \log{\sum_k^N {n_k e ^ {-\beta_k U(R)}}}
        energy = -gfsum / self.ref_beta

        # (B, 1)
        bias = energy - potential

        return energy, bias

    def _check_temp(self, temp, name: str) -> Tensor:
        """Validate a temperature-like value and broadcast it per parameter set.

        Args:
            temp: Scalar or 1-D temperature value(s).
            name (str): Variable name used in error messages.

        Returns:
            Tensor with shape `(num_parameter,)` (when scalar input) or the
            validated input tensor.

        Raises:
            ValueError: If the rank is larger than 1 or the size does not
                match the number of parameter sets.
        """
        temp = get_tensor(temp)
        if temp.ndim > 1:
            raise ValueError(f'The rank(ndim) of {name} cannot be larger than 1 '
                             f'but got: {temp.ndim}')
        # torch tensors expose numel() (MindSpore-style `.size` is a method here)
        if temp.numel() > 1:
            if temp.numel() != self.num_parameter:
                raise ValueError(f'The size of {name} ({temp.numel()}) cannot match '
                                 f'the number of parameter(s) ({self.num_parameter})')
        else:
            # scalar input: replicate for every parameter set
            temp = torch.broadcast_to(temp.reshape((1,)), (self.num_parameter,))
        return temp

    def _check_ratio(self, ratio, name: str) -> Tensor:
        """Validate a ratio-like value and broadcast it per parameter set.

        Args:
            ratio: Scalar, 1-D or 2-D ratio value(s).
            name (str): Variable name used in error messages.

        Returns:
            Tensor with shape `(num_parameter, X)`.

        Raises:
            ValueError: If the rank is larger than 2 or the first dimension
                does not match the number of parameter sets.
        """
        ratio = get_tensor(ratio)
        if ratio.ndim > 2:
            raise ValueError(f'The rank(ndim) of {name} cannot be larger than 2 '
                             f'but got: {ratio.ndim}')
        if ratio.ndim < 2:
            # promote scalars and vectors to a single parameter row
            ratio = ratio.reshape(1, -1)
        if ratio.shape[0] > 1:
            if ratio.shape[0] != self.num_parameter:
                raise ValueError(f'The 1st dimension of {name} ({ratio.shape[0]}) does not match '
                                 f'the number of parameter(s) ({self.num_parameter})')
        else:
            # single row: replicate for every parameter set
            ratio = torch.broadcast_to(ratio, (self.num_parameter, ratio.shape[-1]))
        return ratio
