# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simulated Coherent Ising Machine."""
# pylint: disable=invalid-name
import numpy as np
from scipy.sparse import csr_matrix

from .QAIA import QAIA


class SimCIM(QAIA):
    r"""
    Simulated Coherent Ising Machine.

    Reference: `Annealing by simulating the coherent Ising
    machine <https://opg.optica.org/oe/fulltext.cfm?uri=oe-27-7-10288&id=408024>`_.

    Args:
        J (Union[numpy.array, csr_matrix]): The coupling matrix with shape :math:`(N x N)`.
        h (numpy.array): The external field with shape :math:`(N, )`.
        x (numpy.array): The initialized spin value with shape :math:`(N x batch_size)`. Default: ``None``.
        n_iter (int): The number of iterations. Default: ``1000``.
        batch_size (int): The number of sampling. Default: ``1``.
        dt (float): The step size. Default: ``1``.
        momentum (float): momentum factor. Default: ``0.9``.
        sigma (float): The half-width of the uniform noise added at each step. Default: ``0.03``.
        pt (float): Pump parameter. Default: ``6.5``.
        st_v (float): Half-width of the ``tanh`` ramp used to build the pump schedule
            ``p_list``. Default: ``3``.
        fi_v (float): Currently unused; accepted and stored for interface
            compatibility. Default: ``None``.
        x_sat (float): Saturation amplitude; updates that would push ``|x|`` to or
            beyond this value are suppressed. Default: ``1.0``.
    """

    # pylint: disable=too-many-arguments, too-many-instance-attributes
    def __init__(
        self,
        J,
        h=None,
        x=None,
        n_iter=1000,
        batch_size=1,
        dt=1,
        momentum=0.9,
        sigma=0.03,
        pt=6.5,
        st_v=3,
        fi_v=None,
        x_sat=1.0,
    ):
        """Construct SimCIM algorithm."""
        super().__init__(J, h, x, n_iter, batch_size)
        # Sparse form speeds up the repeated J @ x products in update().
        self.J = csr_matrix(self.J)
        self.dt = dt
        self.momentum = momentum
        self.sigma = sigma
        self.pt = pt
        self.st_v = st_v
        # NOTE(review): fi_v is stored but never read in this class — confirm
        # whether it is still needed before removing it from the signature.
        self.fi_v = fi_v
        self.x_sat = x_sat

        self.initialize()

    def initialize(self):
        """Initialize spin values, the momentum buffer and the pump schedule."""
        # Initialization of spin value
        if self.x is None:
            self.x = np.zeros((self.N, self.batch_size))
        # gradient / momentum accumulator
        self.dx = np.zeros_like(self.x)
        if self.x.shape[0] != self.N:
            raise ValueError(f"The size of x {self.x.shape[0]} is not equal to the number of spins {self.N}")
        # pump-loss factor: a tanh ramp over [-st_v, st_v], shifted so every
        # entry is <= 0, then scaled by the pump parameter pt
        self.p_list = (np.tanh(np.linspace(-self.st_v, self.st_v, self.n_iter)) - 1) * self.pt

    def update(self):
        """Dynamical evolution with additive uniform noise on ``[-sigma, sigma]``."""
        for _, p in zip(range(self.n_iter), self.p_list):
            # uniform noise, drawn once per step and shared by both branches
            noise = np.random.uniform(low=-self.sigma, high=self.sigma, size=(self.N, self.batch_size))
            if self.h is None:
                newdc = self.x * p + (self.J.dot(self.x) * self.dt + noise)
            else:
                newdc = self.x * p + ((self.J.dot(self.x) + self.h) * self.dt + noise)
            # gradient + momentum
            self.dx = self.dx * self.momentum + newdc * (1 - self.momentum)
            # suppress updates that would push the amplitude past saturation
            ind = (np.abs(self.x + self.dx) < self.x_sat).astype(np.int64)
            self.x += self.dx * ind