import numpy as np

from optimizer.BaseOptimizer import BaseOptimizer
from utils.Ising import Ising


class Optimizer(BaseOptimizer):
    """Quantum-bifurcation-inspired hybrid optimizer (QSBGA).

    Evolves a swarm with simulated-bifurcation dynamics driven by an Ising
    coupling matrix (Kerr parametric oscillator model), and — once progress
    stalls — layers genetic operators (crossover, mutation), simulated
    annealing with Metropolis acceptance, PSO velocity updates and a greedy
    per-dimension sign-flip fine-tuning pass on top.
    """

    optimizer_name = 'QSBGA'

    def __init__(self, config: dict):
        """Initialize operator rates, KPO dynamics constants and the swarm state.

        :param config: configuration dict consumed by ``BaseOptimizer.__init__``.
        """
        # Placeholder coupling matrix; expected to be replaced with the real
        # Ising J matrix by BaseOptimizer.__init__ — TODO confirm against base class.
        self.ising_J = np.zeros((1, 1))
        # Per-step firing probabilities of the stochastic operators.
        self.crossover_rate = 0.001
        self.mutation_rate = 0.001
        self.sa_rate = 0.001
        self.fine_tune_rate = 0.001
        self.pso_rate = 0.001
        self.T = 100  # simulated-annealing temperature
        self.no_improve_limit = 100  # mean stagnation count that unlocks the heavy operators
        super().__init__(config)

        self.detuning_frequency = 1  # detuning frequency of the KPO
        self.kerr_constant = 1  # Kerr constant of the KPO
        self.pressure = lambda t: 0.01 * t  # KPO pump pressure schedule (linear ramp)
        self.time_step = 0.25  # integration time step
        self.symplectic_parameter = 1  # number of symplectic Euler sub-steps
        self.convergence_threshold = 35
        self.sampling_period = 60

        # Coupling strength normalization (0.7 * Delta / (std(J) * sqrt(N))).
        # NOTE(review): assumes BaseOptimizer.__init__ filled ising_J with
        # non-constant values; std == 0 would divide by zero — confirm.
        self.xi0 = 0.7 * self.detuning_frequency / (np.std(self.ising_J) * self.n_dim ** (1 / 2))

        self.symplectic_time_step = self.time_step / self.symplectic_parameter
        self.v_t = self.symplectic_time_step * self.detuning_frequency

        # Positions start at zero (was `0 * np.random.uniform(...)`, which is
        # both wasteful and NaN-prone for infinite bounds); velocities small random.
        self.xs = np.zeros(self.xs.shape)
        self.vs = 0.01 * np.random.random(self.xs.shape)

    def run_once(self, actions=None):
        """Perform one optimization step.

        Always applies one symplectic Euler update of the oscillator dynamics;
        once the population has stalled on average (``real_clpso_flag``),
        additionally applies crossover / mutation / SA / PSO / fine-tuning.

        :param actions: unused; kept for interface compatibility with the base class.
        """
        self.clip()
        self.best_update()

        # Symplectic Euler update of the Kerr-nonlinear oscillator dynamics.
        self.factor = self.pressure(self.step_num * self.time_step)
        self.vs -= self.symplectic_time_step * (self.kerr_constant * self.xs ** 3 - self.factor * self.xs)
        self.vs += self.time_step * self.xi0 * (self.ising_J @ self.xs.T).T

        self.xs = self.xs + self.v_t * self.vs

        # Skip the heavy operators until the swarm has stalled long enough.
        if np.mean(self.real_clpso_flag) < self.no_improve_limit:
            return

        for i in range(self.n_part):
            # Crossover: pick a partner (up to 3 attempts, preferring
            # better-than-average fitness) and uniformly mix dimensions.
            # BUGFIX: the retry loop used to reuse `i` as its loop variable,
            # clobbering the particle index for the rest of this iteration.
            if np.random.random() < self.crossover_rate:
                target_index = 0
                for _attempt in range(3):
                    target_index = np.random.randint(0, self.n_part)
                    if self.fits[target_index] < np.mean(self.fits):
                        break
                if i != target_index and i != self.best_index:
                    # Uniform crossover mask; child 1 keeps dims where mask is
                    # True from particle i, child 2 gets the complement.
                    mask = np.random.random(self.n_dim) < 0.5
                    new_x1 = np.where(mask, self.xs[i], self.xs[target_index])
                    new_v1 = np.where(mask, self.vs[i], self.vs[target_index])
                    new_x2 = np.where(mask, self.xs[target_index], self.xs[i])
                    new_v2 = np.where(mask, self.vs[target_index], self.vs[i])
                    self.xs[i] = new_x1
                    self.vs[i] = new_v1
                    # Only overwrite the partner when it is not the elite and
                    # the current particle is better than average.
                    if target_index != self.best_index and self.fits[i] < np.mean(self.fits):
                        self.xs[target_index] = new_x2
                        self.vs[target_index] = new_v2

            # Mutation: rescale a non-elite particle by per-dimension random
            # factors drawn from [-2, 2).
            if i != self.best_index and np.random.random() < self.mutation_rate:
                scale = np.random.random(self.n_dim) * 4 - 2
                self.xs[i] = scale * self.xs[i]
                self.vs[i] = scale * self.vs[i]

            # Simulated annealing: a stalled non-elite particle takes a random
            # jump, accepted by the Metropolis criterion.
            if i != self.best_index and np.sum(self.real_clpso_flag) >= 0.1 * self.fe_max:
                if np.random.random() < self.sa_rate * self.real_clpso_flag[i]:
                    x_new = self.get_new_x(self.xs[i])
                    y_new = self.evaluate(x_new)

                    # Metropolis acceptance: always take improvements, accept
                    # worse moves with probability exp(-df / T).
                    df = y_new - self.fits[i]
                    if df < 0 or np.exp(-df / self.T) > np.random.rand():
                        self.xs[i], self.fits[i] = x_new, y_new
                        if y_new < self.atom_history_best_fits[i]:
                            self.clpso_flag[i] = 0

            # PSO velocity update toward the personal and global bests.
            if i != self.best_index and np.random.random() < self.pso_rate:
                w = 0.5
                c1 = c2 = 0.3
                self.vs[i] = w * self.vs[i] \
                             + c1 * np.random.random(self.n_dim) * (self.p_best[i] - self.xs[i]) \
                             + c2 * np.random.random(self.n_dim) * (self.history_best_x - self.xs[i])

            # Greedy fine-tuning for long-stalled particles; the elite
            # particle gets a 6x higher trigger chance.
            if self.real_clpso_flag[i] > 70:
                if np.random.random() < self.fine_tune_rate * (1 + 5 * (i == self.best_index)):
                    self.fine_tune(i)

        # Cool the SA temperature once the whole population has been stalled
        # for long enough, then reset the stagnation counters.
        if np.mean(self.clpso_flag) > 150 and np.sum(self.real_clpso_flag) > 0.1 * self.fe_max:
            self.cool_down()
            self.clpso_flag[:] = 0

    def get_new_x(self, x):
        """Generate an SA neighbor of *x* with a temperature-scaled jump.

        Uses a very-fast-annealing-style perturbation: step magnitude shrinks
        with the temperature ``self.T``.

        :param x: current position, array of length ``n_dim``.
        :return: perturbed copy of *x* (the input is not modified).
        """
        u = np.random.uniform(-1, 1, size=self.n_dim)
        x_new = x + 20 * np.sign(u) * self.T * ((1 + 1.0 / self.T) ** np.abs(u) - 1.0)
        return x_new

    def cool_down(self):
        """Geometrically cool the simulated-annealing temperature (factor 0.7)."""
        self.T = self.T * 0.7
        # CONSISTENCY FIX: debug output was unconditional; now gated on
        # self.show like the logging in fine_tune.
        if self.show:
            print(self.T)

    def fine_tune(self, i):
        """Greedy coordinate-wise sign-flip local search on particle *i*.

        Repeatedly flips the sign of one dimension at a time (first-improvement
        strategy, resuming from the last improving dimension) until no single
        flip improves the fitness, then commits the best solution found.

        :param i: index of the particle to fine-tune.
        """
        if self.show:
            print(f'finetune {i}')
        need_fine_x = self.xs[i].copy()
        fit = self.fits[i]
        best_update = True
        best_x_cache = need_fine_x
        best_fit_cache = fit
        dim_cache = 0  # dimension to resume the scan from (first-improvement restart)
        fine_epoch = 0
        while best_update:
            best_update = False
            fine_epoch += 1
            for dd in range(self.n_dim):
                d = (dd + dim_cache) % self.n_dim
                new_x = need_fine_x.copy()
                new_x[d] = - new_x[d]
                new_fit = self.evaluate(new_x)
                if new_fit < best_fit_cache:
                    best_x_cache = new_x
                    best_fit_cache = new_fit
                    best_update = True
                    if self.show:
                        print(f'fine_epoch:{fine_epoch} best:{new_fit}')
                    dim_cache = d
                    break

            if best_update:
                need_fine_x = best_x_cache
        # Commit only if the search actually improved on the starting fitness.
        if best_fit_cache != fit:
            self.xs[i] = best_x_cache
            self.fits[i] = best_fit_cache
