import platgo as pg
import numpy as np


class FRCG(pg.Algorithm):
    """
    Fletcher-Reeves conjugate gradient for single-objective real-coded problems.

    Each generation: estimate the gradient by forward finite differences,
    build a conjugate search direction with the Fletcher-Reeves formula
    (restarting with steepest descent every D+1 iterations, or whenever the
    conjugate direction is not a descent direction), then take a step chosen
    by an Armijo backtracking line search.

    reference: https://github.com/BIMK/PlatEMO/tree/master/PlatEMO/Algorithms/Single-objective%20optimization/FRCG
    """

    type: dict = {'single': True, 'multi': False, 'many': False, 'real': True, 'binary': False, 'permutation': False,
                  "large": True, 'expensive': False, 'constrained': False, 'preference': False, 'multimodal': False,
                  'sparse': False, 'gradient': True}

    def __init__(self, maxgen: int, problem: pg.Problem, beta: float = 0.6, sigma: float = 0.4) -> None:
        """
        :param maxgen: maximum number of generations
        :param problem: problem to be optimized
        :param beta: backtracking shrink factor, a parameter within [0,1]
        :param sigma: Armijo sufficient-decrease parameter within [0 0.5]
        """
        self.name = "FRCG"
        self.gk = None  # most recent gradient estimate; refreshed each generation in go()
        self.beta = beta
        self.sigma = sigma
        super().__init__(problem=problem, maxgen=maxgen)

    def go(self, N: int = None, population: pg.Population = None) -> pg.Population:
        """
        Run the optimization loop until the termination criterion is met.

        if population is None, generate a new population with population size
        :param N: population size (only used to validate the call; a single
                  solution is optimized regardless)
        :param population: population to be optimized
        :return: the optimized (single-solution) population
        """
        assert N or population, "N and population can't be both None"
        if population is None:
            pop = self.problem.init_pop(N=1)
        else:
            pop = population[0]
            self.problem.N = pop.decs.shape[0]
        self.problem.cal_obj(pop)  # evaluate objective values

        g0 = None  # gradient from the previous generation
        d0 = None  # search direction from the previous generation
        k = 0
        while self.not_terminal(pop):
            self.gk = self.FiniteDifference(pop)
            # Position within the restart cycle: 1..D+1 (equivalent to the
            # original k - (D+1)*floor(k/(D+1)) + 1, but integer-valued).
            itern = k % (self.problem.D + 1) + 1
            if itern <= 1 or g0 is None:
                # Restart: steepest descent (also covers the first iteration,
                # where g0/d0 are not yet available).
                dk = -self.gk
            else:
                # Fletcher-Reeves coefficient: ||g_k||^2 / ||g_{k-1}||^2
                betak = np.dot(self.gk.T, self.gk) / (np.dot(g0.T, g0))
                dk = -self.gk + betak * d0
                # If dk is not a descent direction, fall back to steepest descent.
                if np.dot(self.gk.T, dk) >= 0:
                    dk = -self.gk

            # Armijo backtracking line search with step sizes beta**m.
            gd = np.dot(self.gk.T, dk)  # directional derivative; invariant over m
            for m in range(20):
                individual = pg.Population(decs=pop.decs + self.beta ** m * dk.T)
                self.problem.cal_obj(individual)
                if individual.objv <= pop.objv + self.sigma * self.beta ** m * gd:
                    break

            # Accept the (last tried) step and re-evaluate in place.
            pop.decs = individual.decs
            self.problem.cal_obj(pop)
            g0 = self.gk
            d0 = dk
            k += 1
        return pop

    def FiniteDifference(self, pop: pg.Population) -> np.ndarray:
        """
        Estimate the gradient of the objective by forward finite differences.

        :param pop: population holding a single decision vector of length D
        :return: per-variable finite-difference quotients (presumably shape
                 (D, 1) — one row per decision variable; depends on the
                 problem's objv layout)
        """
        eps = 1e-4
        # D perturbed copies of the solution: row i is x + eps * e_i.
        pop1 = pg.Population(decs=pop.decs + np.eye(pop.decs.shape[1]) * eps)
        self.problem.cal_obj(pop1)
        return (pop1.objv - pop.objv) / eps
