import platgo as pg
import numpy as np

'''
Reference
---------
D. F. Shanno, Conditioning of quasi-Newton methods for function
minimization, Mathematics of Computation, 1970, 24: 647-656.
'''


class BFGS(pg.Algorithm):
    """Broyden-Fletcher-Goldfarb-Shanno quasi-Newton method.

    Maintains an approximation ``Bk`` of the Hessian, takes a backtracking
    (Armijo) line search along the quasi-Newton direction, and updates ``Bk``
    with the standard BFGS rank-two formula. Gradients are estimated by
    forward finite differences.
    """

    type: dict = {'single': True, 'multi': False, 'many': False, 'real': True, 'binary': False, 'permutation': False,
                  "large": True, 'expensive': False, 'constrained': False, 'preference': False, 'multimodal': False,
                  'sparse': False, 'gradient': True}

    def __init__(self, maxgen: int, problem: pg.Problem, beta: float = 0.6, sigma: float = 0.4) -> None:
        """
        A quasi-Newton method proposed by Broyden, Fletcher, Goldfarb, and Shanno.

        :param maxgen: maximum number of generations (outer iterations)
        :param problem: single-objective real-valued problem to optimize
        :param beta: backtracking shrink factor, a parameter within [0, 1]
        :param sigma: Armijo sufficient-decrease parameter within [0, 0.5]
        """
        self.name = "BFGS"
        self.gk = None  # last gradient estimate (kept for framework compatibility)
        self.beta = beta
        self.sigma = sigma
        super().__init__(problem=problem, maxgen=maxgen)

    def go(self, N: int = None, population: pg.Population = None) -> pg.Population:
        """Run the optimization loop and return the final (single-individual) population.

        If ``population`` is None, a new single-individual population is created.

        :param N: population size (only used to satisfy the framework interface;
                  BFGS operates on a single point)
        :param population: population to be optimized; only the first individual is used
        :return: the population holding the final iterate
        """
        assert N or population, "N and population can't be both None"
        if population is None:
            pop = self.problem.init_pop(N=1)
        else:
            pop = population[0]
            self.problem.N = pop.decs.shape[0]
        self.problem.cal_obj(pop)  # evaluate the objective at the starting point
        Bk = np.eye(self.problem.D)  # initial Hessian approximation: identity
        gk = self.FiniteDifference(pop)
        while self.not_terminal(pop):
            # Search direction: solve Bk @ dk = -gk instead of forming inv(Bk)
            # (cheaper and numerically more stable than explicit inversion).
            dk = -np.linalg.solve(Bk, gk)
            # Backtracking line search: try step sizes beta**m, m = 0..20.
            # If none satisfies the Armijo condition, the smallest step is kept.
            for m in range(21):
                pop1 = pg.Population(decs=(pop.decs + self.beta ** m * dk.T))
                self.problem.cal_obj(pop1)
                # Armijo sufficient-decrease condition.
                if pop1.objv <= pop.objv + self.sigma * self.beta ** m * np.dot(gk.T, dk):
                    break
            gk1 = self.FiniteDifference(pop1)
            sk = (pop1.decs - pop.decs).T  # step taken, as a column vector
            yk = gk1 - gk                  # gradient change
            # Rank-two BFGS update, applied only when the curvature condition
            # yk.T @ sk > 0 holds so that Bk stays positive definite.
            if np.dot(yk.T, sk) > 0:
                Bk = Bk - np.dot(np.dot(np.dot(Bk, sk), sk.T), Bk) / np.dot(np.dot(sk.T, Bk), sk) + np.dot(yk, yk.T) \
                     / np.dot(yk.T, sk)
            pop = pop1
            gk = gk1
        return pop

    def FiniteDifference(self, pop: pg.Population) -> np.ndarray:
        """Estimate the objective gradient at ``pop`` by forward finite differences.

        Perturbs each decision variable by 1e-4 (one perturbed point per row)
        and returns the column vector of difference quotients.
        """
        pop1 = pg.Population(decs=pop.decs + np.eye(pop.decs.shape[1]) * 1e-4)
        self.problem.cal_obj(pop1)
        df = (pop1.objv - pop.objv) / 1e-4
        return df


if __name__ == '__main__':
    # Smoke run: minimize the Sphere problem for 20 generations from one point.
    sphere = pg.problems.Sphere()
    algorithm = pg.algorithms.BFGS(maxgen=20, problem=sphere)
    result = algorithm.go(1)
    print(result.objv)
