import platgo as pg
import numpy as np


class sogo_Adam(pg.Algorithm):
    """Single-objective gradient-descent optimizer using the Adam update rule.

    The gradient of the objective is estimated by forward finite differences,
    so the problem only needs objective evaluations (no analytic gradient).
    Operates on a single individual (one decision vector) per generation.
    """

    def __init__(self, maxgen: int, problem: pg.Problem, alpha: float = 1,
                 beta1: float = 0.9, beta2: float = 0.999) -> None:
        """
        Args:
            maxgen: maximum number of generations (iterations).
            problem: the optimization problem to solve.
            alpha: Adam step size (learning rate).
            beta1: exponential decay rate for the first-moment estimate.
            beta2: exponential decay rate for the second-moment estimate.
        """
        self.name = "sogo_Adam"
        self.type['single'], self.type['real'], self.type['large'] = [True] * 3
        self.gk = None  # placeholder for the last gradient estimate
        self.alpha = alpha
        self.beta1 = beta1
        self.beta2 = beta2
        super().__init__(problem=problem, maxgen=maxgen)

    def go(self, N: int = None, population: pg.Population = None) -> pg.Population:
        """Run the Adam iteration until the termination criterion is met.

        Args:
            N: population size used to initialize when ``population`` is None
               (this optimizer effectively works with a single individual).
            population: optional starting population; its first entry is used.

        Returns:
            The final population after termination.

        Raises:
            ValueError: if both ``N`` and ``population`` are None.
        """
        # Explicit raise instead of assert: asserts are stripped under -O.
        if not (N or population):
            raise ValueError("N and population can't be both None")
        if population is None:
            pop = self.problem.init_pop(N=1)
        else:
            pop = population[0]
            self.problem.N = pop.decs.shape[0]
        self.problem.cal_obj(pop)
        # First- and second-moment running estimates (Adam's m_t and v_t).
        m = np.zeros((1, self.problem.D))
        v = np.zeros((1, self.problem.D))
        k = 1
        while self.not_terminal(pop):
            gk = self.FiniteDifference(pop).T
            # BUG FIX: carry the exponential moving averages across
            # iterations. The original reset them from zero-filled m0/v0
            # every step, which is not Adam and, together with the bias
            # correction below, made the effective step size grow with k.
            m = self.beta1 * m + (1 - self.beta1) * gk
            v = self.beta2 * v + (1 - self.beta2) * gk ** 2
            # Bias-corrected estimates (Kingma & Ba, Algorithm 1).
            m_hat = m / (1 - self.beta1 ** k)
            v_hat = v / (1 - self.beta2 ** k)
            pop = pg.Population(decs=pop.decs - self.alpha * m_hat / (np.sqrt(v_hat) + 1e-8))
            self.problem.cal_obj(pop)
            k = k + 1
        return pop

    def FiniteDifference(self, pop: pg.Population) -> np.ndarray:
        """Estimate the objective gradient by forward finite differences.

        Builds D perturbed copies of the decision vector (one per dimension,
        each shifted by 1e-4 along a single axis via the identity matrix) and
        evaluates them in one batch.

        Returns:
            Array of per-dimension difference quotients; shape follows
            ``objv`` broadcasting (presumably (D, 1) for a single
            individual — caller transposes it).
        """
        pop1 = pg.Population(decs=pop.decs + np.eye(pop.decs.shape[1]) * 1e-4)
        self.problem.cal_obj(pop1)
        df = (pop1.objv - pop.objv) / 1e-4
        return df


if __name__ == '__main__':
    # Smoke run: minimize the Sphere function from a single starting point.
    sphere = pg.problems.Sphere()
    optimizer = pg.algorithms.sogo_Adam(maxgen=200, problem=sphere)
    result = optimizer.go(1)
    print(result.objv)
