#------------------------------------------------------------------------------+
#
#   Nathan A. Rooy
#   An implementation of differential evolution using estimated convergence point with Python
#   September, 2019
#
#------------------------------------------------------------------------------+

#--- IMPORT DEPENDENCIES ------------------------------------------------------+

import random
from EstimatedConvergencePoint import EstimatedConvergencePoint
import numpy as np

#--- EXAMPLE COST FUNCTIONS ---------------------------------------------------+

def func1(x):
    """Sphere function; any bounds work, global minimum f(0,...,0) = 0."""
    return sum(xi ** 2 for xi in x)

def func2(x):
    """Beale's function; use bounds=[(-4.5, 4.5), (-4.5, 4.5)], f(3, 0.5) = 0."""
    a, b = x[0], x[1]
    return ((1.500 - a + a * b) ** 2
            + (2.250 - a + a * b ** 2) ** 2
            + (2.625 - a + a * b ** 3) ** 2)

def rosenbrock(x):
    """Rosenbrock function (F1); global minimum f(1,...,1) = 0."""
    v = np.asarray(x)
    head, tail = v[:-1], v[1:]
    return np.sum(100.0 * (tail - head ** 2.0) ** 2.0 + (head - 1) ** 2.0)

def sphere(args):
    """Sphere benchmark (F3): sum of squared components."""
    total = 0.0
    for value in args:
        total += np.power(value, 2.)
    return total

def rastrigin(args):
    """Rastrigin benchmark (F4); global minimum f(0,...,0) = 0."""
    offset = 10 * len(args)
    return offset + sum(np.power(v, 2.) - 10 * np.cos(2 * np.pi * v)
                        for v in args)

def F1(x):
    """Shifted Sphere benchmark (CEC'05 F1) with the shift disabled.

    f(x) = sum(x_i^2) + bias, so the global minimum is f(0,...,0) = -450.
    The original offset vector is intentionally commented out in this
    implementation, leaving z = x.

    Parameters:
        x: sequence of numbers (any dimensionality).

    Returns:
        Objective value sum(x_i^2) - 450.0.
    """
    bias = -450.0
    return sum(xi ** 2 for xi in x) + bias

def F2(x):
    """Shifted Schwefel's Problem 1.2 (CEC'05 F2) with the shift disabled.

    f(x) = sum_{i=1}^{D} (sum_{j=1}^{i} x_j)^2 + bias; the global
    minimum is f(0,...,0) = -450. The offset vector of the original
    benchmark is intentionally disabled, leaving z = x.

    Bug fix: the inner loop previously ran over ``range(i)`` (j < i),
    which dropped x_i from every partial sum — the first term was always
    0 and the last component never contributed. Schwefel 1.2 requires
    j <= i. The accidental O(D^2) double loop is also replaced by an
    O(D) running prefix sum.

    Parameters:
        x: sequence of numbers (any dimensionality).

    Returns:
        Objective value as described above.
    """
    bias = -450.0
    total = 0.0
    prefix = 0.0
    for value in x:
        prefix += value          # partial sum x_1 + ... + x_i
        total += prefix ** 2
    return total + bias


#--- FUNCTIONS ----------------------------------------------------------------+


def ensure_bounds(vec, bounds):
    """Clamp each component of a candidate vector into its search bounds.

    Parameters:
        vec:    candidate solution, sequence of numbers.
        bounds: sequence of (lower, upper) pairs, one per component.

    Returns:
        New list with every component clipped into [lower, upper].

    The previous if/if/if version silently dropped a component when none
    of its three comparisons held (e.g. NaN), returning a shorter vector;
    min/max clamping always emits exactly one value per component.
    """
    return [min(max(value, lower), upper)
            for value, (lower, upper) in zip(vec, bounds)]


#--- MAIN ---------------------------------------------------------------------+

def main(cost_func, bounds, popsize, mutate, recombination, trial_runs):
    """Run DE/rand/1/bin enhanced with an estimated convergence point.

    Parameters:
        cost_func:     objective to minimize, f(list[float]) -> float.
        bounds:        list of (min, max) pairs, one per dimension.
        popsize:       population size (must be >= 4).
        mutate:        mutation factor F in [0, 2].
        recombination: crossover rate CR in [0, 1].
        trial_runs:    number of independent restarts.

    Returns:
        Best solution vector found across all trials.

    Reads the module-level constant MAX_NFC as the per-trial budget of
    fitness evaluations (NFC = number of fitness calls).
    """

    trial_scores = []
    trial_sols = []
    for trial in range(1, trial_runs + 1):

        print("trial runs num:", trial)

        #--- INITIALIZE A POPULATION (step #1) ----------------+
        # Re-created on every trial so the restarts are statistically
        # independent (previously the converged population of one trial
        # leaked into the next).
        population = []
        for _ in range(popsize):
            indv = [random.uniform(lo, hi) for lo, hi in bounds]
            population.append(indv)

        # Track the best of the whole trial; previously this was reset
        # to inf every generation inside the while loop.
        best_trial_score = np.inf
        best_trial_sol = None
        NFC = 0

        # cycle through generations until the evaluation budget is spent (step #2)
        while NFC < MAX_NFC:
            gen_scores = []                       # score keeping
            last_population = population.copy()   # snapshot of the previous generation

            # cycle through each individual in the population
            for j in range(0, popsize):

                #--- MUTATION (step #3.A) ---------------------+

                # select three random vector index positions [0, popsize),
                # not including the current target vector (j)
                candidates = list(range(0, popsize))
                candidates.remove(j)
                r1, r2, r3 = random.sample(candidates, 3)

                x_1 = population[r1]
                x_2 = population[r2]
                x_3 = population[r3]
                x_t = population[j]     # target individual

                # donor = x1 + F * (x2 - x3), clipped back into bounds
                x_diff = [a - b for a, b in zip(x_2, x_3)]
                v_donor = [a + mutate * d for a, d in zip(x_1, x_diff)]
                v_donor = ensure_bounds(v_donor, bounds)

                #--- RECOMBINATION (step #3.B) ----------------+
                v_trial = [dv if random.random() <= recombination else tv
                           for dv, tv in zip(v_donor, x_t)]

                #--- GREEDY SELECTION (step #3.C) -------------+
                score_trial = cost_func(v_trial)
                score_target = cost_func(x_t)
                NFC += 2

                if score_trial < score_target:
                    population[j] = v_trial
                    gen_scores.append(score_trial)
                else:
                    gen_scores.append(score_target)

            #--- ESTIMATE CONVERGENCE POINT -------------------+
            moving_vector = np.array(population) - np.array(last_population)
            convergence_point = EstimatedConvergencePoint(
                np.array(last_population), moving_vector)
            # NOTE(review): EstimatedConvergencePoint appears to signal
            # failure by returning an int — confirm against its source.
            if type(convergence_point) is not int:
                score_cp = cost_func(convergence_point)
                NFC += 1

                # replace the worst individual with the estimated
                # convergence point when the estimate is better
                if max(gen_scores) > score_cp:
                    idx_worst = int(np.argmax(gen_scores))
                    population[idx_worst] = list(np.squeeze(convergence_point))
                    gen_scores[idx_worst] = score_cp

            #--- SCORE KEEPING --------------------------------+
            gen_best = min(gen_scores)              # fitness of best individual
            if gen_best < best_trial_score:
                best_trial_score = gen_best
                best_trial_sol = population[gen_scores.index(gen_best)]

        trial_scores.append(best_trial_score)
        trial_sols.append(best_trial_sol)

    trial_best = min(trial_scores)
    trial_avg = np.sum(trial_scores) / trial_runs
    trial_sol = trial_sols[trial_scores.index(trial_best)]
    print('      > TRIAL AVERAGE:', trial_avg)
    print('      > TRIAL BEST:', trial_best)
    print('         > BEST SOLUTION:', trial_sol, '\n')

    return trial_sol

#--- CONSTANTS ----------------------------------------------------------------+

# Example configuration for the toy cost functions above:
# cost_func = func1                   # Cost function
# bounds = [(-1, 1), (-1, 1)]         # Bounds [(x1_min, x1_max), (x2_min, x2_max),...]
# popsize = 10                        # Population size, must be >= 4
# mutate = 0.5                        # Mutation factor [0,2]
# recombination = 0.7                 # Recombination rate [0,1]
# maxiter = 20                        # Max number of generations (maxiter)

cost_func = F2                   # Cost function
dim = 30
bounds = [(-100, 100)] * dim        # Bounds [(x1_min, x1_max), (x2_min, x2_max),...]
popsize = 100                        # Population size, must be >= 4
mutate = 1                        # Mutation factor [0,2]; "F" in the original paper
recombination = 0.9                 # Recombination rate [0,1]
trial_runs = 30                        # Max number of trial runs (maxiter)
MAX_NFC = 200000                      # Per-trial budget of fitness evaluations

# Alternative low-dimension configuration:
# dim = 2
# bounds = [(-100, 100)] * dim        # Bounds [(x1_min, x1_max), (x2_min, x2_max),...]
# popsize = 20                        # Population size, must be >= 4
# mutate = 1                        # Mutation factor [0,2]; "F" in the original paper
# recombination = 0.9                 # Recombination rate [0,1]
# trial_runs = 30                        # Max number of trial runs (maxiter)
# MAX_NFC = 2000

# Alternative mid-dimension configuration:
# dim = 10
# bounds = [(-100, 100)] * dim        # Bounds [(x1_min, x1_max), (x2_min, x2_max),...]
# popsize = 50                        # Population size, must be >= 4
# mutate = 1                        # Mutation factor [0,2]; "F" in the original paper
# recombination = 0.9                 # Recombination rate [0,1]
# trial_runs = 30                        # Max number of trial runs (maxiter)
# MAX_NFC = 50000
#--- RUN ----------------------------------------------------------------------+

main(cost_func, bounds, popsize, mutate, recombination, trial_runs)