import numpy as np
import random
import copy
from cec17_functions import cec17_test_func
import sys


# --- experiment configuration (read from the command line) ---
# NOTE(review): int() replaces eval() -- eval on command-line input executes
# arbitrary code; both arguments are plain integers.
particle_size = int(sys.argv[1])  # the swarm size obtained from keyboard
particle_dim = int(sys.argv[2])   # the dimension of the solution obtained from keyboard
beta = 0.5  # the parameter beta in stochastic cognitive dominance learning
maxFEs = 10000 * particle_dim  # maximum number of function evaluations
x_min = -100.0  # the lower bound of feasible solutions
x_max = 100.0   # the upper bound of feasible solutions
a = 0.9  # the upper bound of inertia weight w
b = 0.4  # the lower bound of inertia weight w


def init_Population():
    """Initialize the swarm and its bookkeeping.

    Draws uniform random positions inside [x_min, x_max], zeroes all
    velocities, seeds pbest with the initial positions, evaluates every
    particle once on CEC2017 function `fn` (shifted by -fn*100 so the known
    optimum maps to 0), logs the initial global best to the open result
    file, and advances the evaluation counter `fes` by the swarm size.
    """
    global g_fitness, gbest, pbest, position, speed, fes, position_fitness
    # Random start inside the feasible region; particles begin at rest.
    position = np.random.uniform(x_min, x_max, (particle_size, particle_dim))
    speed = np.zeros((particle_size, particle_dim))
    pbest = copy.deepcopy(position)
    for idx in range(particle_size):
        # Evaluate particle idx; shift removes the benchmark's fn*100 offset.
        fitness = cec17_test_func(x=position[idx], f=[0], nx=particle_dim, mx=1, func_num=fn) - fn * 100
        p_fitness[idx] = fitness         # fitness of pbest[idx]
        position_fitness[idx] = fitness  # fitness of the current position
        if fitness < g_fitness:          # new global best found
            g_fitness = fitness
            gbest = copy.deepcopy(position[idx])
    file.write("{:.2e}\t".format(g_fitness))
    fes += particle_size


def update_position():
    """Main SCDLPSO loop: evolve the swarm until the budget maxFEs is spent.

    Each particle picks two random exemplars and applies stochastic
    cognitive dominance learning; pbest/gbest are refreshed after every
    evaluation and the global best is logged every 5000 evaluations.
    Fixes vs. the original: clip only the moved particle instead of the
    whole swarm each step (same result, no O(N*dim) redundant work), and
    drop needless deepcopies of freshly created arrays/scalars.
    """
    global g_fitness, gbest, pbest, position, speed
    global fes, position_fitness, num_strategy1, num_strategy2, num_strategy3
    while fes < maxFEs:
        # Inertia weight decays linearly from a to b over the FE budget.
        w = a - (a - b) * fes / maxFEs
        for i in range(particle_size):
            # Randomly select 2 different particles (excluding i) as exemplars.
            choice_list = [x for x in range(0, particle_size)]
            choice_list.remove(i)
            choice = random.sample(choice_list, 2)
            pick1 = choice[0]
            pick2 = choice[1]
            f_px1 = p_fitness[pick1]
            f_px2 = p_fitness[pick2]
            # Order the exemplars so px1 is the fitter one (minimization).
            if f_px1 < f_px2:
                px1 = pbest[pick1]
                px2 = pbest[pick2]
            else:
                px1 = pbest[pick2]
                px2 = pbest[pick1]
                f_px1, f_px2 = f_px2, f_px1
            f_pbi = p_fitness[i]  # current particle's historical best fitness
            # Stochastic cognitive dominance learning:
            #   both exemplars dominate pbest[i] -> learn from both;
            #   only the better one dominates    -> learn from it and pbest[i];
            #   neither dominates                -> keep the old velocity.
            if f_px2 <= f_pbi:
                speed[i] = w * speed[i] + beta * (np.random.random(particle_dim) * (px1 - position[i]) +
                                                  np.random.random(particle_dim) * (px2 - position[i]))
            elif f_px1 <= f_pbi:
                speed[i] = w * speed[i] + beta * (np.random.random(particle_dim) * (px1 - position[i]) +
                                                  np.random.random(particle_dim) * (pbest[i] - position[i]))
            # Array addition already yields a fresh array; no deepcopy needed.
            position[i] = position[i] + speed[i]
            # Clip only particle i back into the feasible region. The original
            # clamped the entire swarm here, which was redundant: every other
            # particle was already clipped when it last moved.
            np.clip(position[i], x_min, x_max, out=position[i])
            temporary_best = cec17_test_func(x=position[i], f=[0], nx=particle_dim, mx=1, func_num=fn) - fn * 100
            position_fitness[i] = temporary_best
            if temporary_best < p_fitness[i]:  # update pbest
                p_fitness[i] = temporary_best
                pbest[i] = copy.deepcopy(position[i])
            if p_fitness[i] < g_fitness:  # update gbest
                gbest = copy.deepcopy(position[i])
                g_fitness = float(p_fitness[i])  # plain scalar; deepcopy was pointless
            fes += 1
            if fes % 5000 == 0:  # log the best fitness every 5000 evaluations
                file.write("{:.2e}\t".format(g_fitness))


for fn in range(1, 31):  # CEC2017 test functions are numbered 1-30
    if fn == 2:  # function 2 has been removed from cec2017
        continue
    for run in range(30):  # 30 independent experiments per function
        fes = 0  # current number of function evaluations
        pbest = np.zeros((particle_size, particle_dim))  # historical best positions
        gbest = np.zeros((1, particle_dim))  # global best position
        p_fitness = np.zeros(particle_size)  # historical best fitness of each particle
        g_fitness = float('inf')  # set initial global best fitness
        position_fitness = np.zeros(particle_size)  # fitness of the current particles
        # Context manager guarantees the result file is closed (and flushed)
        # even if a run raises -- the original leaked the handle on errors.
        with open("SCDLPSO/f{} NP{} dim{}.txt".format(fn, particle_size, particle_dim), "a") as file:
            init_Population()
            update_position()
            file.write("\n")
        print(g_fitness)