/*
Federal University of Pernambuco - UFPE
Center of Informatics (Cin)

University of Pernambuco - UPE
Engenharia da Computação - Ecomp

This code was created in order to study the scalability
of the Multiobjective Evolutionary Algorithms in
problems with many conflicting objectives


 */

package hidra.metaheuristics.moapsocdr;

import hidra.core.population.HIDRAPopulationAlgorithm;
import hidra.jmetal.core.Operator;
import hidra.jmetal.core.Problem;
import hidra.jmetal.core.Solution;
import hidra.jmetal.core.SolutionSet;
import hidra.jmetal.core.Variable;
import hidra.metaheuristics.mopsocdr.util.ParetoFrontUtil;
import hidra.metaheuristics.mopsocdr.util.RoletteWheelSelection;
import hidra.metaheuristics.mopsocdr.util.TurbulenceMutation;

import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;

import jmetal.util.Distance;
import jmetal.util.JMException;
import jmetal.util.PseudoRandom;
import jmetal.util.archive.CrowdingArchive;
import jmetal.util.comparators.CrowdingDistanceComparator;
import jmetal.util.comparators.DominanceComparator;
import jmetal.util.wrapper.XReal;

/**
 * @author Elliackin Figueiredo
 * @email  emnf@cin.ufpe.br
 */
public class MOAPSOCDR extends HIDRAPopulationAlgorithm{

	// Swarm state used by the adaptive behavior (APSO-style evolutionary
	// state estimation). Portuguese names: PROFUNDIDADE = exploitation,
	// AMPLITUDE = exploration, ESCAPE = jumping-out.
	private enum State {
		CONVERGE,
		PROFUNDIDADE,
		AMPLITUDE,
		ESCAPE;
	}

	// Selects which inertia-weight schedule inertiaWeight() applies.
	private enum Simulation {
		ADAPTIVE,
		STANDARD;
	}

	// Bounds for the acceleration coefficients C1_/C2_ (see checkAccBounds()).
	private static final double CMAX = 2.5;
	private static final double CMIN = 1.5;

	// Number of particles in the swarm.
	private int swarmSize_;

	// Capacity of the external archive of non-dominated solutions.
	private int archiveSize_;

	// Current swarm (one Solution per particle).
	private SolutionSet particles_;

	// Personal-best (pBest) memory, one entry per particle.
	private Solution[] best_;

	// External archive of leaders, pruned by crowding distance.
	private CrowdingArchive leaders_;

	// Velocity per particle and per decision variable.
	private double[][] speed_;

	// Pareto-dominance comparator (used to update personal bests).
	private Comparator dominance_;

	// Orders solutions by crowding distance (used to sort the leaders).
	private Comparator crowdingDistanceComparator_;

	// NOTE(review): assigned in initParams() but never read afterwards.
	private Distance distance_;

	// Turbulence (mutation) operator applied to the swarm each iteration.
	private Operator turbulence_;	

	// Roulette-wheel selection over the leaders, biased by crowding distance.
	private RoletteWheelSelection rollete;

	// Acceleration coefficients and inertia-weight range; C1_/C2_ are adapted
	// online by updateFactor()/checkAccBounds()/normAccCoefficients().
	private double C1_;
	private double C2_;
	private double wMax_;
	private double wMin_;
	// Most recent evolutionary factor computed by calculateEFactorEA().
	private double EFactor_;	

	// Mutation percentage handed to the turbulence operator.
	private static final double MUTATION_PERCENTUAL_MOPSO = 0.5;

	/**
	 * Creates the algorithm for the given problem; all parameters are read
	 * later from the input-parameter map in {@link #initParams()}.
	 */
	public MOAPSOCDR(Problem problem) {                
		super (problem);		
	}

	/**
	 * Reads the configuration parameters and allocates every data structure
	 * used by the search: swarm, pBest memory, leaders archive, velocity
	 * matrix, comparators and the turbulence operator.
	 */
	public void initParams() {

		swarmSize_     = (Integer) getInputParameter("swarmSize");
		archiveSize_   = (Integer) getInputParameter("archiveSize");
		maxIterations_ = (Integer) getInputParameter("maxIterations");

		iteration_ = 0;

		rollete    = new RoletteWheelSelection();
		particles_ = new SolutionSet(swarmSize_);
		best_      = new Solution[swarmSize_];
		leaders_   = new CrowdingArchive(archiveSize_, problem_.getNumberOfObjectives());

		// Comparators for dominance and crowding distance.
		dominance_ = new DominanceComparator();
		crowdingDistanceComparator_ = new CrowdingDistanceComparator();
		distance_ = new Distance();

		// One velocity entry per particle and per decision variable.
		speed_ = new double[swarmSize_][problem_.getNumberOfVariables()];

		// Configure the turbulence (mutation) operator.
		HashMap<String, Object> turbulenceParams = new HashMap<String, Object>();
		turbulenceParams.put("maxIterations", maxIterations_);
		turbulenceParams.put("percMutation", MUTATION_PERCENTUAL_MOPSO);
		turbulence_ = new TurbulenceMutation(turbulenceParams);

		// Initial acceleration coefficients and inertia-weight range.
		C1_   = 1.49445;
		C2_   = 1.49445;
		wMax_ = 0.4;
		wMin_ = 0.0;
	}

	/**
	 * Returns the current approximation of the Pareto front, i.e. the
	 * external archive of non-dominated leaders.
	 */
	@Override
	protected SolutionSet getParetoFront() {
		return this.leaders_;
	}

	/**
	 * Builds and evaluates the initial swarm, zeroes all velocities, seeds
	 * the leaders archive and the personal-best memory with copies of the
	 * initial particles, and assigns crowding distances to the archive.
	 *
	 * @return the leaders archive (initial Pareto-front approximation)
	 * @throws ClassNotFoundException if a solution type cannot be built
	 * @throws JMException on evaluation failure
	 */
	@Override
	protected SolutionSet initializationAlgorithm()
			throws ClassNotFoundException, JMException {

		initParams();

		//->Step 1 (and 3) Create the initial population and evaluate
		for (int i = 0; i < swarmSize_; i++){
			Solution particle = new Solution(problem_);
			problem_.evaluate(particle);
			problem_.evaluateConstraints(particle);
			particles_.add(particle);                   
		}

		//-> Step2. Initialize the speed_ of each particle to 0
		for (int i = 0; i < swarmSize_; i++) {
			for (int j = 0; j < problem_.getNumberOfVariables(); j++) {
				speed_[i][j] = 0.0;
			}
		}

		// Step4 and 5: seed the archive with copies of the initial swarm
		// (the CrowdingArchive keeps only the non-dominated entries).
		for (int i = 0; i < particles_.size(); i++) {
			Solution particle = new Solution(particles_.get(i));
			leaders_.add(particle);
		}

		//-> Step 6. Initialize the memory (pBest) of each particle
		for (int i = 0; i < particles_.size(); i++) {
			Solution particle = new Solution(particles_.get(i));
			best_[i] = particle;
		}

		// Assign crowding distances to the leaders_
		ParetoFrontUtil.crowdingDistanceAssignmentToMOPSOCDR(leaders_, problem_.getNumberOfObjectives());

		return this.leaders_;

	}

	/**
	 * One iteration of the algorithm: adapt C1_/C2_ via the evolutionary
	 * factor, update velocities and positions, apply turbulence,
	 * re-evaluate the swarm, refresh the leaders archive and update the
	 * personal-best memory.
	 *
	 * @return the leaders archive after this iteration
	 * @throws JMException on evaluation failure
	 */
	@Override
	protected SolutionSet runIteration() throws JMException {

		// adaptive behavior: evolutionary factor -> swarm state -> C1_/C2_
		performAdaptiveBehavior();

		computeSpeed();

		System.out.println(iteration_);

		//Compute the new positions for the particles_            
		computeNewPositions();

		//Mutate the particles_          
		mopsoMutation();

		//Evaluate the new particles_ in new positions
		for (int i = 0; i < particles_.size(); i++) {
			Solution particle = particles_.get(i);
			problem_.evaluate(particle);
			problem_.evaluateConstraints(particle);
		}

		// Update the archive with copies of the evaluated particles
		// (the CrowdingArchive filters out dominated entries).
		for (int i = 0; i < particles_.size(); i++) {
			Solution particle = new Solution(particles_.get(i));
			leaders_.add(particle);
		}

		//Assign crowding distance to the leaders_
		ParetoFrontUtil.crowdingDistanceAssignmentToMOPSOCDR(leaders_,
				problem_.getNumberOfObjectives());

		iteration_++;

		// Update the personal-best (pBest) memory of each particle
		for (int i = 0; i < particles_.size(); i++) {

			int flag = dominance_.compare(particles_.get(i), best_[i]);
			if (flag == -1) { // the new position dominates the old pBest
				Solution particle = new Solution(particles_.get(i));
				best_[i] = particle;
			}else if(flag == 0){
				// Non-dominated: keep whichever lies in the less crowded
				// region of the archive (larger crowding distance wins).
				Solution solNearParticle = 
						ParetoFrontUtil.getNearSolution(this.leaders_, particles_.get(i));

				Solution solNearPBest = 
						ParetoFrontUtil.getNearSolution(this.leaders_, best_[i]);


				if(solNearParticle.getCrowdingDistance() > solNearPBest.getCrowdingDistance()){
					best_[i] =new Solution(particles_.get(i));
				}
			}
		}

		return this.leaders_;
	}

	/**
	 * Inertia weight. In ADAPTIVE mode it is a sigmoid-like mapping of the
	 * last evolutionary factor; in STANDARD mode it decays linearly from
	 * wMax_ to wMin_ over the iterations, clamped at zero.
	 */
	private double inertiaWeight(Simulation tipo) {
		if (tipo == Simulation.ADAPTIVE) {
			// w = 1 / (1 + 1.5 * e^(-2.6 * f)), f = evolutionary factor
			return 1 / (1 + 1.5 * (Math.pow(Math.E, -2.6 * this.EFactor_)));
		}
		// Linear decay schedule, never below zero.
		double decayed = wMax_ - (((wMax_ - wMin_) * (double) iteration_) / (double) maxIterations_);
		return (decayed < 0.0) ? 0.0 : decayed;
	} // inertiaWeight

	/**
	 * Updates the velocity of every particle.
	 *
	 * The global guide of each particle is drawn by roulette-wheel selection
	 * over the leaders archive, weighted by crowding distance; the inertia
	 * weight comes from the adaptive schedule. Each velocity component is
	 * clamped so its magnitude never exceeds the width of the variable's
	 * domain.
	 *
	 * Cleanup over the previous revision: the dead locals {@code wmax} and
	 * {@code wmin} were removed (the weight is fully determined by
	 * {@link #inertiaWeight(Simulation)}).
	 *
	 * @throws JMException propagated from variable access
	 */
	private void computeSpeed() throws JMException {
		double W = inertiaWeight(Simulation.ADAPTIVE);

		// Snapshot of the archive sorted by crowding distance, plus the
		// total crowding mass used by the roulette-wheel selection.
		SolutionSet front = new SolutionSet(leaders_.size());
		for (int i = 0; i < leaders_.size(); i++) {
			front.add(leaders_.get(i));
		}
		front.sort(crowdingDistanceComparator_);

		double summatory = 0.0;
		for (int i = 0; i < front.size(); i++) {
			summatory += front.get(i).getCrowdingDistance();
		}

		for (int i = 0; i < swarmSize_; i++) {
			XReal particle = new XReal(particles_.get(i));
			XReal bestParticle = new XReal(best_[i]);

			// Global guide: one leader per particle, chosen by roulette.
			XReal bestGlobal = new XReal(rollete.execute(front, summatory));

			for (int var = 0; var < particle.getNumberOfDecisionVariables(); var++) {
				// Stochastic components for this dimension.
				double r1 = PseudoRandom.randDouble(0.0, 1.0);
				double r2 = PseudoRandom.randDouble(0.0, 1.0);

				// Canonical PSO velocity update.
				speed_[i][var] = W * speed_[i][var]
						+ C1_ * r1 * (bestParticle.getValue(var) - particle.getValue(var))
						+ C2_ * r2 * (bestGlobal.getValue(var) - particle.getValue(var));

				// Clamp |v| to the width of the variable's domain.
				double vmax = particle.getUpperBound(var) - particle.getLowerBound(var);
				speed_[i][var] = Math.signum(speed_[i][var])
						* Math.min(Math.abs(speed_[i][var]), vmax);
			}
		}
	} // computeSpeed

	/**
	 * Applies the turbulence (mutation) operator to every particle of the
	 * swarm; the operator itself decides how strongly to perturb based on
	 * the current iteration, which is passed in as a parameter.
	 *
	 * @throws JMException propagated from the operator
	 */
	private void mopsoMutation() throws JMException {
		turbulence_.setParameter("currentIteration", iteration_);

		int swarmCount = particles_.size();
		for (int index = 0; index < swarmCount; index++) {
			turbulence_.execute(particles_.get(index));
		}
	} // mopsoMutation

	/**
	 * Moves every particle along its velocity vector. Components that leave
	 * the feasible box are clamped to the violated bound and the matching
	 * velocity entry is reflected (multiplied by -1).
	 *
	 * @throws JMException propagated from variable access
	 */
	private void computeNewPositions() throws JMException {
		for (int i = 0; i < swarmSize_; i++) {
			Variable[] vars = particles_.get(i).getDecisionVariables();
			for (int d = 0; d < vars.length; d++) {
				vars[d].setValue(vars[d].getValue() + speed_[i][d]);

				// Keep the position inside [lower, upper]; bounce the speed.
				if (vars[d].getValue() < problem_.getLowerLimit(d)) {
					vars[d].setValue(problem_.getLowerLimit(d));
					speed_[i][d] = speed_[i][d] * -1.0;
				}
				if (vars[d].getValue() > problem_.getUpperLimit(d)) {
					vars[d].setValue(problem_.getUpperLimit(d));
					speed_[i][d] = speed_[i][d] * -1.0;
				}
			}
		}
	} // computeNewPositions

	/**
	 * Convergence membership function: 1 on [0, 0.1], decreasing linearly
	 * to 0 on (0.1, 0.3], and 0 elsewhere.
	 */
	private double EConverge(double evolFactor) {
		if (evolFactor >= 0.0 && evolFactor <= 0.1) {
			return 1.0;
		}
		if (evolFactor > 0.1 && evolFactor <= 0.3) {
			return -5 * evolFactor + 1.5;
		}
		return 0;
	}

	/**
	 * Exploitation membership function: ramps up on (0.2, 0.3], equals 1 on
	 * (0.3, 0.4], ramps down on (0.4, 0.6], and is 0 elsewhere.
	 */
	private double EProfundidade(double evolFactor) {
		if (evolFactor > 0.2 && evolFactor <= 0.3) {
			return 10 * evolFactor - 2.0;
		}
		if (evolFactor > 0.3 && evolFactor <= 0.4) {
			return 1.0;
		}
		if (evolFactor > 0.4 && evolFactor <= 0.6) {
			return -5 * evolFactor + 3.0;
		}
		return 0;
	}

	/**
	 * Exploration membership function: ramps up on (0.4, 0.6], equals 1 on
	 * (0.6, 0.7], ramps down on (0.7, 0.8], and is 0 elsewhere.
	 */
	private double EAmplitude(double evolFactor) {
		if (evolFactor > 0.4 && evolFactor <= 0.6) {
			return 5 * evolFactor - 2.0;
		}
		if (evolFactor > 0.6 && evolFactor <= 0.7) {
			return 1.0;
		}
		if (evolFactor > 0.7 && evolFactor <= 0.8) {
			return -10 * evolFactor + 8;
		}
		return 0;
	}

	/**
	 * Escape (jumping-out) membership function: ramps up on (0.7, 0.9],
	 * equals 1 on (0.9, 1.0], and is 0 elsewhere.
	 */
	private double EEscape(double evolFactor) {
		if (evolFactor > 0.7 && evolFactor <= 0.9) {
			return 5 * evolFactor - 3.5;
		}
		if (evolFactor > 0.9 && evolFactor <= 1.0) {
			return 1.0;
		}
		return 0;
	}

	/**
	 * Classifies the swarm state from the evolutionary factor by picking
	 * the state whose membership function scores highest. Ties are broken
	 * in the order CONVERGE, AMPLITUDE, PROFUNDIDADE, ESCAPE, exactly as
	 * in the original implementation.
	 *
	 * Improvement: the temporary 4-element array plus {@code Arrays.sort}
	 * (used only to read the largest element) was replaced by a direct
	 * max computation.
	 */
	private State selectState(double evolFactor) {

		double convVal = EConverge(evolFactor);
		double amplVal = EAmplitude(evolFactor);
		double profVal = EProfundidade(evolFactor);
		double escVal = EEscape(evolFactor);

		// Largest membership value decides the state.
		double higherVal = Math.max(Math.max(convVal, amplVal),
				Math.max(profVal, escVal));

		if (higherVal == convVal) {
			return State.CONVERGE;
		} else if (higherVal == amplVal) {
			return State.AMPLITUDE;
		} else if (higherVal == profVal) {
			return State.PROFUNDIDADE;
		} else {
			return State.ESCAPE;
		}
	}

	// Step-size helpers for the adaptive acceleration coefficients.
	// Fix: PseudoRandom.randDouble(lowerLimit, upperLimit) expects
	// lowerLimit <= upperLimit; the previous code passed (0.05, 0.01) with
	// the bounds reversed. The intended step range [0.01, 0.05] is kept.

	// increments C1 by a random step in [0.01, 0.05]
	private void incFactorC1() {
		this.C1_ += PseudoRandom.randDouble(0.01, 0.05);
	}

	// increments C2 by a random step in [0.01, 0.05]
	private void incFactorC2() {
		this.C2_ += PseudoRandom.randDouble(0.01, 0.05);
	} 

	// decrements C1 by a random step in [0.01, 0.05]
	private void decFactorC1() {
		this.C1_ -= PseudoRandom.randDouble(0.01, 0.05);
	}

	// decrements C2 by a random step in [0.01, 0.05]
	private void decFactorC2() {
		this.C2_ -= PseudoRandom.randDouble(0.01, 0.05);
	}

	// increments C1 by half a random step ("slight" increase)
	private void incLevFactorC1() {
		this.C1_ += 0.5 * PseudoRandom.randDouble(0.01, 0.05);
	}

	// increments C2 by half a random step ("slight" increase)
	private void incLevFactorC2() {
		this.C2_ += 0.5 * PseudoRandom.randDouble(0.01, 0.05);
	}

	// decrements C2 by half a random step ("slight" decrease)
	private void decLevFactorC2() {
		this.C2_ -= 0.5 * PseudoRandom.randDouble(0.01, 0.05);
	}

	/**
	 * Adjusts the acceleration coefficients C1_/C2_ according to the
	 * estimated swarm state: exploration favours the cognitive term,
	 * escape favours the social term, and convergence / exploitation
	 * apply the softened ("Lev") steps.
	 */
	private void updateFactor(State state) throws JMException {
		switch (state) {
		case AMPLITUDE:
			incFactorC1();
			decFactorC2();
			break;
		case PROFUNDIDADE:
			incLevFactorC1();
			decLevFactorC2();
			break;
		case CONVERGE:
			incLevFactorC1();
			incLevFactorC2();
			break;
		case ESCAPE:
			decFactorC1();
			incFactorC2();
			break;
		default:
			// unreachable: every enum constant is handled above
			break;
		}
	}

	/**
	 * Clamps both acceleration coefficients to the interval [CMIN, CMAX].
	 */
	private void checkAccBounds() {
		this.C1_ = Math.min(CMAX, Math.max(CMIN, this.C1_));
		this.C2_ = Math.min(CMAX, Math.max(CMIN, this.C2_));
	}

	/**
	 * Normalizes C1_ and C2_ so that their sum does not exceed CMAX + CMIN,
	 * preserving their ratio.
	 *
	 * Fix: the previous code normalized C1_ first and then reused the
	 * already-updated C1_ in the denominator when normalizing C2_, so the
	 * two coefficients were scaled by different factors and their sum did
	 * not land on CMAX + CMIN. The sum is now captured before scaling.
	 */
	private void normAccCoefficients() {
		double sum = this.C1_ + this.C2_;
		if (sum > (CMAX + CMIN)) {
			double scale = (CMAX + CMIN) / sum;
			this.C1_ = this.C1_ * scale;
			this.C2_ = this.C2_ * scale;
		}
	}

	/**
	 * For every solution in the leaders archive, computes its average
	 * Euclidean distance (in decision space) to all other archive members.
	 *
	 * Fix: the previous inner loop used {@code j < size && i != j} as the
	 * loop CONDITION, which terminated the loop as soon as j reached i
	 * instead of skipping that index — so only neighbours with j &lt; i were
	 * ever summed and the first solution always got distance 0. The
	 * self-pair is now skipped with a guard inside the loop. A singleton
	 * archive returns {0.0} instead of dividing by zero.
	 *
	 * @return one average pairwise distance per archive member
	 * @throws JMException propagated from variable access
	 */
	private double[] averageDistanceEA() throws JMException {
		int size = leaders_.size();
		double[] ret = new double[size];
		if (size <= 1) {
			return ret; // no pairs to measure
		}
		for (int i = 0; i < size; i++) {
			Variable[] solSource = leaders_.get(i).getDecisionVariables();
			double distance = 0;
			for (int j = 0; j < size; j++) {
				if (i == j) {
					continue; // skip the self-distance
				}
				Variable[] solDestination = leaders_.get(j).getDecisionVariables();
				double sqrdValue = 0;
				for (int k = 0; k < solSource.length; k++) {
					double diff = solSource[k].getValue() - solDestination[k].getValue();
					sqrdValue += diff * diff;
				}
				distance += Math.sqrt(sqrdValue);
			}
			ret[i] = distance / (size - 1);
		}
		return ret;
	}

	/**
	 * Returns the smallest value in {@code distances}.
	 *
	 * Improvement: a linear scan replaces the previous clone + sort, which
	 * was O(n log n) and allocated a copy just to read one element.
	 */
	private double getMinDistance(double[] distances) {
		double min = distances[0];
		for (int i = 1; i < distances.length; i++) {
			if (distances[i] < min) {
				min = distances[i];
			}
		}
		return min;
	}

	/**
	 * Returns the largest value in {@code distances}.
	 *
	 * Improvement: a linear scan replaces the previous clone + sort, which
	 * was O(n log n) and allocated a copy just to read one element.
	 */
	private double getMaxDistance(double[] distances) {
		double max = distances[0];
		for (int i = 1; i < distances.length; i++) {
			if (distances[i] > max) {
				max = distances[i];
			}
		}
		return max;
	}

	/**
	 * Average Euclidean distance (in decision space) from one "best"
	 * leader — picked by roulette wheel over the diversity factors of the
	 * crowding-sorted archive — to every other leader.
	 *
	 * Fix: the previous loop used {@code i < leaders_.size() && i != index}
	 * as its CONDITION, which stopped iterating the moment it reached the
	 * chosen index, silently dropping every leader after it. The chosen
	 * index is now skipped with a guard inside the loop. A singleton
	 * archive returns 0.0 instead of dividing by zero.
	 *
	 * NOTE(review): the roulette index is computed on the sorted copy
	 * {@code front} but dereferenced on {@code leaders_}; this mirrors the
	 * original behavior — confirm the two collections share ordering.
	 *
	 * @throws JMException propagated from variable access
	 */
	private double bestDistanceEA() throws JMException {
		SolutionSet front = new SolutionSet(leaders_.size());
		for (int i = 0; i < leaders_.size(); i++) {
			front.add(leaders_.get(i));
		}
		front.sort(crowdingDistanceComparator_);

		double summatoryDF = 0.0;
		for (int i = 0; i < front.size(); i++) {
			summatoryDF += front.get(i).getDiversityFactor();
		}

		// Index of the best solution, found using roulette wheel on the
		// pareto front sorted according to its diversity factor.
		int index = rollete.retriveBestIndex(front, summatoryDF);
		Variable[] bestSolution = leaders_.get(index).getDecisionVariables();

		int size = leaders_.size();
		if (size <= 1) {
			return 0.0; // no other leader to measure against
		}

		double dist = 0;
		for (int i = 0; i < size; i++) {
			if (i == index) {
				continue; // skip the chosen leader itself
			}
			Variable[] sol = leaders_.get(i).getDecisionVariables();
			double aux = 0;
			for (int dim = 0; dim < sol.length; dim++) {
				double diff = bestSolution[dim].getValue() - sol[dim].getValue();
				aux += diff * diff;
			}
			dist += Math.sqrt(aux);
		}

		return dist / (size - 1);
	}

	/**
	 * Computes the evolutionary factor
	 * f = (d_best - d_min) / (d_max - d_min) over the leaders archive and
	 * stores it in {@code EFactor_}.
	 *
	 * Fix: when all leaders are equidistant (d_max == d_min, e.g. a
	 * degenerate or singleton archive) the previous code divided by zero
	 * and produced NaN, which then poisoned the adaptive inertia weight
	 * and the state selection. That case now yields 0.0 (no spread is
	 * treated as a converged swarm).
	 *
	 * @throws JMException propagated from the distance computations
	 */
	private double calculateEFactorEA() throws JMException {
		// average distance of each archive member to the others
		double[] distances = averageDistanceEA();

		// extremes of those averages, plus the distance of the "best" leader
		double min = getMinDistance(distances);
		double max = getMaxDistance(distances);
		double bestDistance = bestDistanceEA();

		double EFactor;
		if (max == min) {
			EFactor = 0.0; // no spread: treat as fully converged
		} else {
			EFactor = (bestDistance - min) / (max - min);
		}
		updateGlobalEFactor(EFactor);

		return EFactor;
	}

	// Stores the most recent evolutionary factor; read back by
	// inertiaWeight() in ADAPTIVE mode.
	private void updateGlobalEFactor(double value) {
		this.EFactor_ = value;
	}

	/**
	 * One step of the adaptive behavior: estimate the evolutionary factor,
	 * classify the swarm state, adjust C1_/C2_ accordingly, then clamp and
	 * normalize the coefficients.
	 */
	private void performAdaptiveBehavior() throws JMException {
		double evolutionaryFactor = calculateEFactorEA();
		State swarmState = selectState(evolutionaryFactor);
		updateFactor(swarmState);
		checkAccBounds();
		normAccCoefficients();
	}

}
