package org.hit.burkun.swalk.simulation;

import java.util.ArrayList;

import org.apache.commons.math3.optim.PointValuePair;
import org.hit.burkun.file.FileHelper;

import cern.colt.matrix.tdouble.DoubleMatrix1D;
import cern.colt.matrix.tdouble.impl.DenseDoubleMatrix1D;
import edu.stanford.nlp.optimization.DiffFunction;
import edu.stanford.nlp.optimization.QNMinimizer;

/**
 * Experiment driver: repeatedly generates artificial multiplex graphs,
 * learns link-prediction parameters on both the plain network and its
 * LAPC variant, and records the parameter-recovery error of each run.
 */
public class CopyOfLinkpredictionOptimization {

	static int n = 1000; // number of nodes per graph
	static int f1 = 2; // number of features for the first graph

	static int s = 0; // the node whose links we learn, in this case 0 for each
						// graph
	static double alpha = 0.2; // we use no damping factor within the multiplex,
								// interlayer jump instead
	static double b = 1; // WMW (Wilcoxon-Mann-Whitney) loss function parameter
	static double lambda = 1; // regularization parameter
	static double learningRate = 1; // learningRate

	// True parameter vector used to generate the artificial graphs; the
	// optimization below tries to recover these values.
	static double[] param = {1, -1};
	static DoubleMatrix1D parameters = new DenseDoubleMatrix1D(param);

	static int topN = 20;
	static SimpleLapcNetwork lgraph;
	// NOTE(review): "mgrpah" looks like a typo for "mgraph"; name kept because
	// the field is package-visible and may be referenced elsewhere.
	static Network mgrpah;


	/**
	 * Runs 100 independent trials. Each trial generates a fresh artificial
	 * graph, optimizes link-prediction parameters on both the plain network
	 * and its LAPC variant, and records the two recovery errors as
	 * "nolapcError$lapcError". Results are flushed to disk every iteration.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		ArrayList<String> res = new ArrayList<>();
		for (int run = 100; run > 0; run--) {
			ArtificialGraphGenerator.initialize(f1);
			mgrpah = (Network) ArtificialGraphGenerator.generate(n, f1, s);
			ArtificialGraphGenerator
					.buildDandL(mgrpah, topN, parameters, alpha);
			lgraph = new SimpleLapcNetwork(mgrpah);
			ArtificialGraphGenerator.buildDandL(lgraph, topN, parameters, alpha);
			// Run each optimization exactly once and reuse the result. The
			// previous version called nolapc()/lapc() twice per iteration,
			// discarding the first (expensive, stochastic) pair of results.
			res.add(nolapc() + "$" + lapc());
			// Rewrite the results file each iteration so partial results
			// survive a crash of this long-running experiment.
			FileHelper.writeFile("data/cmp100simple.txt", res);
		}
		for (String line : res) {
			System.out.println(line);
		}
	}

	/**
	 * Optimization on the plain multiplex network.
	 *
	 * @return L1 error between true and recovered parameters
	 */
	private static double nolapc() {
		return optimizeAndReport(mgrpah);
	}

	/**
	 * Optimization on the LAPC network.
	 *
	 * @return L1 error between true and recovered parameters
	 */
	private static double lapc() {
		return optimizeAndReport(lgraph);
	}

	/**
	 * Shared optimization routine (previously duplicated verbatim in
	 * nolapc() and lapc()): quasi-Newton minimization with random restarts,
	 * each followed by a gradient-descent refinement phase, then prints the
	 * timing, cost, and true vs. predicted parameters.
	 *
	 * @param graph the graph to train the link predictor on
	 * @return sum of absolute differences between true and predicted
	 *         parameters; Double.MAX_VALUE if no optimum was produced
	 */
	private static double optimizeAndReport(RandomWalkGraph graph) {
		QNMinimizer qn = new QNMinimizer(/* 15, true */);
		// qn.shutUp();
		qn.useMinPackSearch();
		qn.terminateOnAverageImprovement(false);
		double convergenceTolerance = 1e-10;
		double[] initialGuess = new double[f1];
		int maxFunctionEvaluations = 25;
		LinkPredictionTrainer lp = new LinkPredictionTrainer(
				new RandomWalkGraph[] { graph }, f1, alpha, lambda, b,
				learningRate);
		DiffFunction dfunction = new OptimizationFunction(lp);
		int restarts = 3;
		double[] optimum = null;
		double[] currentOptimum;
		double optimalValue = Double.MAX_VALUE;

		GradientDescent gd = new GradientDescent(lp, 50, 1e-6, 1);
		PointValuePair opt;

		// OPTIMIZATION START
		long start = System.nanoTime();

		// Random restarts until we exhaust them, the cost drops low enough,
		// or the quasi-Newton step reports success.
		while (restarts-- > 0 && optimalValue > 6) {
			for (int i = 0; i < initialGuess.length; i++)
				initialGuess[i] = Math.random() * 2 - 1;

			// Coarse phase: smooth loss (large b), aggressive learning rate.
			lp.setB(1);
			lp.setLearningRate(1);
			currentOptimum = qn.minimize(dfunction, convergenceTolerance,
					initialGuess, maxFunctionEvaluations);
			// Refinement phase: sharper loss, small learning rate.
			lp.setB(1e-3);
			lp.setLearningRate(0.003);
			try {
				opt = gd.optimize(currentOptimum);
			} catch (InterruptedException e) {
				// Restore the interrupt flag so callers can observe it,
				// and stop restarting instead of dereferencing a null opt.
				Thread.currentThread().interrupt();
				break;
			}

			if (opt.getSecond() < optimalValue) {
				optimalValue = opt.getSecond();
				optimum = opt.getFirst();
			}
			if (qn.wasSuccessful()) {
				break;
			}
		}

		long end = System.nanoTime();
		// OPTIMIZATION END

		System.out.println();
		System.out.println("\nResults in " + (end - start) / 60E9
				+ " minutes.");
		System.out.println("Cost: " + optimalValue);

		System.out.println();
		System.out.println("TRUE PARAMETERS:");
		for (double p : param)
			System.out.print(p + " ");
		System.out.println();
		System.out.println("PREDICTED PARAMETERS");
		if (optimum == null) {
			// Every restart was interrupted before producing a result.
			System.out.println("(none)");
			return Double.MAX_VALUE;
		}
		for (double p : optimum)
			System.out.print(p + " ");
		System.out.println();

		double error = 0;
		for (int i = 0; i < param.length; i++) {
			error += Math.abs(param[i] - optimum[i]);
		}
		return error;
	}
}