package tp2;

import org.encog.Encog;
import org.encog.engine.network.activation.ActivationFunction;
import org.encog.engine.network.activation.ActivationLOG;
import org.encog.engine.network.activation.ActivationLinear;
import org.encog.ml.data.MLData;
import org.encog.ml.data.MLDataPair;
import org.encog.ml.data.MLDataSet;
import org.encog.ml.data.basic.BasicMLData;
import org.encog.ml.data.basic.BasicMLDataSet;
import org.encog.neural.networks.BasicNetwork;
import org.encog.neural.networks.layers.BasicLayer;
import org.encog.neural.networks.training.propagation.resilient.ResilientPropagation;

/**
 * Singleton wrapper around an Encog feed-forward network that predicts the
 * winner of a match given two player ids. The network is built, trained and
 * evaluated once, on first access via {@link #instance()}.
 */
public class Red {
	
	/*
	 * Tunable training hyper-parameters.
	 * NOTE(review): MARGEN_ERROR = 2 is a very loose target — RPROP's reported
	 * error is normally well below 1 within a few iterations, so the training
	 * loop will often stop after a single epoch. Confirm this is intended.
	 */
	private static final double MARGEN_ERROR = 2;
	private static final int CAPAS_INTERMEDIAS = 1;
	private static final int NODOS_POR_CAPA = 3;
	private static final int LIMITE_DE_ITERACIONES = 10000;
	private static final ActivationFunction FUNCION = new ActivationLOG();
	
	/* Fixed by the problem being solved: two player ids in, one winner id out. */
	private static final int NODOS_CAPA_ENTRADA = 2;
	private static final int NODOS_CAPA_SALIDA = 1;
	
	// Lazily-created singleton instance.
	private static Red _instance = null;
	
	// The trained network; built once in the private constructor.
	private BasicNetwork red = null;
	
	/**
	 * Returns the singleton instance, building and training the network on
	 * first use. Synchronized so two concurrent first calls cannot each
	 * construct (and train) a separate network — the original unsynchronized
	 * lazy check was not thread-safe.
	 */
	public static synchronized Red instance() {
		
		if (_instance == null) {
			_instance = new Red();
		}
		
		return _instance;
	}
	
	/**
	 * Builds the network, trains it on the match data and prints an
	 * evaluation over the training set. Private: use {@link #instance()}.
	 */
	private Red() {
		
		construirRed();
		
		// The same data set is used for training and for the printed evaluation.
		MLDataSet trainingSet = new BasicMLDataSet(DatosDePartidos.INPUT, DatosDePartidos.IDEAL);
		
		entrenar(trainingSet);
		evaluar(trainingSet);
		
		// NOTE(review): shutting Encog down inside the constructor releases its
		// worker threads; later getGanador() calls still only use compute(),
		// but no further training would be possible after this point.
		Encog.getInstance().shutdown();
	}
	
	/** Creates the feed-forward topology: input, hidden layer(s), output. */
	private void construirRed() {
		
		red = new BasicNetwork();
		
		// Input layer: Encog never applies an activation to the input layer,
		// so null is the conventional value here.
		red.addLayer(new BasicLayer(null, true, NODOS_CAPA_ENTRADA));
		
		// Hidden layers.
		for (int i = 0; i < CAPAS_INTERMEDIAS; i++) {
			red.addLayer(new BasicLayer(FUNCION, true, NODOS_POR_CAPA));
		}
		
		// Output layer: an explicit linear (identity) activation replaces the
		// original null — unlike the input layer, the output layer's activation
		// IS applied during compute(), and a null there is not safe.
		red.addLayer(new BasicLayer(new ActivationLinear(), false, NODOS_CAPA_SALIDA));
		
		red.getStructure().finalizeStructure();
		
		// Randomize the initial weights.
		red.reset();
	}
	
	/**
	 * Trains with resilient propagation until the error drops to the target
	 * margin or the iteration cap is reached, logging the error per epoch.
	 */
	private void entrenar(MLDataSet trainingSet) {
		
		final ResilientPropagation train = new ResilientPropagation(red, trainingSet);
		
		int epoch = 1;
		
		do {
			train.iteration();
			
			System.out.println("Epoch #" + epoch + " Error:" + train.getError());
			
			epoch++;
			
		} while (train.getError() > MARGEN_ERROR && epoch < LIMITE_DE_ITERACIONES);
		
		train.finishTraining();
	}
	
	/**
	 * Prints, for each training pair, the raw network output, the input id
	 * nearest to that output, and whether that nearest id matches the ideal.
	 */
	private void evaluar(MLDataSet trainingSet) {
		
		System.out.println("Neural Network Results:");
		
		for (MLDataPair pair : trainingSet) {
			
			final MLData output = red.compute(pair.getInput());
			
			// The network emits a raw value; snap it to whichever of the two
			// input ids it is closest to (ties go to the first player).
			double idAcertado = Math.abs(output.getData(0) - pair.getInput().getData(0)) <= Math.abs(output.getData(0) - pair.getInput().getData(1)) ? pair.getInput().getData(0) : pair.getInput().getData(1);
			
			String acerto = (idAcertado == pair.getIdeal().getData(0)) ? "	OK" : "	MAL";
			
			System.out.println(pair.getInput().getData(0)
								+ "	" + pair.getInput().getData(1)
								+ "	actual=" + output.getData(0)
								+ "	ideal=" + pair.getIdeal().getData(0)
								+ "	Acerto:	" + idAcertado + "	" + acerto);
		}
	}
	
	/**
	 * Feeds the two player ids to the trained network and returns its raw
	 * output (the predicted winner's id, not snapped to either input).
	 *
	 * @param idJug1 id of the first player
	 * @param idJug2 id of the second player
	 * @return the network's raw prediction for the winning id
	 */
	public double getGanador(double idJug1, double idJug2) {
		
		double[] jugadores = { idJug1, idJug2 };
		
		MLData dataJugadores = new BasicMLData(jugadores);
		
		double ganadorId = red.compute(dataJugadores).getData(0);
		
		System.out.println(idJug1 + " --- " + idJug2 + " -> " + ganadorId);
		
		return ganadorId;
	}

}
