package som;

import io.DataSet;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

/**
 * Base class for a self-organizing map (SOM): a {@code width} x {@code height}
 * grid of neurons with {@code dimension}-sized weight vectors. Provides the
 * shared training step ({@link #train}) and single-input classification
 * ({@link #testInput}); subclasses drive the overall training/classification
 * runs through the abstract {@code run*} methods.
 *
 * <p>Not thread-safe: training mutates shared state ({@code neuronCount},
 * neuron synapses) without synchronization.
 */
public abstract class Network {

	protected static final double INITIAL_LEARNING_RATIO = 0.1;
	protected static final double TAU_LEARNING_RATIO = 500;
	protected static final double INITIAL_NEURON_DISPERSION = 5;
	protected int width;
	protected int height;
	protected int dimension;
	protected double initial_shrink_ratio; //Initial neighborhood radius: 20% of the grid diagonal.
	
	protected DataSet dataSet = null;
	protected List<List<Neuron>> inputLayer; //Grid of neurons, indexed inputLayer.get(q).get(p).
	protected Neuron vigilanceRadius;
	protected Random rand = new Random(1L); //Fixed seed so training runs are reproducible.
	protected int[][] classification; //Final classification of the neurons, indexed [p][q].
	
	protected int[][] neuronCount; //Holds the number of times a neuron has been excited (won) during training, indexed [p][q].
	
	/**
	 * Builds the neuron grid with weights drawn uniformly from
	 * {@code [0, INITIAL_NEURON_DISPERSION)}.
	 *
	 * @param width Number of grid columns (p axis); must be positive.
	 * @param height Number of grid rows (q axis); must be positive.
	 * @param dimension Size of the input/weight vectors; must be positive.
	 * @throws IllegalArgumentException if any argument is not positive.
	 */
	public Network(int width, int height, int dimension) {
		if(width <= 0 || height <= 0 || dimension <= 0)
			throw new IllegalArgumentException("width, height and dimension must be positive");
		
		this.width = width;
		this.height = height;
		this.dimension = dimension;
		//20% of the grid diagonal: starting radius of the winner's neighborhood.
		initial_shrink_ratio = 0.2*Math.sqrt((double) width*width + (double) height*height);
		
		classification = new int[width][height];
		neuronCount = new int[width][height];
		
		inputLayer = new ArrayList<>(height);
		for(int q=0; q<height; q++) {
			List<Neuron> row = new ArrayList<>(width);
			for(int p=0; p<width; p++) {
				//Each neuron gets its own freshly allocated weight array.
				double[] initPos = new double[dimension];
				for(int i=0; i<dimension; i++) {
					initPos[i] = rand.nextDouble()*INITIAL_NEURON_DISPERSION;
				}
				row.add(new Neuron(dimension, initPos));
			}
			inputLayer.add(row);
		}
		//Fresh zero array rather than reusing the last neuron's position array
		//(which aliased the two neurons and was null for empty grids). The
		//position doesn't matter for this type of Neuron: its output is fixed.
		vigilanceRadius = new VigilanceNeuron(dimension, new double[dimension]);
	}
	
	/**
	 * Euclidean distance between two GRID coordinates (not weight vectors).
	 *
	 * @return sqrt((p1-p2)^2 + (q1-q2)^2)
	 */
	public double euclideanDistance(int p1, int q1, int p2, int q2) {
		double dp = p1 - p2;
		double dq = q1 - q2;
		return Math.sqrt(dp*dp + dq*dq);
	}
	
	/**
	 * Learning rate at the given step: exponential decay from
	 * {@code INITIAL_LEARNING_RATIO} with time constant {@code TAU_LEARNING_RATIO}.
	 */
	public double learningRatio(int iteration) {
		return INITIAL_LEARNING_RATIO*Math.exp(-(iteration/TAU_LEARNING_RATIO));
	}
	
	/**
	 * Neighborhood radius (sigma) at the given iteration, decaying
	 * exponentially from {@code initial_shrink_ratio}.
	 * NOTE(review): assumes {@code initial_shrink_ratio > 1} so the log-derived
	 * time constant is positive — holds for grids with diagonal > 5.
	 */
	public double shrinkRatio(int iteration) {
		return initial_shrink_ratio*Math.exp(-(iteration/(1000/Math.log(initial_shrink_ratio))));
	}

	/**
	 * Gaussian neighborhood weight for a neuron at the given grid distance
	 * from the winner; values of 0.01 or below are cut off to exactly 0 so
	 * distant neurons are not updated at all.
	 */
	public double neighborhood(int iteration, double distance) {
		double sigma = shrinkRatio(iteration);
		double result = Math.exp(-(distance*distance)/(2*sigma*sigma));
		return result > 0.01 ? result : 0.0; //Cutoff.
	}
	
	/**
	 * Performs one SOM training step: finds the best matching unit for the
	 * input and pulls every neuron's weights towards the input, scaled by the
	 * neighborhood of the winner and the winner's per-neuron learning rate.
	 *
	 * @param iteration Current training iteration (drives neighborhood decay).
	 * @param input Input vector; must have length == dimension.
	 * @throws IllegalArgumentException if input length differs from dimension.
	 */
	public void train(int iteration, Double ... input) {
		if(input.length != dimension)
			throw new IllegalArgumentException("position array must have the same size as dimension");
		
		int p_winner = -1;
		int q_winner = -1;
		double u_winner = Double.NEGATIVE_INFINITY;
		
		//Step 1: find the best matching unit (highest excitation).
		for(int q=0; q<height; q++)
			for(int p=0; p<width; p++) {
				double u = inputLayer.get(q).get(p).excite(input); //primitive double: avoids boxing in the hot loop
				if(u > u_winner) {
					u_winner = u;
					p_winner = p;
					q_winner = q;
				}
			}
		
		neuronCount[p_winner][q_winner]++;
		
		//Loop-invariant: the rate depends only on the winner's win count.
		double learningRatio = learningRatio(neuronCount[p_winner][q_winner]);
		
		//Step 2: update neurons weighted by grid distance to the winner.
		for(int q=0; q<height; q++)
			for(int p=0; p<width; p++) {
				double distance = euclideanDistance(p, q, p_winner, q_winner);
				double neighborhoodValue = neighborhood(iteration, distance);
				if(neighborhoodValue == 0.0)
					continue; //beyond the cutoff: the update below would be a no-op
				
				//Step 3: move the weights towards the input.
				Neuron n = inputLayer.get(q).get(p);
				List<Double> w_old = n.getSynapse();
				double[] w_new = new double[dimension];
				for(int i=0; i<dimension; i++) {
					w_new[i] = w_old.get(i) + learningRatio*neighborhoodValue*(input[i] - w_old.get(i));
				}
				n.setSynapse(w_new);
			}
	}
	
	/**
	 * Classifies a single input vector: prints the winning neuron and its
	 * class, or a "no winner" message when no neuron's excitation exceeds
	 * the vigilance radius.
	 *
	 * @param input Input vector; must have length == dimension.
	 * @throws IllegalArgumentException if input length differs from dimension.
	 */
	public void testInput(Double ... input) {
		if(input.length != dimension)
			throw new IllegalArgumentException("position array must have the same size as dimension");
		
		int p_winner = -1;
		int q_winner = -1;
		double u_winner = VigilanceNeuron.VIGILANCE_RADIUS; //only excitations above the vigilance radius may win
		
		for(int q=0; q<height; q++)
			for(int p=0; p<width; p++) {
				double u = inputLayer.get(q).get(p).excite(input);
				if(u > u_winner) {
					u_winner = u;
					p_winner = p;
					q_winner = q;
				}
			}
		
		//BUG FIX: previously an input that excited no neuron above the
		//vigilance radius left p_winner/q_winner at -1 and the print below
		//threw ArrayIndexOutOfBoundsException on classification[-1][-1].
		if(p_winner < 0) {
			System.out.println("\nNo winning neuron: no excitation exceeded the vigilance radius (" + VigilanceNeuron.VIGILANCE_RADIUS + ")");
			return;
		}
		
		System.out.println("\nWinning neuron is: " + p_winner + " x " + q_winner + "\nWith u=" + u_winner + " (class: " + classification[p_winner][q_winner] + ")");
	}
	
	/**
	 * Does the training.
	 */
	public abstract void run();
	
	/**
	 * Does the classification, nominally (discrete class labels).
	 */
	public abstract void runCategorization();
	
	/**
	 * Does the classification, continuously (interpolated output).
	 */
	public abstract void runContinuousCategorization();
	
	/**
	 * Asserts if the network gives the correct output for the entered row.
	 * @param row Row to be tested.
	 * @return true if correct, false otherwise.
	 */
	public abstract boolean assertCorrectness(int row);
	
	/**
	 * Asserts the error between the continuously classified neuron excited by the entered row, and the correct output.
	 * @param row Row to be tested.
	 * @return The error between row and the excited neuron.
	 */
	public abstract double assertError(int row);
	
	/**
	 * Prints the U-Matrix.
	 */
	public abstract void runUMatrix();
}
