﻿using System;
using System.Collections.Generic;

namespace TLD_NN {
    public class SOM {
        private int inputCount, outputCount;
        public double learningFactor { get; private set; }
        public Neuron[] outputNodes { get; private set; }

        private const double LEARNING_RATE_REDUCTION = 0.9995;
        private const double LEARNING_FACTOR = 0.55;
        // Floor below which the learning factor stops decaying.
        private const double MIN_LEARNING_FACTOR = 0.1;

        // Single shared PRNG. Constructing a new Random() per call is time-seeded
        // on .NET Framework, so rapid successive calls can yield identical
        // (fully correlated) sequences.
        private readonly Random rand = new Random();

        /// <summary>
        /// Creates an untrained self-organizing map with the specified number of input and output neurons.
        /// </summary>
        /// <param name="inputCount">The amount of input neurons.</param>
        /// <param name="outputCount">The amount of output neurons.</param>
        public SOM(int inputCount, int outputCount) {
            this.inputCount = inputCount;
            this.outputCount = outputCount;
            outputNodes = new Neuron[outputCount];
        }

        /// <summary>
        /// Initializes the entire network, setting all weights to a random value between -0.5 and 0.5,
        /// and resets the learning factor to its starting value.
        /// </summary>
        public void InitializeNetwork() {
            for (int i = 0; i < outputNodes.Length; ++i) {
                Neuron n = new Neuron(inputCount);
                for (int j = 0; j < n.Weights.Length; ++j) {
                    n.Weights[j] = rand.NextDouble() - 0.5;
                }

                outputNodes[i] = n;
            }
            learningFactor = LEARNING_FACTOR;
        }

        /// <summary>
        /// Trains the network on the trainingSet using the specified maximum number of iterations.
        /// The network is (re)initialized before training starts.
        /// </summary>
        /// <param name="maxIterations">The maximum amount of iterations that may be used.</param>
        /// <param name="trainingSet">The normalized <see cref="Normalizer"/> sample set that should be used for training.</param>
        public void Train(int maxIterations, double[][] trainingSet) {
            InitializeNetwork();

            for (int i = 0; i < maxIterations; i++) {
                Train(trainingSet);
            }
        }

        /// <summary>
        /// Performs a single training step (epoch), running each of the samples in the dataset
        /// once, in a random order.
        /// </summary>
        /// <param name="trainingSet">The normalized <see cref="Normalizer"/> sample set that should be used for training.</param>
        public void Train(double[][] trainingSet) {
            int numSamples = trainingSet.Length;

            // Visit the samples in random order via a Fisher-Yates shuffle.
            // The previous List.Remove-based draw was a linear search per
            // sample, i.e. O(n^2) per epoch; this is O(n) with the same
            // uniform-permutation semantics.
            int[] order = new int[numSamples];
            for (int j = 0; j < numSamples; ++j) {
                order[j] = j;
            }
            for (int j = numSamples - 1; j > 0; --j) {
                int k = rand.Next(j + 1);
                int tmp = order[j];
                order[j] = order[k];
                order[k] = tmp;
            }

            for (int j = 0; j < numSamples; j++) {
                double[] sample = trainingSet[order[j]];
                int winner = WinnerDotProduct(sample);

                UpdateWeightsAdditive(outputNodes[winner], sample, learningFactor);
            }

            // Exponentially decay the learning factor until it reaches the floor.
            if (learningFactor > MIN_LEARNING_FACTOR) {
                learningFactor *= LEARNING_RATE_REDUCTION;
            }
        }


        /// <summary>
        /// Updates weights based on the Incremental SOM algorithm described by Kohonen:
        /// w_new = w_old + learningFactor * (input - w_old).
        /// </summary>
        /// <param name="neuron">The neuron whose weights should be updated.</param>
        /// <param name="input">The input sample the weight calculation should be based on.</param>
        /// <param name="learningFactor">The learning factor for this process.</param>
        private void UpdateWeightsDist(Neuron neuron, double[] input, double learningFactor) {
            for (int i = 0; i < neuron.Weights.Length; ++i) {
                neuron.Weights[i] = neuron.Weights[i] + learningFactor * (input[i] - neuron.Weights[i]);
            }
        }

        /// <summary>
        /// Updates weights based on the Dot-Product SOM algorithm described by Kohonen.
        /// Implements: lambda = old weight + learning factor * input sample;
        /// new weight = lambda / Length(lambda).
        /// </summary>
        /// <param name="neuron">The neuron whose weights should be updated.</param>
        /// <param name="input">The input sample the weight calculation should be based on.</param>
        /// <param name="learningFactor">The learning factor for this process.</param>
        private void UpdateWeightsAdditive(Neuron neuron, double[] input, double learningFactor) {
            double[] vect = new double[input.Length];
            for (int i = 0; i < vect.Length; ++i) {
                vect[i] = neuron.Weights[i] + input[i] * learningFactor;
            }

            double len = 0;
            for (int i = 0; i < vect.Length; ++i) {
                len += vect[i] * vect[i];
            }
            len = Math.Sqrt(len);

            // Guard: normalizing a zero vector would divide by zero and set
            // every weight to NaN, permanently poisoning the neuron. Leave the
            // weights unchanged in that degenerate case.
            if (len == 0) {
                return;
            }

            for (int i = 0; i < neuron.Weights.Length; ++i) {
                neuron.Weights[i] = vect[i] / len;
            }
        }


        /// <summary>
        /// Calculates the winning neuron (or BMU) for the given input based on Kohonen's Incremental SOM algorithm
        /// (smallest distance as reported by <see cref="Neuron.GetDistance"/>).
        /// </summary>
        /// <param name="input">The sample for which a winner should be determined.</param>
        /// <returns>The index of the winning neuron.</returns>
        public int WinnerDist(double[] input) {
            double minDistance = outputNodes[0].GetDistance(input);
            int index = 0;

            for (int i = 1; i < outputCount; i++) {
                double dist = outputNodes[i].GetDistance(input);
                if (dist < minDistance) {
                    minDistance = dist;
                    index = i;
                }
            }

            return index;
        }

        /// <summary>
        /// Calculates the winning neuron (or BMU) for the given input based on Kohonen's Dot-product algorithm
        /// (largest dot product between the input and a neuron's weight vector).
        /// </summary>
        /// <param name="input">The sample for which a winner should be determined.</param>
        /// <returns>The index of the winning neuron.</returns>
        public int WinnerDotProduct(double[] input) {
            double[] responses = new double[outputCount];
            for (int i = 0; i < outputCount; ++i) {
                for (int j = 0; j < input.Length; ++j) {
                    responses[i] += input[j] * outputNodes[i].Weights[j];
                }
            }

            double max = responses[0];
            int index = 0;
            for (int i = 1; i < responses.Length; ++i) {
                if (responses[i] > max) {
                    max = responses[i];
                    index = i;
                }
            }

            return index;
        }

        /// <summary>
        /// Maps each record of the given sampleSet to the index of its best matching unit (BMU).
        /// </summary>
        /// <param name="sampleSet">A set of samples that should be mapped to output neurons.</param>
        /// <returns>Array mapping the index of the input sampleSet to the index of its BMU.</returns>
        public int[] MapInput(double[][] sampleSet) {
            // NOTE(review): training picks winners with WinnerDotProduct, but
            // mapping uses WinnerDist. Plausibly intentional (weights are
            // unit-normalized by UpdateWeightsAdditive) — confirm the two
            // criteria are meant to differ here.
            int[] inputMap = new int[sampleSet.Length];
            for (int i = 0; i < inputMap.Length; ++i) {
                inputMap[i] = WinnerDist(sampleSet[i]);
            }

            return inputMap;
        }
    }
}
