/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

package com.leadiv.ai.neuralnet;

import java.io.IOException;
import java.util.Iterator;
import java.util.Vector;
import com.leadiv.ai.minesweeper.util.Params;
import org.xml.sax.SAXException;

/**
 *
 * @author pborrego
 */
public class CNeuralNet {
	private int m_NumInputs;
	private int m_NumOutputs;
	private int m_NumHiddenLayers;
	private int m_NeuronsPerHiddenLyr;

	// configuration loaded from params.ini; cached at construction so
	// Update() does not re-read and re-parse the file on every call
	// (the original created a new Params per Update() invocation)
	private Params m_Params;

	// storage for each layer of neurons including the output layer
	private Vector<SNeuronLayer> m_vecLayers = new Vector<SNeuronLayer>();

        //------------------------------default ctor --------------------------
        //
        //	creates a ANN based on the default values in params.ini
        //
        //	throws SAXException / IOException if params.ini cannot be
        //	read or parsed (propagated from the Params constructor)
        //---------------------------------------------------------------------
        public CNeuralNet() throws SAXException, IOException
        {
            m_Params = new Params();
            m_NumInputs = (int)m_Params.iNumInputs;
            m_NumOutputs = (int)m_Params.iNumOutputs;
            m_NumHiddenLayers = (int)m_Params.iNumHidden;
            m_NeuronsPerHiddenLyr = (int)m_Params.iNeuronsPerHiddenLayer;

            CreateNet();
        }

        //------------------------------createNet()----------------------------
        //
        // this method builds the ANN. The weights are all initially set to
        // random values -1 < w < 1
        //---------------------------------------------------------------------
        public void CreateNet()
        {
            // start from an empty layer list: the original appended without
            // clearing, so a second call would have duplicated every layer
            m_vecLayers.clear();

            //create the layers of the network
            if (m_NumHiddenLayers > 0)
            {
                //first hidden layer is fed directly by the network inputs
                m_vecLayers.add(new SNeuronLayer(m_NeuronsPerHiddenLyr,
                        m_NumInputs));

                //remaining hidden layers are fed by the previous hidden layer
                for (int i=0; i<m_NumHiddenLayers-1; ++i)
                {
                    m_vecLayers.add(new SNeuronLayer(m_NeuronsPerHiddenLyr,
                            m_NeuronsPerHiddenLyr));
                }

                //create output layer
                m_vecLayers.add(new SNeuronLayer(m_NumOutputs,
                        m_NeuronsPerHiddenLyr));
            }
            else
            {
                  //no hidden layers: the output layer reads the inputs directly
                  m_vecLayers.add(new SNeuronLayer(m_NumOutputs, m_NumInputs));
            }
        }

        //---------------------------------GetWeights--------------------------
        //
        //	returns a vector containing the weights
        //
        //	Weights are emitted layer by layer, neuron by neuron, in the
        //	same order PutWeights() consumes them. Does not modify any
        //	member variables.
        //---------------------------------------------------------------------
	public Vector<Double> GetWeights()
        {
            //this will hold the weights
            Vector<Double> weights = new Vector<Double>();

            //for each layer (hidden layers plus the output layer)
            for (int i=0; i<m_NumHiddenLayers + 1; ++i)
            {
                //for each neuron
                for (int j=0; j<m_vecLayers.get(i).m_NumNeurons; ++j)
                {
                    //for each weight
                    for (int k=0; k<m_vecLayers.get(i).m_vecNeurons.get(j).m_NumInputs; ++k)
                    {
                        weights.add(m_vecLayers.get(i).m_vecNeurons.get(j).m_vecWeight.get(k));
                    }
                }
            }

            return weights;
        }

        //---------------------------------GetNumberOfWeights------------------
        //
        //	returns the total number of weights needed for the net
        //
        //	Sums each neuron's input count directly instead of the
        //	original one-increment-per-weight triple loop. Does not
        //	modify any member variables.
        //---------------------------------------------------------------------
	public int GetNumberOfWeights()
        {
            int weights = 0;

            //for each layer
            for (int i=0; i<m_NumHiddenLayers + 1; ++i)
            {
                //for each neuron, its weight count equals its input count
                //(the bias weight is already included in m_NumInputs)
                for (int j=0; j<m_vecLayers.get(i).m_NumNeurons; ++j)
                {
                    weights += m_vecLayers.get(i).m_vecNeurons.get(j).m_NumInputs;
                }
            }

            return weights;
        }

        //-----------------------------------PutWeights------------------------
        //
        // given a vector of doubles this function replaces the weights in the
        //  NN with the new values
        //
        // weights must contain at least GetNumberOfWeights() entries, in
        // the same order produced by GetWeights(); otherwise an
        // ArrayIndexOutOfBoundsException is thrown by Vector.get.
        //---------------------------------------------------------------------
	public void PutWeights(Vector<Double> weights)
        {
            int cWeight = 0;

            //for each layer
            for (int i=0; i<m_NumHiddenLayers + 1; ++i)
            {
                //for each neuron
                for (int j=0; j<m_vecLayers.get(i).m_NumNeurons; ++j)
                {
                    //for each weight
                    for (int k=0; k<m_vecLayers.get(i).m_vecNeurons.get(j).m_NumInputs; ++k)
                    {
                        m_vecLayers.get(i).m_vecNeurons.get(j).m_vecWeight.set(k, weights.get(cWeight++));
                    }
                }
            }
        }

        //-------------------------------Update--------------------------------
        //
        // given an input vector this function calculates the output vector
        //
        // Returns an empty vector if inputs.size() != the configured input
        // count. The caller's vector is no longer modified: the original
        // cleared and refilled the argument in place between layers, an
        // unintended side effect of Java's reference semantics.
        //---------------------------------------------------------------------
	public Vector<Double> Update(Vector<Double> inputs) throws SAXException, IOException
        {
            //stores the resultant outputs from each layer
            Vector<Double> outputs = new Vector<Double>();

            //first check that we have the correct amount of inputs
            if (inputs.size() != m_NumInputs)
            {
                //invalid input size -> empty output, matching the original
                return outputs;
            }

            //work on a copy so the caller's vector is left untouched
            Vector<Double> layerInputs = new Vector<Double>(inputs);

            //For each layer....
            for (int i = 0; i < m_NumHiddenLayers + 1; ++i)
            {
                //from the second layer on, the inputs are the previous
                //layer's outputs
                if (i > 0)
                {
                    layerInputs = new Vector<Double>(outputs);
                }

                outputs.clear();

                SNeuronLayer layer = m_vecLayers.get(i);

                //for each neuron sum the (inputs * corresponding weights).Throw
                //the total at our sigmoid function to get the output.
                for (int j = 0; j < layer.m_NumNeurons; ++j)
                {
                    double netinput = 0;

                    int NumInputs = layer.m_vecNeurons.get(j).m_NumInputs;

                    //for each real input weight (the last slot is the bias)
                    for (int k = 0; k < NumInputs - 1; ++k)
                    {
                        //sum the weights x inputs
                        netinput += layer.m_vecNeurons.get(j).m_vecWeight.get(k) * layerInputs.get(k);
                    }

                    //add in the bias (stored as the final weight)
                    netinput += layer.m_vecNeurons.get(j).m_vecWeight.get(NumInputs - 1) * m_Params.dBias;

                    //we can store the outputs from each layer as we generate them.
                    //The combined activation is first filtered through the sigmoid
                    //function
                    outputs.add(Sigmoid(netinput, m_Params.dActivationResponse));
                }
            }

            return outputs;
        }

        //-------------------------------Sigmoid function----------------------
        //
        // logistic sigmoid: maps netinput to (0, 1); response controls the
        // steepness of the curve
        //---------------------------------------------------------------------
	public double Sigmoid(double netinput, double response)
        {
            return ( 1 / ( 1 + Math.exp(-netinput / response)));
        }
}
