/*
 * JANN - a Java toolkit for creating arbitrary Artificial Neural Networks.
 * 
 * Copyright (c) 2009 Matthijs Snel
 * 
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.jann;

import java.io.Serializable;
import java.util.Observable;
import java.util.Random;


/**
 * Base class for the trainable elements of a neural network (links and neuron
 * bias units). Holds a single weight together with the learning parameters
 * (learning rate, momentum, weight-decay prior) used to update that weight
 * during backpropagation.
 *
 * Extends {@link Observable} so that interested parties (e.g. visualisers)
 * are notified whenever the weight changes via {@link #setWeight(double)}.
 * Note: not thread-safe; training is assumed to be single-threaded.
 */
public abstract class NeuralElement extends Observable implements Serializable {

	private static final long serialVersionUID = 7385624549080523124L;

	/** Current weight of this element; observers are notified on change. */
	protected double weight;
	
	/**
	 * Previous weight change. Used for applying momentum in weight updates.
	 * Transient: momentum history is training state, not persisted network state.
	 */
	protected transient double prevWeightChange;
	
	/**
	 * Prior over the weights, used as regularisation parameter to prevent unbound weight growth.
	 */
	protected double alpha;
	
	/**
	 * Learning rate, influences magnitude of weight change in direction of error gradient.
	 */
	protected double learnRate;
	
	/**
	 * Momentum parameter for faster convergence.
	 */
	protected double momentum;
	
	/**
	 * Random number generator for weight initialisation.
	 */
	protected Random rand;
	
	/**
	 * Creates an element with default parameters (alpha 0, learning rate 0.1,
	 * momentum 0, time-seeded RNG).
	 */
	protected NeuralElement() {
		initParameters();
		prevWeightChange = 0;
	}
	
	/**
	 * Creates an element configured from the given parameter set.
	 *
	 * @param params source of ALPHA, LEARNING_RATE, MOMENTUM and RNG values;
	 *               all four keys must be present and non-null
	 * @throws NullPointerException if any required key is missing (unboxing null)
	 */
	protected NeuralElement( Parameters params ) {
		setParameters( params );
		prevWeightChange = 0;
	}

	/**
	 * @return the current weight of this element
	 */
	public double getWeight() {
		return weight;
	}

	/**
	 * Sets the weight and notifies all registered observers of the change.
	 *
	 * @param weight the new weight value
	 */
	public void setWeight(double weight) {
		this.weight = weight;
		setChanged();
		notifyObservers();
	}

	/**
	 * Applies all learning parameters from the given parameter set.
	 *
	 * @param params source of ALPHA, LEARNING_RATE, MOMENTUM and RNG values;
	 *               all four keys must be present and non-null
	 * @throws NullPointerException if any required key is missing (unboxing null)
	 * @throws ClassCastException if a key maps to a value of the wrong type
	 */
	public void setParameters( Parameters params ) {
		setAlpha( (Double) params.get(Parameters.ALPHA) );
		setLearnRate( (Double) params.get(Parameters.LEARNING_RATE) );
		setMomentum( (Double) params.get(Parameters.MOMENTUM) );
		setRandomNumberGenerator( (Random) params.get(Parameters.RNG) );
	}
	
	/**
	 * @return the regularisation parameter (weight prior)
	 */
	public double getAlpha() {
		return alpha;
	}

	/**
	 * @param alpha the regularisation parameter (weight prior)
	 */
	public void setAlpha(double alpha) {
		this.alpha = alpha;
	}

	/**
	 * @return the learning rate
	 */
	public double getLearnRate() {
		return learnRate;
	}
	
	/**
	 * @param rate the learning rate
	 */
	public void setLearnRate( double rate ) {
		this.learnRate = rate;
	}
	
	/**
	 * @return the momentum parameter
	 */
	public double getMomentum() {
		return momentum;
	}

	/**
	 * @param momentum the momentum parameter
	 */
	public void setMomentum(double momentum) {
		this.momentum = momentum;
	}
	
	/**
	 * Sets the random number generator used for weight initialisation.
	 *
	 * @param r the generator; must not be null or {@link #initWeight()} will fail
	 */
	public void setRandomNumberGenerator( Random r ) {
		rand = r;
	}
	
	/**
	 * Propagates the given error term backwards through this element.
	 *
	 * @param delta the error term arriving from the postsynaptic side
	 */
	public abstract void backProp( double delta );
	
	/**
	 * Propagates the given activation forwards through this element.
	 *
	 * @param input the activation arriving from the presynaptic side
	 */
	public abstract void feedForward( double input );
	
	/**
	 * Initialises the weight to a small random value drawn uniformly from
	 * [-0.1, 0.1), using the configured random number generator. Observers
	 * are notified via {@link #setWeight(double)}.
	 */
	public void initWeight() {
		// rand.nextDouble() is uniform in [0, 1), so this yields [-0.1, 0.1).
		setWeight( 0.2 * rand.nextDouble() - 0.1 );
	}
	
	/**
	 * Returns the activation of the neural element that sends to this element. For links, this
	 * is the activation of the presynaptic neuron. For neurons, this is 1, since the sending
	 * element is taken to be the neuron's bias unit.
	 * 
	 * This function is used in the calculation of the weight change of the element.
	 * 
	 * @return the activation of the neural element that sends to this element.
	 */
	protected abstract double getSendingActivation();
	
	/**
	 * Installs default learning parameters: no regularisation (alpha 0),
	 * learning rate 0.1, no momentum, and a time-seeded RNG.
	 */
	protected void initParameters() {
		setAlpha(0);
		setLearnRate(0.1);
		setMomentum(0);
		setRandomNumberGenerator( new Random( System.currentTimeMillis() ));
	}
	
	/**
	 * Performs a gradient-descent weight update with weight decay and momentum:
	 * dw = -eta * (delta * a + alpha * w) + mu * dw_prev, where a is the
	 * sending activation. The change is recorded for the next momentum step
	 * and observers are notified via {@link #setWeight(double)}.
	 *
	 * @param delta the error term for this element
	 */
	protected void updateWeight( double delta ) {
		double weightChange = -learnRate * (delta * getSendingActivation() + alpha * weight) 
								+ momentum * prevWeightChange;
		setWeight( weight + weightChange );
		prevWeightChange = weightChange;
	}
}
