package de.mlp_distributed.mlp.core;

import de.mlp_distributed.mlp.math.function.Functions;
import de.mlp_distributed.mlp.math.mahout.Vector;

/**
 * Squashing (activation) functions applied element-wise to a layer's
 * net-input vector. Each constant implements the function and, where
 * meaningful, its derivative expressed in terms of the activations.
 *
 * <p>Note: {@link #apply} and {@link #applyGradient} mutate the given
 * vector in place (via {@code Vector.assign}) and return it.
 */
public enum Squashing {
	//
	LINEAR {
		@Override
		public Vector apply(final Vector v) {
			// Identity: net input is passed through unchanged.
			return v;
		}

		@Override
		public Vector applyGradient(final Vector v) {
			// Intentionally unsupported: linear output layers combine the
			// (constant) gradient directly with the loss term.
			throw new UnsupportedOperationException();
		}
	},
	SIGMOID // standard output squashing for classification of n independent
	// targets
	{
		@Override
		public Vector apply(final Vector v) {
			return v.assign(Functions.SIGMOID);
		}

		@Override
		public Vector applyGradient(final Vector v) {
			// Expects activations a = sigmoid(net); gradient is a * (1 - a).
			return v.assign(Functions.SIGMOIDGRADIENT);
		}
	},

	TANH // standard squashing of the hidden units
	{ // TODO modify tanh, see Yann LeCun et. al.: "Efficient BackProp"
		@Override
		public Vector apply(final Vector v) {
			return v.assign(Functions.TANH);
		}

		@Override
		public Vector applyGradient(final Vector v) {
			// Expects activations a = tanh(net); gradient is 1 - a^2.
			return v.assign(Functions.TANHGRADIENT);
		}
	},

	SOFTMAX // standard output squashing for classification of n mutually
	// exclusive classes
	{ //
		@Override
		public Vector apply(final Vector v) {
			// Shift by the maximum for numerical stability: softmax(v) is
			// invariant under softmax(v - c), but without the shift exp() of
			// large net inputs overflows to Infinity and yields NaN outputs.
			// NOTE(review): assumes Vector.maxValue() and Functions.MINUS are
			// available in this Mahout fork (they are standard Mahout math
			// API, like the PLUS/EXP/DIV members already used) — confirm.
			final double max = v.maxValue();
			v.assign(Functions.MINUS, max);
			final double partitionFunction = v.aggregate(Functions.PLUS, Functions.EXP);
			v.assign(Functions.EXP);
			return v.assign(Functions.DIV, partitionFunction);
		}

		// gradient should never be used:
		@Override
		public Vector applyGradient(final Vector v) {
			throw new UnsupportedOperationException();
		}
	};
	/**
	 * Apply the squashing / activation function in place.
	 * 
	 * @param v
	 *            the net-input vector; mutated in place
	 * @return the same vector, now holding the activations
	 */
	public abstract Vector apply(Vector v);

	/**
	 * Apply the derivative of the squashing / activation function in place.
	 * 
	 * @param a
	 *            : already the activations of the units (not the net inputs);
	 *            mutated in place
	 * @return the same vector, now holding the element-wise derivative
	 * @throws UnsupportedOperationException
	 *             for LINEAR and SOFTMAX, whose gradients are handled
	 *             directly in the loss computation
	 */
	public abstract Vector applyGradient(Vector a);
}
