/*
Copyright 2013 Lin, Chung-Ming (aka Alva Lin. E-mail: alva0930@gmail.com)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.io.*;
import Jama.*;	// The 'Jama' package could be downloaded from: http://math.nist.gov/javanumerics/jama/
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.util.ReflectionUtils;

/**
 * This class implements {@link CostGradient} interface to evaluate the cost and the partial derivatives for a given set of parameters,
 * which represent the weights of a single-hidden-layer logistic-units neural network,
 * subject to a set of training data and a specific lambda value.
 * The user of this class specifies the shape of neural network (input layer size, hidden layer size, number of output labels),
 * the set of training data (input features and target values) and the lambda value in the constructor,
 * and then invokes evaluate(...) to evaluate the cost and the partial derivatives for a given set of parameters.
 * <P>
 * The following example illustrates how to get the trained weights for a 3-class classification neural network and check the train accuracy.
 * <DL>
 * <DT><B>Sample Code(Standalone version):</B></DT>
 * <DD>// The number of train cases is 6 and the number of features (input layer size) is 4.</DD>
 * <DD>double[][] train_feature = {</DD>
 * <DD>{3.1, 4.1, 5.9, 2.6},</DD>
 * <DD>{5.3, 5.8, 9.7, 9.3},</DD>
 * <DD>{2.3, 8.4, 6.2, 6.4},</DD>
 * <DD>{3.3, 8.3, 2.7, 9.5},</DD>
 * <DD>{0.2, 8.8, 4.1, 9.7},</DD>
 * <DD>{1.6, 9.3, 9.9, 3.7}};</DD>
 * <DD>int[] train_target = {0, 0, 1, 1, 2, 2};</DD>
 * <DD>int input_layer_size = 4;</DD>
 * <DD>int hidden_layer_size = 5; // adjustable parameter</DD>
 * <DD>int num_labels = 3;</DD>
 * <DD>double lambda = 1.0; // adjustable parameter</DD>
 * <DD>{@link CostGradient} cg = new SingleHiddenLayerNNCostGradient(input_layer_size, hidden_layer_size, num_labels, train_feature, train_target, lambda);</DD>
 * <DD>double range = 0.12; // adjustable parameter</DD>
 * <DD>{@link Matrix} theta1 = {@link MatrixFunction}.random(hidden_layer_size, input_layer_size + 1, range); // 1 for bias unit</DD>
 * <DD>{@link Matrix} theta2 = {@link MatrixFunction}.random(num_labels, hidden_layer_size + 1, range); // 1 for bias unit</DD>
 * <DD>double[] initial_parameters = {@link MatrixFunction}.unroll(theta1, theta2);</DD>
 * <DD>int MaxIter = 100; // adjustable parameter</DD>
 * <DD>{@link Fmincg} fmincg = new {@link Fmincg}();</DD>
 * <DD>fmincg.minimize(cg, initial_parameters, MaxIter); // cg.evaluate(...) would be invoked inside this function</DD>
 * <DD>double[] cost_minimized_parameters = fmincg.getParameter();</DD>
 * <DD>int weight_size_input_to_hidden = (input_layer_size + 1) * hidden_layer_size;</DD>
 * <DD>theta1 = {@link MatrixFunction}.reshape(cost_minimized_parameters, 0, weight_size_input_to_hidden - 1, hidden_layer_size, input_layer_size + 1);</DD>
 * <DD>theta2 = {@link MatrixFunction}.reshape(cost_minimized_parameters, weight_size_input_to_hidden, cost_minimized_parameters.length - 1, num_labels, hidden_layer_size + 1);</DD>
 * <DD>{@link Matrix} p = {@link MatrixFunction}.predictSingleHiddenLayerNN(new {@link Matrix}(train_feature), theta1, theta2);</DD>
 * <DD>System.out.println("Train Accuracy: " + {@link MatrixFunction}.mean({@link MatrixFunction}.equal(p, new {@link Matrix}(train_target, train_target.length))));</DD>
 * </DL>
 * <P>
 * If an active hadoop cluster is available,
 * the user may improve the speed of evaluate(...) by providing the path of Hadoop home directory and the path of SingleHiddenLayerNN.jar in the constructor.
 * The user may also provide the fully qualified name of a {@link CompressionCodec} implementation to compress the intermediate data of the map-reduce process.
 * <DL>
 * <DT><B>Sample Code(Hadoop cluster map-reduce version):</B></DT>
 * <DD>... // This part is the same as above example.</DD>
 * <DD>double lambda = 1.0; // adjustable parameter</DD>
 * <DD>{@link String} hadoop_home = "/home/ec2-hadoop/hadoop-1.0.4";</DD>
 * <DD>{@link String} jar_path = "/home/ec2-hadoop/hadoop-1.0.4/SingleHiddenLayerNN.jar";</DD>
 * <DD>{@link String} codec_name = "org.apache.hadoop.io.compress.GzipCodec";</DD>
 * <DD>{@link CostGradient} cg = new SingleHiddenLayerNNCostGradient(input_layer_size, hidden_layer_size, num_labels, train_feature, train_target, lambda, hadoop_home, jar_path, codec_name);</DD>
 * <DD>... // This part is the same as above example.</DD>
 * </DL>
 *
 * @see	Fmincg
 * @see	Matrix
 * @see	MatrixFunction
 * @author	Lin, Chung-Ming (aka Alva Lin. E-mail: alva0930@gmail.com)
 */
public class SingleHiddenLayerNNCostGradient implements CostGradient
{
	/** Tolerance used when asserting that lambda is non-negative. */
	private static final double EPS = 1e-12;

	private int input_layer_size;	// number of input units, excluding the bias unit
	private int hidden_layer_size;	// number of hidden units, excluding the bias unit
	private int num_labels;	// number of output units (classes)
	private Matrix X;	// training features with a leading bias column of ones; size m X (input_layer_size + 1)
	private Matrix y;	// training targets as a column vector of label indices; size m X 1
	private double lambda;	// regularization strength
	private String tempDir;	// HDFS working directory used by the map-reduce version
	private String command;	// shell command launching the map-reduce job; null selects the standalone version
	private CompressionCodec codec;	// codec for intermediate HDFS data; null means no compression

	private double cost = Double.POSITIVE_INFINITY;	// result of the last evaluate(...); +infinity until then
	private double[] gradient = null;	// result of the last evaluate(...); null until then

/* ------------------------
   Constructors
 * ------------------------ */

	/**
	 * Construct a standalone version to evaluate the cost and the partial derivatives for a given set of parameters.
	 * The input_layer_size and hidden_layer_size should be greater than 0.
	 * The num_labels should be greater than 1.
	 * The number of rows of X should be the same as the length of y.
	 * The number of columns of X should be the same as the input_layer_size.
	 * The value of lambda should be greater than or equal to 0.
	 * All the values of array y should be in the range of 0(inclusive) and num_labels(exclusive).
	 * Any violation of above restrictions will cause an {@link AssertionError} when assertions are enabled,
	 * or cause unexpected result when assertions are disabled.
	 *
	 * @param	input_layer_size	the number of input units of the neural network (not including the bias unit)
	 * @param	hidden_layer_size	the number of hidden units of the neural network (not including the bias unit)
	 * @param	num_labels	the number of output units of the neural network
	 * @param	X	the training features, of size &lt;number of train cases&gt; X input_layer_size
	 * @param	y	the training targets, of size &lt;number of train cases&gt;
	 * @param	lambda	the regularization parameter to prevent over-fitting; the higher this value is, the stronger the regularization is.
	 */
	public SingleHiddenLayerNNCostGradient(int input_layer_size, int hidden_layer_size, int num_labels, double[][] X, int[] y, double lambda)
	{
		this(input_layer_size, hidden_layer_size, num_labels, X, y, lambda, null, null, null);
	}

	/**
	 * Construct a Hadoop cluster map-reduce version to evaluate the cost and the partial derivatives for a given set of parameters.
	 * The input_layer_size and hidden_layer_size should be greater than 0.
	 * The num_labels should be greater than 1.
	 * The number of rows of X should be the same as the length of y.
	 * The number of columns of X should be the same as the input_layer_size.
	 * The value of lambda should be greater than or equal to 0.
	 * All the values of array y should be in the range of 0(inclusive) and num_labels(exclusive).
	 * Any violation of above restrictions will cause an {@link AssertionError} when assertions are enabled,
	 * or cause unexpected result when assertions are disabled.
	 *
	 * @param	input_layer_size	the number of input units of the neural network (not including the bias unit)
	 * @param	hidden_layer_size	the number of hidden units of the neural network (not including the bias unit)
	 * @param	num_labels	the number of output units of the neural network
	 * @param	X	the training features, of size &lt;number of train cases&gt; X input_layer_size
	 * @param	y	the training targets, of size &lt;number of train cases&gt;
	 * @param	lambda	the regularization parameter to prevent over-fitting; the higher this value is, the stronger the regularization is.
	 * @param	hadoop_home	the path of Hadoop home directory; if null then standalone version would be used
	 * @param	jar_path	the path of SingleHiddenLayerNN.jar; if null then standalone version would be used
	 */
	public SingleHiddenLayerNNCostGradient(int input_layer_size, int hidden_layer_size, int num_labels, double[][] X, int[] y, double lambda, String hadoop_home, String jar_path)
	{
		this(input_layer_size, hidden_layer_size, num_labels, X, y, lambda, hadoop_home, jar_path, null);
	}

	/**
	 * Construct a Hadoop cluster map-reduce version to evaluate the cost and the partial derivatives for a given set of parameters.
	 * The input_layer_size and hidden_layer_size should be greater than 0.
	 * The num_labels should be greater than 1.
	 * The number of rows of X should be the same as the length of y.
	 * The number of columns of X should be the same as the input_layer_size.
	 * The value of lambda should be greater than or equal to 0.
	 * All the values of array y should be in the range of 0(inclusive) and num_labels(exclusive).
	 * Any violation of above restrictions will cause an {@link AssertionError} when assertions are enabled,
	 * or cause unexpected result when assertions are disabled.
	 *
	 * @param	input_layer_size	the number of input units of the neural network (not including the bias unit)
	 * @param	hidden_layer_size	the number of hidden units of the neural network (not including the bias unit)
	 * @param	num_labels	the number of output units of the neural network
	 * @param	X	the training features, of size &lt;number of train cases&gt; X input_layer_size
	 * @param	y	the training targets, of size &lt;number of train cases&gt;
	 * @param	lambda	the regularization parameter to prevent over-fitting; the higher this value is, the stronger the regularization is.
	 * @param	hadoop_home	the path of Hadoop home directory; if null then standalone version would be used
	 * @param	jar_path	the path of SingleHiddenLayerNN.jar; if null then standalone version would be used
	 * @param	codec_name	the fully qualified name of a {@link CompressionCodec} implementation; if null then no compression would be used
	 */
	public SingleHiddenLayerNNCostGradient(int input_layer_size, int hidden_layer_size, int num_labels, double[][] X, int[] y, double lambda, String hadoop_home, String jar_path, String codec_name)
	{
		assert(input_layer_size > 0);
		assert(hidden_layer_size > 0);
		assert(num_labels > 1);
		assert(X.length == y.length);
		assert(X[0].length == input_layer_size);
		assert(lambda > (-EPS));	// lambda must be non-negative (within floating-point tolerance)

		// Copy the integer targets into a double array so they can be stored as a Matrix.
		double[] Y = new double[y.length];
		for (int i = 0; i < Y.length; i++)
		{
			assert(0 <= y[i] && y[i] < num_labels);
			Y[i] = y[i];
		}

		this.input_layer_size = input_layer_size;
		this.hidden_layer_size = hidden_layer_size;
		this.num_labels = num_labels;
		this.X = MatrixFunction.appendH(new Matrix(X.length, 1, 1), new Matrix(X));	// prepend a column of ones for the bias unit
		this.y = new Matrix(Y, Y.length);
		this.lambda = lambda;
		command = null;

		if (null != hadoop_home && null != jar_path)
		{
			// A unique working directory per instance, so concurrent instances do not collide on HDFS.
			tempDir = "./SingleHiddenLayerNN/" + System.nanoTime() + "/";

			// StringBuilder instead of StringBuffer: this is single-threaded, no synchronization needed.
			StringBuilder sb = new StringBuilder(hadoop_home);
			// isEmpty() guard avoids StringIndexOutOfBoundsException on an empty hadoop_home.
			if (hadoop_home.isEmpty() || hadoop_home.charAt(hadoop_home.length() - 1) != '/')
			{
				sb.append("/");
			}
			sb.append("bin/hadoop jar ");
			sb.append(jar_path);
			sb.append(" SingleHiddenLayerNNGradientBackpropagation ");
			sb.append(tempDir);
			if (null != codec_name)
			{
				sb.append(" ");
				sb.append(codec_name);
			}
			command = sb.toString();

			try
			{
				// The training data never change, so write them to HDFS once, up front.
				if (null == codec_name)
				{
					MatrixFunction.hdfswrite(new Path(tempDir + "X"), this.X);
					MatrixFunction.hdfswrite(new Path(tempDir + "y"), this.y);
				}
				else
				{
					codec = (CompressionCodec)ReflectionUtils.newInstance(Class.forName(codec_name), new Configuration());
					MatrixFunction.hdfswrite(new Path(tempDir + "X"), this.X, SequenceFile.CompressionType.BLOCK, codec);
					MatrixFunction.hdfswrite(new Path(tempDir + "y"), this.y, SequenceFile.CompressionType.BLOCK, codec);
				}
			}
			catch (Exception e)
			{
				// Fail fast: without the training data on HDFS, the map-reduce version cannot work.
				e.printStackTrace();
				System.exit(1);
			}
		}
	}

/* ------------------------
   Public Methods
 * ------------------------ */

	/**
	 * Calculate the cost and the partial derivatives for a given set of parameters.
	 * The first (hidden_layer_size X (input_layer_size + 1)) elements represent the weights of the input layer(including the bias unit) to the hidden layer.
	 * The last (num_labels X (hidden_layer_size + 1)) elements represent the weights of the hidden layer(including the bias unit) to the output layer.
	 *
	 * @param	parameter	a set of parameters
	 */
	public void evaluate(double[] parameter)
	{
		cost = 0;

		int theta1Size = (input_layer_size + 1) * hidden_layer_size;
		int theta2Size = (hidden_layer_size + 1) * num_labels;

		assert(parameter.length == (theta1Size + theta2Size));

		// Reshape parameter back into theta1 and theta2, the weight matrices for our single hidden layer neural network
		Matrix theta1 = MatrixFunction.reshape(parameter, 0, theta1Size - 1, hidden_layer_size, input_layer_size + 1);
		Matrix theta2 = MatrixFunction.reshape(parameter, theta1Size, parameter.length - 1, num_labels, hidden_layer_size + 1);

		// Setup some useful variables
		int m = X.getRowDimension();	// number of training cases

		// Feedforward cost: cost = -(1/m) * sum over cases and labels of [y*log(h) + (1-y)*log(1-h)]
		Matrix a2 = MatrixFunction.sigmoid(X.times(theta1.transpose()));
		a2 = MatrixFunction.appendH(new Matrix(m, 1, 1), a2);	// prepend the hidden-layer bias unit
		Matrix a3 = MatrixFunction.sigmoid(a2.times(theta2.transpose()));
		for (int c = 0; c < num_labels; c++)
		{
			Matrix y2 = MatrixFunction.equal(y, c);	// 1 where the target equals label c, else 0
			Matrix h_theta = a3.getMatrix(0, a3.getRowDimension() - 1, c, c);
			// (y2 - 1)' * log(1 - h) == -(1 - y2)' * log(1 - h), so the two lines accumulate the negative log-likelihood.
			cost += MatrixFunction.minus(y2, 1).transpose().times(MatrixFunction.log(MatrixFunction.minus(1, h_theta))).get(0, 0);
			cost -= y2.transpose().times(MatrixFunction.log(h_theta)).get(0, 0);
		}
		cost /= m;

		// Regularized cost: add (lambda / 2m) * sum of squared weights, excluding the bias weights (column 0).
		Matrix theta1a = theta1.getMatrix(0, hidden_layer_size - 1, 1, input_layer_size);
		Matrix theta2a = theta2.getMatrix(0, num_labels - 1, 1, hidden_layer_size);
		double square_sum_1 = MatrixFunction.sum(theta1a.arrayTimes(theta1a));
		double square_sum_2 = MatrixFunction.sum(theta2a.arrayTimes(theta2a));
		cost += ((square_sum_1 + square_sum_2) * lambda / (2 * m));

		// Gradient (Backpropagation)
		Matrix theta1_grad = new Matrix(hidden_layer_size, input_layer_size + 1);
		Matrix theta2_grad = new Matrix(num_labels, hidden_layer_size + 1);
		if (null == command)
		{
			// Standalone version: accumulate the gradient case by case.
			for (int t = 0; t < m; t++)
			{
				Matrix a1 = X.getMatrix(t, t, 0, input_layer_size);	// input activation (with bias), 1 X (input_layer_size + 1)
				Matrix z2 = a1.times(theta1.transpose());
				a2 = MatrixFunction.appendH(new Matrix(1, 1, 1), MatrixFunction.sigmoid(z2));
				Matrix z3 = a2.times(theta2.transpose());
				a3 = MatrixFunction.sigmoid(z3);

				Matrix y2 = new Matrix(1, num_labels);	// one-hot encoding of the target label
				y2.set(0, (int)y.get(t, 0), 1);
				Matrix delta3 = a3.minus(y2);	// output-layer error

				// Hidden-layer error; theta2a (theta2 without its bias column) matches z2's dimensions.
				Matrix delta2 = delta3.times(theta2a).arrayTimes(MatrixFunction.sigmoidGradient(z2));

				theta1_grad.plusEquals(delta2.transpose().times(a1));
				theta2_grad.plusEquals(delta3.transpose().times(a2));
			}
		}
		else
		{
			// Map-reduce version: ship the current weights to HDFS, run the job, read the gradients back.
			try
			{
				if (null == codec)
				{
					MatrixFunction.hdfswrite(new Path(tempDir + "theta1"), theta1);
					MatrixFunction.hdfswrite(new Path(tempDir + "theta2"), theta2);
				}
				else
				{
					MatrixFunction.hdfswrite(new Path(tempDir + "theta1"), theta1, SequenceFile.CompressionType.BLOCK, codec);
					MatrixFunction.hdfswrite(new Path(tempDir + "theta2"), theta2, SequenceFile.CompressionType.BLOCK, codec);
				}

				/* Map-Reduce Process Invocation.
				 * stdout is drained on a background thread while stderr is drained here.
				 * Draining the two pipes one after the other can deadlock: once the
				 * undrained pipe's buffer fills up, the child process blocks on its next
				 * write and never closes the pipe that is currently being read. */
				final Process p = Runtime.getRuntime().exec(command);

				final StringBuilder stdoutBuffer = new StringBuilder();
				Thread stdoutDrainer = new Thread(new Runnable()
				{
					public void run()
					{
						try (BufferedReader stdout = new BufferedReader(new InputStreamReader(p.getInputStream())))
						{
							String line;
							while ((line = stdout.readLine()) != null)
							{
								stdoutBuffer.append(line).append(System.lineSeparator());
							}
						}
						catch (IOException ioe)
						{
							ioe.printStackTrace();
						}
					}
				});
				stdoutDrainer.start();

				// Print stderr as it arrives; try-with-resources closes the reader (and pipe) afterwards.
				try (BufferedReader stderr = new BufferedReader(new InputStreamReader(p.getErrorStream())))
				{
					String line;
					System.out.println("<stderr>");
					while ((line = stderr.readLine()) != null)
					{
						System.out.println(line);
					}
					System.out.println("</stderr>");
				}

				stdoutDrainer.join();	// make sure all of stdout has been captured before printing it
				System.out.println("<stdout>");
				System.out.print(stdoutBuffer);
				System.out.println("</stdout>");

				int exitVal = p.waitFor();
				System.out.println("map-reduce process exit value: " + exitVal);

				theta1_grad = MatrixFunction.hdfsread(new Path(tempDir + "theta1_grad"));
				theta2_grad = MatrixFunction.hdfsread(new Path(tempDir + "theta2_grad"));
			}
			catch (Exception e)
			{
				// Fail fast, matching the constructor's error-handling contract.
				e.printStackTrace();
				System.exit(1);
			}
		}
		theta1_grad.timesEquals(1.0 / m);
		theta2_grad.timesEquals(1.0 / m);

		// Regularized gradient: the bias column (prepended here as zeros) is not regularized.
		theta1_grad.plusEquals(MatrixFunction.appendH(new Matrix(hidden_layer_size, 1), theta1a).times(lambda / m));
		theta2_grad.plusEquals(MatrixFunction.appendH(new Matrix(num_labels, 1), theta2a).times(lambda / m));

		// Unroll gradients
		gradient = MatrixFunction.unroll(theta1_grad, theta2_grad);
	}

	/**
	 * Get the cost of the set of parameters in the last invocation of evaluate(...).
	 *
	 * @return	the cost of the set of parameters in the last invocation of evaluate(...), or {@link Double}.POSITIVE_INFINITY if evaluate(...) has never been invoked
	 */
	public double getCost()
	{
		return cost;
	}

	/**
	 * Get the derivatives of the set of parameters in the last invocation of evaluate(...).
	 * The first (hidden_layer_size X (input_layer_size + 1)) elements represent the derivatives of weights of the input layer(including the bias unit) to the hidden layer.
	 * The last (num_labels X (hidden_layer_size + 1)) elements represent the derivatives of weights of the hidden layer(including the bias unit) to the output layer.
	 *
	 * @return	the derivatives of the set of parameters in the last invocation of evaluate(...), or null if evaluate(...) has never been invoked
	 */
	public double[] getGradient()
	{
		return gradient;
	}
}