package de.distMLP;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hama.HamaConfiguration;
import org.apache.hama.bsp.BSPJob;
import org.apache.hama.bsp.BSPJobClient;
import org.apache.hama.bsp.ClusterStatus;
import org.apache.hama.bsp.FileOutputFormat;
import org.apache.hama.bsp.TextInputFormat;
import org.apache.hama.bsp.TextOutputFormat;

import de.distMLP.train.Base_MLP_Trainer;
import de.distMLP.train.MLP_MiniBatchGD;
import de.distMLP.train.MLP_RPROP;

public class Train_MultilayerPerceptron {

	/** Base output directory; unique per JVM run so parallel/old jobs do not clash. */
	private static final String OUTPUTPATH = "/tmp/pi-" + System.currentTimeMillis();
	/** Output path of the current training iteration; (re)assigned by resetOutputPath(). */
	private static Path TMP_OUTPUT = null;

	private static final Log LOG = LogFactory.getLog(Train_MultilayerPerceptron.class);

	/**
	 * Splits the given text input file into {@code sizeOfCluster} sequence files so that
	 * every BSP task gets its own partition. Lines are distributed round-robin; trailing
	 * lines that would leave the partitions (or, with mini-batch learning, the batches)
	 * unevenly filled are dropped.
	 *
	 * @param sizeOfCluster   number of partitions to create (= number of BSP tasks)
	 * @param fileToPartition path of the text file to split
	 * @param conf            configuration used to obtain the file system
	 * @param batchSize       mini-batch size; 0 means full-batch learning
	 * @return comma-separated list of the created partition paths
	 * @throws IOException if the file system cannot be accessed or writing fails
	 */
	private static String partitionAdjacencyTextFile(final int sizeOfCluster, final String fileToPartition, final Configuration conf,
			final int batchSize) throws IOException {

		Train_MultilayerPerceptron.LOG.info("Creating " + sizeOfCluster + " splits from file " + fileToPartition);

		// setup the paths where the grooms can find their input
		final List<Path> partPaths = new ArrayList<Path>(sizeOfCluster);
		final List<SequenceFile.Writer> writers = new ArrayList<SequenceFile.Writer>(sizeOfCluster);
		final FileSystem fs = FileSystem.get(conf);

		final String paths = Train_MultilayerPerceptron.createInputPath(sizeOfCluster, fileToPartition, partPaths);
		// create a sequence writer per partition, replacing leftovers from earlier runs
		for (final Path p : partPaths) {
			fs.delete(p, true);
			writers.add(SequenceFile.createWriter(fs, conf, p, LongWritable.class, Text.class, CompressionType.NONE));
		}

		final int totalCount = Train_MultilayerPerceptron.countInput(fileToPartition, conf);
		// "rest" = number of trailing lines to drop so that every partition (and every
		// mini-batch, when batchSize > 0) ends up with exactly the same size
		final int rest;
		if (batchSize == 0) {
			rest = totalCount % sizeOfCluster;
		} else {
			final int tmprest = totalCount % sizeOfCluster;
			rest = ((((totalCount - tmprest) / sizeOfCluster) % batchSize) * sizeOfCluster) + tmprest;
		}

		try {
			Train_MultilayerPerceptron.distributeLines(fs, fileToPartition, writers, sizeOfCluster, totalCount - rest);
			for (final SequenceFile.Writer w : writers) {
				w.syncFs();
			}
		} finally {
			// close the writers even when reading or appending failed (fixes a resource leak)
			for (final SequenceFile.Writer w : writers) {
				w.close();
			}
		}
		return paths;
	}

	/**
	 * Reads the first {@code linesToWrite} lines of {@code fileToPartition} and appends
	 * them round-robin to the given writers. The key is the line length (kept for
	 * compatibility with the original split format), the value is the line itself.
	 *
	 * @throws IOException if reading or appending fails
	 */
	private static void distributeLines(final FileSystem fs, final String fileToPartition, final List<SequenceFile.Writer> writers,
			final int sizeOfCluster, final int linesToWrite) throws IOException {
		final BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(new Path(fileToPartition))));
		try {
			String line = null;
			int modCounter = 0;
			int lineCounter = 0;
			while ((line = br.readLine()) != null) {
				lineCounter++;
				if (lineCounter == (linesToWrite + 1)) {
					break; // remaining lines are deliberately dropped (see caller)
				}
				if (modCounter == sizeOfCluster) {
					modCounter = 0;
				}
				writers.get(modCounter).append(new LongWritable(line.length()), new Text(line));
				modCounter++;
			}
		} finally {
			// closing the reader also closes the underlying FSDataInputStream
			br.close();
		}
	}

	/**
	 * Counts the number of lines in the given file.
	 *
	 * @param fileToPartition path of the file to count
	 * @param conf            configuration used to obtain the file system
	 * @return the number of lines
	 * @throws IOException if the file cannot be read
	 */
	private static int countInput(final String fileToPartition, final Configuration conf) throws IOException {
		final FileSystem fs = FileSystem.get(conf);
		final BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(new Path(fileToPartition))));
		try {
			int lineCounter = 0;
			while (br.readLine() != null) {
				lineCounter++;
			}
			return lineCounter;
		} finally {
			// closing the reader also closes the underlying FSDataInputStream
			br.close();
		}
	}

	/**
	 * Builds the partition path names ("&lt;outputPath&gt;/&lt;file&gt;part0" ..
	 * "part&lt;n-1&gt;"), records each as a {@link Path} in {@code partPaths} and returns
	 * them as one comma-separated string.
	 */
	private static String createInputPath(final int sizeOfCluster, final String fileToPartition, final List<Path> partPaths) {
		final String inputPath = Train_MultilayerPerceptron.getInputPath(fileToPartition);

		final StringBuilder paths = new StringBuilder();
		for (int i = 0; i < sizeOfCluster; i++) {
			final String tmpPath = inputPath + "part" + i;
			partPaths.add(new Path(tmpPath));
			paths.append(tmpPath);
			if (i != (sizeOfCluster - 1)) {
				paths.append(',');
			}
		}
		return paths.toString();
	}

	/**
	 * Base name for the partition files: the last path component of the input file,
	 * re-rooted under {@link #OUTPUTPATH}. The leading slash is kept so the result is
	 * always "&lt;OUTPUTPATH&gt;/&lt;fileName&gt;".
	 */
	private static String getInputPath(final String fileToPartition) {
		final int index = fileToPartition.lastIndexOf('/');
		// guard against bare file names: lastIndexOf returns -1 and substring(-1) would throw
		final String fileName = (index < 0) ? ("/" + fileToPartition) : fileToPartition.substring(index);
		return Train_MultilayerPerceptron.OUTPUTPATH + fileName;
	}

	/**
	 * Same as {@link #createInputPath(int, String, List)} but without collecting the
	 * {@link Path} objects; used when the partitions already exist on disk.
	 */
	private static String createInputPath(final int sizeOfCluster, final String fileToPartition) {
		// delegate to the collecting overload; the Path objects are simply discarded
		return Train_MultilayerPerceptron.createInputPath(sizeOfCluster, fileToPartition, new ArrayList<Path>(sizeOfCluster));
	}

	/** Points {@link #TMP_OUTPUT} at a fresh per-iteration output directory. */
	private static void resetOutputPath(final int iterationNumber) {
		Train_MultilayerPerceptron.TMP_OUTPUT = new Path(Train_MultilayerPerceptron.OUTPUTPATH + "/" + iterationNumber);
	}

	/**
	 * Counts the training examples in the configured input file and stores the total in
	 * the configuration so the trainers can read it.
	 */
	private static void setTotalNbTrainingsExamples(final HamaConfiguration conf) throws IOException {
		final String inputFile = conf.get(de.distMLP.train.Configuration.TRAININGS_DATA_INPUT_PATH);
		final int totalNB = Train_MultilayerPerceptron.countInput(inputFile, conf);
		conf.setInt(de.distMLP.train.Configuration.TOTAL_NB_TRAININGSDATA, totalNB);
	}

	/**
	 * Entry point: configures and runs the BSP training job once, or — when
	 * REPEAT_WHOLE_TRAINING is set — repeatedly with a different seed and output
	 * directory per run.
	 */
	public static void main(final String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		Train_MultilayerPerceptron.resetOutputPath(0);

		HamaConfiguration conf = Helper.getConfig(args);
		Train_MultilayerPerceptron.setTotalNbTrainingsExamples(conf);

		final int repeatTraining = conf.getInt(de.distMLP.train.Configuration.REPEAT_WHOLE_TRAINING, 0);

		if (repeatTraining <= 0) {
			// single training run; always split the input
			final BSPJob bsp = Train_MultilayerPerceptron.createBSPJob(conf, true);

			final long startTime = System.currentTimeMillis();
			Train_MultilayerPerceptron.printOutput(conf, bsp, startTime);
		} else {
			for (int i = 1; i <= repeatTraining; i++) {
				// the input only needs to be split once; later runs reuse the partitions
				final boolean splitInput = (i == 1);
				Train_MultilayerPerceptron.resetOutputPath(i);
				conf = Helper.getConfig(args);
				conf.setInt(de.distMLP.train.Configuration.SEED_VALUE, i);

				final BSPJob bsp = Train_MultilayerPerceptron.createBSPJob(conf, splitInput);

				final long startTime = System.currentTimeMillis();
				Train_MultilayerPerceptron.printOutput(conf, bsp, startTime);
			}
		}
	}

	/**
	 * Runs the job to completion and, on success, prints its output and runtime.
	 * A failed job is logged instead of being silently ignored.
	 */
	public static void printOutput(final HamaConfiguration conf, final BSPJob bsp, final long startTime) throws IOException,
			InterruptedException, ClassNotFoundException {
		if (bsp.waitForCompletion(true)) {
			Helper.printOutput(conf, Train_MultilayerPerceptron.TMP_OUTPUT, false);
			System.out.println("saving output to: " + Train_MultilayerPerceptron.TMP_OUTPUT);
			System.out.println("Job Finished in " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
		} else {
			// previously a failed job produced no feedback at all
			Train_MultilayerPerceptron.LOG.error("BSP job did not complete successfully; no output was written to "
					+ Train_MultilayerPerceptron.TMP_OUTPUT);
		}
	}

	/**
	 * Assembles the BSP job: training method, input/output formats and paths, and the
	 * number of tasks (cluster maximum unless NUMBER_OF_BSP_TASKS is set).
	 *
	 * @param conf      the job configuration
	 * @param splitFile whether the input file must be (re-)partitioned
	 * @return the fully configured job, ready to submit
	 */
	private static BSPJob createBSPJob(final HamaConfiguration conf, final boolean splitFile) throws IOException {
		final Class<? extends Base_MLP_Trainer> bspClass = Train_MultilayerPerceptron.setTrainingMethod(conf);

		final BSPJobClient jobClient = new BSPJobClient(conf);
		final ClusterStatus cluster = jobClient.getClusterStatus(true);

		final BSPJob bsp = new BSPJob(conf, Train_MultilayerPerceptron.class);

		bsp.setJobName("Dist_MultilayerPerceptron");

		final int batchSize = conf.getInt(de.distMLP.train.Configuration.BATCH_SIZE, 0);

		Train_MultilayerPerceptron.setInputFiles(conf, cluster, bsp, splitFile, batchSize);

		bsp.setBspClass(bspClass);

		bsp.setInputKeyClass(LongWritable.class);
		bsp.setInputValueClass(Text.class);

		bsp.setOutputKeyClass(NullWritable.class);
		bsp.setOutputValueClass(Text.class);
		bsp.setOutputFormat(TextOutputFormat.class);
		FileOutputFormat.setOutputPath(bsp, Train_MultilayerPerceptron.TMP_OUTPUT);

		Train_MultilayerPerceptron.LOG.info("Maximum Number of tasks: " + cluster.getMaxTasks());
		Train_MultilayerPerceptron.LOG.info("Number of servers: " + cluster.getGroomServers());

		final int nbOf_BSPTasks = conf.getInt(de.distMLP.train.Configuration.NUMBER_OF_BSP_TASKS, 0);
		if (nbOf_BSPTasks == 0) {
			// 0 means "use as many tasks as the cluster offers"
			bsp.setNumBspTask(cluster.getMaxTasks());
		} else {
			bsp.setNumBspTask(nbOf_BSPTasks);
		}
		return bsp;
	}

	/**
	 * Selects the trainer class: RPROP when R_PROP is set, otherwise (mini-batch)
	 * gradient descent. Also writes BATCH_SIZE and BATCH_LEARNING back so downstream
	 * code always finds explicit values.
	 *
	 * @return the BSP class implementing the chosen training method
	 */
	private static Class<? extends Base_MLP_Trainer> setTrainingMethod(final HamaConfiguration conf) {
		Class<? extends Base_MLP_Trainer> bspClass;
		boolean batchLearning = true;
		final int batchsize = conf.getInt(de.distMLP.train.Configuration.BATCH_SIZE, 0);

		if (conf.getBoolean(de.distMLP.train.Configuration.R_PROP, false)) {
			bspClass = MLP_RPROP.class;
		} else {
			bspClass = MLP_MiniBatchGD.class;
			if (batchsize != 0) {
				// a concrete batch size means mini-batch, not full-batch, learning
				batchLearning = false;
			}
		}

		// write the (possibly defaulted) batch size back so it is always set explicitly
		conf.setInt(de.distMLP.train.Configuration.BATCH_SIZE, batchsize);
		conf.setBoolean(de.distMLP.train.Configuration.BATCH_LEARNING, batchLearning);
		return bspClass;
	}

	/**
	 * Wires the job's input: either the raw text file (SPLIT_INPUT unset) or one
	 * sequence-file partition per BSP task, creating the partitions first when
	 * {@code splitFile} is true.
	 */
	private static void setInputFiles(final HamaConfiguration conf, final ClusterStatus cluster, final BSPJob bsp, final boolean splitFile,
			final int batchSize) throws IOException {
		if (!conf.getBoolean(de.distMLP.train.Configuration.SPLIT_INPUT, false)) {
			bsp.setInputFormat(TextInputFormat.class);
			bsp.setInputPath(new Path(conf.get(de.distMLP.train.Configuration.TRAININGS_DATA_INPUT_PATH)));
		} else {
			int nbOf_BSPTasks = conf.getInt(de.distMLP.train.Configuration.NUMBER_OF_BSP_TASKS, 0);
			if (nbOf_BSPTasks == 0) {
				nbOf_BSPTasks = cluster.getMaxTasks();
			}
			final String inputFile = conf.get(de.distMLP.train.Configuration.TRAININGS_DATA_INPUT_PATH);
			final String pathList;
			if (splitFile) {
				pathList = Train_MultilayerPerceptron.partitionAdjacencyTextFile(nbOf_BSPTasks, inputFile, conf, batchSize);
			} else {
				// partitions already exist (repeat run); just rebuild the path list
				pathList = Train_MultilayerPerceptron.createInputPath(nbOf_BSPTasks, inputFile);
			}
			bsp.setInputFormat(org.apache.hama.bsp.SequenceFileInputFormat.class);
			bsp.set("bsp.input.dir", pathList);
			// FileInputFormat.addInputPaths(bsp, pathList);
		}
	}
}
