package de.distMLP.train;

import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hama.bsp.BSP;
import org.apache.hama.bsp.BSPPeer;
import org.apache.hama.bsp.sync.SyncException;
import org.apache.mahout.common.RandomUtils;

import de.distMLP.communication.BtreeMessageDistributor;
import de.distMLP.communication.MessageDistributor;
import de.distMLP.communication.SimpleMessageDistributor;
import de.distMLP.data.TrainingExample;
import de.distMLP.messages.DoubleMessage;
import de.distMLP.messages.HamaMessage;
import de.distMLP.messages.WeightMatrixMessage;
import de.mlp_distributed.mlp.core.MockMultiLayerPerceptron;
import de.mlp_distributed.mlp.core.MultiLayerPerceptron;
import de.mlp_distributed.mlp.core.SparseAutoencoder;
import de.mlp_distributed.mlp.math.Factory;
import de.mlp_distributed.mlp.math.mahout.Matrix;
import de.mlp_distributed.mlp.math.mahout.Vector;
import de.mlp_distributed.mlp.math.mahout.Vector.Element;

// KeyInput, ValueInput, KeyOutput, ValueOutput and MessageType
/**
 * Abstract Apache Hama BSP task that trains a {@link MultiLayerPerceptron} in a
 * distributed fashion: every peer computes gradients/costs on its local input
 * split, the master task (peer 0) aggregates them, updates the weights and
 * broadcasts the result. Subclasses implement the concrete training superstep.
 */
public abstract class Base_MLP_Trainer extends BSP<LongWritable, Text, NullWritable, Text, HamaMessage> {

	private static final Log LOG = LogFactory.getLog(Base_MLP_Trainer.class);

	// File suffix used for persisted networks.
	private static final String MLP_FILE_EXTENSION = ".mlp";
	// Network topology; read from the job configuration in setup().
	protected int inputUnits;
	protected int outputUnits;
	// Maximum number of iterations (set to Integer.MAX_VALUE when a target error is configured).
	protected int nbIterations;
	protected boolean mutualExclusiveClasses;
	protected int[] hiddenLayerNeuronCount;
	// Name of the peer elected as master (peer 0).
	protected String masterTask;
	protected boolean saveMLP = false;
	protected boolean loadMLP = false;
	protected String mlpInputPath;
	protected String mlpOutputPath;
	protected int iterationCount = 0;
	protected MultiLayerPerceptron mlp;

	// Snapshot of the weights taken before new gradients are applied (master only).
	protected Matrix[] weightBackup;
	// Checkpoint the network every saveInterval iterations when saveMLP is set.
	protected int saveInterval;
	protected double error;
	protected double lastError;
	protected int batchSize;

	// caching: once the input split has been fully read, examples are replayed from memory.
	protected boolean useCache = true;
	private boolean dataCached = false;
	private final List<TrainingExample> cache = new ArrayList<TrainingExample>();
	private int cacheCounter = 0;

	protected boolean batchLearning;

	// Denoise Input: fraction of non-zero inputs kept, and probability of keeping only one input.
	final double delRatio = 0.5, onlyOneInputRatio = 0.2;
	private boolean denoiseInput;

	// Use a binary-tree reduction instead of simple all-to-master messaging.
	protected boolean btreeCommunication;

	protected HashMap<Integer, String> peerMap = null;
	protected HashMap<String, Integer> peerToIntMap = null;

	protected boolean calculateErrorOnEachIteration;

	// cost time measure (milliseconds spent in calculateAndWriteCost)
	protected long maxCalculateCost = 0;
	protected long minCalculateCost = Integer.MAX_VALUE;
	protected long averageCalculateCost = 0;

	// Training stops once the cost falls below this value (disabled when <= 0).
	protected float targetError;

	protected MessageDistributor messageDistributor;

	protected boolean mockupTest = false;
	/**
	 * Master-only step: restores the last weight snapshot, applies the
	 * aggregated gradients received from the workers, optionally logs the
	 * batch cost, checkpoints the network and returns the updated weights.
	 *
	 * @param peer the BSP peer; non-master peers return immediately
	 * @param other aggregated gradient/cost message, may be {@code null} when
	 *            no update is available for this superstep
	 * @return the new weight message to broadcast, or {@code null} on
	 *         non-master peers
	 * @throws IOException if writing progress output or the checkpoint fails
	 */
	protected WeightMatrixMessage calculateWeightUpdate(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer,
			final WeightMatrixMessage other) throws IOException {
		if (!peer.getPeerName().equals(this.masterTask)) {
			return null;
		}

		// Start from the last consistent weight state before applying new gradients.
		this.mlp.initWeights(this.weightBackup);

		// BUGFIX: derive the stop flag defensively. The original dereferenced
		// 'other' when building the reply message below, outside the null
		// check, causing an NPE whenever no message was received.
		final boolean stopTraining = (other != null) && other.isStopTraining();

		if (other != null) {
			if (stopTraining) {
				// Cost is only correct doing batch learning
				if ((this.batchLearning == true) && this.calculateErrorOnEachIteration) {
					this.iterationCount++;
					this.lastError = this.error;
					this.error = other.getCost();
					peer.write(NullWritable.get(), new Text("Iteration: " + this.iterationCount + "| Date: " + new Date() + "| Cost: "
							+ (this.error)));
					Base_MLP_Trainer.LOG.info("Iteration-BigBatch: " + this.iterationCount + "| Date: " + new Date() + "| Cost: "
							+ (this.error));
				}
			}
			if (other.getMatrix() != null) {
				this.updateWeightMatrix(other.getMatrix());
			}

			try {
				this.saveMLP(peer, this.iterationCount);
			} catch (final URISyntaxException e) {
				LOG.error(e, e);
			}

			// Snapshot the updated weights for the next superstep.
			this.weightBackup = Training_Helper.getCopyOfMatrixArray(this.mlp.getWeights());
		}
		final WeightMatrixMessage msg = new WeightMatrixMessage(this.mlp.getWeights(), stopTraining, 0);
		return msg;
	}

	/**
	 * Applies the aggregated gradient matrices to the master's network by
	 * delegating to the MLP, which also folds in regularization and the
	 * learning rate. Called before the new weights are sent to all clients.
	 *
	 * @param gradients one gradient matrix per weight layer
	 */
	protected void updateWeightMatrix(final Matrix[] gradients) {
		this.mlp.assignRegularizationAndLearningrate(gradients);
	}

	/**
	 * Counts the training examples in this peer's input split, reports the
	 * count to the master and lets the master write the global total to the
	 * output. Two sync barriers keep all peers in lock step.
	 *
	 * @param peer the BSP peer whose input split is counted
	 * @param masterTask name of the master peer that aggregates the counts
	 * @return the number of examples readable by the local peer
	 */
	protected static double computeNumberOfLocalTrainingExamples(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer,
			final String masterTask) throws IOException, SyncException, InterruptedException {
		final LongWritable dummyKey = new LongWritable();
		final Text dummyValue = new Text();

		// Exhaust the local input split to obtain the local example count.
		double localCount = 0.0;
		while (peer.readNext(dummyKey, dummyValue)) {
			localCount++;
		}

		peer.send(masterTask, new DoubleMessage(localCount));
		peer.sync();

		// The master sums all reported counts and records the total.
		if (peer.getPeerName().equals(masterTask)) {
			double total = 0.0;
			while (peer.getNumCurrentMessages() > 0) {
				total += ((DoubleMessage) peer.getCurrentMessage()).getDoubleValue();
			}
			peer.write(NullWritable.get(), new Text("Total number of items: " + (total)));
		}
		peer.sync();
		return localCount;
	}

	/**
	 * Periodically checkpoints the network to the configured output path from
	 * the master task. A checkpoint is written every {@code saveInterval}
	 * iterations (skipping iteration 0) when saving is enabled; the iteration
	 * number is embedded in the file name.
	 *
	 * @param peer the BSP peer; non-master peers do nothing
	 * @param iterationNumber the current training iteration
	 */
	protected void saveMLP(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer, final int iterationNumber)
			throws IOException, URISyntaxException {
		if (!peer.getPeerName().equals(this.masterTask)) {
			return;
		}
		if (!this.saveMLP) {
			return;
		}
		if (((iterationNumber % this.saveInterval) != 0) || (iterationNumber == 0)) {
			return;
		}
		// Derive a per-iteration file name from the configured output path.
		final String basePath = this.mlpOutputPath.replace(Base_MLP_Trainer.MLP_FILE_EXTENSION, "");
		final String iterationPath = basePath + "_iteration" + iterationNumber + Base_MLP_Trainer.MLP_FILE_EXTENSION;
		peer.write(NullWritable.get(), new Text("peer: " + peer.getPeerName() + " writing file to: " + iterationPath));
		Training_Helper.writeNetwork(peer.getConfiguration(), new Path(iterationPath), this.mlp);
	}

	/**
	 * Recomputes the training-set cost across all peers and lets the master
	 * log it. Every peer re-reads its input, sends its partial cost to the
	 * master, and two sync barriers keep the supersteps aligned. The wall
	 * clock time of the whole calculation is folded into the timing stats.
	 *
	 * @param peer the BSP peer
	 * @param masterTask1 name of the master peer aggregating the cost
	 * @param mlp1 network evaluated on the training data
	 * @param batchSize global number of examples used for normalization
	 */
	protected void calculateAndWriteCost(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer, final String masterTask1,
			final MultiLayerPerceptron mlp1, final double batchSize) throws IOException, SyncException, InterruptedException {
		final long costTimerStart = System.currentTimeMillis();

		this.reopenInput(peer);
		this.calculateErrorOnTrainingsSet(peer, masterTask1, mlp1, batchSize);
		peer.sync();

		if (peer.getPeerName().equals(masterTask1)) {

			// Sum the partial costs reported by every peer.
			double summedCost = 0.0;
			while (peer.getNumCurrentMessages() > 0) {
				final DoubleMessage partial = (DoubleMessage) peer.getCurrentMessage();
				summedCost += partial.getDoubleValue();
			}
			this.iterationCount++;

			this.lastError = this.error;
			this.error = summedCost;
			peer.write(NullWritable.get(), new Text("Iteration: " + this.iterationCount + "| Date: " + new Date() + "| Cost: "
					+ (this.error)));
			Base_MLP_Trainer.LOG.info("Iteration: " + this.iterationCount + "| Date: " + new Date() + "| Cost: " + (this.error));
		}

		this.measureCalculateCost(costTimerStart);
		peer.sync();
	}

	/**
	 * Updates the min/max/aggregate timing statistics with the duration of one
	 * cost calculation.
	 *
	 * @param startCalculateCost start timestamp (ms) of the measured section
	 */
	private void measureCalculateCost(final long startCalculateCost) {
		final long elapsed = System.currentTimeMillis() - startCalculateCost;
		this.maxCalculateCost = Math.max(this.maxCalculateCost, elapsed);
		this.minCalculateCost = Math.min(this.minCalculateCost, elapsed);
		// "average" holds the running sum; divide by the iteration count on readout.
		this.averageCalculateCost += elapsed;
	}

	/**
	 * Get the cost without regularization cost.
	 *
	 * @param trainingExample containing target and feature vectors
	 * @param mlp network used for the forward pass
	 * @return cost of the single example under the current weights
	 */
	protected static double calculateError(final TrainingExample trainingExample, final MultiLayerPerceptron mlp) {
		final Vector prediction = mlp.classifyFull(trainingExample.getFeatureVector());
		return mlp.getCost(prediction, trainingExample.getTargetVector());
	}

	/**
	 * Accumulates the cost over every locally readable training example,
	 * skipping examples whose cost evaluates to NaN or infinity, normalizes by
	 * the global batch size and sends the partial cost to the master.
	 *
	 * @param peer the BSP peer providing the input
	 * @param masterTask1 destination peer for the partial cost
	 * @param mlp1 network evaluated on the examples
	 * @param batchSize global number of examples used for normalization
	 */
	@SuppressWarnings("boxing")
	private void calculateErrorOnTrainingsSet(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer,
			final String masterTask1, final MultiLayerPerceptron mlp1, final double batchSize) throws IOException {
		final TrainingExample example = new TrainingExample();
		double summedCost = 0.0;
		int validCount = 0;
		int invalidCount = 0;
		while (this.readNextTrainingExample(peer, example)) {
			final double exampleCost = Base_MLP_Trainer.calculateError(example, mlp1);
			if (Double.isInfinite(exampleCost) || Double.isNaN(exampleCost)) {
				invalidCount++;
			} else {
				summedCost += exampleCost;
				validCount++;
			}
		}
		Base_MLP_Trainer.LOG.info(peer.getPeerName() + " Cost: errors " + invalidCount + " correct: " + validCount);
		// Normalize by the (global) batch size, not by the local count.
		if (validCount > 0) {
			summedCost = summedCost / batchSize;
		}

		peer.send(masterTask1, new DoubleMessage(summedCost));
	}

	/**
	 * Reads the next training example, either from the BSP input split or,
	 * once the split has been fully read, from the in-memory cache (when
	 * caching is enabled). Optionally corrupts the feature vector for
	 * denoising-autoencoder training.
	 *
	 * @param peer the BSP peer providing the input split
	 * @param ex out-parameter that receives the next example
	 * @return {@code true} if an example was produced, {@code false} when the
	 *         input (and cache) is exhausted
	 * @throws IOException if reading from the input split fails
	 */
	protected boolean readNextTrainingExample(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer,
			final TrainingExample ex) throws IOException {
		// First pass (or caching disabled): stream examples from the input split.
		if (!this.useCache || !this.dataCached) {
			final LongWritable key = new LongWritable();
			final Text value = new Text();
			if (peer.readNext(key, value)) {
				final Vector targetVector = Training_Helper.getTargetVectorFromTrainingsdata(value, this.mlp.getNumberOfOutputUnits());
				final Vector featureVector = Training_Helper.getFeatureVectorFromTrainingsdata(value, this.mlp.getNumberOfInputUnits());
				if (this.denoiseInput == true) {
					ex.copy(this.denoiseInput(new TrainingExample(featureVector, targetVector)));
				} else {
					ex.assign(featureVector.clone(), targetVector.clone());
				}
				if (this.useCache) {
					// Cache the CLEAN example so denoising re-randomizes on replay.
					this.cache.add(new TrainingExample(featureVector, targetVector));
				}
				return true;
			}
			// Input split exhausted: subsequent reads are served from the cache.
			this.dataCached = true;
			return false;
		}
		// Replay pass: serve examples from the cache until the cursor reaches the end.
		// The cursor is reset by reopenInput().
		if (this.useCache && this.dataCached) {
			if (this.cacheCounter < this.cache.size()) {
				final TrainingExample tmp = this.cache.get(this.cacheCounter);
				if (this.denoiseInput == true) {
					ex.copy(this.denoiseInput(tmp));
				} else {
					ex.copy(tmp);
				}

				this.cacheCounter++;
				return true;
			}
		}

		return false;
	}

	/**
	 * Creates a denoising-autoencoder style corrupted copy of the example's
	 * feature vector: with probability {@code onlyOneInputRatio} exactly one
	 * random non-zero input is kept, otherwise roughly {@code delRatio} of the
	 * non-zero inputs are kept (sampled with replacement). The target vector
	 * is left untouched.
	 *
	 * @param ex the clean training example
	 * @return a new example with a corrupted feature vector, or {@code ex}
	 *         itself when it has too few non-zero inputs to corrupt
	 */
	private TrainingExample denoiseInput(final TrainingExample ex) {
		// Count the non-zero feature entries; they are the corruption candidates.
		final Iterator<Element> iterator = ex.getFeatureVector().iterateNonZero();
		int dataSize = 0;
		while (iterator.hasNext()) {
			iterator.next();
			dataSize++;
		}
		if (dataSize <= 2) {
			// nothing to denoize
			return ex;
		}

		final TrainingExample denoisedTE = new TrainingExample();
		final Vector targetVector = ex.getTargetVector();
		final Vector featureVector = Factory.getInstance().getVectorFactory().construct(ex.getFeatureVector().size());

		final Random gen = RandomUtils.getRandom();

		// BUGFIX: elements must be sampled from the FEATURE vector. The
		// original indexed the TARGET vector with positions derived from the
		// feature vector's non-zero count, returning wrong elements — or null,
		// leading to an NPE — whenever the target has fewer non-zeros.
		if (this.onlyOneInputRatio > gen.nextDouble()) {
			final int elementNumber = gen.nextInt(dataSize);
			final Element element = Base_MLP_Trainer.getElement(ex.getFeatureVector(), elementNumber);
			if (element != null) {
				featureVector.set(element.index(), element.get());
			}
		} else {
			final int maxDeleted = (int) (dataSize * this.delRatio);
			for (int j = 0; j < maxDeleted; j++) {
				final int elementNumber = gen.nextInt(dataSize);
				final Element element = Base_MLP_Trainer.getElement(ex.getFeatureVector(), elementNumber);
				if (element != null) {
					featureVector.set(element.index(), element.get());
				}
			}
		}

		denoisedTE.assign(featureVector, targetVector);
		return denoisedTE;
	}

	/**
	 * Returns the {@code i}-th non-zero element of the vector in iteration
	 * order, or {@code null} if the vector has fewer than {@code i + 1}
	 * non-zero elements.
	 *
	 * @param v vector to scan
	 * @param i zero-based position among the non-zero elements
	 */
	private static Element getElement(final Vector v, final int i) {
		int position = 0;
		for (final Iterator<Element> it = v.iterateNonZero(); it.hasNext();) {
			final Element current = it.next();
			if (position == i) {
				return current;
			}
			position++;
		}
		return null;
	}

	/**
	 * Restarts reading of the input split from the beginning and resets the
	 * cache cursor so cached examples are replayed from the first entry.
	 *
	 * @param peer the BSP peer whose input is reopened
	 * @throws IOException if reopening the input split fails
	 */
	protected void reopenInput(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer) throws IOException {
		peer.reopenInput();
		this.cacheCounter = 0;
	}

	/**
	 * Reads all training parameters from the job configuration, elects peer 0
	 * as master, creates the message distributor and initializes (or loads)
	 * the network. The master additionally snapshots the initial weights.
	 *
	 * @throws IOException if MLP initialization fails
	 */
	@Override
	public void setup(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer) throws IOException, SyncException,
			InterruptedException {
		final Configuration conf = peer.getConfiguration();
		// Choose one as a master
		this.masterTask = peer.getPeerName(0);
		this.inputUnits = conf.getInt(de.distMLP.train.Configuration.NB_INPUT_UNITS, 10);
		this.outputUnits = conf.getInt(de.distMLP.train.Configuration.NB_OUTPUT_UNITS, 10);

		this.batchLearning = conf.getBoolean(de.distMLP.train.Configuration.BATCH_LEARNING, false);

		this.mutualExclusiveClasses = conf.getBoolean(de.distMLP.train.Configuration.MUTUALLY_EXCLUSIVE_CLASSES, false);
		this.nbIterations = conf.getInt(de.distMLP.train.Configuration.NB_ITERATIONS, 10);

		this.hiddenLayerNeuronCount = Training_Helper.getNumberOfHiddenUnits(conf);
		this.loadMLP = conf.getBoolean(de.distMLP.train.Configuration.LOAD_MLP, false);
		this.saveMLP = conf.getBoolean(de.distMLP.train.Configuration.SAVE_MLP, false);
		this.mlpInputPath = conf.get(de.distMLP.train.Configuration.MLP_INPUT_PATH);
		this.mlpOutputPath = conf.get(de.distMLP.train.Configuration.MLP_OUTPUT_PATH);

		this.saveInterval = conf.getInt(de.distMLP.train.Configuration.SAVE_INTERVAL, 10);

		this.batchSize = conf.getInt(de.distMLP.train.Configuration.BATCH_SIZE, 1);

		this.denoiseInput = conf.getBoolean(de.distMLP.train.Configuration.DENOISE_INPUT, false);

		this.btreeCommunication = conf.getBoolean(de.distMLP.train.Configuration.USE_BINARY_TREE_COLLECTIVE_COMMUNICATION, false);

		this.targetError = conf.getFloat(de.distMLP.train.Configuration.TARGET_ERROR, -1.0f);

		// Pick the collective-communication strategy for weight/gradient exchange.
		if (this.btreeCommunication) {
			this.messageDistributor = new BtreeMessageDistributor(peer, this.masterTask);
		} else {
			this.messageDistributor = new SimpleMessageDistributor(peer, this.masterTask);
		}

		this.calculateErrorOnEachIteration = conf.getBoolean(de.distMLP.train.Configuration.CALCULATE_ERROR_ON_EACH_ITERATION, true);

		// A positive target error turns the iteration bound into "until converged".
		if (this.targetError > 0) {
			this.calculateErrorOnEachIteration = true;
			this.nbIterations = Integer.MAX_VALUE;
		}

		this.mockupTest = conf.getBoolean(de.distMLP.train.Configuration.MOCKUP_TEST, false);

		try {
			this.initializeMLP(peer);
		} catch (final Exception e) {
			// BUGFIX: this exception was silently swallowed, leaving this.mlp
			// null and guaranteeing an obscure NPE later. Log and fail fast.
			LOG.error("Failed to initialize the MLP", e);
			throw new IOException("Failed to initialize the MLP", e);
		}

		if (peer.getPeerName().equals(this.masterTask)) {
			this.weightBackup = Training_Helper.getCopyOfMatrixArray(this.mlp.getWeights());
		}
	}

	/**
	 * Creates the network: either loads a previously trained model, builds a
	 * lightweight mock for communication tests, or constructs and randomly
	 * initializes a fresh (sparse-autoencoder or standard) MLP from the job
	 * configuration.
	 *
	 * @param peer the BSP peer supplying the job configuration
	 * @throws Exception if loading the persisted network fails
	 */
	private void initializeMLP(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer) throws Exception {
		if (this.loadMLP) {
			this.mlp = Training_Helper.readNetwork(peer.getConfiguration(), new Path(this.mlpInputPath));
		} else if (this.mockupTest) {
			this.mlp = new MockMultiLayerPerceptron(this.inputUnits, this.outputUnits, this.hiddenLayerNeuronCount,
					this.mutualExclusiveClasses);
			this.mlp.initWeightsRandomly(null);
		} else {
			// Hyper parameters.
			final double momentum = Double.parseDouble(peer.getConfiguration().get(de.distMLP.train.Configuration.MOMENTUM, "0.8"));
			final double regularization = Double.parseDouble(peer.getConfiguration().get(de.distMLP.train.Configuration.REGULARIZATION,
					"0.01"));
			final double learningRate = Double.parseDouble(peer.getConfiguration().get(de.distMLP.train.Configuration.LEARNINGRATE, "0.1"));

			final double eta = Double.parseDouble(peer.getConfiguration().get(de.distMLP.train.Configuration.ADAGRAD_ETA, "0.01"));
			final boolean useAdagrad = peer.getConfiguration().getBoolean(de.distMLP.train.Configuration.USE_ADAGRAD, false);
			final boolean useTestSeed = peer.getConfiguration().getBoolean(de.distMLP.train.Configuration.USE_TEST_SEED, false);
			this.useCache = peer.getConfiguration().getBoolean(de.distMLP.train.Configuration.USE_CACHE, false);

			if (peer.getConfiguration().getBoolean(de.distMLP.train.Configuration.SPARSE_AUTOENCODER, false)) {
				this.mlp = new SparseAutoencoder(this.inputUnits, momentum);
			} else {

				this.mlp = new MultiLayerPerceptron(this.inputUnits, this.outputUnits, this.hiddenLayerNeuronCount,
						this.mutualExclusiveClasses, momentum);
			}
			if (!peer.getConfiguration().getBoolean(de.distMLP.train.Configuration.R_PROP, false)) {
				this.mlp.setUseAdagrad(useAdagrad);
			} else {
				// RPROP does not use a learning rate.
				this.mlp.setUseAdagrad(false);
			}
			// Randomizer selection for the initial weights.
			final boolean useNgWidrow = peer.getConfiguration().getBoolean(de.distMLP.train.Configuration.NG_WIDROW_RANDOMIZER, false);
			final boolean useLorotBengio = peer.getConfiguration()
					.getBoolean(de.distMLP.train.Configuration.LOROT_BENGIO_RANDOMIZER, false);
			this.mlp.setUseLorotBengioRandomizer(useLorotBengio);
			this.mlp.setUseNgWidrowRandomizer(useNgWidrow);

			this.mlp.setEta(eta);
			this.mlp.learningRate(learningRate);
			this.mlp.regularization(regularization);

			final int seedValue = peer.getConfiguration().getInt(de.distMLP.train.Configuration.SEED_VALUE, -1);

			// An explicit seed takes precedence over the generic test seed.
			if (useTestSeed && (seedValue == -1)) {
				RandomUtils.useTestSeed();
			}
			final Random gen;
			if (seedValue == -1) {
				gen = RandomUtils.getRandom();
			} else {
				gen = RandomUtils.getRandom(seedValue);
			}
			this.mlp.initWeightsRandomly(gen);
			// Removed leftover debug statement: System.out.println();
		}
	}

	/**
	 * Reads the configured global number of training examples.
	 *
	 * @param peer the BSP peer supplying the job configuration
	 * @return the total number of training examples (defaults to 1)
	 */
	protected static int getTotalBatchSize(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer) {
		final Configuration conf = peer.getConfiguration();
		return conf.getInt(de.distMLP.train.Configuration.TOTAL_NB_TRAININGSDATA, 1);
	}

	/**
	 * Persists the final trained network to the configured output path when
	 * saving is enabled. Only the master task writes; all other peers do
	 * nothing.
	 *
	 * @throws IOException if writing the network to the file system fails
	 */
	@Override
	public void cleanup(final BSPPeer<LongWritable, Text, NullWritable, Text, HamaMessage> peer) throws IOException {
		if (peer.getPeerName().equals(this.masterTask)) {
			if (this.saveMLP) {
				try {
					Training_Helper.writeNetwork(peer.getConfiguration(), new Path(this.mlpOutputPath), this.mlp);
				} catch (final URISyntaxException e) {
					// BUGFIX: log the exception itself (keeping its stack trace)
					// instead of passing it as the message with a possibly-null cause.
					LOG.error("Failed to save the trained MLP to " + this.mlpOutputPath, e);
				}
			}
		}
	}
}
