/*
 *  $Id: ContextManager.h 3276 2011-02-11 11:00:34Z evert $
 *
 *  Created on: Dec 17, 2010
 *      Author: ehaasdi
 *
 * Copyright (c) 2010, VU University Amsterdam
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the <organization> nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY <copyright holder> ''AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef CONTEXTMANAGER_H_
#define CONTEXTMANAGER_H_

#include "Context.h"
//#include <genetics/Context.h>
#include <utilities/Logger.h>

#include <algorithm>
#include <cmath>
#include <fstream>
#include <functional>
#include <iostream>
#include <list>
#include <numeric>
#include <set>
#include <vector>

#include <boost/foreach.hpp>

namespace Genetics
{
/**
 * Maintains the list of known contexts over a run, creating new contexts as needed.
 */
template<typename real, unsigned dimensions>
class ContextManager
{
public:
	// --- Logging state --------------------------------------------------
	/// Per-iteration CSV log of considered points (opened as logs/historylog.log).
	std::ofstream _historylog;
	/// Log of context merges (opened as logs/mergelog.log).
	std::ofstream _mergelog;
	/// Number of calls to consider() while the very first history buffer fills up.
	int _firstCount;
	/// True until printToHistoryLog() has written the CSV header row once.
	bool _firstIteration;

	/// Convenience typedefs for the context type in use.
	typedef Context<real, dimensions> context;
	typedef std::vector<context> contexts;
	typedef typename contexts::iterator contextIterator;
	typedef typename context::Point Point;
	/**
	 * Sets up an empty manager and opens the two log files.
	 *
	 * @param minSize minimum number of points buffered in the history before
	 *        a clustering decision is taken (default 10; used to be 30).
	 * @param minProbability similarity threshold: when even the best-matching
	 *        context scores below this, a new context is created.
	 *
	 * NOTE(review): the "logs" directory must already exist, otherwise the
	 * ofstream::open calls fail silently and all logging output is lost.
	 */
	//CHANGED MINPROB VALUE RECENTLY AS DITCHES WERE NOT DETECTED!!!
    ContextManager(unsigned minSize = 10, double minProbability = 0.0001) : //default: minSize = 30 & minProbability = 0.0001
    	// Fix: members listed in declaration order. Initialisation always
    	// happens in declaration order, so the previous ordering triggered
    	// -Wreorder and was misleading about what runs first.
    	_firstCount(0),
    	_firstIteration(true),
    	_idCounter(0),
    	_currentContextID(-1),
    	_bestMergeProbFound(1),
    	_cmiteration(1),
		_currentContextPosition(0),
		_historySize(minSize),
		_minimumProbability(minProbability)
		//_log(LIOUtilities::Logger::getCategory("ContextManager")), //DISABLED FOR RUNNING ON LISA
	{
    	_historylog.open ("logs/historylog.log");
    	_mergelog.open ("logs/mergelog.log");
		//_log.setPriority(log4cpp::Priority::NOTICE); //DISABLED FOR RUNNING ON LISA
	}

	/**
	 * Add point \a x to the history. If enough points (i.e. more than \a _historySize)
	 * are in the history, the oldest point in the history is clustered into
	 * the proper context.
	 * Will create a new context from all points in the history if needed.
	 * The current context switches to the (possibly new) context
	 * to which the oldest history point belongs.
	 *
	 * @return the index of the current context. Note that if this is 0, it is
	 * possible that no context has been identified yet.
	 */
	size_t consider(const Point& x)
	{
		// Global call counter; only used as a run statistic.
		++_cmiteration;
		//here we continue normally
		history.push_back(x);

		// Phase 1: while the history buffer is still filling up, only log and
		// age the clusters -- no clustering decision is taken for this point.
		if (history.size() < _historySize){
			//do some serious logging
			//need corresponding_cluster,cluster_n,context_count,history_size,p_value,best_prob
			//_historylog << _currentContextID << "," ;

			_firstCount ++;
			if(_firstCount<_historySize){//the first _historySize iterations, you're missing the context n, so don't print it
				printToHistoryLog(x);
			}else{
				// NOTE(review): indexes _contexts[_currentContextPosition];
				// assumes at least one context exists once _firstCount has
				// reached _historySize -- confirm this holds for short runs.
				printToHistoryLog(x, _contexts[_currentContextPosition].getN());
				updateClusterUsage(_contexts[_currentContextPosition]);
				updateClustersBasedOnAge();
			}

			//return _currentContextPosition;
			// NOTE(review): _currentContextID is an int that starts at -1; the
			// size_t return type wraps -1 to SIZE_MAX. Callers must treat the
			// "no context yet" value accordingly.
			return _currentContextID;
		}// else

		//
		//verify that history is normal distribution, otherwise discard oldest history point and return
		//
		// Candidate context built from the entire history; it receives the
		// next free ID in case it is actually kept below.
		context historyAsContext(history.begin(), history.end(), _idCounter+1);

		// Debug-only loop; the statement inside is disabled.
		BOOST_FOREACH(Point p, history)
		{
			//_log.debugStream() << "(x,y): (" << p[0] << ',' << p[1] << ")"; //DISABLED FOR RUNNING ON LISA
		}

		//CURRENTLY, THE NORMAL DISTRIBUTION TEST HAS BEEN DISABLED
/*		double p = ksTest(history.begin(), history.end(), historyAsContext); //doing this here seems inefficient
		//std::cout << p  << " ";
		if (ksTest(history.begin(), history.end(), historyAsContext) <= 0.1) //default: 0.1
		{
			//_log.debugStream() << "History not normally distributed (likelihood "<< p << "), discarding oldest entry"; //DISABLED FOR RUNNING ON LISA

			history.pop_front();

			//do some serious logging
			printToHistoryLog(x);

			updateClusterUsage(_contexts[_currentContextPosition]);
			updateClustersBasedOnAge();

			//return _currentContextPosition;
			return _currentContextID;
		}*/

		//
		// Find closest context
		//
		/*double bestProbability(0.0);

		//std::cout<< "First: Current cluster: " << _currentContextPosition <<" ";
		for (size_t i = 0; i < _contexts.size(); ++i)
		{
			double probability = ksTestprint(history.begin(), history.end(), _contexts[i]);
			//std::cout<< i<< ": "<<probability<< "; ";
			if (probability > bestProbability)
			{
				bestProbability = probability;
				_currentContextPosition = i;
				_currentContextID = _contexts[_currentContextPosition].getID();
			}
		}*/

		// Side effect: updates _currentContextPosition/_currentContextID to the
		// best-matching existing context.
		double bestProbability = getClosestClusterBasedOnKSTest();
		//double bestProbability = getClosestClusterBasedOnSimpleComparison(historyAsContext);

		//CURRENTLY, IT SEEMS THAT BESTPROBABILITY IS ZERO WHEN DATA IS EXACTLY THE SAME! THIS SHOULD NOT BE THE CASE!

		//std::cout << std::endl;
		//std::cout << bestProbability<<std::endl;

		//if even the most similar cluster is not similar enough (minimumprob defines how similar they should at least be), create a new cluster
		if (bestProbability < _minimumProbability)
		//if (bestProbability > 0.1)
		{
			//std::cerr << bestProbability <<std::endl;
			// create new context from history
			_contexts.push_back(historyAsContext);
			_idCounter++;
			_currentContextID = _idCounter;
			_currentContextPosition = _contexts.size() - 1;
			history.clear();
		} else {
			// add point to best fitting context and remove from history
			_contexts[_currentContextPosition].addPoint(*(history.begin()));
			history.pop_front();

			//merge contexts if close enough
			clusterMergingBasedOnSimpleComparison();
			//clusterMergingBasedOnKSTest();

/*			//below: check to see if the cluster matches another sufficiently. If so, merge clusters
			for (size_t i = 0; i < _contexts.size(); ++i)
			{
				double compareProb(0.0);
				compareProb = ksTestDoubleContext(_contexts[i]); //CURRENTLY NEW WELCH TEST APPLIED
				//compareProb = welchTTestMax(_contexts[i], _contexts[_currentContextPosition]);
				if((compareProb >= 0.99) && (_contexts[i].getID() != _currentContextID)){ //other often used setting: 0.95
				//if((compareProb <= 0.05) && (_contexts[i].getID() != _currentContextID)){
					std::cerr << "merge!";
					//make sure the remaining cluster gets the lowest ID
					if(_contexts[i].getID()< _currentContextID){
						_mergelog << _contexts[i].getID() << " merged with " << _currentContextID << " with prob: " << compareProb;
						_mergelog << std::endl;

						_contexts[i].absorb(_contexts[_currentContextPosition]);
						_contexts.erase(_contexts.begin()+_currentContextPosition);
						_currentContextPosition = i;
						_currentContextID = _contexts[i].getID();
					}else{
						_mergelog << _currentContextID << " merged with " << _contexts[i].getID() << " with prob: " << compareProb;
						_mergelog << std::endl;

						_contexts[_currentContextPosition].absorb(_contexts[i]);
						_contexts.erase(_contexts.begin()+i);
					}
				}
				if(compareProb<_bestMergeProbFound){
					_bestMergeProbFound = compareProb;
				}
			}*/
		}

		//do some serious logging
		printToHistoryLog(x, _contexts[_currentContextPosition].getN(), bestProbability);

		//update cluster usages and remove old clusters
		updateClusterUsage(_contexts[_currentContextPosition]);
		updateClustersBasedOnAge();

		//std::cerr << averageDistanceSigma(_contexts[_currentContextPosition], _contexts[_currentContextPosition]);
		//		std::cerr<<std::endl;

/*		std::cerr << _contexts.size() ;
				std::cerr << std::endl;*/
		//return _currentContextPosition;

		return _currentContextID;
	}

	/**
	 * Prints details of all _contexts on \a os
	 */
	std::ostream& printOn(std::ostream& os)
	{
		BOOST_FOREACH(contextIterator context, _contexts)
		{
			context->printOn(os);
		}

		return os;
	}

    /// Returns a snapshot (by value) of the currently known contexts.
    std::vector<context> getContexts() const
    {
        std::vector<context> snapshot(_contexts);
        return snapshot;
    }

    /// Number of points buffered before a clustering decision is taken.
    /// (Stored as unsigned; narrowed to int here, as the interface requires.)
    int getHistorySize(){
    	return static_cast<int>(_historySize);
    }

    /// Similarity threshold below which a new context is created.
    double getMinimumProbability(){
    	const double threshold = _minimumProbability;
    	return threshold;
    }

    /// Number of contexts (clusters) currently maintained.
    /// The cast is made explicit: size() returns size_t, the interface
    /// returns int, and the implicit narrowing drew compiler warnings.
    int getClusterCount(){
    	return static_cast<int>(_contexts.size());
    }

#ifndef UNITTEST
private:
#endif

    /// Known contexts (the clusters identified so far)
	contexts _contexts;

	/// Monotonically increasing source of unique context IDs
	int _idCounter;
	/// ID of the context currently considered active (-1 = none yet)
	int _currentContextID;
	/// Smallest merge probability/distance seen so far (logging statistic only)
	double _bestMergeProbFound;
	/// Number of calls to consider() (starts at 1)
	int _cmiteration;

	/// Last \a _historySize points visited
	std::list<Point> history;

	/// index of the current context in _contexts
	size_t _currentContextPosition;

	/// History size
	unsigned _historySize;

	/// Probability threshold for clustering
	double _minimumProbability;

	//log4cpp::Category& _log; //DISABLED FOR RUNNING ON LISA

	double getClosestClusterBasedOnKSTest(){
		double bestProbability(0.0);

		//std::cout<< "First: Current cluster: " << _currentContextPosition <<" ";
		for (size_t i = 0; i < _contexts.size(); ++i)
		{
			double probability = ksTestprint(history.begin(), history.end(), _contexts[i]);
			//std::cout<< i<< ": "<<probability<< "; ";
			if (probability > bestProbability)
			{
				bestProbability = probability;
				_currentContextPosition = i;
				_currentContextID = _contexts[_currentContextPosition].getID();
			}
		}

		return bestProbability;
	}

	/**
	 * Selects the context whose per-dimension means are on average closest to
	 * \a historyAsContext and makes it the current context.
	 *
	 * @return the average distance in means to that context, or 1 when no
	 *         context exists yet (so the caller will create a fresh one).
	 *
	 * NOTE(review): the std::cerr statements below look like leftover debug
	 * output -- confirm whether they should be removed.
	 */
	double getClosestClusterBasedOnSimpleComparison(context& historyAsContext){
		if(_contexts.size()<1){
			std::cerr<<"here!";
			return 1;
		}
		// mode 1 = closest by AVERAGE difference in means per dimension
		int closestContext = getClostestContextPosition(historyAsContext,1);
		double compareValue = averageDistanceMeans(historyAsContext, _contexts[closestContext]);
		_currentContextPosition = closestContext;
		_currentContextID = _contexts[_currentContextPosition].getID();
		std::cerr << _currentContextID <<std::endl;
		return compareValue;
	}

	/**
	 * Merges the current context with any context that the (Welch-flavoured)
	 * KS comparison finds sufficiently similar (probability >= 0.975). The
	 * surviving context keeps the lowest ID. Also tracks the smallest merge
	 * probability seen in _bestMergeProbFound (logging statistic).
	 *
	 * NOTE(review): this method erases from _contexts while iterating by
	 * index. After an erase at position i the loop still does ++i, skipping
	 * the element shifted into slot i; erasing at _currentContextPosition can
	 * also leave that index stale for the rest of the loop. The method is not
	 * currently called from consider() (clusterMergingBasedOnSimpleComparison
	 * is used instead), so this is flagged rather than changed.
	 */
	void clusterMergingBasedOnKSTest(){
		//below: check to see if the cluster matches another sufficiently. If so, merge clusters
		for (size_t i = 0; i < _contexts.size(); ++i)
		{
			double compareProb(0.0);
			compareProb = ksTestDoubleContext(_contexts[i]); //CURRENTLY NEW WELCH TEST APPLIED
			//compareProb = welchTTestMax(_contexts[i], _contexts[_currentContextPosition]);
			if((compareProb >= 0.975) && (_contexts[i].getID() != _currentContextID)){ //other often used setting: 0.95
			//if((compareProb <= 0.05) && (_contexts[i].getID() != _currentContextID)){
				std::cerr << "merge!";
				//make sure the remaining cluster gets the lowest ID
				if(_contexts[i].getID()< _currentContextID){
					_mergelog << _contexts[i].getID() << " merged with " << _currentContextID << " with prob: " << compareProb;
					_mergelog << std::endl;

					_contexts[i].absorb(_contexts[_currentContextPosition]);
					_contexts.erase(_contexts.begin()+_currentContextPosition);
					_currentContextPosition = i;
					_currentContextID = _contexts[i].getID();
				}else{
					_mergelog << _currentContextID << " merged with " << _contexts[i].getID() << " with prob: " << compareProb;
					_mergelog << std::endl;

					_contexts[_currentContextPosition].absorb(_contexts[i]);
					_contexts.erase(_contexts.begin()+i);
				}
			}
			if(compareProb<_bestMergeProbFound){
				_bestMergeProbFound = compareProb;
			}
		}
	}

	/**
	 * Finds the context closest (by average difference in means) to the
	 * current one and merges the two when the comparison value passes the
	 * threshold. The surviving context keeps the lowest ID.
	 *
	 * NOTE(review): compareValue is a DISTANCE (averageDistanceMeans), yet
	 * the merge fires when compareValue >= 0.1 -- i.e. when the closest
	 * context is comparatively FAR away. For a distance metric one would
	 * expect <= here; confirm against the intended behaviour before changing.
	 */
	void clusterMergingBasedOnSimpleComparison(){
		//below: check to see if the cluster matches another sufficiently. If so, merge clusters

		//ENABLE for merging based on avg difference
		int closestContext = getClostestContextPosition(_contexts[_currentContextPosition],1);
		double compareValue = averageDistanceMeans(_contexts[_currentContextPosition], _contexts[closestContext]);

		//ENABLE below for merging based on max difference
		//int closestContext = getClostestContextPosition(_contexts[_currentContextPosition],2);
		//double compareValue = maximumDistanceMeans(_contexts[_currentContextPosition], _contexts[closestContext]);

		if((compareValue >= 0.1) && (_contexts[closestContext].getID() != _currentContextID)){
			std::cerr << "merge!";
			//make sure the remaining cluster gets the lowest ID
			if(_contexts[closestContext].getID()< _currentContextID){
				_mergelog << _contexts[closestContext].getID() << " merged with " << _currentContextID << " with prob: " << compareValue;
				_mergelog << std::endl;

				_contexts[closestContext].absorb(_contexts[_currentContextPosition]);
				_contexts.erase(_contexts.begin()+_currentContextPosition);
				_currentContextPosition = closestContext;
				_currentContextID = _contexts[closestContext].getID();
			}else{
				_mergelog << _currentContextID << " merged with " << _contexts[closestContext].getID() << " with prob: " << compareValue;
				_mergelog << std::endl;

				_contexts[_currentContextPosition].absorb(_contexts[closestContext]);
				_contexts.erase(_contexts.begin()+closestContext);
			}
		}
	}

	//Welch's t statistic comparing two contexts, maximised over dimensions.
	//Please note that this test cannot be seen as reliable when the contexts
	//are not based on normally distributed data.
	double welchTTestMax(context& context1, context& context2){
		double largestT = 0;
		for (unsigned dim = 0; dim < dimensions; ++dim){
			const double meanDifference = fabs(context1.getMu()[dim] - context2.getMu()[dim]);
			const double pooledError = sqrt(
					(context1.getVariance()[dim] / context1.getN())
					+ (context2.getVariance()[dim] / context2.getN()));
			const double t = meanDifference / pooledError;
			if(t > largestT){
				largestT = t;
			}
		}
		return largestT;
	}

	/**
	 * Ages the contexts: every context other than the current one gets its
	 * unused-iteration counter incremented and is deleted once it has gone
	 * unused for more than 100000 iterations; the current context's counter
	 * is reset to 0. Afterwards _currentContextPosition is re-synchronised
	 * with the (possibly shrunk) context vector.
	 */
	void updateClustersBasedOnAge(){
		//update unused iterations of clusters that are not currently in use
		for (size_t i = 0; i < _contexts.size(); /* increment in body */){
			if(_contexts[i].getID()!=_currentContextID){
				//increment counter for unused clusters
				_contexts[i].setUnusedIterations(_contexts[i].getUnusedIterations()+1);
				if(_contexts[i].getUnusedIterations()>100000){
					//remove context if it has not been used for a long time
					_contexts.erase(_contexts.begin()+i);
					std::cerr << "delete!";
					// Bug fix: do NOT advance i after an erase -- erase()
					// shifted the next element into slot i, and the old code's
					// unconditional ++i skipped it.
					continue;
				}
			}else{
				//reset counter for the current cluster
				_contexts[i].setUnusedIterations(0);
			}
			++i;
		}

		// Re-locate the current context: erasures above may have shifted it.
		// A linear scan is inefficient, but unconditionally correct.
		for (size_t i = 0; i < _contexts.size(); ++i){
			if(_contexts[i].getID()==_currentContextID){
				_currentContextPosition = i;
			}
		}

	}

	/**
	 * Appends one CSV line describing \a x to the history log. On the very
	 * first call, the CSV header row is written first.
	 *
	 * @param x the data point that was just considered
	 * @param contextN number of points in the current context; -1 while unknown
	 * @param bestProbability similarity value of the best-matching context
	 *        (defaults to 1 when no comparison was made this iteration)
	 */
	void printToHistoryLog(const Point& x, int contextN = -1, double bestProbability = 1){
		//do logging stuff first
		int pointsize = x.size();

		//below the table header is added when consider is called for the first time
		if(_firstIteration){
			for(int i = 0; i<pointsize ; i++){
				_historylog << "datapoint_" << i+1;
				_historylog << ",";
			}
			//PLEASE NOTE: CLUSTER_N IS CURRENTLY AN UNRELIABLE STATISTIC
			_historylog << "corresponding_cluster,cluster_n,context_count,history_size,best_similarity_value,best_merge_prob_overall"<<std::endl;
			_firstIteration = false;
		}

		//now we log the values that are fed into consider
		for(int i = 0; i<pointsize ; i++){
			_historylog << x[i];
			_historylog << ",";
		}

		_historylog << _currentContextID << "," ;
		_historylog << contextN << "," ;
/*		_historylog << _contexts[_currentContextPosition].getMu()[0] << "," ;
		_historylog << _contexts[_currentContextPosition].getSigma()[0] << "," ;
		_historylog << _contexts[_currentContextPosition].getSumSquares()[0] << "," ;*/
		_historylog << _contexts.size() << "," ;
		_historylog << _historySize << "," ;
		//_historylog << p << "," ;
		_historylog << bestProbability << "," ;
		_historylog << _bestMergeProbFound;
		_historylog << std::endl;
	}

	void updateClusterUsage(context& context){
		context.setUsages(context.getUsages()+1);
	}

	/**
	 * KS-style comparison between the CURRENT context and \a context, based
	 * on a single "sample": the standardised difference between the two
	 * contexts' mean vectors (so n == 1 in the test below).
	 *
	 * Each dimension's difference is standardised by \a context's sigma,
	 * mapped through the standard Gaussian CDF, reduced to one value via a
	 * gamma CDF over -log of the product across dimensions, and finally fed
	 * into the Kolmogorov distribution.
	 *
	 * NOTE(review): with only one sample the KS statistic is of questionable
	 * power; confirm this is the intended comparison.
	 *
	 * @return confidence level for the null hypothesis that the two
	 *         distributions are equal.
	 */
	double ksTestDoubleContext(context& context)
	{
		//
		// generate the standardised difference between the two contexts' means
		//
		std::vector< Point > standardDiffs;
		// (Fix: removed an unused local `size_t p(0);` that was shadowed by
		// the BOOST_FOREACH loop variable below.)

		//only one sample here (the pair of means), so no loop over an input range
		Point diff;
		for (unsigned dim = 0; dim < dimensions; ++dim)
		{
			diff[dim] = (_contexts[_currentContextPosition].getMu()[dim] - context.getMu()[dim]) / context.getSigma()[dim];
		}
		standardDiffs.push_back(diff);


		std::set<real> sortedReducedDifferences;
		BOOST_FOREACH(Point p, standardDiffs)
		{
			//
			// transform standardDiffs to uniform distribution through standard Gaussian CDF
			//
			BOOST_FOREACH(real& diff, p)
			{
				diff = gsl_cdf_ugaussian_P(diff);
			}

			//
			// Reduce dimension by
			// first multiplying the standardised transformed differences across dimensions
			// then gamma CDF over -log of the result
			//
			real product( std::accumulate(p.begin(), p.end(), 1.0, std::multiplies<real>() ));
			sortedReducedDifferences.insert(gsl_cdf_gamma_P(-log(product), dimensions, 1));
		}

		real n( sortedReducedDifferences.size() );
		real stepSize(1/n);
		real step(0.0);
		real KSstatistic(0.0);

		//
		// KS test to compare distributions (empirical CDF against uniform)
		//
		BOOST_FOREACH(double diff, sortedReducedDifferences)
		{
			real delta1 = fabs(step - diff);
			step += stepSize;
			real delta2 = fabs(step - diff);
			KSstatistic = std::max(std::max(delta1, delta2), KSstatistic);
		}

		return KolmogorovProb(sqrt(n)* KSstatistic);
	}



	template<class InputIterator>
	double ksTestprint(InputIterator begin, InputIterator end, context& context)
	{
		//
		// generate array of standardised differences between points in history and context
		//
		std::vector< Point > standardDiffs;
		size_t p(0);
		for (InputIterator i = begin; i != end; ++i) // loop over history
		{
			Point diff;
			for (unsigned dim = 0; dim < dimensions; ++dim)
			{
				diff[dim] = ((*i)[dim] - context.getMu()[dim]) / context.getSigma()[dim];
				//std::cout<< (*i)[dim]<<",";
			}
			//std::cout<<std::endl;

			standardDiffs.push_back(diff);
		}

		std::set<real> sortedReducedDifferences;
		BOOST_FOREACH(Point p, standardDiffs)
		{
			//
			// transform standardDiffs to uniform distribution through standard Gaussian CDF
			//
			BOOST_FOREACH(real& diff, p)
			{
				diff = gsl_cdf_ugaussian_P(diff);
			}

			//
			// Reduce dimension by
			// first multiplying the standardised transformed differences across dimensions
			// then gamma CDF over -log of the result
			//
			real product( std::accumulate(p.begin(), p.end(), 1.0, std::multiplies<real>() ));
			sortedReducedDifferences.insert(gsl_cdf_gamma_P(-log(product), dimensions, 1));
		}

		real n( sortedReducedDifferences.size() );
		real stepSize(1/n);
		real step(0.0);
		real KSstatistic(0.0);

		//
		// KS test to compare distributions
		//
		BOOST_FOREACH(double diff, sortedReducedDifferences)
		{
			real delta1 = fabs(step - diff);
			step += stepSize;
			real delta2 = fabs(step - diff);
			KSstatistic = std::max(std::max(delta1, delta2), KSstatistic);
		}

		return KolmogorovProb(sqrt(n)* KSstatistic);
	}

	/**
	 * Adapted from  http://root.cern.ch/root/html/src/TMath.cxx.html#RDBIQ
	 *
	 * Calculates the Kolmogorov distribution function
	 * which gives the probability that Kolmogorov's test statistic will exceed
	 * the value z assuming the null hypothesis. This gives a very powerful
	 * test for comparing two one-dimensional distributions.
	 *  see, for example, Eadie et al, "Statistical Methods in Experimental
	 *  Physics", pp 269-270).
	 *
	 *  This function returns the confidence level for the null hypothesis, where:
	 *   z = dn*sqrt(n), and
	 *   dn  is the maximum deviation between a hypothetical distribution
	 *       function and an experimental distribution with
	 *   n    events
	 *
	 *  NOTE: To compare two experimental distributions with m and n events,
	 *         use z = sqrt(m*n/(m+n))*dn
	 *
	 * Accuracy: The function is far too accurate for any imaginable application.
	 * Probabilities less than 10^-15 are returned as zero. However, remember that
	 * the formula is only valid for "large" n.
	 *
	 * Theta function inversion formula is used for z <= 1
	 * This function was translated by Rene Brun from PROBKL in CERNLIB.
	 */
	double KolmogorovProb(double z)
	{
		//the check below is a workaround to the problem of exactly equal distributions not being found equal
		// NOTE(review): `isnan` relies on the C99 macro from <math.h>;
		// std::isnan (C++11, <cmath>) would be the portable spelling.
		if(isnan(z)){
			return 1;
		}

		// Series coefficients: fj[j] = -2*(j+1)^2, used as exp(fj[j]*z^2).
		double fj[4] =
		{ -2, -8, -18, -32 }, r[4];
		// w = sqrt(2*pi)
		const double w = 2.50662827;
		// c1 - -pi**2/8, c2 = 9*c1, c3 = 25*c1
		const double c1 = -1.2337005501361697;
		const double c2 = -11.103304951225528;
		const double c3 = -30.842513753404244;

		double u = fabs(z);
		double p;
		if (u < 0.2)
		{
			// Tiny statistic: probability indistinguishable from 1.
			p = 1;
		}
		else if (u < 0.755)
		{
			// Theta-function inversion formula (accurate for small z).
			double v = 1. / (u * u);
			p = 1 - w * (exp(c1 * v) + exp(c2 * v) + exp(c3 * v)) / u;
		}
		else if (u < 6.8116)
		{
			// Direct alternating series; maxj terms are enough at this u
			// (maxj <= 4, so r[4] is never overrun).
			r[1] = 0;
			r[2] = 0;
			r[3] = 0;
			double v = u * u;
			int maxj = std::max(1, nInt(3. / u));
			for (int j = 0; j < maxj; j++)
			{
				r[j] = exp(fj[j] * v);
			}
			p = 2* (r [0] - r[1] +r[2] - r[3]);
		}
		else
		{
			// Probabilities below ~1e-15 are returned as exactly zero.
			p = 0;
		}
		return p;
	}

	/**
	 * Adapted from ROOT's TMath::Nint (http://root.cern.ch/root/html/src/TMath.cxx).
	 *
	 * Rounds to the nearest integer; exact half-integers are rounded to the
	 * nearest EVEN integer (banker's rounding).
	 */
	int nInt(double x)
	{
		int rounded;
		if (x >= 0) {
			rounded = int(x + 0.5);
			// exactly halfway and the candidate is odd -> step down to even
			if (x + 0.5 == double(rounded) && (rounded & 1)) --rounded;
		} else {
			rounded = int(x - 0.5);
			// exactly halfway and the candidate is odd -> step up to even
			if (x - 0.5 == double(rounded) && (rounded & 1)) ++rounded;
		}
		return rounded;
	}

	/**
	 * Returns the position (in the context vector) of the context closest to
	 * \a context, excluding \a context itself (matched by ID).
	 *
	 * @param context the context to compare against
	 * @param mode 1 = closest by AVERAGE difference in means per dimension,
	 *             2 = closest by MAXIMUM difference in means per dimension;
	 *             any other mode returns 0.
	 * @return position of the closest context; 0 when fewer than two contexts
	 *         exist, or when no candidate was closer than the initial
	 *         threshold of 1 (sensor inputs are assumed to lie in [0,1]).
	 */
	int getClostestContextPosition(context& context, int mode){
		//return 0 if there are (almost) no contexts, in order to make sure things keep working
		if(_contexts.size()<=1)
			return 0;

		//initialize
		int positionLowestAvgDifference = -1;
		int positionLowestMaxDifference = -1;
		double lowestAvgDifference = 1;
		double lowestMaxDifference = 1;

		for (size_t i = 0; i < _contexts.size(); ++i){
			if(_contexts[i].getID()!=context.getID()){
				// Bug fix: the two if-statements below originally had no
				// braces, so the position variables were assigned on EVERY
				// iteration (the last candidate always won) instead of only
				// when a new minimum was found. Each distance is now also
				// computed once instead of twice.
				const double avgDifference = averageDistanceMeans(_contexts[i],context);
				if(avgDifference<lowestAvgDifference){ //average distance smaller than smallest found before
					lowestAvgDifference = avgDifference;
					positionLowestAvgDifference = i;
				}
				const double maxDifference = maximumDistanceMeans(_contexts[i],context);
				if(maxDifference<lowestMaxDifference){ //maximum distance smaller than smallest found before
					lowestMaxDifference = maxDifference;
					positionLowestMaxDifference = i;
				}
			}
		}

		if(mode == 1 && positionLowestAvgDifference >= 0){ //minimal average distance in means
			return positionLowestAvgDifference;
		}else if(mode == 2 && positionLowestMaxDifference >= 0){ //minimal maximum distance in means
			return positionLowestMaxDifference;
		}else{ //no proper mode specified, or no candidate beat the threshold: return 0
			return 0;
		}
	}

	//Average absolute difference between the two contexts' means, taken over
	//all dimensions. Assumes sensor inputs between 0 and 1 for usability.
	double averageDistanceMeans(context& context1, context& context2){
		double sumOfDifferences = 0;
		for (unsigned dim = 0; dim < dimensions; ++dim)
			sumOfDifferences += fabs(context1.getMu()[dim] - context2.getMu()[dim]);
		return sumOfDifferences / (double)dimensions;
	}

	//Largest absolute difference between the two contexts' means over all
	//dimensions. Assumes sensor inputs between 0 and 1 for usability.
	double maximumDistanceMeans(context& context1, context& context2){
		double largest = 0;
		for (unsigned dim = 0; dim < dimensions; ++dim){
			const double difference = fabs(context1.getMu()[dim] - context2.getMu()[dim]);
			if(difference > largest)
				largest = difference;
		}
		return largest;
	}

	//Average absolute difference between the two contexts' sigmas (standard
	//deviations), taken over all dimensions. Assumes sensor inputs in [0,1].
	double averageDistanceSigma(context& context1, context& context2){
		double sumOfDifferences = 0;
		for (unsigned dim = 0; dim < dimensions; ++dim)
			sumOfDifferences += fabs(context1.getSigma()[dim] - context2.getSigma()[dim]);
		return sumOfDifferences / (double)dimensions;
	}

	//Largest absolute difference between the two contexts' sigmas (standard
	//deviations) over all dimensions. Assumes sensor inputs in [0,1].
	double maximumDistanceSigma(context& context1, context& context2){
		double largest = 0;
		for (unsigned dim = 0; dim < dimensions; ++dim){
			const double difference = fabs(context1.getSigma()[dim] - context2.getSigma()[dim]);
			if(difference > largest)
				largest = difference;
		}
		return largest;
	}
};

//SLIGHTLY OUTDATED VERSION, NOT CURRENTLY USED. IS REPLACED BY KSTESTPRINT, WHICH ONLY HAS ANOTHER NAME BECAUSE IT WAS USED FOR SOME PRINTING IN THE PAST
/*	template<class InputIterator>
double ksTest(InputIterator begin, InputIterator end, context& context)
{
	//
	// generate array of standardised differences between points in history and context
	//
	std::vector< Point > standardDiffs;
	size_t p(0);
	for (InputIterator i = begin; i != end; ++i) // loop over history
	{
		Point diff;
		for (unsigned dim = 0; dim < dimensions; ++dim)
		{
			diff[dim] = ((*i)[dim] - context.getMu()[dim]) / context.getSigma()[dim];
		}

		standardDiffs.push_back(diff);
	}

	std::set<real> sortedReducedDifferences;
	BOOST_FOREACH(Point p, standardDiffs)
	{
		//
		// transform standardDiffs to uniform distribution through standard Gaussian CDF
		//
		BOOST_FOREACH(real& diff, p)
		{
			diff = gsl_cdf_ugaussian_P(diff);
		}

		//
		// Reduce dimension by
		// first multiplying the standardised transformed differences across dimensions
		// then gamma CDF over -log of the result
		//
		real product( std::accumulate(p.begin(), p.end(), 1.0, std::multiplies<real>() ));
		sortedReducedDifferences.insert(gsl_cdf_gamma_P(-log(product), dimensions, 1));
	}

	real n( sortedReducedDifferences.size() );
	real stepSize(1/n);
	real step(0.0);
	real KSstatistic(0.0);

	//
	// KS test to compare distributions
	//
	BOOST_FOREACH(double diff, sortedReducedDifferences)
	{
		real delta1 = fabs(step - diff);
		step += stepSize;
		real delta2 = fabs(step - diff);
		KSstatistic = std::max(std::max(delta1, delta2), KSstatistic);
	}

	return KolmogorovProb(sqrt(n)* KSstatistic);
}*/

}

#endif /* CONTEXTMANAGER_H_ */
