//+------------------------------------------------------------------+
//|                                                         CoSyNE.h |
//| Cooperative Synapse Neuroevolution                               |
//| Genetic optimization of neural nets and more                     |
//+------------------------------------------------------------------+
#pragma once
#include <cstring>							// memset / memcpy
#include <xmmintrin.h>						// SSE: __m128, _mm_set1_ps, _mm_loadu_ps / _mm_storeu_ps
#include "Mem2D.h"
#include "RNG.h"
#include "Index.h"

//+==================================================================+
class CoSyNE
{
public:
//////////////////////////////////////////////////////////////////////
	CoSyNE(
		int	weights,							// number of parameters to be coevolved
		int	cache,							// size of the cache for each sample
		int	pop								// (maximal) sample population size
		);

	~CoSyNE();

//+------------------------------------------------------------------+
//	Indices for the strategy parameters at the end of each sample's
//	weight array (from offset "w").
	enum StrategyWeight
	{
		InitGamma = 0,
		InitAlpha,
		MutateGamma,							// Levy scale
		MutateAlpha,							// Levy exponent (1 = cauchy, 2 = gaussian)
		MutateProb,								// probability of each weight to be mutated
		RecombProb,								// probability of each cell and node to crossover (should be renamed to "CrossoverProb")
		Reserved,
		Fitness,									// Can be used to store reference fitness (e.g., calculated on the whole dataset)
		StrategyWeights						// Number of strategy weights
	};

//+------------------------------------------------------------------+
//	Exponentially smoothed blend of a sample's strategy weights,
//	updated by adjustGeneratorVariables():
//		gv = (1-factor)*gv + factor*strategyWeights(sample)
//	Processed as two __m128 vectors — keep the size a multiple of 4.
	float			generatorVariables[StrategyWeights];
	void	adjustGeneratorVariables(int sample, float factor = 0.01f);

//+------------------------------------------------------------------+
//	The cache can be used for anything, including but not limited to:
//	inputs, intermediate-, final- and previous outputs, internal state.
//	It lives directly behind each sample's strategy weights in 'cache'.
	float			*sampleGetCache	(int sample)							{	return &cache(StrategyWeights,sample);	}
	const float	*sampleGetCache	(int sample)	const					{	return &cache(StrategyWeights,sample);	}
	void			 sampleClearCache	(int sample)							{	memset(&cache(StrategyWeights,sample), 0, (cache.m()-StrategyWeights)*sizeof(float));	}

//+------------------------------------------------------------------+
//	Access the strategy weights of a sample (rows 0..StrategyWeights-1 of 'cache')
	float strategyWeights			(int sample, StrategyWeight n = InitGamma) const	{	return cache(n,sample);	}
	float&strategyWeights			(int sample, StrategyWeight n = InitGamma)			{	return cache(n,sample);	}

//+------------------------------------------------------------------+
	void	strategyWeightAverage(StrategyWeight n, float x);
	float	strategyWeightAverage(StrategyWeight n) const;

//+------------------------------------------------------------------+
//	Random initialization guided by generator variables
	virtual	void	randomizeSample				(int sample);
				void	randomizePop();

//+------------------------------------------------------------------+
//	Calculates standard deviation of each weight, used for mutation
				void	updateMutationFactors		(int lambda);
	const   float	 *getMuatationFactors() const		{	return mutateFactor;	}
//	Correctly-spelled accessor; the misspelled one above is kept for source compatibility
	const   float	 *getMutationFactors () const		{	return getMuatationFactors();	}

//+------------------------------------------------------------------+
//	Clone the sample, including cache etc.
				void	sampleClone						(int sample, int dst);
				void	sampleMutateStrategyWeights(int j, bool inclInit = false);
	virtual	void	sampleMutateWeights			(int sample);
				void	sampleMutateWeightsWeighted(int sample);

//+------------------------------------------------------------------+
//	Recombination of two samples by double crossover.
//	Subclasses probably want to override this method.
	virtual	void	samplesRecombine				(int sample1, int sample2, int offspring1, int offspring2);

//+------------------------------------------------------------------+
//	Probabilistic permutation for coevolution of the first lambda samples in the 'ranking'
	void	samplesPermute		(int lambda, double permuteProb = 1.0);
	void	samplesPermute		(int lambda, double permuteProb, int startWeight, int stopWeight);

//+------------------------------------------------------------------+
//	Accessors for the raw population, cache, fitness and ranking storage
	const	Mem2D<float>	&getPopulation() const		{	return P;				}
			Mem2D<float>	&getPopulation()				{	return P;				}
			Mem2D<float>	&getCache()						{	return cache;			}
	const	Mem2D<float>	&getFitness() const			{	return fitness;		}
			Mem2D<float>	&getFitness()					{	return fitness;		}
	const	Index				&getRanking() const			{	return ranking;		}
			Index				&getRanking()					{	return ranking;		}
			int				weights() const				{	return w;				}
			int				getPopulationSize() const	{	return pop;				}
			void				setPopulationSize(int n)	{	pop = min(n,P.n());	}	// clamped to allocated capacity

//+------------------------------------------------------------------+
//	Indexes the samples through sorting their fitness values
	void	rankFitness(bool descending = false);
//+------------------------------------------------------------------+
//	Indexes the samples through sorting the provided fitness values
	template<class T>
	void	rankFitness(T *fitness, bool descending = false);

	mutable RNG			rng;					//	our random number generator

protected:
//+------------------------------------------------------------------+
	const int			w;						// total number of weights to be optimized by coevolution == P.m()
	int					pop;					// active population; might be smaller than P.n()

//+------------------------------------------------------------------+
//	all the weights + strategy parameters for self adaption + cache
//	for all samples plus reserve space, e.g. for children, dead samples or champions.
//	Having everything in one place should increase cache friendliness where it
//	matters most: in the (custom) fitness evaluation function.
	Mem2D<float>		P;						// size (w,pop): sample population
	Mem2D<float>		cache;				// size (StrategyWeights+cache,pop): strategy parameter and cache for every sample
	Mem2D<float>		fitness;				// size pop: fitness of each sample and maybe some additional specific stats, e.g. profit factor, max drawdown, etc.
	Index					ranking;				// size pop: fitness ranking
	Mem2D<int>			permuteIndex;		//	size pop: used to create a probabilistic permutation for coevolution
	Mem2D<float>		mutateFactor;		//	size w:   standard deviation of each weight: higher standard deviation means larger and/or less informative -> stronger mutation gamma

};//class CoSyNE


//+==================================================================+
//| storage and references for transposing during recombination		|
//+==================================================================+
template<class T>
class TransposeCrossover
{
public:
//+------------------------------------------------------------------+
//	maxSize: maximal element count (m*n) of one matrix passed to cross()
	TransposeCrossover(int maxSize)
	{
	// The first matrix allocates twice the space,
	//	The second matrix uses the 2nd half
		tmp[0].resize(2*maxSize);
		tmp[1].reference(&tmp[0][maxSize]);
		dst[0] = &tmp[2];
		dst[1] = &tmp[3];
		prob = 0;								// no crossover until the caller sets 'prob'
	}

//+------------------------------------------------------------------+
//	Do the probabilistic transposed crossover:
//	src1/src2 are viewed as packed (m,n) matrices, unpacked by transposing
//	into the temp storage, then repacked row by row into dst1/dst2 — with
//	probability 'prob' per row the two destinations swap roles, so the
//	offspring exchange whole (transposed) rows.
//	NOTE(review): dst[0]/dst[1] are not reset at the start of cross(), so an
//	odd number of swaps in a previous call leaves the destination roles
//	exchanged for the next call — presumably harmless for a stochastic GA,
//	but worth confirming.
	void cross(const T *src1, const T *src2, T *dst1, T *dst2, int m, int n)
	{
	//----
		tmp[0].assumeSize(n,m);				// prepare dimension of our temp storage, for unpacking by transposing
		tmp[1].assumeSize(n,m);
		tmp[2].reference(dst1,m,n);		// reference the packed destination
		tmp[3].reference(dst2,m,n);
		tmp[0].loadTranspose(src1);		// unpack the weights
		tmp[1].loadTranspose(src2);

	//	possible cross of each row
		for(int j = 0; j < m; j++)
		{
			if(rng.rand() < prob)
				swp(dst[0],dst[1]);			// swap which offspring receives which parent's row

		//----
		//	repack the weights
			dst[0]->loadTranspose(tmp[0],j);
			dst[1]->loadTranspose(tmp[1],j);
		}
	}

//+------------------------------------------------------------------+
//	crossover probability per row, compared against rng.rand()
	double	 prob;

private:
//+------------------------------------------------------------------+
//	our temporary storage and references of the packed destination
	Mem2D<T> tmp[4],
				*dst[2];							// &tmp[2] and &tmp[3], these pointers will be swapped on each crossover
	RNG rng;
};




//-----------------------------------------------------------------------------
//	clone sample j
inline void CoSyNE::sampleClone(int j, int dst)
{
	memcpy(    &P(0,dst),     &P(0,j),     P.m()*sizeof(float));
	memcpy(&cache(0,dst), &cache(0,j), cache.m()*sizeof(float));
}


//-----------------------------------------------------------------------------
inline void CoSyNE::adjustGeneratorVariables(int j, float factor)
{
	__m128	f0 = _mm_set1_ps(factor),
				f1 = _mm_set1_ps(1.0f-factor),
				*src = (__m128*)&cache[0],
				*dst = (__m128*)&generatorVariables[0];

	dst[0] = f1*dst[0] + f0*src[0];
	dst[1] = f1*dst[1] + f0*src[1];
}

//-----------------------------------------------------------------------------
inline void CoSyNE::rankFitness(bool desc)
{
//	Index-sort the first 'pop' internal fitness values into 'ranking';
//	'desc' selects the sort direction (see Index::index for exact semantics).
	ranking.index(&fitness[0],pop,desc);
}

//-----------------------------------------------------------------------------
template<class T>
inline void CoSyNE::rankFitness(T *f, bool desc)
{
//	Same as rankFitness(bool), but index-sorts caller-provided fitness
//	values 'f' (must hold at least 'pop' entries) into 'ranking'.
	ranking.index(f,pop,desc);
}

