#include "CMA.h"
#include "Model.h"
#include <cmath>
#include <limits>
#include "../FormatString.h"
#include <limits>
#include "../Exceptions.h"
#include <stdlib.h>

using namespace std;

template <class T>
// Square of a value, for any type supporting operator*.
inline T sqr(T value) {
	return value * value;
}

// Uniform random float in [0, 1), driven by the C library's rand().
inline float getRandom() {
	const float denominator = RAND_MAX + 1.0f;
	return std::rand() / denominator;
}

// Set every element of v[0..count) to value.
void fill(float *v, size_t count, float value) {
	float *end = v + count;
	for (float *p = v; p != end; ++p) {
		*p = value;
	}
}

// Multiply v[0..count) by factor, in place.
void scale(float factor, float *v, size_t count) {
	while (count-- > 0) {
		*v++ *= factor;
	}
}

// BLAS-style saxpy: y[i] += a * x[i] for the first n elements.
void saxpy(size_t n, float a, float *x, float *y) {
	for (size_t idx = 0; idx != n; ++idx) {
		y[idx] = y[idx] + a * x[idx];
	}
}

template <typename T>
// Constrain x to [minVal, maxVal]. Note: when minVal > maxVal, the
// max-then-min order makes maxVal win, matching the original semantics.
inline T clamp(T x, T minVal, T maxVal) {
	T lowerBounded = std::max(minVal, x);
	return std::min(lowerBounded, maxVal);
}

// Default offspring population size: lambda = 4 + floor(3 * ln(N)), clamped
// to at least 5 and at most parameterCount (for parameterCount < 5 the
// inverted clamp bounds resolve to parameterCount itself).
// Fixed: the intermediate was needlessly cast through unsigned before being
// stored in an int.
int learn::CMA::suggestLambda(int parameterCount) {
	int lambda = int(4 + floor(3 * log((double)parameterCount)));
	return clamp(lambda, 5, parameterCount);
}

// Default number of parents: lambda/4 for equal recombination weights,
// lambda/2 otherwise.
// Fixed: consistent int casts (one branch went through a stray unsigned cast).
int learn::CMA::suggestMu(int lambda, RecombType recomb) {
	if (recomb == equal) return int(floor(lambda / 4.0));
	return int(floor(lambda / 2.0f));
}

// Initialize the CMA-ES state following the standard parameter
// recommendations (Hansen & Ostermeier). populationSize is the number of
// selected parents (mu); the initial distribution mean is the weighted
// center of gravity of `population`.
void learn::CMA::init(int numParameters, float dispersion, float _sigma,
		Individual *population, size_t populationSize,
		RecombType recombination, 
		UpdateType update) 
{
	size_t mu = populationSize;
	
	n = numParameters;
	sigma = _sigma;
		
	w.resize(mu);          // recombination weights
	x.resize(n);           // current distribution mean
	xPrime.resize(n);      // next distribution mean
	z.resize(n);
	pc.resize(n, 0);       // evolution path for C
	ps.resize(n, 0);       // conjugate evolution path for sigma
	Z.resize(n * n);       // rank-mu update accumulator
	B.resize(n * n);       // eigenvector matrix of C
	lambda.resize(n);      // eigenvalues of C
	theVector.resize(n);
	randomMean.resize(n);
	
	// Raw recombination weights per scheme, plus their sums for normalization
	// and for the variance-effective selection mass.
	float sumW = 0;
	float sumSqrW = 0;
	for (size_t i = 0; i < mu; i++) {
		switch (recombination) {
		case equal: w[i] = 1; break;
		case linear: w[i] = mu - i; break;
		case superlinear: w[i] = log(mu + 1.0f) - log(1.0f + i); break;
		}
		sumW += w[i];
		sumSqrW += sqr(w[i]);
	}
		
	// normalize weights so they sum to 1
	scale(1 / sumW, &w[0], w.size());
	
	// mueff = 1 / sum(w_i^2) for the normalized weights
	sumSqrW /= sqr(sumW);
	mueff = 1 / sumSqrW;
	
	// step size control
	cs = (mueff + 2) / (n + mueff + 3);
	d = 1 + 2 * max(0.0f, sqrt((mueff - 1) / (n + 1)) - 1) + cs;
	
	// covariance matrix adaptation
	mucov = (update == rankone) ? 1 : mueff;
	// BUG FIX: `4 / (4 + n)` was pure integer division and evaluated to 0 for
	// every n >= 1, which zeroed cc (and hence ccu), disabling the evolution
	// path for C entirely. The standard setting is cc = 4 / (n + 4).
	cc = 4.0f / (4 + n);
	ccov = 1.0 / mucov * 2 / sqr(n + sqrt(2))
		+ (1 - 1 / mucov) * min(1.0f, (2 * mueff - 1) / (sqr(n + 2) + mueff));

	ccu = sqrt((2 - cc) * cc);
	csu = sqrt((2 - cs) * cs);
	// approximation of E||N(0, I)|| used by the step-size rule
	chi_n = sqrt(double(n)) * (1 - 1.0 / (4 * n) +  1 / (21 * sqr(double(n))));
	
	getCenterOfGravity(&x[0], &population[0], populationSize);
	
	// set matrix C to be identity, scaled by the initial dispersion
	C.resize(n * n, 0);
	for (int i = 0; i < n; i++) {
		C[i * n + i] = dispersion;
	}
	
	getEigens(&C[0], &B[0], &lambda[0]);
}

void learn::CMA::getCenterOfGravity(float *cog, Individual *individuals, size_t populationSize) {
	if (populationSize == 0) return;	
	fill(cog, n, 0);
	for (int i = 0; i < populationSize; i++) {		
		saxpy(n, w[i], individuals[i].parameters, cog);
	}
}

// One CMA-ES generation update: move the mean to the weighted center of the
// selected population, update both evolution paths, adapt the covariance
// matrix C and the global step size sigma, then refresh the
// eigendecomposition of C. Equation numbers refer to the CMA-ES paper this
// implementation follows.
// NOTE(review): w[k] is indexed up to populationSize below — assumes
// populationSize <= w.size() (i.e. only the mu selected parents are passed);
// confirm against callers.
void learn::CMA::updateStrategyParameters(learn::Individual *population, size_t populationSize, float lowerBound) {
	if (populationSize == 0) return; 
	getCenterOfGravity(&xPrime[0], population, populationSize);
		
	fill(&theVector[0], theVector.size(), 0);
	// TODO lapack routine? theVector += B * meanz;
	// theVector = B * randomMean: the z-samples accumulated by
	// createIndividual(), rotated into parameter space.
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			theVector[i] += B[i * n + j] * randomMean[j];
		}
	}
	
	// Eq. (2) & Eq. (4): evolution path pc (covariance adaptation) and
	// conjugate path ps (step-size control); also accumulate ||ps||^2.
	float normPS = 0;
	for (int i = 0; i < n; i++) {
		pc[i] = (1 - cc) * pc[i] + ccu * sqrt(mueff) / sigma * (xPrime[i] - x[i]);
		ps[i] = (1 - cs) * ps[i] + csu * sqrt(mueff) * theVector[i];
		normPS += sqr(ps[i]);
	}
	normPS  = sqrt(normPS);
	
	// Eq. (3): rank-mu matrix Z = sum_k w[k] * (x_k - mean)(x_k - mean)^T
	fill(&Z[0], n * n, 0);	
	for (int k = 0; k < populationSize; k++) {
		float *params = population[k].parameters;
		for (int i = 0; i < n; i++) {
			for (int j = 0; j < n; j++) {
				Z[i * n + j] += w[k] * (params[i] - x[i]) * (params[j] - x[j]);
			}
		}
	}
	
	// Blend the rank-one update (pc outer product) and the rank-mu update (Z)
	// into C, weighted by 1/mucov.
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			C[i * n + j] = (1 - ccov) * C[i * n + j] + ccov *
				(1.0f / mucov * pc[i] * pc[j] + (1 - 1.0f / mucov) * 1.0f / sqr(sigma) * Z[i * n + j]);
		}
	}
	
	// Eq. (5): grow sigma when ||ps|| exceeds the expected length chi_n of a
	// standard normal vector, shrink it otherwise.
	sigma *= exp((cs / d) * (normPS / chi_n - 1));

	// find eigenvalues / eigenvectors for C
	getEigens(&C[0], &B[0], &lambda[0]);

	// lower bound: keep sigma large enough that the axis corresponding to
	// lambda[n - 1] stays above lowerBound.
	// NOTE(review): assumes lambda[n - 1] is the relevant extreme eigenvalue —
	// confirm the ordering ssyevx returns (ascending).
	sigma = max(sigma, lowerBound / sqrt(abs(lambda[n - 1])));
	
	// new COG becomes old COG
	x = xPrime;
	// reset the accumulated z-samples for the next generation
	fill(&randomMean[0], n, 0);
}

// Compute all eigenvalues/eigenvectors of the symmetric n x n `matrix`
// (lower triangle used) via LAPACK ssyevx. Note: ssyevx overwrites `matrix`.
// Throws ArgumentException when ssyevx reports an illegal argument (info < 0).
void learn::CMA::getEigens(float *matrix, float *outEigenvectors, float *outEigenvalues) {
	
	// (Re-)allocate the scratch buffers when n has grown since the last call.
	if (ifail.size() < size_t(n)) {
		allocateMemoryForEigens();
	}
		
	int info = 0;
	int numEigens;
	int lwork = work.size();
	float tolerance = numeric_limits<float>::epsilon();

	// FIX: use the member scratch buffers sized by allocateMemoryForEigens().
	// The original declared fresh local ifail/iwork vectors here, shadowing
	// the members and defeating the pre-allocation the guard above maintains.
	lapack.ssyevx("Vectors", "All", "L", &n, &matrix[0], &n,
			NULL, NULL, NULL, NULL, &tolerance,
			&numEigens,  outEigenvalues, outEigenvectors, &n, 
			&work[0], &lwork, &iwork[0], &ifail[0], &info);
	if (info < 0) {
		throw ArgumentException(FormatString() << "Lapack.ssyevx returned " << info);
	}
	// NOTE(review): info > 0 (eigenvectors failed to converge) is silently
	// ignored here, matching the original behavior — consider handling it.
}

// Size the scratch buffers (work, iwork, ifail) that getEigens() hands to
// LAPACK ssyevx, using ssyevx's own workspace query (lwork == -1).
void learn::CMA::allocateMemoryForEigens() {
	// determine work size
	int lwork = -1;	// -1 requests a workspace-size query only
	int info = 0;
	float workLength;	// receives the optimal lwork (returned in work[0])
	int tempInt = 1;
	float tempFloat;	// dummy storage for array arguments unused by the query
	// NOTE(review): several output pointers (the eigenvalue count and abstol
	// slots) are passed as NULL; whether a workspace query tolerates NULL
	// there is implementation-dependent — confirm against this lapack binding.
	lapack.ssyevx("Vectors", "All", "L", &n, &tempFloat, &n,
			NULL, NULL, NULL, NULL, NULL, NULL, &tempFloat, &tempFloat, &n, 
			&workLength, &lwork, NULL, &tempInt, &info);
	lwork = (int)workLength;
	
	ifail.resize(n);
	work.resize(lwork);
	iwork.resize(n * 5);	// ssyevx needs 5*n integer workspace
}
	
// TODO rename to create random vector
void learn::CMA::createIndividual(float *outParams) {
	vector<float> rnd(n);
	for (int i = 0; i < n; i++) {		
		rnd[i] = learn::normalRandom.next();
		randomMean[i] += rnd[i];
	}
	
	for (int i = 0; i < n; i++) {
		outParams[i] = x[i];
		for (int j = 0; j < n; j++) {
			outParams[i] += sigma * B[i * n + j] * sqrt(abs(lambda[j])) * rnd[j];
		}
	}
}

// Destructor: all state lives in self-managing containers, nothing to free.
learn::CMA::~CMA() {
}

learn::NormalRandom::NormalRandom() : current(0), sx(11), sy(13), sz(15) {}
	
// Reseed the three congruential generators from one long: each generator
// takes a different byte-slice of s, offset so no seed is ever zero.
void learn::NormalRandom::seed(long s) {
	long low = s & 0xff;
	long middle = (s >> 8) & 0xff;
	long high = (s >> 16) & 0xffff;
	sx = low + 1;
	sy = middle + 10000;
	sz = high + 3000;
}
	
// Draw one standard-normal value. getNormalRandom() produces values in
// pairs, so two are buffered and handed out one at a time.
float learn::NormalRandom::next() {
	if (current == 0) {
		getNormalRandom(values[0], values[1]);
		current = 2;
	}
	current -= 1;
	return values[current];
}
	
// Draw from N(mean, stdDeviation^2) by scaling and shifting a standard draw.
float learn::NormalRandom::next(float mean, float stdDeviation) {
	float standard = next();
	return standard * stdDeviation + mean;
}
	
// Marsaglia polar method: rejection-sample a point inside the unit circle,
// then transform it into two independent standard-normal values.
void learn::NormalRandom::getNormalRandom(float &outRandom1, float &outRandom2) {
	float u, v, radiusSq;
	do {
		u = 2 * (float)nextDouble() - 1;
		v = 2 * (float)nextDouble() - 1;
		radiusSq = u * u + v * v;
	} while (radiusSq >= 1 || radiusSq == 0);

	// Shared scale factor for both outputs.
	float factor = sqrt(-2 * log(radiusSq) / radiusSq);
	outRandom1 = factor * u;
	outRandom2 = factor * v;
}

// Uniform double in [0, 1) from a Wichmann-Hill-style combined generator:
// three small linear congruential generators whose scaled outputs are summed
// and reduced to their fractional part.
double learn::NormalRandom::nextDouble() {
	double rn;
	// the three congruential generators
	sx = (unsigned)(sx * 171UL % 30269UL);
	sy = (unsigned)(sy * 172UL % 30307UL);
	sz = (unsigned)(sz * 170UL % 30323UL);

	// each term is in [0, 1), so rn is in [0, 3)
	rn = sx / 30269.0 + sy / 30307.0 + sz / 30323.0;

	// fractional part: (unsigned)rn truncates, which equals floor for rn >= 0
	return rn - (unsigned)rn;
}

