#include <vector>
#include <memory>
#include <cmath>

#include "MultilayerPerceptron.h"
#include "../Exceptions.h"
#include "../clUtils.h"
#include <stdlib.h>

#include <memory>
#include <string.h>

using namespace std;
using namespace learn;

// Out-of-class definition of the Neuron class constant (declared in
// MultilayerPerceptron.h). Presumably caps the magnitude fed into the
// activation function — confirm against the kernel code in perceptron.cl.
const float Neuron::MAX_INPUT = 150;
// NOTE(review): `one` is not referenced anywhere in this translation unit's
// visible code — looks like a leftover (perhaps a former kernel argument);
// confirm before deleting.
static int one = 1;

// Rounds n up to the next multiple of 4 (returns n unchanged when it is
// already 4-aligned).
inline int ceil4(int n) {
	const int bumped = n + 3;
	return bumped & ~3;
}

// Pseudo-random float in [0, 1), drawn from rand().
inline float getRandom() {
	const float span = RAND_MAX + 1.0f;
	return rand() / span;
}

// Pseudo-random float uniformly distributed in [low, high).
inline float getRandom(float low, float high) {
	const float t = getRandom();
	return low + (high - low) * t;
}

// Small random initial weight in [-0.1, 0.1).
// NOTE(review): no visible call site in this file — Builder::create never
// randomizes `weights`; confirm whether initialization was meant to use this.
inline float getRandomWeight() {
	const float kRange = 0.1f;
	return getRandom(-kRange, kRange);
}

// Sets how many input values the perceptron will accept.
// Throws ArgumentOutOfRangeException for non-positive counts.
// Returns *this so calls can be chained.
MultilayerPerceptron::Builder& MultilayerPerceptron::Builder::setInputCount(int count) {
	if (count < 1) {
		throw ArgumentOutOfRangeException();
	}
	inputCount = count;
	return *this;
}

// Appends a layer of `neuronCount` neurons after the layers added so far.
// Throws ArgumentOutOfRangeException for non-positive counts.
// Returns *this so calls can be chained.
MultilayerPerceptron::Builder& MultilayerPerceptron::Builder::addHiddenLayer(int neuronCount) {
	if (neuronCount < 1) {
		throw ArgumentOutOfRangeException();
	}
	layerSizes.push_back(neuronCount);
	return *this;
}

// Validates the builder configuration and assembles a MultilayerPerceptron:
// sizes the shared input/output buffer, lays out per-neuron weight/input/output
// offsets, uploads the neuron table to the device and seeds the constant bias
// inputs. The caller owns the returned pointer.
MultilayerPerceptron* MultilayerPerceptron::Builder::create(cl_command_queue queue) {
	if (inputCount == 0) throw ArgumentException("Perceptron input count not specified");
	if (layerSizes.size() == 0) throw ArgumentException("Add layers to Perceptron");
	
	// unique_ptr replaces the deprecated std::auto_ptr (removed in C++17) and
	// still prevents a leak if any allocation below throws.
	unique_ptr<MultilayerPerceptron> result(new MultilayerPerceptron(queue));
	result->inputCount = inputCount;
	
	// calculate input count: the network inputs plus, per layer, one bias slot
	// and one output slot per neuron
	int totalInputs = inputCount;
	for (auto it = layerSizes.begin(); it != layerSizes.end(); ++it) {
		totalInputs = totalInputs + 1 + *it; // ceil4(totalInputs + 1) + *it;
	}
	result->data = ClMem<float>(queue, totalInputs);
	
	// fill multilayer perceptron topology
	int weightIndex = 0;          // running offset into the weight buffer
	int inputIndex = inputCount;  // next free slot of `data` (neuron outputs land here)
	int layerStart = 0;           // offset of the current layer's input block in `data`

	// size_t index avoids the old signed/unsigned comparison warning
	for (size_t i = 0; i < layerSizes.size(); i++) {

		inputIndex = inputIndex + 1; // skip the bias slot // ceil4(inputIndex + 1);

		// this layer reads the previous layer's outputs (or the raw network
		// inputs) plus one constant bias input
		int layerInputCount = (i == 0) ? inputCount + 1 : layerSizes[i - 1] + 1;
		int layerLength = layerSizes[i];

		// set layer
		Layer layer;
		layer.neuronStartIndex = result->hostNeurons.size();
		layer.neuronEndIndex = layer.neuronStartIndex + layerLength;
		result->layers.push_back(layer);

		// fill neurons for this layer
		for (int j = 0; j < layerLength; j++) {
			Neuron neuron;
			neuron.weightStartIndex = weightIndex;
			neuron.inputStartIndex = layerStart;
			neuron.weightCount = layerInputCount;
			neuron.outputIndex = inputIndex++;
			result->hostNeurons.push_back(neuron);

			weightIndex += layerInputCount; //ceil4(layerInputCount);
		}

		layerStart += layerInputCount; //ceil4(layerInputCount);
	}
	
	size_t neuronCount = result->hostNeurons.size();
	result->weights = ClMem<float>(queue, weightIndex);
	result->neuronSumms = ClMem<float>(queue, neuronCount);
	result->deviceNeurons = ClMem<Neuron>(queue, neuronCount);
	result->deviceNeurons.beginWrite(&result->hostNeurons[0], 0, neuronCount).wait();
	
	// send offset (bias) neurons to OpenCL: the last input slot of each layer
	// holds the constant 1 that multiplies the bias weight
	vector<float> sendData(totalInputs);
	for (size_t i = 0, biasSlotStart = 0; i < layerSizes.size(); i++) {
		int layerInputCount = (i == 0) ? inputCount + 1 : layerSizes[i - 1] + 1;
		sendData[biasSlotStart + layerInputCount - 1] = 1; // set offset input
		biasSlotStart += layerInputCount;
	}
	result->data.beginWrite(&sendData[0], 0, totalInputs).wait();
	
	return result.release();
}

// Builds the OpenCL program "perceptron.cl" for the queue's device and creates
// the five kernels this class launches. CL_CHECK throws/reports on any OpenCL
// error code.
MultilayerPerceptron::MultilayerPerceptron(cl_command_queue _queue) 
    : queue(_queue)
{
    cl_context context = getContext(queue);    
    cl_device_id deviceId = getDeviceId(queue);    
    // NOTE(review): `program` is never released in this constructor. Each
    // created kernel retains the program, but the local reference leaks unless
    // buildProgramFromFile caches/owns it — confirm ownership and add
    // clReleaseProgram here if appropriate.
    cl_program program = buildProgramFromFile(context, deviceId, "perceptron.cl");
    
    cl_int errorCode;
	
    activateNeuron = clCreateKernel(program, "activateNeuron", &errorCode);
	CL_CHECK(errorCode);
		
	addError = clCreateKernel(program, "addError", &errorCode);
	CL_CHECK(errorCode);
	
	scale = clCreateKernel(program, "scale", &errorCode);
	CL_CHECK(errorCode);
	
	scaleErrorByDerivative = clCreateKernel(program, "scaleErrorByDerivative", &errorCode);
	CL_CHECK(errorCode);
	
	calculateErrorGradient = clCreateKernel(program, "calculateErrorGradient", &errorCode);
	CL_CHECK(errorCode);
}
	
// Releases the OpenCL kernel objects created in the constructor. The
// destructor used to be empty, so every instance leaked its five kernels.
// (The ClMem members release their buffers via their own destructors.)
MultilayerPerceptron::~MultilayerPerceptron() {
	clReleaseKernel(activateNeuron);
	clReleaseKernel(addError);
	clReleaseKernel(scale);
	clReleaseKernel(scaleErrorByDerivative);
	clReleaseKernel(calculateErrorGradient);
}
	
// Total number of trainable parameters (size of the device weight buffer).
int MultilayerPerceptron::getParameterCount() {
	return weights.count();
}
	
// Number of input values the network expects per sample.
int MultilayerPerceptron::getInputCount() {
	return inputCount;
}
	
int MultilayerPerceptron::getOutputCount() {
	Layer &l = layers.back();
	return (l.neuronEndIndex - l.neuronStartIndex);
}
	
// Copies `count` weights from host memory `values` into the device weight
// buffer starting at `offset`; blocks until the transfer completes.
void MultilayerPerceptron::setParameters(float *values, int offset, int count) {
    weights.beginWrite(values, offset, count).wait();
}
	
// Reads `count` weights starting at `offset` from the device weight buffer
// into host memory `outValues`; blocks until the transfer completes.
void MultilayerPerceptron::getParameters(float *outValues, int offset, int count) {
    weights.beginRead(outValues, offset, count).wait();
}

// Inner product of the first `count` elements of v1 and v2.
float dot(float *v1, float *v2, int count) {
	float acc = 0;
	float *end = v1 + count;
	while (v1 != end) {
		acc += (*v1++) * (*v2++);
	}
	return acc;
}

// Logistic sigmoid: maps any real x into the open interval (0, 1).
float activationFunction(float x) {
	const float denominator = 1 + exp(-x);
	return 1 / denominator;
}

// Derivative of the logistic sigmoid expressed through its output value:
// s'(x) = s(x) * (1 - s(x)), where `func` is s(x).
float activationDerivative(float func) {
	const float complement = 1 - func;
	return func * complement;
}

// Restricts x to the closed interval [minVal, maxVal].
// The min/max composition (rather than comparisons on x) is kept so that
// NaN inputs resolve exactly as before.
template <typename T>
inline T clamp(T x, T minVal, T maxVal) {
	const T atLeast = max(minVal, x);
	return min(atLeast, maxVal);
}

// Assigns `value` to the first `count` slots of `array`.
// A non-positive count is a no-op, matching the original indexed loop.
template <typename T>
void set(T* array, T value, int count) {
	T* p = array;
	for (int remaining = count; remaining > 0; --remaining) {
		*p++ = value;
	}
}

// Launches the "activateNeuron" kernel with one work item per neuron of
// `layer`, binding the neuron table, shared data buffer, weights and the
// per-neuron sum buffer. Argument order must match the kernel signature in
// perceptron.cl. Returns the completion event.
ClEvent MultilayerPerceptron::activateLayerAsync(Layer layer) {
	size_t globalSize = layer.neuronEndIndex - layer.neuronStartIndex;

	BindKernelArgs(activateNeuron)
			<< deviceNeurons << layer.neuronStartIndex
			<< data << weights << neuronSumms;

	cl_event completed;
	CL_CHECK(clEnqueueNDRangeKernel(queue,
			activateNeuron, 1, NULL,
			&globalSize, NULL,
			0, NULL, &completed));
	return ClEvent(completed);
}

// Propagates the error of neuron `neuronIndex` back into every neuron of
// `prevLayer`, via the "addError" kernel (one work item per previous-layer
// neuron): prevLayerErrors[k] += errors[neuronIndex] * weights[weightOffset + k].
// Returns the completion event.
ClEvent MultilayerPerceptron::backpropagateErrorAsync(
	Layer prevLayer, int neuronIndex, ClMem<float> errors) 
{
	size_t count = prevLayer.neuronEndIndex - prevLayer.neuronStartIndex;
	size_t weightOffset = hostNeurons[neuronIndex].weightStartIndex;
	size_t errorOffset = prevLayer.neuronStartIndex;
	
	// `errors` is bound twice on purpose: once as the source error (indexed
	// by neuronIndex) and once as the accumulation target (offset to the
	// previous layer's slots).
	BindKernelArgs(addError) << errors << neuronIndex
		<< weights << weightOffset
		<< errors << errorOffset;
	
	cl_event event;
	CL_CHECK(clEnqueueNDRangeKernel(queue,
			addError, 1, NULL,
			&count, NULL,
			0, NULL, &event));
	return ClEvent(event);
}

// Multiplies `count` elements of `v`, starting at `offsetV`, by `factor`
// (one work item per element). Returns the completion event.
ClEvent MultilayerPerceptron::scaleAsync(float factor, ClMem<float> v, size_t offsetV, size_t count) {
	BindKernelArgs(scale) << factor << v << offsetV;
	
	cl_event completed;
	CL_CHECK(clEnqueueNDRangeKernel(queue,
			scale, 1, NULL,
			&count, NULL,
			0, NULL, &completed));
	return ClEvent(completed);
}

// Launches "scaleErrorByDerivative" with one work item per neuron of `layer`,
// combining each neuron's entry in `errors` with its cached output in `data`.
// Returns the completion event.
ClEvent MultilayerPerceptron::scaleErrorsByDerivativeAsync(Layer layer, ClMem<float> errors) {
	size_t globalSize = layer.neuronEndIndex - layer.neuronStartIndex;

	BindKernelArgs(scaleErrorByDerivative) << deviceNeurons << layer.neuronStartIndex
			<< errors << data;

	cl_event completed;
	CL_CHECK(clEnqueueNDRangeKernel(queue,
			scaleErrorByDerivative, 1, NULL,
			&globalSize, NULL,
			0, NULL, &completed));
	return ClEvent(completed);
}

// Launches "calculateErrorGradient" with one work item per weight of neuron
// `neuronIndex`, combining its entry in `errors` with the layer inputs held
// in `data` and writing into `derivatives`. Presumably the kernel accumulates
// across samples (getErrorDerivative scales `derivatives` only once, after the
// whole data set) — confirm against perceptron.cl. Returns the completion event.
ClEvent MultilayerPerceptron::calculateErrorGradientAsync(int neuronIndex, 
		ClMem<float> errors, ClMem<float> derivatives) 
{
	BindKernelArgs(calculateErrorGradient) 
			<< deviceNeurons << neuronIndex 
			<< errors << data << derivatives;
	
	size_t count = hostNeurons[neuronIndex].weightCount;
	cl_event event;
	CL_CHECK(clEnqueueNDRangeKernel(queue,
			calculateErrorGradient, 1, NULL,
			&count, NULL,
			0, NULL, &event));
	return ClEvent(event);
}

// Forward-propagates `input` through every layer on the device. The inputs
// are written to the head of `data`; each layer's outputs land further along
// the same buffer, where the next layer reads them.
// Throws ArgumentNullException when input is null.
void MultilayerPerceptron::activate(float *input) {
	if (input == nullptr) throw ArgumentNullException("input");  // nullptr: file already uses C++11
	
	data.beginWrite(input, 0, inputCount).wait();
	
	// Layers must run strictly in order — each consumes the previous layer's
	// outputs from `data` — hence the wait() after every launch.
	for (auto layer = layers.begin(); layer != layers.end(); ++layer) {
		activateLayerAsync(*layer).wait();
	}
}

// Runs a full forward pass: activates the network on `input` and copies the
// last layer's outputs (the tail of `data`) into `output`, which must hold
// getOutputCount() floats.
// Throws ArgumentNullException when output is null (activate() checks input).
void MultilayerPerceptron::run(float *input, float *output) {
	if (output == nullptr) throw ArgumentNullException("output");  // nullptr: file already uses C++11
	
	activate(input);
	
	// The output layer's neurons write to the final slots of `data`.
	int outputCount = getOutputCount();
	int offset = data.count() - outputCount;
	data.beginRead(output, offset, outputCount).wait();
}
	
// Derivative of the network outputs with respect to its inputs — not
// implemented; always throws NotImplementedException.
void MultilayerPerceptron::getDerivative(float *input, float *outDerivative) {
	throw NotImplementedException();
}

// Mean squared error of the network over dataProvider's whole data set.
// `model` is accepted for interface compatibility but is not used here.
// NOTE(review): this method calls getCount()/getOutput() while
// getErrorDerivative() calls getSampleCount()/getTarget() on the same
// provider type — confirm these pairs are synonyms or unify the API usage.
float MultilayerPerceptron::getError(Model &model, OptimizationSupervisor &dataProvider) {
	
	int dataSetSize = dataProvider.getCount();
	int outputCount = getOutputCount();
	vector<float> input(dataProvider.getInputLength());
	vector<float> target(outputCount);
	vector<float> output(outputCount);
	float totalErrorSq = 0;
	
	for (int index = 0; index < dataSetSize; index++) {
		
		dataProvider.getInput(index, &input[0]);
		dataProvider.getOutput(index, &target[0]);		
		run(&input[0], &output[0]);
				
		// accumulate squared per-output differences for this sample
		for (int j = 0; j < outputCount; j++) {			
			float neuronDiff = target[j] - output[j];			
			totalErrorSq += neuronDiff * neuronDiff;
		}
	}
	
	// max(..., 1) guards against dividing by zero on an empty data set
	return totalErrorSq / max(dataSetSize * outputCount, 1);
}

// Computes the mean squared error over the whole data set and, via device-side
// backpropagation, the averaged gradient of that error with respect to every
// weight. The gradient is written to outDerivative (getParameterCount()
// floats); the mean squared error is returned.
// Throws ArgumentNullException / ArgumentException on invalid arguments.
// `model` is accepted for interface compatibility but is not used here.
float MultilayerPerceptron::getErrorDerivative(Model& model, OptimizationSupervisor &dataProvider, float *outDerivative) {
	if (outDerivative == NULL) throw ArgumentNullException("outDerivative");
	if (dataProvider.getInputLength() != getInputCount()) {
		throw ArgumentException("dataProvider.getInputLength() should be equal to input count");
	}
	// NOTE(review): the message mentions getOutputLength() but the check calls
	// getTargetLength() — align the wording with the actual API.
	if (dataProvider.getTargetLength() != getOutputCount()) {
		throw ArgumentException("dataProvider.getOutputLength() should be equal to output count");
	}
	  
	int dataSetSize = dataProvider.getSampleCount();
	int outputCount = getOutputCount();
	vector<float> input(dataProvider.getInputLength());
	vector<float> target(outputCount);
	vector<float> output(outputCount);
	int numNeurons = deviceNeurons.count();
	ClMem<float> errors(queue, numNeurons);	// one error slot per neuron
	float totalErrorSq = 0;
	
	size_t parameterCount = getParameterCount();
	ClMem<float> derivatives(queue, parameterCount, float());
	vector<ClEvent> waits(numNeurons);
	
	// NOTE(review): this zero-fill is fully overwritten by the beginRead at the
	// end of the method; it looks redundant — confirm and remove.
	set(outDerivative, 0.0f, getParameterCount());
	
	for (int index = 0; index < dataSetSize; index++) {
		
		dataProvider.getInput(index, &input[0]);
		dataProvider.getTarget(index, &target[0]);		
		run(&input[0], &output[0]);
		
		// get error for last layer: accumulate the squared difference, then
		// reuse output[j] to hold the output-layer error term
		// 2 * (target - output) * s'(output)
		// TODO: move to separate function		
		for (int j = 0; j < outputCount; j++) {			
			float neuronDiff = target[j] - output[j];
			output[j] = 2 * neuronDiff * activationDerivative(output[j]);
			totalErrorSq += neuronDiff * neuronDiff;
		}
		
		// send error to device: clear the hidden-neuron slots and write the
		// output-layer error terms into the tail (last layer's neurons are
		// last in the buffer)
		size_t numHiddenNeurons = errors.count() - outputCount;
		WAIT_ALL(
			errors.beginFill(0.0f, 0, numHiddenNeurons),
			errors.beginWrite(&output[0], numHiddenNeurons, outputCount));
						
		// TODO: move to separate function
		// backpropagate error, one layer at a time from the output backwards
		for (int l = layers.size() - 1; l > 0; l--) {			
			const Layer layer = layers[l];
			const Layer prevLayer = layers[l - 1];
						
			// propagate error backwards			
			for (int j = layer.neuronStartIndex; j < layer.neuronEndIndex; j++) {				
				// prevLayerErrors[k] += error * neuronWeights[k] k=0..n-1
				backpropagateErrorAsync(prevLayer, j, errors).wait();
			}			
			scaleErrorsByDerivativeAsync(prevLayer, errors).wait();
		}
				
		// calculate error gradient: launch one kernel per neuron, then wait
		// for the whole batch
		// TODO: move to separate function
		for (int i = 0; i < numNeurons; i++) {
			waits[i] = calculateErrorGradientAsync(i, errors, derivatives);
		}
		waitAll(numNeurons, &waits[0]);
	}
		
	// average over (samples x outputs); max(..., 1) guards an empty data set
	float factor = 1.0f / max(dataSetSize * outputCount, 1);
	scaleAsync(factor, derivatives, 0, parameterCount).wait();
	
	derivatives.beginRead(outDerivative, 0, parameterCount).wait();
		
	return totalErrorSq * factor;
}