﻿// Code for learning Restricted Boltzmann Machines
// By Mattias.Fagerlund@cortego.se
// http://sharprbm.codeplex.com/
//
// Refactored, optimized, and parallelized 
// By Christopher J. Hanson
//

// TODO: We don't allocate arrays any more, so these should be removed!!
// Can't dynamically allocate arrays, so these (<UpperCount> and <LowerCount>) are replaced from code
// Since they're already constants, we don't send them as arguments to methods anywhere
const __constant unsigned int LOWER_COUNT = <LowerCount>;
const __constant unsigned int UPPER_COUNT = <UpperCount>;

// L1 weight-decay coefficient used by UpdateWeights. The 'f' suffix keeps the
// constant single-precision; an unsuffixed literal is a double, which not all
// OpenCL devices support.
const __constant float WEIGHT_DECAY = 0.001f;

// Row-major index of the weight connecting node <lower> to node <upper>.
// Arguments are parenthesized so the macro also expands correctly when
// called with compound expressions (e.g. Position(i + 1, j)).
#define Position(lower,upper) ( UPPER_COUNT * (lower) + (upper) )

// Accumulates the contrastive-divergence error term for each weight.
// Launched on a 2D range: dim 0 = lower-layer node, dim 1 = upper-layer node.
__kernel void AccumulateErrors(
	__global float *testCase,
    __global float *detectorErrors,
    __global float *model,
    __global float *reconstructed,
    __global float *reconstructedModel,
	__global float *reconstructionErrors)
{
	int lower = get_global_id(0);
	int upper = get_global_id(1);

	// Positive phase: what the model should believe in.
	float positive = testCase[lower] * model[upper];

	// Negative phase: what the model actually believes in.
	float negative = reconstructed[lower] * reconstructedModel[upper];

	int pos = Position(lower, upper);
	detectorErrors[pos] += positive - negative;

	// The reconstruction error is per lower-layer node, not per weight,
	// so only the upper == 0 work-item records it.
	if (upper == 0)
	{
		reconstructionErrors[pos] += fabs(testCase[lower] - reconstructed[lower]);
	}
}

// Parallelized
// Parallelized
// Reduces the absolute detector errors of every weight into errorSums[0].
// Expected launch: 1D global range of LOWER_COUNT work-items; each work-item
// reduces one row of UPPER_COUNT entries into buffer[lower], then work-item 0
// serially folds the per-row partials into the final scalar.
// NOTE(review): barrier(CLK_GLOBAL_MEM_FENCE) only synchronizes work-items
// within a single work-group, so the final reduction is only correct when the
// whole range runs as one work-group — confirm the host-side launch config.
__kernel void SumErrorsDetector(
	__global float *detectorErrors,
	__global float *reconstructionErrors,
	__global float *errorSums,
	__global float *buffer)
{
	// Locals
	int lower = get_global_id(0);
	int offset = lower * UPPER_COUNT;  // start of this lower node's row

	// Initialize results (every work-item writes the same 0, so the value is race-free)
	errorSums[0] = 0;

	// Initialize slot
	buffer[lower] = 0;

	// Accumulate partial: fold this row's UPPER_COUNT errors into one slot
	for (int i= 0; i < UPPER_COUNT; i++)
		buffer[lower] += fabs(detectorErrors[offset + i]);

	// Wait for accumulators
	barrier( CLK_GLOBAL_MEM_FENCE );

	// Sum buffer: a single work-item folds the per-row partials
	if( lower == 0 )
		for (int i= 0; i < LOWER_COUNT; i++)
			errorSums[0] += buffer[i];
}


// Parallelized
// Parallelized
// Reduces the reconstruction errors into errorSums[1].
// Expected launch: 1D global range of LOWER_COUNT work-items (same scheme as
// SumErrorsDetector): per-row partials into buffer[], then work-item 0 folds.
// NOTE(review): barrier(CLK_GLOBAL_MEM_FENCE) only synchronizes work-items
// within a single work-group, so the final reduction is only correct when the
// whole range runs as one work-group — confirm the host-side launch config.
// NOTE(review): AccumulateErrors only ever writes reconstructionErrors at
// upper == 0, so the inner loop sums UPPER_COUNT - 1 zeroed entries per row;
// harmless (ClearDetectorErrors zeroes the whole array) but wasted work.
__kernel void SumErrorsRecon(
	__global float *detectorErrors,
	__global float *reconstructionErrors,
	__global float *errorSums,
	__global float *buffer)
{
	// Locals
	int lower = get_global_id(0);
	int offset = lower * UPPER_COUNT;  // start of this lower node's row

	// Initialize results (every work-item writes the same 0, so the value is race-free)
	errorSums[1] = 0;

	// Reset slot for next operation
	buffer[lower] = 0;

	// Accumulate partial
	for (int i= 0; i < UPPER_COUNT; i++)
		buffer[lower] += reconstructionErrors[offset + i];

	// Wait for accumulators again
	barrier( CLK_GLOBAL_MEM_FENCE );

	// One guy stay behind to sum buffer again
	if( lower == 0 )
		for (int i= 0; i < LOWER_COUNT; i++)
			errorSums[1] += buffer[i];
}


// Zeroes one slot of each error accumulator.
// Launched on a 2D range: dim 0 = lower node, dim 1 = upper node.
__kernel void ClearDetectorErrors(
	__global float *detectorError,
	__global float *reconstructionError)
{
	int pos = Position(get_global_id(0), get_global_id(1));

	detectorError[pos] = 0;
	reconstructionError[pos] = 0;
}


// Propagates activation from the lower to the upper layer with a stochastic
// binary output: each upper node fires (1) with probability equal to its
// sigmoid activation, otherwise stays off (0).
// Launched on a 1D range of UPPER_COUNT work-items; <seed> varies per call so
// each invocation draws a fresh random stream.
__kernel void ActivateLowerToUpperBinary_Binary(
	__global float *lowerValues, 
	__global float *upperValues, 
	__global float *weights,
	const float seed)

{
	int upper = get_global_id(0);

	// Node 0 is the bias unit: always on.
	if(upper==0)
	{
		upperValues[upper] = 1;
		return;
	}

    float ax = 0;

	// Accumulate the weighted input from every lower-layer node.
	for (int lower = 0; lower < LOWER_COUNT; lower++)
		ax += weights[Position(lower, upper)] * lowerValues[lower];

	// Seed the per-work-item random stream (helpers defined elsewhere in the project).
    random_t random = Seed( seed + upper );
    PsuedoRandom( &random, upper );
    NextUniform( &random );

	// Sigmoid activation. The 'f' suffixes keep the arithmetic in single
	// precision; unsuffixed literals are doubles, which silently promote the
	// expression and are unsupported on devices without the fp64 extension.
	float expectation = 1.0f / ( 1.0f + exp( -ax ) );

	// Stochastic binary: fire with probability <expectation>.
	upperValues[upper] = random.Value <= expectation ? 1.0f : 0.0f;
}

// Propagates activation from the lower to the upper layer, writing the raw
// sigmoid expectation (a value in (0,1)) instead of sampling a binary state.
// Launched on a 1D range of UPPER_COUNT work-items. <seed> is unused here but
// kept so all ActivateLowerToUpper* kernels share one signature.
__kernel void ActivateLowerToUpperBinary_Linear(
	__global float *lowerValues, 
	__global float *upperValues, 
	__global float *weights,
	const float seed)
{
	int upper = get_global_id(0);

	// Node 0 is the bias unit: always on.
	if(upper==0)
	{
		upperValues[upper] = 1;
		return;
	}

    float ax = 0;

	// Accumulate the weighted input from every lower-layer node.
	for (int lower = 0; lower < LOWER_COUNT; lower++)
		ax += weights[Position(lower, upper)] * lowerValues[lower];

	// Sigmoid activation; 'f' suffixes avoid implicit double promotion,
	// which is unsupported on devices without the fp64 extension.
	upperValues[upper] = 1.0f / ( 1.0f + exp( -ax ) );
}

// Propagates activation upward while stochastically binarizing the INPUTS:
// each lower value is treated as an on-probability, and a weight contributes
// only when its lower node samples "on". The output is the sigmoid of the
// sampled sum. Launched on a 1D range of UPPER_COUNT work-items.
__kernel void ActivateLowerToUpper_Binary(
	__global float *lowerValues, 
	__global float *upperValues, 
	__global float *weights,
	const float seed)
{
	int upper = get_global_id(0);

	// Node 0 is the bias unit: always on.
	if(upper==0)
	{
		upperValues[upper] = 1;
		return;
	}

    float ax = 0;


	// Seed the per-work-item random stream (helpers defined elsewhere in the project).
    random_t random = Seed( seed + upper );
    PsuedoRandom( &random, upper );
    
	for (int lower = 0; lower < LOWER_COUNT; lower++)
	{
		NextUniform( &random );

		float activation = lowerValues[lower];

		// Sample the lower node: include its weight with probability <activation>.
		if (random.Value <= activation)
			ax += weights[Position(lower, upper)];
	}

	// Sigmoid activation; 'f' suffixes avoid implicit double promotion,
	// which is unsupported on devices without the fp64 extension.
    upperValues[upper] = 1.0f / ( 1.0f + exp( -ax ) );
}


// Propagates activation from the lower to the upper layer deterministically:
// weighted sum of the lower values pushed through a sigmoid.
// Launched on a 1D range of UPPER_COUNT work-items. <seed> is unused here but
// kept so all ActivateLowerToUpper* kernels share one signature.
__kernel void ActivateLowerToUpper_Linear(
	__global float *lowerValues, 
	__global float *upperValues, 
	__global float *weights,
	const float seed)
{
	int upper = get_global_id(0);

	// Node 0 is the bias unit: always on.
	if(upper==0)
	{
		upperValues[upper] = 1;
		return;
	}

    float ax = 0;

	// Accumulate the weighted input from every lower-layer node.
	for (int lower = 0; lower < LOWER_COUNT; lower++)
		ax += weights[Position(lower, upper)] * lowerValues[lower];

	// Sigmoid activation; 'f' suffixes avoid implicit double promotion,
	// which is unsupported on devices without the fp64 extension.
    upperValues[upper] = 1.0f / ( 1.0f + exp( -ax ) );
}


// Reconstructs the lower layer from the upper layer: for each lower node,
// the weighted sum of the upper values pushed through a sigmoid.
// Launched on a 1D range of LOWER_COUNT work-items.
__kernel void ActivateUpperToLower(
	__global float *lowerValues, 
	__global float *upperValues, 
	__global float *weights)
{
	int lower = get_global_id(0);

	// Node 0 is the bias unit: always on.
	if(lower==0)
	{
		lowerValues[lower] = 1;
		return;
	}

	// Accumulate the weighted input from every upper-layer node.
	float ax = 0;
	for (int upper = 0; upper < UPPER_COUNT; upper++)
		ax += weights[Position(lower,upper)] * upperValues[upper];

	// Sigmoid activation; 'f' suffixes avoid implicit double promotion,
	// which is unsupported on devices without the fp64 extension.
    lowerValues[lower] = 1.0f / ( 1.0f + exp( -ax ) );
}


// Applies one learning step to a single weight: a gradient update from the
// accumulated detector errors plus L1 weight decay.
// Launched on a 2D range: dim 0 = lower node, dim 1 = upper node.
// epsilon[0] holds the learning rate.
__kernel void UpdateWeights(
	__global float *weights,
	__global float *detectorErrors, 
	__global float *epsilon)
{
	int pos = Position(get_global_id(0), get_global_id(1));
	float learningRate = epsilon[0];
	float current = weights[pos];

	// Gradient step from the accumulated detector errors.
	// (current should possibly be the sum of all weights that belong to the node — lower or upper?)
	float updated = current + detectorErrors[pos] * learningRate;

	// L1 weight decay: shrink the weight toward zero in proportion to |w|.
	updated -= fabs(current) * WEIGHT_DECAY * learningRate;
	// L2 alternative (disabled):
	// updated -= current * current * WEIGHT_DECAY * learningRate;

	// Sparsity experiment (disabled): keep driving the hidden-layer biases
	// (weights connected to lower == 0) down toward -4.
	/*if (get_global_id(0) == 0)
	{
		if (updated > -4)
		{
			updated -= 0.1;
		}
		else
		{
			updated = -4;
		}
	}*/

	// Optional clamp (disabled):
	// updated = fmax(-1.0, fmin(1.0, updated));

	weights[pos] = updated;
}
