/*

 * activation.c
 *
 *  Created on: Sep 4, 2013
 *      Author: Alan
 */
#include "activation.h"
#include "net.h"
#include "vectorizednet.h"

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

/*
 * Numerically stable log(sum(exp(a[i]))) over aSize values.
 * Shifting every term by the array maximum keeps exp() from
 * overflowing for large inputs.
 * Returns -INFINITY for an empty array (the log of an empty sum);
 * the old code read a[0] unconditionally, which is undefined
 * behavior when aSize == 0.
 */
float32 logsumexp(float32 *a, size_t aSize) {
	size_t i;
	float32 expValue;
	float32 aMax;

	if (aSize == 0) {
		return -INFINITY;
	}
	aMax = a[0];
	for (i = 1; i < aSize; i++) {
		aMax = fmax(aMax, a[i]);
	}
	expValue = .0f;
	for (i = 0; i < aSize; i++) {
		expValue += exp(a[i] - aMax);
	}

	return aMax + log(expValue);
}

/* Logistic sigmoid activation: maps the pre-activation a into (0, 1). */
float32 activationSigmoid(netdata_tp net, float32 a) {
	float32 e;

	e = exp(-1.0f * a);
	return 1.0f / (e + 1.0f);
}

/* Derivative of the logistic sigmoid: s * (1 - s) where s = sigmoid(a). */
float32 diffActivationSigmoid(netdata_tp net, float32 a) {
	float32 s = activationSigmoid(net, a);

	return s * (1.0f - s);
}

/* Hyperbolic tangent activation: maps the pre-activation a into (-1, 1). */
float32 activationTanh(netdata_tp net, float32 a) {
	return (float32) tanh(a);
}

/* Derivative of tanh: 1 - tanh(a)^2. */
float32 diffActivationTanh(netdata_tp net, float32 a) {
	float32 th = activationTanh(net, a);

	return 1.0f - th * th;
}

/* Identity activation: the output equals the pre-activation unchanged. */
float32 activationLinear(netdata_tp net, float32 a) {
	return a;
}

/* Derivative of the identity activation: the constant 1. */
float32 diffActivationLinear(netdata_tp net, float32 a) {
	return 1.0f;
}

/*
 * Naive softmax: exp(a) / sum_j exp(a_j), summing over every neuron
 * of the net's last (output) layer. Kept for reference; not stabilized.
 */
float32 activationSoftmax_(netdata_tp net, float32 a) {
	// WARNING: the unshifted exp() here can overflow for large
	// activations (original Italian note: "ATTENZIONE POTREBBE
	// CAUSARE OVERFLOW!!!!")
	size_t i;
	float32 denominator;

	// accumulate exp of each output-layer pre-activation
	denominator = .0f;
	for (i = 0; i < net->neurons[net->layersize - 1]; i++) {
		denominator += exp(getNeuron(net, net->layersize - 1, i)->a);
	}
	return exp(a) / denominator;
}

/*
 * Numerically stable softmax: exp(a - logsumexp(output-layer activations)).
 * Computes the logsumexp over the last layer's pre-activations directly,
 * instead of copying them into a temporary heap array as before — the old
 * code dereferenced an unchecked malloc() result. Same math, same result,
 * no allocation.
 * Assumes the output layer has at least one neuron (as the old code did).
 */
float32 activationSoftmax(netdata_tp net, float32 a) {
	size_t j;
	float32 aMax;
	float32 expSum;
	uint32 lastLayerSize;

	lastLayerSize = net->neurons[net->layersize - 1];
	/* shift by the layer maximum so exp() below cannot overflow */
	aMax = getNeuron(net, net->layersize - 1, 0)->a;
	for (j = 1; j < lastLayerSize; j++) {
		aMax = fmax(aMax, getNeuron(net, net->layersize - 1, j)->a);
	}
	expSum = .0f;
	for (j = 0; j < lastLayerSize; j++) {
		expSum += exp(getNeuron(net, net->layersize - 1, j)->a - aMax);
	}
	/* logsumexp = aMax + log(expSum); softmax = exp(a - logsumexp) */
	return exp(a - (aMax + log(expSum)));
}

/* Derivative of the softmax diagonal term: s * (1 - s) where s = softmax(a). */
float32 diffActivationSoftmax(netdata_tp net, float32 a) {
	float32 s = activationSoftmax(net, a);

	return s * (1 - s);
}

//activation functions
/*
 * Vectorized logistic sigmoid over the size pre-activations in a.
 * Returns a newly allocated array the caller must free(), or NULL if
 * allocation fails (the old code dereferenced malloc's result unchecked).
 */
float32* activationSigmoidV(vectorizedNet_tp vnet, float32* a, uint32 size) {
	uint32 i;
	float32 *z;

	z = malloc(sizeof(float32) * size);
	if (z == NULL) {
		return NULL;
	}
	for (i = 0; i < size; i++) {
		z[i] = 1.0f / (exp(-1.0f * a[i]) + 1.0f);
	}
	return z;
}
/*
 * Vectorized tanh over the size pre-activations in a.
 * Returns a newly allocated array the caller must free(), or NULL if
 * allocation fails (the old code dereferenced malloc's result unchecked).
 */
float32* activationTanhV(vectorizedNet_tp vnet, float32* a, uint32 size) {
	uint32 i;
	float32 *z;

	z = malloc(sizeof(float32) * size);
	if (z == NULL) {
		return NULL;
	}
	for (i = 0; i < size; i++) {
		z[i] = tanh(a[i]);
	}
	return z;
}
/*
 * Vectorized identity activation: returns a fresh copy of a.
 * Returns a newly allocated array the caller must free(), or NULL if
 * allocation fails (the old code passed malloc's result straight to
 * memcpy without checking — undefined behavior on failure).
 */
float32* activationLinearV(vectorizedNet_tp vnet, float32* a, uint32 size) {
	float32 *z;

	z = malloc(sizeof(float32) * size);
	if (z == NULL) {
		return NULL;
	}
	memcpy(z, a, size * sizeof(float32));
	return z;
}
/*
 * Vectorized numerically stable softmax: z[i] = exp(a[i] - logsumexp(a)).
 * Returns a newly allocated array the caller must free(), or NULL if
 * allocation fails (the old code dereferenced malloc's result unchecked).
 */
float32* activationSoftmaxV(vectorizedNet_tp vnet, float32* a, uint32 size) {
	uint32 i;
	float32 lse;
	float32 *z;

	/* shift by logsumexp so exp() cannot overflow */
	lse = logsumexp(a, size);

	z = malloc(sizeof(float32) * size);
	if (z == NULL) {
		return NULL;
	}
	for (i = 0; i < size; i++) {
		z[i] = exp(a[i] - lse);
	}
	return z;
}

//diff activation functions
/*
 * Vectorized sigmoid derivative: z[i] = s * (1 - s), s = sigmoid(a[i]).
 * Returns a newly allocated array the caller must free(), or NULL if
 * allocation fails (the old code dereferenced malloc's result unchecked).
 */
float32* diffActivationSigmoidV(vectorizedNet_tp vnet, float32* a, uint32 size) {
	uint32 i;
	float32 *z;
	float32 sigmoid;

	z = malloc(sizeof(float32) * size);
	if (z == NULL) {
		return NULL;
	}
	for (i = 0; i < size; i++) {
		sigmoid = 1.0f / (exp(-1.0f * a[i]) + 1.0f);
		z[i] = sigmoid * (1.0f - sigmoid);
	}
	return z;
}
/*
 * Vectorized tanh derivative: z[i] = 1 - tanh(a[i])^2.
 * Returns a newly allocated array the caller must free(), or NULL if
 * allocation fails (the old code dereferenced malloc's result unchecked).
 */
float32* diffActivationTanhV(vectorizedNet_tp vnet, float32* a, uint32 size) {
	uint32 i;
	float32 *z;
	float32 t;

	z = malloc(sizeof(float32) * size);
	if (z == NULL) {
		return NULL;
	}
	for (i = 0; i < size; i++) {
		t = tanh(a[i]);
		z[i] = 1.0f - t * t;
	}
	return z;
}
/*
 * Vectorized identity derivative: every element is the constant 1.
 * Returns a newly allocated array the caller must free(), or NULL if
 * allocation fails (the old code dereferenced malloc's result unchecked).
 */
float32* diffActivationLinearV(vectorizedNet_tp vnet, float32* a, uint32 size) {
	uint32 i;
	float32 *z;

	z = malloc(sizeof(float32) * size);
	if (z == NULL) {
		return NULL;
	}
	for (i = 0; i < size; i++) {
		z[i] = 1.0f;
	}
	return z;
}
/*
 * Vectorized softmax diagonal derivative: z[i] = (1 - s[i]) * s[i],
 * where s = softmax(a).
 * Returns a newly allocated array the caller must free(), or NULL if
 * either allocation fails (the old code dereferenced both malloc'd
 * pointers unchecked); the intermediate softmax buffer is always freed.
 */
float32* diffActivationSoftmaxV(vectorizedNet_tp vnet, float32* a, uint32 size) {
	uint32 i;

	float32 *z;
	float32 *s;

	s = activationSoftmaxV(vnet, a, size);
	if (s == NULL) {
		return NULL;
	}
	z = malloc(sizeof(float32) * size);
	if (z == NULL) {
		free(s);
		return NULL;
	}

	for (i = 0; i < size; i++) {
		//(1 - softmax) * softmax;
		z[i] = (1 - s[i]) * s[i];
	}
	free(s);
	return z;
}
