/*

 * activation.c
 *
 *  Created on: Sep 4, 2013
 *      Author: Alan
 */
#include "activation.h"
#include "jjann.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>

/*
 * Numerically stable log(sum(exp(a[i]))) over the first aSize elements.
 *
 * Shifts every exponent by the array maximum before calling exp(), so no
 * intermediate exp() can overflow (the standard log-sum-exp trick); the
 * shift is added back at the end.
 *
 * a     : input array, must hold at least aSize values
 * aSize : element count; for aSize == 0 the empty sum is 0, so -INFINITY
 *         (log 0) is returned instead of reading a[0] out of bounds.
 */
float32 logsumexp(float32 *a, size_t aSize) {
	size_t i;
	float32 expValue;
	float32 aMax;

	if (aSize == 0) {
		/* original code read a[0] here regardless — undefined behavior */
		return -INFINITY;
	}

	aMax = a[0];
	for (i = 1; i < aSize; i++) {
		aMax = fmax(aMax, a[i]);
	}
	expValue = .0f;
	for (i = 0; i < aSize; i++) {
		expValue += exp(a[i] - aMax);
	}

	return aMax + log(expValue);
}

/*
 * Logistic sigmoid activation: maps any real pre-activation value a
 * into the open interval (0, 1). The net parameter is unused here but
 * kept so all activation functions share one signature.
 */
float32 activationSigmoid(netdata_tp net, float32 a) {
	float32 negExp;

	negExp = exp(-a);
	return 1.0f / (1.0f + negExp);
}

/*
 * Derivative of the logistic sigmoid with respect to its input,
 * using the identity s'(a) = s(a) * (1 - s(a)).
 */
float32 diffActivationSigmoid(netdata_tp net, float32 a) {
	const float32 s = activationSigmoid(net, a);

	return (1.0f - s) * s;
}

/*
 * Hyperbolic tangent activation: maps any real pre-activation value a
 * into the open interval (-1, 1). net is unused (shared signature).
 */
float32 activationTanh(netdata_tp net, float32 a) {
	return tanh(a);
}

/*
 * Derivative of tanh with respect to its input, using the identity
 * tanh'(a) = 1 - tanh(a)^2.
 */
float32 diffActivationTanh(netdata_tp net, float32 a) {
	const float32 th = activationTanh(net, a);

	return 1.0f - th * th;
}

/*
 * Identity (linear) activation: the output equals the pre-activation
 * value unchanged. net is unused (shared signature).
 */
float32 activationLinear(netdata_tp net, float32 a) {
	return a;
}

/*
 * Derivative of the identity activation: constant 1 for every input.
 * Uses a float literal (1.0f) for consistency with the float32 return
 * type and the sibling activation functions (was the int literal 1).
 */
float32 diffActivationLinear(netdata_tp net, float32 a) {
	return 1.0f;
}

/*
 * Softmax of activation a over the output layer (legacy variant).
 *
 * Original code computed exp(a) / sum(exp(a_i)) directly; its comment
 * ("ATTENZIONE POTREBBE CAUSARE OVERFLOW" — warning, may cause overflow)
 * flagged that large activations overflow exp(). Fixed by shifting every
 * exponent by the maximum of a and the layer activations: the ratio is
 * mathematically unchanged, but every exp() argument is <= 0, so no
 * overflow can occur.
 */
float32 activationSoftmax_(netdata_tp net, float32 a) {
	size_t i;
	float32 aMax;
	float32 ai;
	float32 denominator;

	/* Seed with a itself so the shift also bounds exp(a - aMax). */
	aMax = a;
	for (i = 0; i < net->neurons[net->layersize - 1]; i++) {
		ai = getNeuron(net, net->layersize - 1, i)->a;
		if (ai > aMax) {
			aMax = ai;
		}
	}

	denominator = .0f;
	for (i = 0; i < net->neurons[net->layersize - 1]; i++) {
		denominator += exp(getNeuron(net, net->layersize - 1, i)->a - aMax);
	}
	return exp(a - aMax) / denominator;
}

/*
 * Numerically stable softmax of activation a over the output layer:
 * exp(a - logsumexp(layer activations)).
 *
 * The original implementation malloc'd a temporary copy of the layer
 * activations (and used the result without a NULL check) just to pass it
 * to logsumexp(). The log-sum-exp is instead computed inline in two
 * passes over the neurons — max, then shifted exponential sum — which
 * removes the unchecked allocation and the per-call heap traffic while
 * producing the mathematically identical result:
 *   exp(a - lse) == exp(a - aMax) / sum(exp(a_i - aMax)).
 */
float32 activationSoftmax(netdata_tp net, float32 a) {
	size_t j;
	float32 aMax;
	float32 aj;
	float32 expSum;
	uint32 lastLayerSize;

	lastLayerSize = net->neurons[net->layersize - 1];

	/* First pass: maximum activation (seeded with a so exp(a - aMax) <= 1). */
	aMax = a;
	for (j = 0; j < lastLayerSize; j++) {
		aj = getNeuron(net, net->layersize - 1, j)->a;
		if (aj > aMax) {
			aMax = aj;
		}
	}

	/* Second pass: sum of shifted exponentials; every argument is <= 0. */
	expSum = .0f;
	for (j = 0; j < lastLayerSize; j++) {
		expSum += exp(getNeuron(net, net->layersize - 1, j)->a - aMax);
	}

	return exp(a - aMax) / expSum;
}

/*
 * Diagonal term of the softmax Jacobian with respect to input a:
 * y * (1 - y), where y is the softmax output for a.
 */
float32 diffActivationSoftmax(netdata_tp net, float32 a) {
	const float32 y = activationSoftmax(net, a);

	return y * (1.0f - y);
}
