#ifndef DLSKFNE_343JK45K3J4_DLK433
#define DLSKFNE_343JK45K3J4_DLK433

#define ACTIVE_FUNC_NONE		0
#define ACTIVE_FUNC_ELU			1
#define ACTIVE_FUNC_RELU		2
#define ACTIVE_FUNC_SIGMORD		3
#define ACTIVE_FUNC_SOFTMAX		4

#include "Core"
using namespace Eigen;

namespace PiaCNN
{
namespace ActFun
{

/************************************************
* See tensorflow::ops::Elu
* Computes exponential linear: 
*
* f(x) = a(exp(x) - 1)  if (x < 0)
*      = x              otherwise
*
* paper:
* See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
* https://arxiv.org/abs/1511.07289
*/
bool Elu(const MatrixXd &src, MatrixXd &dst)
{
	MatrixXd ZeroMat = MatrixXd::Zero(src.rows() , src.cols());
	MatrixXd OnesMat = MatrixXd::Ones(src.rows() , src.cols());
	MatrixXd PosiMat = src.cwiseMax(ZeroMat);
	MatrixXd NegaMat = src.cwiseMin(ZeroMat);
	MatrixXd E = NegaMat.array().exp();
	dst = E - OnesMat + PosiMat;
	return true;
}


/************************************************
* f(x) = 0  if (x < 0)
*      = x  otherwise
*/
bool Relu(const MatrixXd &src, MatrixXd &dst)
{
	MatrixXd ZeroMat = MatrixXd::Zero(src.rows() , src.cols());
	MatrixXd PosiMat = src.cwiseMax(ZeroMat);
	dst = ZeroMat + PosiMat;
	return true;
}


/************************************************
* softmax
* Every column is a feature vector. There are totally
* src.cols() vectors. Generally, the src.cols() = 1 in
* predict stage.
*/
bool softmax(const MatrixXd &src, MatrixXd &dst)
{
	dst = (src.array() ).exp();
	for (int c = 0; c < src.cols(); c++) {
		double dsum = dst.middleCols(c, 1).sum();
		dst.middleCols(c, 1) = dst.middleCols(c, 1) / dsum;
	}
	return true;
}


/************************************************
* sigmoid (element-wise logistic function)
* S(x) = 1 / (1 + exp(-x) )
*
* @param src  input matrix (any shape); read-only.
* @param dst  output matrix, resized to src's shape by the assignment;
*             every coefficient lies in (0, 1).
* @return     always true.
*
* NOTE: `inline` is required — this definition lives in a header, so
* without it every including translation unit would emit its own copy,
* violating the One Definition Rule (multiple-definition link error).
*/
inline bool sigmoid(const MatrixXd &src, MatrixXd &dst)
{
	// Array inverse() is the coefficient-wise reciprocal, so this is
	// exactly 1 / (1 + exp(-x)) applied element-wise.
	dst = (1.0 + (-src.array()).exp()).inverse().matrix();
	return true;
}



} // namespace ActFun
} // namespace PiaCNN

#endif
