#ifndef DSLDKJLKNF_2342N2KN34K2JN42D_
#define DSLDKJLKNF_2342N2KN34K2JN42D_

#include "define.hpp"
#include "activeFun.hpp"
using namespace PiaCNN;

// Fully-connected (dense) neural-network layer: dst = activation(W*src + B).
class PiaFull
{
public:
	PiaFull();
	virtual ~PiaFull();
	
public:
	// Configure the layer geometry and activation; allocates zeroed W and B.
	bool Create(int nVectorLen, int nNeuralNum, int nActiveFunc, string name = "");
	// BUG FIX: a member declared inside its own class must not carry the
	// "PiaFull::" qualifier -- that is ill-formed C++ (rejected by
	// conforming compilers). Qualifier removed; definition is unchanged.
	bool LoadWandB(PiaFullW &W, 
				   PiaFullB &B);
	// Validate dimensions and copy the weight matrix / bias vector in.
	bool LoadWB(const MatrixXd &W, const VectorXd &B);
	// Forward pass on a matrix whose columns are individual samples.
	bool RunImgs(MatrixXd &src, 
	             MatrixXd &dst);
	// Forward pass on generic tensors (flattens conv-layer input if needed).
	bool Run(PiaTensor &srcOri,
	         PiaTensor &dstOri);
	
	// Single-sample activations (one column vector).
	VectorXd softmax(const VectorXd &src);
	VectorXd sigmoid(const VectorXd &src);
	
	// Batched activations applied column-wise.
	bool softmax(const MatrixXd &src, MatrixXd &dst);
	bool sigmoid(const MatrixXd &src, MatrixXd &dst);
	bool    Relu(const MatrixXd &src, MatrixXd &dst);
	bool     Elu(const MatrixXd &src, MatrixXd &dst);
	
	void Print();
	
private:
	int nIdx;           // layer index (unused by this file; set to -1)
	string name;        // optional human-readable layer name
	
	int nVectorLen;     // length of one input feature vector (== W.cols())
	int nNeuralNum;     // number of neurons (== W.rows() == B.rows())
	int nActiveFunc;    // one of the ACTIVE_FUNC_* constants
	
	MatrixXd W;         // weights, nNeuralNum x nVectorLen
	VectorXd B;         // bias, nNeuralNum x 1
	
	void dbg0();
	void dbg1();
	void dbg2(const MatrixXd &src, const MatrixXd &dst);
};


// Default-construct an unconfigured layer; Create() must be called before use.
PiaFull::PiaFull()
	: nIdx(-1),
	  name(""),
	  nVectorLen(0),
	  nNeuralNum(0),
	  nActiveFunc(ACTIVE_FUNC_NONE)
{
}

// Nothing to release explicitly: W and B (Eigen objects) and name
// clean up through their own destructors.
PiaFull::~PiaFull()
{

}

bool PiaFull::Create(int nVectorLen, int nNeuralNum, int nActiveFunc, string name)
{
	if (nVectorLen < 1) {
		cout << ">> error. nVectorLen = " << nVectorLen << endl;
		return false;
	}
	if (nNeuralNum < 1) {
		cout << ">> error. nNeuralNum = " << nVectorLen << endl;
		return false;
	}
	if (nActiveFunc != ACTIVE_FUNC_ELU && 
		nActiveFunc != ACTIVE_FUNC_RELU &&
		nActiveFunc != ACTIVE_FUNC_SIGMORD && 
		nActiveFunc != ACTIVE_FUNC_SOFTMAX) {
			cout << ">> error. nActiveFunc = " << nActiveFunc << endl;
			return false;
	}
	this->nActiveFunc = nActiveFunc;
	this->nVectorLen = nVectorLen;
	this->nNeuralNum = nNeuralNum;
	this->name = name;
	
	W = MatrixXd::Zero(nNeuralNum, nVectorLen);
	B = VectorXd::Zero(nNeuralNum);
//	dbg0();
	return true;
}

// Debug dump of the layer geometry: configured sizes and the actual
// dimensions of W and B.
void PiaFull::dbg0()
{
	cout << "-----------------------" << endl;
	cout << "nVectorLen            :" << setw(5) << nVectorLen << endl;
	cout << "nNeuralNum            :" << setw(5) << nNeuralNum << endl;
	cout << "W                     :" << setw(5) << W.rows() << setw(5) << W.cols() << endl;
	cout << "B                     :" << setw(5) << B.rows() << setw(5) << B.cols() << endl;
}

// Thin adapter: unwrap the PiaFullW / PiaFullB containers and defer to
// the Eigen-based loader, which performs all dimension validation.
bool PiaFull::LoadWandB(PiaFullW &W, PiaFullB &B)
{
	const bool ok = LoadWB(W.data, B.data);
	return ok;
}

// Copy the given weights and bias into the layer after checking that
// their dimensions match the configuration set by Create().
// Returns false (with a message on cout) on any mismatch.
bool PiaFull::LoadWB(const MatrixXd &W, const VectorXd &B)
{
	if (W.rows() != nNeuralNum) {
		// Consistency: report the actual values, matching the detailed
		// message style used for the W.cols() check below.
		cout << ">> error. "
			 << "W.rows() = " << W.rows() << ", "
			 << "nNeuralNum = " << nNeuralNum
			 << endl;
		return false;
	}
	if (W.cols() != nVectorLen) {
		cout << ">> error. "
			 << "W.cols() = " << W.cols() << ", "  
			 << "nVectorLen = " << nVectorLen
			 << endl;
		return false;
	}
	if (B.rows() != nNeuralNum) {
		// BUG FIX: this message previously said "W.rows() != nNeuralNum"
		// although the failing check is on B.rows().
		cout << ">> error. "
			 << "B.rows() = " << B.rows() << ", "
			 << "nNeuralNum = " << nNeuralNum
			 << endl;
		return false;
	}
	
	this->W = W;
	this->B = B;
//	dbg1();
	return true;
}

// Debug dump of the currently loaded weight matrix and bias vector.
void PiaFull::dbg1()
{
	cout << "---------- W ----------" << endl << W << endl;
	cout << "---------- B ----------" << endl << B << endl;
}

/************************************************
* For the connection between convolutional layer and 
* full connection layer, we need convert the data
* structure from PiaConv2dData to PiaFullData.
*
* NOTE(review): the branch condition treats ANY nonzero entry in
* shape[1..3] as "input is still in conv-tensor layout"; shape[0] is
* never inspected -- confirm this matches how producers fill
* PiaTensor::shape.
*/
bool PiaFull::Run(PiaTensor &srcOri,
                  PiaTensor &dstOri)
{
	if (srcOri.shape[1] > 0 ||
		srcOri.shape[2] > 0 ||
		srcOri.shape[3] > 0)
		{
		// Reinterpret the generic tensors as conv-layer input / dense-layer
		// buffers, then flatten the feature maps into column vectors.
		PiaConv2dData *src = (struct PiaConv2dData*)&srcOri;
		PiaFullData *dst = (struct PiaFullData*)&dstOri;
		*dst = cvtTensor2Vector(*src);
		// NOTE(review): RunImgs is fed src->data (the pre-conversion conv
		// buffer), not the flattened dst->data produced one line above --
		// verify this is intentional; it looks like it may be a bug.
		if (false == RunImgs(src->data, dst->data) ) {
			return false;
		}
	}
	else {
		// Input is already a flat vector batch; run the layer directly.
		PiaFullData *src = (struct PiaFullData*)&srcOri;
		PiaFullData *dst = (struct PiaFullData*)&dstOri;
		if (false == RunImgs(src->data, dst->data) ) {
			return false;
		}
	}
	// Mark the output as flat (vector) layout for downstream layers.
	for (int i = 0; i < 4; i++) dstOri.shape[i] = 0;
	return true;
}

/************************************************
* For full connection layer, src.size() = dst.size() = 1
* src - is a matrix. Every column is a feature vector.  
*       There are totally src.cols() samples.
        src.rows() is elements number of sample. 
* dst - rows() == neurals number
		Every column is a feature vector
        cols() == samples number
*/
bool PiaFull::RunImgs(MatrixXd &src, MatrixXd &dst)
{
	// W is (neurons x vectorLen), src is (vectorLen x samples),
	// B is (neurons x 1): all three must agree before multiplying.
	if (W.cols() != src.rows() || W.rows() != B.rows()) {
		cout << ">> error. Info: " << endl;
		cout << "W.rows()   = " << setw(8) << W.rows() << endl;
		cout << "W.cols()   = " << setw(8) << W.cols() << endl;
		cout << "B.rows()   = " << setw(8) << B.rows() << endl;
		cout << "src.rows() = " << setw(8) << src.rows() << endl;
		return false;
	}

	// Affine transform: add the bias vector to every column of W*src.
	MatrixXd tmp = (W * src).colwise() + B;
	
	switch (nActiveFunc) 
	{
		case ACTIVE_FUNC_ELU: {
			Elu (tmp, dst);
			break;
			}
		case ACTIVE_FUNC_RELU: {
			Relu(tmp, dst);
			break;
		}
		case ACTIVE_FUNC_SIGMORD: {
			// BUG FIX: previously called the VectorXd overload via implicit
			// conversion, which fails for batches with more than one column.
			// Use the matrix overload, consistent with the ELU/RELU cases.
			sigmoid(tmp, dst);
			break;
		}
		case ACTIVE_FUNC_SOFTMAX: {
			// BUG FIX: same as above -- use the column-wise matrix overload.
			softmax(tmp, dst);
			break;
		}
		default: {
			// BUG FIX: an unknown activation previously fell through
			// silently, returning true with dst never written.
			cout << ">> error. nActiveFunc = " << nActiveFunc << endl;
			return false;
		}
	}
//	dbg2(src, dst);
	return true;
}

// Debug dump of one forward pass: the input batch and the produced output.
void PiaFull::dbg2(const MatrixXd &src, const MatrixXd &dst)
{
	cout << "---------- Input ------" << endl << src << endl;
	cout << "---------- Output -----" << endl << dst << endl;
}

/************************************************
* typedef Matrix< double , Dynamic , 1> Eigen::VectorXd
*
* Softmax over a single sample: rst[i] = exp(src[i]) / sum_j exp(src[j]).
*/
VectorXd PiaFull::softmax(const VectorXd &src)
{
	// ROBUSTNESS FIX: subtract the max coefficient before exponentiating.
	// Mathematically a no-op (the factor exp(-max) cancels in the ratio)
	// but it prevents exp() overflow to inf/NaN for large inputs.
	ArrayXd shifted = src.array() - src.maxCoeff();
	ArrayXd e = shifted.exp();
	VectorXd rst = (e / e.sum()).matrix();
	return rst;
}

bool PiaFull::softmax(const MatrixXd &src, MatrixXd &dst)
{
	dst = (src.array() ).exp();
	for (int c = 0; c < src.cols(); c++) {
		double dsum = dst.middleCols(c, 1).sum();
		dst.middleCols(c, 1) = dst.middleCols(c, 1) / dsum;
	}
	return true;
}

/************************************************
* s(x) = 1 / (1 + exp(-x) )
*/
VectorXd PiaFull::sigmoid(const VectorXd &src) // single sample
{
	// Element-wise logistic function on one column vector.
	ArrayXd negExp = (-src.array()).exp();
	VectorXd rst = (1.0 / (1.0 + negExp)).matrix();
	return rst;
}

bool PiaFull::sigmoid(const MatrixXd &src, MatrixXd &dst) 
{
	dst = 1.0 / (1.0 + (-src.array()).exp());
	return true;
}

/************************************************
* See tensorflow::ops::Elu
* Computes exponential linear: 
*
* f(x) = a(exp(x) - 1)  if (x < 0)
*      = x              otherwise
*
* paper:
* See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
* https://arxiv.org/abs/1511.07289
*/
bool PiaFull::Elu(const MatrixXd &src, MatrixXd &dst)
{
	MatrixXd ZeroMat = MatrixXd::Zero(src.rows() , src.cols());
	MatrixXd OnesMat = MatrixXd::Ones(src.rows() , src.cols());
	MatrixXd PosiMat = src.cwiseMax(ZeroMat);
	MatrixXd NegaMat = src.cwiseMin(ZeroMat);
	MatrixXd E = NegaMat.array().exp();
	dst = E - OnesMat + PosiMat;
	return true;
}

/************************************************
* f(x) = 0  if (x < 0)
*      = x  otherwise
*/
bool PiaFull::Relu(const MatrixXd &src, MatrixXd &dst)
{
	MatrixXd ZeroMat = MatrixXd::Zero(src.rows() , src.cols());
	MatrixXd PosiMat = src.cwiseMax(ZeroMat);
	dst = ZeroMat + PosiMat;
	return true;
}

// Print a human-readable summary of the layer configuration:
// input length, neuron count, and the activation function's name.
void PiaFull::Print()
{
	int n = 9;
	string str = "";
	if      (nActiveFunc == ACTIVE_FUNC_RELU) 
		str = "relu";
	else if (nActiveFunc == ACTIVE_FUNC_ELU)
		str = "elu";
	else if (nActiveFunc == ACTIVE_FUNC_SOFTMAX)
		str = "softmax";   // BUG FIX: was misspelled "sotfmax"
	else if (nActiveFunc == ACTIVE_FUNC_SIGMORD)
		str = "sigmoid";
	else
		str = "wrong";
	
	cout << "nVectorLen         :" << setw(n) << nVectorLen << endl;
    cout << "nNeuralNum         :" << setw(n) << nNeuralNum << endl;
    cout << "nActiveFunc        :" << setw(n) << str << endl;
}

#endif
