#ifndef DSLFJEWNLJFLWIEJF_234342342DEJNFER_
#define DSLFJEWNLJFLWIEJF_234342342DEJNFER_

#include "define.hpp"
#include "activeFun.hpp"
using namespace PiaCNN;

// class ////////////////////////////////////////
// A single 2-D convolution layer (padding='SAME', stride=1) built on Eigen.
// Data-layout convention: each image / feature map / kernel is flattened
// column-major into one column of a MatrixXd (see the comment blocks above
// the Malloc*() definitions below).
class PiaConv 
{
public:
	PiaConv();
	virtual ~PiaConv();
	
public:
	// Configure the layer geometry and allocate all working buffers.
	// Returns false on invalid arguments. NOTE(review): the padding and
	// stride arguments are overridden internally (padding = filterSize/2,
	// stride = 1), so the supplied values are currently ignored.
	bool Create(
				int _inputRows,
				int _inputCols,
				int _inputChan,
				int _filterSize1, 
				int _filterSize2,
				int _filterNumber,
				int _padding1  = 1, 
				int _padding2  = 1, 
				int _stride1   = 1, 
				int _stride2   = 1,
				int _activeFun = ACTIVE_FUNC_ELU,
			 string _name      = "");
	// Install weights W[inputChan][filterNumber] (one matrix per kernel) and
	// a 1 x nOutputChan bias row, converting them to the internal flat layout.
	bool AddWandB(const std::vector<std::vector<MatrixXd> > &W, 
	              const MatrixXd &B);
	// Same as AddWandB() but from already-flattened PiaConv2d* containers.
	bool LoadWandB(const PiaConv2dW &W,
	               const PiaConv2dB &B);
	// Forward pass: one matrix per input channel in, one per output channel out.
	bool Run(const std::vector<MatrixXd> &InputData, 
	               std::vector<MatrixXd> &dst);
	// Forward pass on flattened tensors (one channel per column).
	bool Run(PiaTensor &srcOri, 
	         PiaTensor &dstOri);
	// Print one kernel of one input channel (prints nothing if out of range).
	bool printW(int chan, int ker);
	// Dump the layer parameters and buffer shapes to stdout.
	void Print();
private:
	// temporary data (allocated once by Create(), reused on every Run())
	MatrixXd InputData;			// staging area for input maps (allocation disabled in Create())
	MatrixXd prepareKernels;	// all kernels, one flattened kernel per column
	MatrixXd prepareB;			// bias broadcast to one column per output map
	MatrixXd Padded;			// zero-padded copy of one input channel
	MatrixXd prepareMultMap;	// im2col matrix: one receptive field per row
	MatrixXd AfterMulti;		// per-(input,output)-channel partial products
	MatrixXd OutputMaps;		// final output, one flattened map per column
	
	// buffer-allocation helpers, called from Create()
	bool CalcuOutputParas();
	bool MallocKernelsMat();
	bool MallocB();
	bool MallocInputDatas();
	bool MallocPepareChan();
	bool MallocAfterMulti();
	bool MallocOutputMaps();
	
	// conversion helpers for AddWandB()
	bool CvtW(const std::vector<std::vector<MatrixXd> > &W, MatrixXd &dst);
	bool CvtB(const MatrixXd &B);
	
	// zero-pad one input channel and rebuild prepareMultMap from it
	bool PrepareInput(const MatrixXd& input);
	
	// activation functions
	bool Elu();
	bool Relu();

public:
	int nIdx;			// layer index assigned by the owner (-1 = unset)
	string name;		// layer name; dbg6() filters on it
	// input parameters
	int inputRows;
	int inputCols;
	int inputChan;
	int filterSize1; 
	int filterSize2;
	int filterNumber;
	int padding1; 
	int padding2; 
	int stride1; 
	int stride2;
	int activeFun;		// ACTIVE_FUNC_ELU / ACTIVE_FUNC_RELU
	
	// output parameters
	int nOutputRows;
	int nOutputCols;
	int nOutputChan;

private:
	int  dbg;			// set to 1 to enable the dbg*() trace output
	void dbg1();
	void dbg2();
	void dbg3();
	void dbg4(int c);
	void dbg5(const std::vector<MatrixXd> &dst);
	void dbg6();
};

/************************************************
* Default-construct an unconfigured layer; Create() must be called
* before the layer can be used.
*/
PiaConv::PiaConv()
{
	nIdx		= -1;
	name		= "";
	dbg 		= 0;
	inputRows 	= 0;
	inputCols 	= 0;
	inputChan 	= 0;
	filterSize1 = 0;
	filterSize2 = 0;
	filterNumber= 0;
	padding1 	= 0;
	padding2 	= 0;
	stride1 	= 0;
	stride2 	= 0;
	activeFun	= ACTIVE_FUNC_ELU;	// fixed: was left uninitialized (read by Run()'s switch)
	nOutputRows = 0;
	nOutputCols = 0;
	nOutputChan = 0;
}

// All members clean up after themselves (Eigen matrices / std::string),
// so the destructor has nothing to do.
PiaConv::~PiaConv() = default;

/************************************************
* Configure a convolution layer from the given input parameters.
* The main work is allocating the working buffers.
*/
bool PiaConv::Create(
				int _inputRows,
				int _inputCols,
				int _inputChan,
				int _filterSize1, 
				int _filterSize2,
				int _filterNumber,
				int _padding1  = 1, 
				int _padding2  = 1, 
				int _stride1   = 1, 
				int _stride2   = 1,
				int _activeFun = ACTIVE_FUNC_ELU,
			 string _name      = "")
{
	if (_inputRows < 3 || _inputCols < 3) {
		cout << ">> error. Create() input size" << endl;
		return false;
	}
	if (_inputChan < 1) {
		cout << ">> error. Create() input channel" << endl;
		return false;
	}
	if (_filterSize1 <= 0 || _filterSize2 <= 0) {
		cout << ">> error. Create() filter size" << endl;
		return false;
	}
	if (_filterNumber < 1) {
		cout << ">> error. Create() filter number" << endl;
		return false;
	}
	if (_inputRows < _filterSize1 || _inputCols < _filterSize2) {
		cout << ">> error. Create() input smaller than filter size" << endl;
		return false;
	}
	if (_filterSize1 % 2 == 0 || _filterSize2 % 2 == 0) {
		cout<< ">> error. filter size " 
			<<"must be an odd nuber, such as 3*3)" << endl;
		return false;
	}
	// Because the kernel size is an odd number, the padding will be symmetirc
	_padding1 = _filterSize1 / 2;
	_padding2 = _filterSize2 / 2;
	// So far, the stride must be 1
	_stride1  = 1;
	_stride2  = 1;
	name		= _name;
	inputRows 	= _inputRows;
	inputCols 	= _inputCols;
	inputChan	= _inputChan;
	filterSize1 = _filterSize1;
	filterSize2 = _filterSize2;
	filterNumber= _filterNumber;
	padding1 	= _padding1;
	padding2 	= _padding1;
	stride1 	= _stride1;
	stride2 	= _stride2;
	activeFun	= _activeFun;
	
	// calculate output parameters
	CalcuOutputParas();
	if (dbg == 1) dbg1(); // test code
	// malloc memory for various temporary variables
	MallocKernelsMat();
	MallocB();
//	MallocInputDatas();
	MallocPepareChan();
	MallocAfterMulti();
	MallocOutputMaps();
	if (dbg == 1) dbg2(); // test code
}

/************************************************
* According to the input data and the kernel parameters, 
* calculate the size(rows, cols) and channels of the output data.
*/
/************************************************
* From the input size and the kernel parameters, compute the output
* size (nOutputRows, nOutputCols) and channel count (nOutputChan).
*
* Uses the standard convolution formula (in + 2*pad - filter)/stride + 1.
* This is identical to the old "(in + 2*pad - filter + 1)/stride" for the
* stride-1 case Create() enforces, but is also correct for stride > 1.
*/
bool PiaConv::CalcuOutputParas()
{
	nOutputRows = (inputRows + padding1 * 2 - filterSize1) / stride1 + 1;
	nOutputCols = (inputCols + padding2 * 2 - filterSize2) / stride2 + 1;
	nOutputChan = filterNumber;	// one output map per filter
	return true;
}

/************************************************
* Define a matrix that used to store the kernels value
*
* each column is a kernel (filter)
* e.g. kernel size is 3*3, kernel number is 9, and the input channels is 7
* 
* <------input channel 1------->  <------input channel 2------->  ... <--channel 7 -->
* <ker1 >  <ker2 > ...,  <ker9 >  <ker1 >  <ker2 > ...,  <ker9 >  ... 
*
* v[0][0], v[0][0], ..., v[0][0] 
* v[1][0], v[1][0], ..., v[1][0]
* v[2][0], .
* v[0][1]      .
* v[1][1]          .
* v[2][1]              .
* v[0][2]
* v[1][2]
* v[2][2]
*
* Initialize randomly
* Random() - For floating point scalar types, the range is [-1:1]
*/
bool PiaConv::MallocKernelsMat()
{
	// One flattened kernel per column, grouped by input channel
	// (see the layout diagram above). Random() initializes in [-1, 1].
	const int kernelLen = filterSize1 * filterSize2;
	const int nKernels  = inputChan * filterNumber;
	prepareKernels = MatrixXd::Random(kernelLen, nKernels);
	return true;
}

/************************************************
* Allocate the bias matrix: one column per output channel, zero-initialized.
* (The real bias values are filled in later by CvtB()/LoadWandB().)
*/
bool PiaConv::MallocB()
{
	// One zero-initialized bias column per output channel; each scalar bias
	// is later broadcast over its whole column by CvtB()/LoadWandB().
	const int mapLen = nOutputRows * nOutputCols;
	prepareB = MatrixXd::Zero(mapLen, nOutputChan);
	return true;
}

/************************************************
* Define a matrix that used to store input data (images or feature maps)
* each column is an image (or a feature map)
* e.g. data size is rows = 128, cols = 256, channels = 5
* 1             2           , ..., 5
* Va[  0][  0], Vb[  0][  0], ..., Ve[  0][  0] 
* Va[  1][  0], Vb[  1][  0], ..., Ve[  1][  0]
* ...
* Va[127][  0],
* Va[  0][  1],
* Va[  1][  1],
* Va[  2][  1],
* ...
* Va[127][  1],
* Va[  0][  2],
* Va[  1][  2],
* Va[  2][  2],
* ...
* ...
* Va[127][255],
*
* Initialize : set zero for each pixel
*/
bool PiaConv::MallocInputDatas()
{
	// One flattened (column-major) input map per column, zero-initialized
	// (see the layout diagram above).
	const int mapLen = inputRows * inputCols;
	InputData = MatrixXd::Zero(mapLen, inputChan);
	return true;
}

/************************************************
* Padded is an enlarged image.
* so far, only support the method: padding='SAME'(tf.nn.conv2d() in TensorFlow)
*/
bool PiaConv::MallocPepareChan()
{
	// Padded: one input channel enlarged on every side (padding='SAME').
	const int padRows = inputRows + 2 * padding1;
	const int padCols = inputCols + 2 * padding2;
	Padded = MatrixXd::Zero(padRows, padCols);
	// prepareMultMap: im2col buffer, one receptive field per row.
	prepareMultMap = MatrixXd(nOutputRows * nOutputCols, filterSize1 * filterSize2);
	return true;
}

bool PiaConv::MallocAfterMulti()
{
	// Holds the partial product of every (input channel, output channel)
	// pair before the channel-wise summation done in Run().
	const int mapLen = nOutputRows * nOutputCols;
	AfterMulti = MatrixXd(mapLen, inputChan * nOutputChan);
	return true;
}

bool PiaConv::MallocOutputMaps()
{
	// One flattened output feature map per output channel.
	const int mapLen = nOutputRows * nOutputCols;
	OutputMaps = MatrixXd(mapLen, nOutputChan);
	return true;
}

/************************************************
* Install externally supplied weights and bias, converting them to the
* internal flat layout. Aborts on the first conversion that fails.
*/
bool PiaConv::AddWandB(const std::vector<std::vector<MatrixXd> > &W, 
                       const MatrixXd &B)
{
	if (!CvtW(W, prepareKernels)) {
		cout << ">> error. AddWandB() -> CvtW() return false" << endl;
		return false;
	}
	if (!CvtB(B)) {
		cout << ">> error. AddWandB() -> CvtB() return false" << endl;
		return false;
	}
	if (dbg == 1) dbg3(); // test code
	return true;
}

/************************************************
* Load already-flattened weights and bias.
* W.shape must be [filter_h, filter_w, in_channels, out_channels];
* B.data must be a 1 x nOutputChan row, each scalar being broadcast
* over the whole corresponding output-map column of prepareB.
* Returns false (with a message) on any shape mismatch.
*/
bool PiaConv::LoadWandB(const PiaConv2dW &W,
	                    const PiaConv2dB &B)
{
	if (W.shape[0] == filterSize1 &&
		W.shape[1] == filterSize2 &&
		W.shape[2] == inputChan &&
		W.shape[3] == filterNumber) {
		prepareKernels = W.data;
	}
	else {
		cout << ">> error. PiaConv::LoadWandB() W.shape != prepareKernels's shape" << endl;
		return false;
	}
	
	if (B.data.rows() == 1 &&
		B.data.cols() == nOutputChan) {
		for (int i = 0; i < nOutputChan; i++) {
			prepareB.middleCols(i, 1).setConstant(B.data(0, i));
		}
	}
	else {
		cout<< ">> error. PiaConv::LoadWandB() B.data.cols() = " << B.data.cols()
			<< ", Real = " << nOutputChan << endl;
		return false;	// fixed: a bias mismatch previously reported success
	}
	
	return true;
}

bool PiaConv::CvtW(const std::vector<std::vector<MatrixXd> > &W, MatrixXd &dst) 
{
	if ((int)W.size() != inputChan) {
		cout << ">> error. CvtW() inputChan" << endl;
		return false;
	}
	for (int i = 0; i < (int)W.size(); i++) {
		if ((int)W[i].size() != nOutputChan) {
			cout << ">> error. CvtW() nOutputChan, inputChan index = " << i << endl;
			return false;
		}
		for (int j = 0; j < (int)W[i].size(); j++) {
			if (prepareKernels.rows() != W[i][j].rows() * W[i][j].cols()) {
				cout << ">> error. CvtW() rows difference" << endl;
				return false;
			}
			prepareKernels.middleCols(i * nOutputChan + j, 1) = 
			MatrixXd::Map(W[i][j].data(), W[i][j].rows() * W[i][j].cols(), 1);
		}
	}

	return true;
}

/************************************************
* Convert a 1 x nOutputChan bias row into prepareB by broadcasting each
* scalar over its whole output-map column. Returns false on a size mismatch.
*/
bool PiaConv::CvtB(const MatrixXd &B)
{
	if (B.rows() != 1 || B.cols() != nOutputChan) {
		cout<< ">> error. CvtB() size difference. Model = " 
			<< nOutputChan << ", Real = " << B.cols() << endl;
		return false;
	}
	for (int ch = 0; ch < nOutputChan; ch++) {
		prepareB.col(ch).setConstant(B(0, ch));
	}
	return true;
}

/************************************************
* Forward pass on flattened tensors: src holds one column per input channel;
* dst receives one column per output channel plus the output shape.
*/
bool PiaConv::Run(PiaTensor &srcOri, PiaTensor &dstOri)
{
	// NOTE(review): raw pointer cast assumes PiaTensor and PiaConv2dData are
	// layout-compatible — confirm against their declarations in define.hpp.
	PiaConv2dData *src = (struct PiaConv2dData*)&srcOri;
	PiaConv2dData *dst = (struct PiaConv2dData*)&dstOri;
	if (src->IsValid() == false) {
		cout << ">> error. Run() IsValid() return false" << endl;
		return false;
	}
	// process the input channels one by one: un-flatten column c into a
	// shape[1] x shape[2] map, im2col it, and multiply by that channel's
	// block of kernels. Assumes src->data.cols() == inputChan — TODO confirm.
	for (int c = 0; c < src->data.cols(); c++) {
		PrepareInput(MatrixXd::Map(src->data.middleCols(c, 1).data(), 
		                           src->shape[1], 
		                           src->shape[2]));
		AfterMulti.middleCols(c * nOutputChan, nOutputChan) = 
		prepareMultMap * prepareKernels.middleCols(c * nOutputChan, nOutputChan);
	}
	// output data: sum the per-input-channel partial results
	OutputMaps = MatrixXd::Zero(nOutputRows * nOutputCols, nOutputChan);
	for (int i = 0; i < inputChan; i++) {
		OutputMaps += AfterMulti.middleCols(i * nOutputChan, nOutputChan);
	}
	// bias
	OutputMaps = OutputMaps + prepareB;
	
	// activation (no default case: an unrecognized activeFun applies none)
	switch (activeFun) {
	case ACTIVE_FUNC_ELU: {
		Elu();
		break;
	}
	case ACTIVE_FUNC_RELU:{
		Relu();
		break;
	}
	}

	// publish the result and its shape; shape[0] is left untouched
	dst->data  = OutputMaps;
	dst->shape[1] = nOutputRows;
	dst->shape[2] = nOutputCols;
	dst->shape[3] = nOutputChan;

	return true;
}

/************************************************
* Forward pass: InputData holds one inputRows x inputCols matrix per input
* channel; dst receives one nOutputRows x nOutputCols map per output channel.
* Returns false (with a message) on a channel-count or size mismatch.
*/
bool PiaConv::Run(const std::vector<MatrixXd> &InputData, 
                        std::vector<MatrixXd> &dst)
{
	if ((int)InputData.size() != inputChan) {
		cout << ">> error. run() input data channels" << endl;
		return false;
	}
	for (int i = 0; i < (int)InputData.size(); i++) {
		if (InputData[i].rows() != inputRows || InputData[i].cols() != inputCols) {
			cout<< ">> error. run() input data size" 
				<< "Model = " << inputRows << ", " << inputCols << ", " 
				<< "Real = " << InputData[i].rows() << ", " << InputData[i].cols() 
				<< endl;
			return false;
		}
	}

	// process the input channels one by one: im2col each map and multiply
	// by that channel's block of kernels
	for (int i = 0; i < (int)InputData.size(); i++) {
		PrepareInput(InputData[i]);
		AfterMulti.middleCols(i * nOutputChan, nOutputChan) = 
		prepareMultMap * prepareKernels.middleCols(i * nOutputChan, nOutputChan);
		if (dbg == 1) dbg4(i);
	}
	// output data: sum the per-input-channel partial results
	OutputMaps = MatrixXd::Zero(nOutputRows * nOutputCols, nOutputChan);
	for (int i = 0; i < inputChan; i++) {
		OutputMaps += AfterMulti.middleCols(i * nOutputChan, nOutputChan);
	}
	// bias
	OutputMaps = OutputMaps + prepareB;
	
	// activation — fixed: honor the configured activeFun exactly like the
	// PiaTensor overload of Run(); this path previously always ran Elu(),
	// silently ignoring ACTIVE_FUNC_RELU layers.
	switch (activeFun) {
	case ACTIVE_FUNC_ELU: {
		Elu();
		break;
	}
	case ACTIVE_FUNC_RELU:{
		Relu();
		break;
	}
	}
	
	// reshape the output data: one nOutputRows x nOutputCols map per channel
	dst.clear();
	for (int i = 0; i < OutputMaps.cols(); i++) {
		MatrixXd curFM= MatrixXd::Map(&OutputMaps.col(i)[0], nOutputRows, nOutputCols);
		dst.push_back(curFM);
	}
	if (dbg == 1) dbg5(dst);
	return true;
}

void PiaConv::dbg6()
{
	if (name.compare("Conv0") == 0) {
		for (int i = 0; i < OutputMaps.cols(); i++) {
			MatrixXd curFM= MatrixXd::Map(&OutputMaps.col(i)[0], nOutputRows, nOutputCols);
			cout << ">> " << name.c_str() << ", channel " << i << endl;
			cout << curFM << endl;
		}
	}
}

/************************************************
* According to the kernel parameter [filter_height, filter_width, in_channels, out_channels], 
* prepare the input data
*/
/************************************************
* im2col-style expansion of one input channel: embed 'input' in the
* zero-padded buffer, then gather every filterSize1 x filterSize2 receptive
* field into one row of prepareMultMap, so that the whole convolution
* becomes a single matrix product in Run().
*/
bool PiaConv::PrepareInput(const MatrixXd& input)
{
	// re-zero the border, then copy the map into the padded interior
	Padded = MatrixXd::Zero(Padded.rows(), Padded.cols());
	Padded.block(padding1, padding2, input.rows(), input.cols()) = input;

	// Column (n2 * filterSize1 + n1) of prepareMultMap holds, for every
	// output position, the padded pixel at kernel offset (n1, n2) — the same
	// column-major order used to flatten the kernels in CvtW().
	for (int n2 = 0; n2 < filterSize2; n2++) 
	{
		for (int n1 = 0; n1 < filterSize1; n1++) 
		{
			for (int j = 0; j < nOutputCols; j += 1) 
			{
				for (int i = 0; i < nOutputRows; i += 1) 
				{
					// row index (j * nOutputRows + i): column-major over the
					// output positions, matching the flattened output maps
					prepareMultMap(j * nOutputRows + i, n2 * filterSize1 + n1) = 
					Padded(i * stride1 + n1, j* stride2 + n2);
				}
			}
		}
	}
	return true;
}

/************************************************
* See tensorflow::ops::Elu
* Computes exponential linear: 
*
* f(x) = a(exp(x) - 1)  if (x < 0)
*      = x              otherwise
*
* paper:
* See Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
* https://arxiv.org/abs/1511.07289
*/
bool PiaConv::Elu()
{
	for (int i = 0; i < nOutputChan; i++) {
		MatrixXd ZeroMat = MatrixXd::Zero(nOutputRows * nOutputCols, 1);
		MatrixXd OnesMat = MatrixXd::Ones(nOutputRows * nOutputCols, 1);

		MatrixXd PosiMat = OutputMaps.middleCols(i, 1).cwiseMax(ZeroMat);
		MatrixXd NegaMat = OutputMaps.middleCols(i, 1).cwiseMin(ZeroMat);

		MatrixXd E = NegaMat.array().exp();
		
		OutputMaps.middleCols(i, 1) = E - OnesMat + PosiMat;
	}
	return true;
}

/************************************************
* f(x) = 0  if (x < 0)
*      = x  otherwise
*/
/************************************************
* In-place ReLU over OutputMaps:
*
* f(x) = 0  if (x < 0)
*      = x  otherwise
*/
bool PiaConv::Relu()
{
	// element-wise max with 0 across every output channel at once
	OutputMaps = OutputMaps.cwiseMax(0.0);
	return true;
}

// Dump every configured layer parameter, one aligned line per value.
void PiaConv::dbg1()
{
	const int n = 9;
	auto row = [&](const char *label, int value) {
		cout << label << setw(n) << value << endl;
	};
	cout << ">>                  " << endl;
	row(">> inputRows.......:", inputRows);
	row(">> inputCols.......:", inputCols);
	row(">> inputChan.......:", inputChan);
	row(">> filterSize1.....:", filterSize1);
	row(">> filterSize2.....:", filterSize2);
	row(">> filterNumber....:", filterNumber);
	row(">> padding1........:", padding1);
	row(">> padding2........:", padding2);
	row(">> stride1.........:", stride1);
	row(">> stride2.........:", stride2);
	cout << ">>                  " << endl;
	row(">> nOutputRows.....:", nOutputRows);
	row(">> nOutputCols.....:", nOutputCols);
	row(">> nOutputChan.....:", nOutputChan);
}

void PiaConv::dbg2()
{
	int n = 9;
	cout << ">>                  " << endl;
	cout << ">> prepareKernels..:" << setw(n) << prepareKernels.rows() 
								   << setw(n) << prepareKernels.cols() << endl;
	cout << ">> AfterMulti......:" << setw(n) << AfterMulti.rows() 
								   << setw(n) << AfterMulti.cols() << endl;
	cout << ">> OutputMaps......:" << setw(n) << OutputMaps.rows() 
								   << setw(n) << OutputMaps.cols() << endl;
}

// Dump the flattened kernel and bias matrices for inspection.
void PiaConv::dbg3()
{
	cout << ">> prepareKernels..:" << endl << prepareKernels << endl;
	cout << ">> prepareB........:" << endl << prepareB << endl;
}

// Dump the padded input and the im2col buffer for input channel c.
void PiaConv::dbg4(int c)
{
	const int n = 9;
	cout << ">> ---------channel:" << setw(n) << c << endl;
	cout << ">>           padded:" << endl << Padded << endl;
	cout << ">>   prepareMultMap:" << endl << prepareMultMap << endl;
}

void PiaConv::dbg5(const std::vector<MatrixXd> &dst)
{
	cout << ">> ----------------:" << endl;
	cout << ">>       AfterMulti:" << endl;
	cout << AfterMulti << endl;
	cout << ">>       OutputMaps:" << endl;
	cout << OutputMaps << endl;
	for (int i = 0; i < (int)dst.size(); i++) {
		cout << ">>              dst:" << i << endl;
		cout << dst[i] << endl;
	}
}

// Public convenience wrapper: dump the layer parameters (dbg1) and the
// working-buffer shapes (dbg2) to stdout.
void PiaConv::Print()
{
	dbg1();
	dbg2();
}
 
/************************************************
* Print one kernel (input channel 'chan', kernel index 'ker') as a
* filterSize1 x filterSize2 matrix. A negative index is replaced by the
* (out-of-range) count, which fails the range check below, so nothing is
* printed in that case. Returns true iff a kernel was printed.
*/
bool PiaConv::printW(int chan, int ker)
{
	if (chan < 0) chan = inputChan;
	if (ker < 0) ker = filterNumber;
	if (chan >= 0 &&
		chan < inputChan &&
		ker >= 0 &&
		ker < filterNumber) {
		// un-flatten the kernel column back into its 2-D shape
		MatrixXd curFM= MatrixXd::Map(
			&prepareKernels.col(chan * filterNumber + ker)[0], 
			filterSize1, 
			filterSize2);
		cout << curFM << endl;
		return true;
	}
	return false;	// fixed: bool function previously fell off the end (UB)
}













#endif
