#ifndef SLDKFJOWNKJS_23JBK23626BK2D_454542_DWD
#define SLDKFJOWNKJS_23JBK23626BK2D_454542_DWD

#include <iostream>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <dirent.h>
#include <unistd.h>
#include <vector>
#include <sstream>
#include <fstream>
#include <sys/io.h>
#include <sys/times.h>
#include <iomanip>
#include <tuple>
#include <cstdlib>
using namespace std;

#include "Core"
using namespace Eigen;


/************************************************
* About cnpy, Please consult: https://github.com/rogersce/cnpy.git 
*
* npz_load(fname,varname) will load and return the NpyArray for 
* data varname from the specified .npz file.
* 
The data structure for loaded data is below. 
Data is accessed via the data<T>() method, which returns 
a pointer of the specified type (which must match the underlying 
datatype of the data). The array shape and 
word size are read from the npy header.

	struct NpyArray {
		std::vector<size_t> shape;
		size_t word_size;
		template<typename T> T* data();
	};
*/
#include "cnpy.h"
using namespace cnpy;


/************************************************
* namespace
*/
namespace PiaCNN 
{

/************************************************
* base data structure
*/
struct PiaTensor 
{
	// Flattened payload: each column holds one image / feature map,
	// flattened column-major as a shape[1] x shape[2] matrix.
	MatrixXd data;
	// Dims in Tensorflow NHWC order: [batch, height, width, channels].
	int shape[4];
	
	PiaTensor() {
		for (int d = 0; d < 4; d++)
			shape[d] = 0;
	}
	
	// Pack a stack of images into this tensor (one image per column).
	// Returns false for an empty vector, degenerate sizes, or images
	// whose dimensions do not all match the first one.
	bool FromImgs(std::vector<MatrixXd> &Images)
	{
		const int nImgs = (int)Images.size();
		if (nImgs <= 0)
			return false;
		for (int idx = 0; idx < nImgs; idx++)
			if (Images[idx].rows() <= 0 || Images[idx].cols() <= 0)
				return false;
		// Record the shape before the size-consistency pass (matches the
		// original behavior of partially filling shape on failure).
		shape[0] = 1;
		shape[1] = Images[0].rows();
		shape[2] = Images[0].cols();
		shape[3] = nImgs;
		for (int idx = 0; idx < nImgs; idx++)
			if (Images[idx].rows() != shape[1] ||
				Images[idx].cols() != shape[2])
				return false;
		const int nPix = shape[1] * shape[2];
		data = MatrixXd(nPix, nImgs);
		// One flattened image per column (Eigen default storage order).
		for (int idx = 0; idx < nImgs; idx++)
			data.middleCols(idx, 1) = MatrixXd::Map(Images[idx].data(), nPix, 1);
		return true;
	}
};

/************************************************
* weight matrix structure for 
* convolutional layer
* We show an example to illustrate how the data is 
* stored in this struct.
* e.g. kernel(filter) kernel_height       = 3
*                     kernel_width        = 3
*                     kernel_number       = 9
*                     input_data_channels = 7
* 
* <------input channel 1------->  <------input channel 2------->  ... <--channel 7 -->
* <ker1 >  <ker2 > ...,  <ker9 >  <ker1 >  <ker2 > ...,  <ker9 >  ... 
*
* v[0][0], v[0][0], ..., v[0][0] 
* v[1][0], v[1][0], ..., v[1][0]
* v[2][0], .
* v[0][1]      .
* v[1][1]          .
* v[2][1]              .
* v[0][2]
* v[1][2]
* v[2][2]
*
* Contrast with the Tensorflow kernel layout
* [kernel_height, kernel_width, input_data_channels, kernel_number]:
* PiaW.data.rows() = kernel_height * kernel_width
* PiaW.data.cols() = input_data_channels * kernel_number
* PiaW.shape[0]    = kernel_height;
* PiaW.shape[1]    = kernel_width;
* PiaW.shape[2]    = input_data_channels;
* PiaW.shape[3]    = kernel_number;
*/
struct PiaConv2dW : PiaTensor
{
	// All four dims must be positive for the weights to be usable.
	bool IsValid() 
	{
		for (int i = 0; i < 4; i++) 
			if (shape[i] <= 0) {
				cout << ">> error. PiaConv2dW(). IsValid" << endl;
				return false; 
			}
		return true;
	}
	
	// Dump every kernel patch. Column (chan*shape[3] + k) of `data`
	// holds kernel k of input channel chan, flattened column-major
	// as a shape[0] x shape[1] patch.
	void print() {
		for (int chan = 0; chan < shape[2]; chan++) {
			for (int k = 0; k < shape[3]; k++) {
				MatrixXd tmp = MatrixXd::Map(
					data.middleCols(chan*shape[3] + k, 1).data(), 
					shape[0], shape[1]);
				cout<< "---- kernel("
					<< shape[0] << ", "
					<< shape[1] << ", "
					<< shape[2] << ", "
					<< shape[3] << ") ----" << endl;
				cout << tmp << endl;
			}
		}
	}
	// Convert a Tensorflow kernel tensor [kh, kw, in_channels, n_kernels]
	// (saved by numpy.savez() and read by cnpy) into the layout documented
	// above. Returns false if the array is not rank 4.
	bool cvt(const cnpy::NpyArray &arr) 
	{
		if (arr.shape.size() != 4) {
			cout << ">> error. PiaConv2dW(), shape.size() != 4" << endl;
			return false;
		}
		// Please attention: 
		// If dtype = tf.float32 in tensorflow, here the data type  
		// must be float, otherwise the data will be wrong.
		// `arr` is a const reference, so the const data<T>() overload
		// applies and the pointer must be const-qualified.
		const float *mv1 = arr.data<float>();
		// Cache dims as int to avoid signed/unsigned comparisons below.
		const int kh = (int)arr.shape[0];
		const int kw = (int)arr.shape[1];
		const int nc = (int)arr.shape[2];
		const int nk = (int)arr.shape[3];
		// Strides of the row-major [kh, kw, nc, nk] source buffer.
		const int nOffset0 = kw * nc * nk;
		const int nOffset1 = nc * nk;
		const int nOffset2 = nk;
		shape[0] = kh;
		shape[1] = kw;
		shape[2] = nc;
		shape[3] = nk;
		data = MatrixXd(kh * kw, nc * nk);
		for (int c = 0; c < kw; c++) {
			for (int r = 0; r < kh; r++) {
				for (int chan = 0; chan < nc; chan++) {
					for (int k = 0; k < nk; k++) {
						int dr = c * kh + r;       // column-major index inside the patch
						int dc = chan * nk + k;    // (channel, kernel) column
						int pi = r*nOffset0 + c*nOffset1 + chan*nOffset2 + k;
						data(dr, dc) = mv1[pi];
					}
				}
			}
		}
		return true;
	}
};

/************************************************
* Bias for convolutional layers. It's a column vector
*
* data.rows() = 1 always
* data.cols() = kernel number
* 
* The shape field is unused in this situation.
*/
struct PiaConv2dB:PiaTensor
{
	// Bias must be a 1 x kernel_number row vector.
	bool IsValid() {
		if (data.rows() != 1 || data.cols() <= 0) {
			cout << ">> error. PiaConv2dB(), IsValid()" << endl;
			return false;
		}
		return true;
	}
	
	// Dump the bias vector. Now returns true; the original was declared
	// bool but fell off the end without a return (undefined behavior).
	bool print() {
		cout<< "---- kernel bias ("
			<< data.rows() << ", "
			<< data.cols() << ") ----" << endl;
		cout << data << endl;
		return true;
	}
	
	// Load a rank-1 cnpy array: one float bias per kernel.
	bool cvt(const cnpy::NpyArray &arr) {
		int nSize = (int)arr.shape.size();
		if (nSize != 1) {
			cout << ">> error. PiaConv2dB(). shape.size() != 1" << endl;
			return false;
		}
		
		// `arr` is const, so use the const data<T>() overload.
		const float *mv1 = arr.data<float>(); 
		data = MatrixXd(1, (int)arr.shape[0]);
		for (int i = 0; i < (int)arr.shape[0]; i++) {
			data(0, i) = mv1[i];
		}
		
		return true;
	}
};

/************************************************
* Input and output data structure for convolutional layers.
* 
* [1] ------------------------------------------- 
* For the 1st layer of a CNN, the input data usually 
* is image with one or more channels.
* We show an example to illustrate how the input data 
* is stored in PiaConv2dData.
* Image information:
*                      rows = 128
*                      cols = 256 
*                  channels = 5
* Then the data is :
* 1             2           , ..., 5
* Va[  0][  0], Vb[  0][  0], ..., Ve[  0][  0] 
* Va[  1][  0], Vb[  1][  0], ..., Ve[  1][  0]
* Va[  2][  0], Vb[  2][  0], ..., Ve[  2][  0]
* ...
* Va[127][  0], Vb[127][  0], ..., Ve[127][  0]
* Va[  0][  1],
* Va[  1][  1],
* Va[  2][  1],
* ...
* Va[127][  1],
* Va[  0][  2],
* Va[  1][  2],
* Va[  2][  2],
* ...
* ...
* Va[127][255],
*
* Contrast with Tensorflow input data [batch, height, width, channels], 
* here the batch = 1 always. This means that our model only allow to put in 
* one sample one time. 
*
* [2] -------------------------------------------
* For other convolutional layers, the input or output data
* usually are feature maps. The data is stored in the same
* way as in situation [1].
*/
struct PiaConv2dData:PiaTensor
{
	// Valid when all dims are positive and batch (shape[0]) == 1.
	// Declared const so it can be called through const references
	// (cvtTensor2Imgs takes a const PiaConv2dData& and calls IsValid).
	bool IsValid() const
	{
		for (int i = 0; i < 4; i++) 
			if (shape[i] <= 0)
				return false; 
		// the 1st dim must be 1. That is batch = 1 always
		if (shape[0] != 1)
			return false;
		return true;
	}
	
	// Print each channel: column i of `data` is channel i, flattened
	// column-major as a shape[1] x shape[2] matrix.
	void print() 
	{
		for (int i = 0; i < shape[3]; i++) {
			MatrixXd tmp = MatrixXd::Map(data.middleCols(i, 1).data(), shape[1], shape[2]);
			cout<< "---- conv2d data ("
				<< shape[0] << ", "
				<< shape[1] << ", "
				<< shape[2] << ", "
				<< shape[3] << ") "
				<< setw(5) << i 
				<< " ----" << endl;
			cout << tmp << endl;
		}
	}
	
	// Convert a Tensorflow activation tensor [1, height, width, channels]
	// (row major) into the per-channel-column layout documented above.
	bool cvt(const cnpy::NpyArray &arr) 
	{
		if (arr.shape.size() != 4) {
			cout << ">> error. shape.size() != 4" << endl;
			return false;
		}
		if (arr.shape[0] != 1) {
			// error message previously named the wrong struct (PiaFullData)
			cout << ">> error. PiaConv2dData(), shape[0] != 1. The batch == 1 always" << endl;
			return false;
		}
		// Please attention: if dtype = tf.float32 in tensorflow, the
		// element type here must be float, otherwise the data is wrong.
		// `arr` is const, so the const data<T>() overload applies.
		const float *mv1 = arr.data<float>();
		const int h  = (int)arr.shape[1];
		const int w  = (int)arr.shape[2];
		const int ch = (int)arr.shape[3];
		// Strides of the row-major [1, h, w, ch] source; the batch stride
		// is unnecessary since batch == 1 (the old nOffset0 was unused).
		const int nOffset1 = w * ch;
		const int nOffset2 = ch;
		shape[0] = 1;
		shape[1] = h;
		shape[2] = w;
		shape[3] = ch;
		data = MatrixXd(h * w, ch);
		for (int c = 0; c < w; c++) { // x
			for (int r = 0; r < h; r++) { // y
				for (int chan = 0; chan < ch; chan++) { // channel
					data(c*h + r, chan) = mv1[r*nOffset1 + c*nOffset2 + chan];
				}
			}
		}
		return true;
	}
};

/************************************************
* Input data or output structure for full connection layers. It's a vector
*
* Every row is a neural. This means that:
* data.rows() = length of the input vector
* data.cols() = 1 always
* 
* The shape will be no use here
*/
struct PiaFullData:PiaTensor
{
	// A full-connection activation must be a column vector (cols == 1).
	bool IsValid() {
		if (data.rows() <= 0 || data.cols() != 1) {
			return false;
		}
		return true;
	}
	
	// Dump the vector. Now returns true; the original was declared bool
	// but had no return statement (undefined behavior).
	bool print() {
		cout<< "---- Full Data ("
			<< data.rows() << ", "
			<< data.cols() << ") ----" << endl;
		cout << data << endl;
		return true;
	}
};

/************************************************
* weight matrix for full connection layers. It's a matrix
*
* Every row is a neural. This means that:
* data.rows() = neural number
* data.cols() = length of the input vector
* 
* The shape will be no use here
*/
struct PiaFullW:PiaTensor
{
	// Weight matrix must be non-empty.
	bool IsValid() {
		if (data.rows() <= 0 || data.cols() <= 0) {
			return false;
		}
		return true;
	}
	
	// Dump the matrix. Now returns true; the original was declared bool
	// but had no return statement (undefined behavior).
	bool print() {
		cout<< "---- Full W ("
			<< data.rows() << ", "
			<< data.cols() << ") ----" << endl;
		cout << data << endl;
		return true;
	}
	
	// Load a rank-2 Tensorflow weight matrix [input_len, neuron_num]
	// (row major) and store its transpose, so that data.rows() = neuron
	// number and data.cols() = input vector length (see struct comment).
	bool cvt(const cnpy::NpyArray &arr)
	{
		if (arr.shape.size() != 2) {
			// message previously said "!= 4"; fixed to match the check
			cout << ">> error. shape.size() != 2" << endl;
			return false;
		}
		// `arr` is const, so the const data<T>() overload applies.
		const float *mv1 = arr.data<float>();
		const int nIn  = (int)arr.shape[0];
		const int nOut = (int)arr.shape[1];
		data = MatrixXd(nOut, nIn);
		for (int r = 0; r < nIn; r++) {
			for (int c = 0; c < nOut; c++) {
				// transpose while copying out of the row-major buffer
				data(c, r) = mv1[r * nOut + c];
			}
		}
		return true; // was missing: bool function fell off the end (UB)
	}
};

/************************************************
* The bias for full connection layers. It's a vector.
*
* Every row is a neural. This means that:
* data.rows() = neural number
* data.cols() = 1 always
*
* The shape will be no use here
*/
struct PiaFullB:PiaTensor
{
	// Bias must be a column vector (one entry per neuron).
	bool IsValid() {
		if (data.rows() <= 0 || data.cols() != 1) {
			return false;
		}
		return true;
	}
	
	// Dump the vector. Now returns true; the original was declared bool
	// but had no return statement (undefined behavior).
	bool print() {
		cout<< "---- Full B ("
			<< data.rows() << ", "
			<< data.cols() << ") ----" << endl;
		cout << data << endl;
		return true;
	}
	
	// Load a rank-1 cnpy array: one float bias per neuron.
	bool cvt(const cnpy::NpyArray &arr)
	{
		if (arr.shape.size() != 1) {
			cout << ">> error. PiaFullB(), shape.size() != 1" << endl;
			return false;
		}
		// `arr` is const, so the const data<T>() overload applies.
		const float *mv1 = arr.data<float>();
		data = MatrixXd((int)arr.shape[0], 1);
		for (int r = 0; r < (int)arr.shape[0]; r++) {
			data(r, 0) = mv1[r];
		}
		return true; // was missing: bool function fell off the end (UB)
	}
};

/************************************************
* convert images or feature maps to PiaTensor
*/
// Pack a stack of equally-sized images/feature maps into `dst`
// (one column-major-flattened image per column of dst.data).
// Returns false for an empty vector, degenerate sizes, or mismatched
// image dimensions. Note: dst.shape may be partially filled on failure.
bool cvtImgs2Tensor(const std::vector<MatrixXd> &Images, PiaConv2dData & dst) 
{
	const int nMaps = (int)Images.size();
	if (nMaps <= 0) return false;

	for (int idx = 0; idx < nMaps; idx++) {
		if (Images[idx].rows() <= 0 || Images[idx].cols() <= 0) return false;
	}
	// batch is always 1; the remaining dims come from the first image
	dst.shape[0] = 1;
	dst.shape[1] = Images[0].rows();
	dst.shape[2] = Images[0].cols();
	dst.shape[3] = nMaps;
	for (int idx = 0; idx < nMaps; idx++) {
		if (Images[idx].rows() != dst.shape[1] ||
			Images[idx].cols() != dst.shape[2]) return false;
	}

	const int nPix = dst.shape[1] * dst.shape[2];
	dst.data = MatrixXd(nPix, nMaps);
	for (int idx = 0; idx < nMaps; idx++) {
		dst.data.middleCols(idx, 1) = MatrixXd::Map(Images[idx].data(), nPix, 1);
	}
	
	return true;
}

/************************************************
* convert PiaConv2dData to images or feature maps
*/
void cvtTensor2Imgs(const PiaConv2dData &src, std::vector<MatrixXd> &Images) 
{
	if (src.IsValid() == false) 
		return;
	
	Images.clear();
	for (int i = 0; i < (int)src.shape[3]; i++) {
		MatrixXd tmp = MatrixXd::Map(
			src.data.middleCols(i, 1).data(), 
			src.shape[1], 
			src.shape[2]);
		Images.push_back(tmp);
	}
}

/************************************************
* convert images or feature maps to a vector
*/
// Flatten a stack of equally-sized feature maps into a single column
// vector. Each map is first flattened row-major (via the transpose +
// column-major Map), stored as one row of Mer, and Mer is then read out
// column-major — so in the result the map (channel) index varies
// fastest, then column, then row. This appears intended to match
// Tensorflow's row-major flatten of an [h, w, channels] tensor —
// TODO confirm against the model that consumes this vector.
// Returns an empty (0x0) matrix for empty input.
// NOTE(review): assumes all maps share the first map's dimensions;
// mismatched sizes are not checked here.
MatrixXd cvtImages2Vector(const std::vector<MatrixXd> &FeatureMaps)
{
	MatrixXd dst;
	if (FeatureMaps.size() <= 0) return dst;
	
	int rows = FeatureMaps[0].rows();
	int cols = FeatureMaps[0].cols();
	int numb = (int)FeatureMaps.size();
	
	// one row per feature map, rows*cols values each
	MatrixXd Mer(numb, rows * cols);
			   
	for (int i = 0; i < numb; i++) {
		// transpose, then column-major Map = row-major flatten of the map
		MatrixXd tmp = FeatureMaps[i].transpose();
		tmp = MatrixXd::Map(tmp.data(), rows * cols, 1);
		Mer.middleRows(i, 1) = tmp.transpose();
	}
	// column-major read of Mer interleaves the maps: for each spatial
	// position (row-major order), all channels in sequence
	dst = MatrixXd::Map(Mer.data(), Mer.rows() * Mer.cols(), 1);
	return dst;
}

/************************************************
* 
*/
// Flatten feature maps into a PiaFullData column vector
// (thin wrapper around cvtImages2Vector).
PiaFullData cvtImages2TensorVector(const std::vector<MatrixXd> &FeatureMaps)
{
	PiaFullData result;
	result.data = cvtImages2Vector(FeatureMaps);
	return result;
}

/************************************************
* 
*/
// Flatten a conv tensor into a full-connection input vector:
// unpack to per-channel maps, then flatten the maps.
PiaFullData cvtTensor2Vector(const PiaConv2dData &src)
{
	std::vector<MatrixXd> maps;
	cvtTensor2Imgs(src, maps);
	return cvtImages2TensorVector(maps);
}

/************************************************
* 
*/
void PrintImgs(const std::vector<MatrixXd> &src)
{
	for (int i = 0; i < (int)src.size(); i++) {
		cout 
		<< "---------Map" 
		<< setw(4) << i 
		<< " ("
		<< setw(4) << src[i].rows()
		<< setw(4) << src[i].cols()
		<< ") "
		<< "---------" 
		<< endl;
		cout << src[i] << endl;
	}
}

} // namespace PiaCNN




#endif
