#include "nn_layer.h"
#include<iostream>
#include<fstream>
#include<cstdio>
#include<string>

/*If input_shape equals the default value 0, this layer is the first hidden
layer and the weight shape is not known yet (it is set later via random());
otherwise the weight shape can be deduced from input_shape and the number
of units, so the weights are initialized here.*/
layer::layer(int i, std::string temp,int input_shape) :units(i), activation(temp)
{
	// A zero input_shape means the previous layer's width is unknown yet;
	// weight allocation is then deferred (see layer::random).
	if (input_shape == 0)
		return;
	// tanh saturates quickly, so it gets a smaller initial scale.
	const double scale = (activation == "tanh") ? 0.01 : 0.1;
	weight = scale * mat::Random(input_shape, i);
}


// Memberwise copy; equivalent to the compiler-generated copy constructor.
layer::layer(const layer& temp)
	: units(temp.units),
	  weight(temp.weight),
	  activation(temp.activation)
{
}


// Report the weight matrix dimensions as an msize {row, col} pair.
msize layer::shape() const
{
	msize dims;
	dims.row = weight.rows();
	dims.col = weight.cols();
	return dims;
}


// Print the activation name to stdout (no trailing newline).
void layer::showAct() const
{
	using std::cout;
	cout << activation;
}


// One-line human-readable summary of this layer's configuration.
void layer::describe() const
{
	std::cout << "Fully connected layer: number of units = " << units
		<< " activation = " << activation << std::endl;
}


// Forward pass: result = activation(input * weight).
// input is (batch x in_features); result becomes (batch x units).
// An unrecognized activation name leaves the linear output unchanged.
void layer::forward_prop(const mat &input,mat & result)
{
	result = input * weight;

	// Apply the element-wise (or row-wise, for softmax) activation.
	if (activation == "softmax")
		result = Softmax(result);
	else if (activation == "tanh")
		result = Tanh_mat(result);
	else if (activation == "sigmoid")
		result = Sigmoid_mat(result);
	else if (activation == "relu")
		result = Relu_mat(result);
}


// Backward pass: push the upstream gradient `output` through the linear
// part (output * W^T), then scale element-wise by the activation
// derivative evaluated at this layer's input.
void layer::backward_prop(const mat& input,const mat& output,mat& result)
{
	result = output * weight.transpose();
	if (activation == "tanh")
		result = result.cwiseProduct(Tanhderiv_mat(input));
	else if (activation == "sigmoid")
		result = result.cwiseProduct(Sigmoidderiv_mat(input));
	else if (activation == "relu")
		result = result.cwiseProduct(Reluderiv_mat(input));
	/* "softmax" is usually the activation of the final output layer, so
	its gradient is computed after forward propagation ends rather than
	here; only the linear part is propagated in that case. */
}


void layer::random(int r,double pram)
{
	weight = pram * mat::Random(r, units);
}


// Number of input features this layer expects (rows of the weight matrix).
int layer::weight_row() const
{
	return static_cast<int>(weight.rows());
}


// Output width of this layer; the weight matrix is (rows x units),
// so the column count equals the unit count.
int layer::weight_column() const
{
	return units;
}


// Gradient-descent step; the caller supplies delta already scaled by the
// learning rate.
void layer::update(const mat& delta)
{
	weight = weight - delta;
}

// Serialize this layer to "fulllayerfile<i>.txt": a header line
// "rows cols activation", followed by the weight matrix.
void layer::layer_export(int i) const
{
	using namespace std;
	const string file = "fulllayerfile" + to_string(i) + ".txt";
	ofstream out(file, ios::out);
	if (!out.is_open())
	{
		cout << "can not open layer file" << endl;
		return;
	}
	//out << 0 << " ";
	out << weight_row() << " " << weight_column() << " " << activation << endl;
	out << weight;
	out.close();
}

// Load a layer previously written by layer_export(): a header line
// "rows cols activation" followed by rows*cols weight values.
// On a malformed header the layer is left untouched instead of resizing
// the weight matrix with garbage dimensions (previous behavior).
void layer::layer_input(const char* ptr)
{
	using namespace std;
	ifstream in;
	in.open(ptr, ios::in);
	if (!in.is_open())
	{
		cout << "can not open layer file" << endl;
		return;
	}
	int r;
	// Validate the header before resizing: a bad read previously left
	// r/units uninitialized or partially updated.
	if (!(in >> r >> units >> activation) || r <= 0 || units <= 0)
	{
		cout << "bad layer file header" << endl;
		return;
	}
	weight.resize(r, units);
	for (int i = 0; i < r; i++)
	{
		for (int j = 0; j < units; j++)
			in >> weight(i, j);
	}
	// Flag truncated files rather than silently keeping stale values.
	if (!in)
		cout << "layer file ended early; weight data incomplete" << endl;
	in.close();
}




/*Parameters: number of rows of each kernel, number of cols of each kernel,
number of kernels, stride of the scan, original number of rows of the input
pictures, original number of cols of the pictures, activation function name.*/
cnn_layer::cnn_layer(int r, int c, int num, int step, int pic_r,int pic_c, std::string temp) :
	kernel_rows(r), kernel_cols(c), num_kernels(num), stride(step), pic_rows(pic_r),pic_cols(pic_c) ,activation(temp)
{
	// Number of positions each kernel visits per picture ("valid" scan,
	// no padding).
	const int out_rows = (pic_rows - kernel_rows) / stride + 1;
	const int out_cols = (pic_cols - kernel_cols) / stride + 1;
	conv_size = out_rows * out_cols;
}


// Memberwise copy of the layer configuration and kernels.
// The im2col scratch buffer (flattend_input) is intentionally not copied,
// matching the previous behavior: it is rebuilt on each forward pass.
cnn_layer::cnn_layer(const cnn_layer& temp)
	: kernel_rows(temp.kernel_rows),
	  kernel_cols(temp.kernel_cols),
	  num_kernels(temp.num_kernels),
	  stride(temp.stride),
	  pic_rows(temp.pic_rows),
	  pic_cols(temp.pic_cols),
	  conv_size(temp.conv_size),
	  kernels(temp.kernels),
	  activation(temp.activation)
{
}


//kernels initialization
//Each kernel is stored as one column of `kernels` (kernel_rows*kernel_cols
//entries); values are drawn uniformly from (-param, param).
//NOTE(review): the `r` parameter is unused here — the row count is fixed by
//the kernel size — and the old comment's claim of range (-r, r) was wrong;
//kept only to match layer::random's interface. Verify callers expect
//(-param, param).
void cnn_layer::random(int r, double param)
{
	kernels = param * mat::Random(kernel_cols * kernel_rows , num_kernels);
}


void cnn_layer::forward_prop(const mat& input, mat& result)
{
	mat temp;
	flattend_input.resize(input.rows() * conv_size, kernel_rows * kernel_cols);
	int cnt = 0;
	for (int i = 0; i < input.rows(); i++)
	{
		temp = (input.row(i)).reshaped(pic_rows,pic_cols);
		for (int j = 0; j <= pic_rows - kernel_rows; j += stride)
		{
			for (int k = 0; k <= pic_cols - kernel_cols; k += stride)
			{
				flattend_input.row(cnt++) = temp.block(j, k, kernel_rows, kernel_cols).reshaped<Eigen::RowMajor>().transpose();
			}
		}
	}
	result = (flattend_input * kernels).reshaped<Eigen::RowMajor>(input.rows(), num_kernels * conv_size);
	if (activation == "relu")
	{
		result = Relu_mat(result);
	}
	else if (activation == "sigmoid")
	{
		result = Sigmoid_mat(result);
	}
	else if (activation == "tanh")
	{
		result = Tanh_mat(result);
	}
	else if (activation == "softmax")
	{
		result = Softmax(result);
	}
}


void cnn_layer::backward_prop(const mat& input, const mat& output, mat& result)
{
	if (activation == "relu")
	{
		result = (output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * kernels.transpose());
		result = result.reshaped<Eigen::RowMajor>(input.rows(), input.cols()).cwiseProduct(Reluderiv_mat(input));
	}
	else if (activation == "sigmoid")
	{
		result = (output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * kernels.transpose());
		result = result.reshaped<Eigen::RowMajor>(input.rows(), input.cols()).cwiseProduct(Sigmoidderiv_mat(input));
	}
	else if (activation == "tanh")
	{
		result = (output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * kernels.transpose());
		result = result.reshaped<Eigen::RowMajor>(input.rows(), input.cols()).cwiseProduct(Tanhderiv_mat(input));
	}
	else if (activation == "softmax")
	{
		result = output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * kernels.transpose();
		result = result.reshaped(input.rows(), input.cols()).eval();
	}
	else {
		result = output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * kernels.transpose();
		result = result.reshaped(input.rows(), input.cols()).eval();
	}
}

void cnn_layer::update(int r, double lr, const mat& delta)
{
	kernels -= lr * flattend_input.transpose() * delta.reshaped<Eigen::RowMajor>(r * conv_size, num_kernels);
}

// Width of this layer's output: conv_size responses per kernel.
int cnn_layer::weight_column() const
{
	return num_kernels * conv_size;
}

// One-line human-readable summary of this layer's configuration.
// Fixes the "kernal"/"activetion" typos in the printed text.
void cnn_layer::describe() const
{
	std::cout << "Convolutional layer: kernel shape = " << "(" << kernel_rows << "," << kernel_cols << ") ";
	std::cout << "number of kernels = " << num_kernels << " stride = " << stride << " activation = " << activation << std::endl;
}

// Serialize this layer to "cnnlayerfile<i>.txt": a header line of
// hyper-parameters, followed by the kernel matrix.
void cnn_layer::cnnlayer_export(int i) const
{
	using namespace std;
	const string file = "cnnlayerfile" + to_string(i) + ".txt";
	ofstream out(file, ios::out);
	if (!out.is_open())
	{
		cout << "can not open layer file" << endl;
		return;
	}
	//out << 1 << " ";
	out << kernel_rows << " " << kernel_cols << " " << num_kernels << " " << stride << " " << conv_size << " " << pic_rows << " " << pic_cols << " " << activation << endl;
	out << kernels;
	out.close();
}

// Load a layer previously written by cnnlayer_export(): a header line of
// hyper-parameters, then (kernel_rows*kernel_cols) x num_kernels values.
// On a malformed header the kernels are left untouched instead of
// resizing with garbage dimensions (previous behavior).
void cnn_layer::layer_input(const char* ptr)
{
	using namespace std;
	ifstream in;
	in.open(ptr, ios::in);
	if (!in.is_open())
	{
		cout << "can not open layer file" << endl;
		return;
	}
	// Validate the whole header before resizing the kernel matrix.
	if (!(in >> kernel_rows >> kernel_cols >> num_kernels >> stride >> conv_size >> pic_rows >> pic_cols >> activation)
		|| kernel_rows <= 0 || kernel_cols <= 0 || num_kernels <= 0)
	{
		cout << "bad layer file header" << endl;
		return;
	}
	kernels.resize(kernel_cols * kernel_rows, num_kernels);
	for (int i = 0; i < kernel_cols * kernel_rows; i++)
	{
		for (int j = 0; j < num_kernels; j++)
			in >> kernels(i, j);
	}
	// Flag truncated files rather than silently keeping stale values.
	if (!in)
		cout << "layer file ended early; kernel data incomplete" << endl;
	in.close();
}





cnn_derived::cnn_derived(int kernelrow, int kernelcol, int kernelnum, int step, 
	int picrows, int piccols, std::string temp) :
	layer(((picrows - kernelrow) / step + 1)*
		((piccols - kernelcol) / step + 1),temp),
	kernel_rows(kernelrow), kernel_cols(kernelcol), num_kernels(kernelnum), stride(step), 
	pic_rows(picrows), pic_cols(piccols)
{
	// conv_size: number of kernel positions per picture ("valid" scan).
	// The same expression is passed to the base layer above as its unit
	// count, since init-lists cannot reuse a member computed in the body.
	const int out_rows = (picrows - kernelrow) / step + 1;
	const int out_cols = (piccols - kernelcol) / step + 1;
	conv_size = out_rows * out_cols;
}


// Re-initialize the kernels (stored in the inherited `weight` matrix, one
// kernel per column) with values uniform in (-pram, pram).
// NOTE(review): the `rows` parameter is unused — the row count is fixed by
// the kernel size; it is kept only to match layer::random's interface.
void cnn_derived::random(int rows, double pram)
{
	weight = pram * mat::Random(kernel_cols * kernel_rows, num_kernels);
}


void cnn_derived::forward_prop(const mat& input, mat& result)
{
	mat temp;
	flattend_input.resize(input.rows() * conv_size, kernel_rows * kernel_cols);
	int cnt = 0;
	for (int i = 0; i < input.rows(); i++)
	{
		temp = (input.row(i)).reshaped(pic_rows, pic_cols);
		for (int j = 0; j <= pic_rows - kernel_rows; j += stride)
		{
			for (int k = 0; k <= pic_cols - kernel_cols; k += stride)
			{
				flattend_input.row(cnt++) = temp.block(j, k, kernel_rows, kernel_cols).reshaped<Eigen::RowMajor>().transpose();
			}
		}
	}
	result = (flattend_input * weight).reshaped<Eigen::RowMajor>(input.rows(), num_kernels * conv_size);
	if (activation == "relu")
	{
		result = Relu_mat(result);
	}
	else if (activation == "sigmoid")
	{
		result = Sigmoid_mat(result);
	}
	else if (activation == "tanh")
	{
		result = Tanh_mat(result);
	}
	else if (activation == "softmax")
	{
		result = Softmax(result);
	}
}


void cnn_derived::backward_prop(const mat& input, const mat& output, mat& result)
{
	if (activation == "relu")
	{
		result = (output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * weight.transpose());
		result = result.reshaped<Eigen::RowMajor>(input.rows(), input.cols()).cwiseProduct(Reluderiv_mat(input));
	}
	else if (activation == "sigmoid")
	{
		result = (output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * weight.transpose());
		result = result.reshaped<Eigen::RowMajor>(input.rows(), input.cols()).cwiseProduct(Sigmoidderiv_mat(input));
	}
	else if (activation == "tanh")
	{
		result = (output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * weight.transpose());
		result = result.reshaped<Eigen::RowMajor>(input.rows(), input.cols()).cwiseProduct(Tanhderiv_mat(input));
	}
	else if (activation == "softmax")
	{
		result = output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * weight.transpose();
		result = result.reshaped(input.rows(), input.cols()).eval();
	}
	else {
		result = output.reshaped<Eigen::RowMajor>(input.rows() * conv_size, num_kernels) * weight.transpose();
		result = result.reshaped(input.rows(), input.cols()).eval();
	}
}

void cnn_derived::update(int r, double lr, const mat& delta)
{
	weight -= lr * flattend_input.transpose() * delta.reshaped<Eigen::RowMajor>(r * conv_size, num_kernels);
}

// Width of this layer's output: conv_size responses per kernel.
int cnn_derived::weight_column() const
{
	return num_kernels * conv_size;
}

// One-line human-readable summary of this layer's configuration.
// Fixes the "kernal"/"activetion" typos in the printed text.
void cnn_derived::describe() const
{
	std::cout << "kernel shape = " << "(" << kernel_rows << "," << kernel_cols << ") ";
	std::cout << "number of kernels = " << num_kernels << " stride = " << stride << " activation = " << activation << std::endl;
}

