#include "mymodel.h"
#include "nn_layer.h"

#include <fstream>
#include <iomanip>
#include <iostream>
#include <random>
#include <stdexcept>

// Constructs a model shell with hyper-parameters only; no layers are allocated.
// NOTE(review): passing cnt > 0 here leaves layers[1..cnt] in their
// default-constructed state while the destructor will still try to delete
// them — safe only if the variant's default alternative is a null pointer.
// Prefer cnt == 0 plus add()/combine() — TODO confirm against the header.
mymodel::mymodel(int cnt, double lr, int flag, double dp) :
	layer_cnt(cnt), lerning_rate(lr), loss_func(flag), dropout(dp)
{

}

// Copy constructor: copies the hyper-parameters and deep-copies every owned
// layer, so the new model manages its own heap allocations independently.
mymodel::mymodel(const mymodel& temp)
	: layer_cnt(temp.layer_cnt),
	  lerning_rate(temp.lerning_rate),
	  loss_func(temp.loss_func),
	  dropout(temp.dropout)
{
	for (int i = 1; i <= layer_cnt; i++)
	{
		const auto& src = temp.layers[i];
		if (src.index() == 0)
			layers[i] = new layer(*std::get<0>(src));
		else if (src.index() == 1)
			layers[i] = new cnn_layer(*std::get<1>(src));
	}
}


// Releases every layer the model owns; slot 0 is unused by convention.
mymodel::~mymodel()
{
	for (int i = 1; i <= layer_cnt; ++i)
	{
		switch (layers[i].index())
		{
		case 0:
			delete std::get<0>(layers[i]);
			break;
		case 1:
			delete std::get<1>(layers[i]);
			break;
		default:
			break;
		}
	}
}


void mymodel::add(const layer& cp)
{
	layer* lp = new layer(cp);
	layers[++layer_cnt] = lp;
}

void mymodel::add(const cnn_layer& cp)
{
	cnn_layer* lp = new cnn_layer(cp);
	layers[++layer_cnt] = lp;
}


// Randomly initializes layer weights. A CNN first layer self-initializes;
// a dense first layer is left untouched (its input width is not known here).
// Each subsequent dense layer is sized from the previous layer's output
// width (weight_column()).
void mymodel::init()
{
	if (layers[1].index() == 1)
		std::get<1>(layers[1])->random(0, 0.01);

	for (int i = 2; i <= layer_cnt; ++i)
	{
		// Only dense layers are (re)initialized past the first slot.
		if (layers[i].index() != 0)
			continue;
		if (layers[i - 1].index() == 0)
			std::get<0>(layers[i])->random(std::get<0>(layers[i - 1])->weight_column(), 0.1);
		else if (layers[i - 1].index() == 1)
			std::get<0>(layers[i])->random(std::get<1>(layers[i - 1])->weight_column(), 0.1);
	}
}

// Overrides the hyper-parameters chosen at construction time.
// loss: 0 = mean square, 1 = cross entropy (see fit()).
void mymodel::compile(double lr, double dp, int loss)
{
	lerning_rate = lr;
	dropout = dp;
	loss_func = loss;
}

// Mini-batch SGD training loop WITHOUT dropout and WITHOUT a test-set hook.
// input : one sample per row; output : one-hot targets, one per row.
// NOTE(review): this overload assumes every layer is dense — std::get<0> is
// called unconditionally, so a cnn_layer in the stack would throw
// std::bad_variant_access. The 6-arg overload below handles both kinds.
void mymodel::fit(const mat& input, const mat& output,
	int epochs, int batch_size)
{
	for (int t = 0; t < epochs; t++)
	{
		std::cout << "epoch " << t + 1 << "/" << epochs<< std::endl << "  [";
		double error = 0, correct_cnt = 0;
		// A trailing partial batch (rows % batch_size) is silently skipped.
		for (int j = 0; j + batch_size <= input.rows(); j += batch_size)
		{
			input_tensor[0] = input.block(j, 0, batch_size, input.cols());

			// Forward pass: input_tensor[i] holds layer i's activations.
			for (int i = 1; i <= layer_cnt; i++)
			{
				std::get<0>(layers[i])->forward_prop(input_tensor[i - 1], input_tensor[i]);
			}
			// Batch accuracy: argmax of the prediction vs. argmax of the target.
			for (int i = j; i < j + batch_size; i++)
			{
				mat::Index x, y;
				input_tensor[layer_cnt].row(i-j).maxCoeff(&x);
				output.row(i).maxCoeff(&y);
				if (x == y) correct_cnt++;
			}

			// Loss + output-layer gradient. NOTE(review): unlike the 6-arg
			// overload, the gradient here is NOT divided by batch_size —
			// the effective learning rate therefore scales with batch size.
			if (loss_func == 0)
			{
				error += loss_mean_square(output.block(j, 0, batch_size, output.cols()), input_tensor[layer_cnt]);
				gradient[layer_cnt] = input_tensor[layer_cnt] - output.block(j, 0, batch_size, output.cols());
			}
			else if (loss_func == 1)
			{
				error += loss_crossentropy(output.block(j, 0, batch_size, output.cols()), input_tensor[layer_cnt]);
				gradient[layer_cnt] = Softmaxderiv(output.block(j, 0, batch_size, output.cols()), input_tensor[layer_cnt]);
			}

			// Backward pass down to (but not through) the first layer.
			for (int i = layer_cnt; i > 1; i--)
			{
				std::get<0>(layers[i])->backward_prop(input_tensor[i - 1], gradient[i], gradient[i - 1]);
			}

			// Weight update: delta = lr * activations^T * gradient.
			mat weight_delta;
			for (int i = 1; i <= layer_cnt; i++)
			{
					weight_delta = lerning_rate * (input_tensor[i - 1].transpose()) * gradient[i];
					std::get<0>(layers[i])->update(weight_delta);
			}
			std::cout << "=";
		}
		// NOTE(review): accuracy divides by output.rows() even though any
		// skipped partial batch contributed no correct_cnt — slightly
		// understates accuracy when rows % batch_size != 0.
		std::cout <<"]  Train-loss: " << error << " Train-acc: " << correct_cnt * 1.0 / output.rows() << std::endl;
	}
}

// Mini-batch SGD training loop with inverted dropout, mixed dense/CNN layer
// support, and a periodic test-set evaluation every 5 epochs.
// input : one sample per row; output : one-hot targets; test_labels : class
// indices (not one-hot) for evaluate().
void mymodel::fit(const mat& input, const mat& output,
	int epochs, int batch_size, const mat& test_img,
	const Eigen::VectorXd& test_labels)
{
	// Per-layer dropout masks, regenerated for every batch. The output layer
	// (i == layer_cnt) is never masked.
	mat drop_out[layer_limits - 2];
	for (int t = 0; t < epochs; t++)
	{
		std::cout << "epoch " << t + 1 << "/" << epochs << std::endl << "  [";
		double error = 0, correct_cnt = 0;
		// A trailing partial batch (rows % batch_size) is silently skipped.
		for (int j = 0; j + batch_size <= input.rows(); j += batch_size)
		{
			// Fresh Bernoulli masks sized to each hidden layer's output width.
			if (dropout != 0)
			for (int i = 1; i < layer_cnt; i++)
			{
				if (layers[i].index() == 0)
					drop_out[i] = mask(batch_size, std::get<0>(layers[i])->weight_column(), dropout);
				else if (layers[i].index() == 1)
					drop_out[i] = mask(batch_size, std::get<1>(layers[i])->weight_column() ,dropout);
			}
			input_tensor[0] = input.block(j, 0, batch_size, input.cols());

			// Forward pass; dense and CNN layers are dispatched by variant index.
			for (int i = 1; i <= layer_cnt; i++)
			{
				if (layers[i].index() == 0)
					std::get<0>(layers[i])->forward_prop(input_tensor[i - 1], input_tensor[i]);
				else if (layers[i].index() == 1)
					std::get<1>(layers[i])->forward_prop(input_tensor[i - 1], input_tensor[i]);

				// Inverted dropout: scale by 1/(1-p) at train time so that
				// inference (predict) needs no rescaling.
				if (dropout != 0 && i < layer_cnt)
				input_tensor[i] = input_tensor[i].cwiseProduct(drop_out[i] / (1 - dropout));
				//std::cout << input_tensor[i].row(0) << std::endl;
			}

			// Batch accuracy: argmax of the prediction vs. argmax of the target.
			for (int i = j; i < j + batch_size; i++)
			{
				mat::Index x, y;
				input_tensor[layer_cnt].row(i - j).maxCoeff(&x);
				output.row(i).maxCoeff(&y);
				if (x == y) correct_cnt++;
			}

			// Loss + output-layer gradient (0 = MSE with the 2/cols factor,
			// 1 = cross entropy via softmax derivative).
			if (loss_func == 0)
			{
				error += loss_mean_square(output.block(j, 0, batch_size, output.cols()), input_tensor[layer_cnt]);
				gradient[layer_cnt] = 2 * (input_tensor[layer_cnt] - output.block(j, 0, batch_size, output.cols())) / output.cols();
			}
			else if (loss_func == 1)
			{
				error += loss_crossentropy(output.block(j, 0, batch_size, output.cols()), input_tensor[layer_cnt]);
				gradient[layer_cnt] = Softmaxderiv(output.block(j, 0, batch_size, output.cols()), input_tensor[layer_cnt]);
			}

			// Average the gradient over the batch.
			gradient[layer_cnt] = gradient[layer_cnt] / batch_size;

			// Backward pass; re-apply each layer's mask to its incoming
			// gradient so dropped units receive no update signal.
			for (int i = layer_cnt; i > 1; i--)
			{
				if (layers[i].index() == 0)
					std::get<0>(layers[i])->backward_prop(input_tensor[i - 1], gradient[i], gradient[i - 1]);
				else if (layers[i].index() == 1)
					std::get<1>(layers[i])->backward_prop(input_tensor[i - 1], gradient[i], gradient[i - 1]);
				if (dropout != 0)
				gradient[i - 1] = gradient[i - 1 ].cwiseProduct(drop_out[i - 1]);
			}

			// Weight updates; CNN layers use their own batch-aware update.
			mat weight_delta;
			for (int i = 1; i <= layer_cnt; i++)
			{
				if (layers[i].index() == 0)
				{
					weight_delta = lerning_rate * (input_tensor[i - 1].transpose()) * gradient[i];
					std::get<0>(layers[i])->update(weight_delta);
				}
				else {
					std::get<1>(layers[i])->update(input_tensor[i - 1].rows(), lerning_rate, gradient[i]);
				}
			}
			std::cout << "=";
		}
		std::cout << "]  Train-loss: " << error/input.rows() << " Train-acc: " << correct_cnt * 1.0 / input.rows() << std::endl;
		// Evaluate on the held-out set every 5 epochs.
		if ((t + 1) % 5 == 0)
		evaluate(test_img, test_labels);
	}

}

// Sum of squared differences over every matrix entry (no averaging).
// one_hot and pred must have identical dimensions.
double loss_mean_square(const Eigen::MatrixXd& one_hot, const Eigen::MatrixXd& pred)
{
	double total = 0;
	const int rows = one_hot.rows();
	const int cols = one_hot.cols();
	for (int i = 0; i < rows; ++i)
	{
		for (int j = 0; j < cols; ++j)
		{
			const double diff = one_hot(i, j) - pred(i, j);
			total += diff * diff;
		}
	}
	return total;
}

// Cross-entropy loss summed over the batch: -log(pred) at each position
// where the one-hot target is non-zero. Assumes pred > 0 at those positions.
double loss_crossentropy(const Eigen::MatrixXd& one_hot, const Eigen::MatrixXd& pred)
{
	double total = 0;
	const int rows = one_hot.rows();
	const int cols = one_hot.cols();
	for (int i = 0; i < rows; ++i)
	{
		for (int j = 0; j < cols; ++j)
		{
			if (one_hot(i, j) != 0)
				total += -1 * log(pred(i, j));
		}
	}
	return total;
}

// Runs a forward pass over every row of `input` and returns the argmax class
// index per row. Dropout is forced off for inference (this permanently
// zeroes the member, matching the original behavior).
Eigen::VectorXd mymodel::predict(const mat& input)
{
	mat activ = input;
	//remove dropout, important
	dropout = 0.0;
	// forward_prop is called with the same matrix as input and output,
	// replacing the activations in place layer by layer.
	for (int i = 1; i <= layer_cnt; ++i)
	{
		if (layers[i].index() == 0)
			std::get<0>(layers[i])->forward_prop(activ, activ);
		else if (layers[i].index() == 1)
			std::get<1>(layers[i])->forward_prop(activ, activ);
	}
	Eigen::VectorXd pred(activ.rows());
	mat::Index best;
	for (int r = 0; r < activ.rows(); ++r)
	{
		activ.row(r).maxCoeff(&best);
		pred(r) = best;
	}
	return pred;
}

// Prints test-set accuracy: fraction of rows whose predicted class index
// matches the given label vector.
void mymodel::evaluate(const mat& input, const Eigen::VectorXd& labels)
{
	const Eigen::VectorXd pred = predict(input);
	int hits = 0;
	for (int i = 0; i < labels.rows(); ++i)
	{
		if (pred(i) == labels(i))
			++hits;
	}
	std::cout << "Test-acc: " << hits * 1.0 / labels.rows() << std::endl;
}


// Prints a one-line model summary followed by each layer's own description.
// NOTE: the reported layer count is layer_cnt + 1 (the input is counted as a
// layer), matching the original output.
void mymodel::describe() const
{
	std::cout << "number of layers = " << layer_cnt+1 << " learning rate = " << lerning_rate <<" dropout= "<<dropout << " loss function = ";
	switch (loss_func)
	{
	case 0:
		std::cout << "mean square";
		break;
	case 1:
		std::cout << "cross entropy";
		break;
	default:
		break;
	}
	std::cout << std::endl;
	for (int i = 1; i <= layer_cnt; ++i)
	{
		if (layers[i].index() == 0)
			std::get<0>(layers[i])->describe();
		else if (layers[i].index() == 1)
			std::get<1>(layers[i])->describe();
	}
}


// Builds an r x c Bernoulli dropout mask: each entry is 1 with probability
// (1 - drop), 0 otherwise.
// FIX: the distribution must NOT be static — a static
// std::bernoulli_distribution latches the keep-probability from the FIRST
// call and silently ignores any different `drop` passed later. The engine
// stays static so successive calls keep drawing fresh numbers.
Eigen::MatrixXd mask(int r, int c, double drop)
{
	mat result(r, c);

	static std::mt19937 generator;
	std::bernoulli_distribution keep(1 - drop);
	for (int i = 0; i < r; i++)
	{
		for (int j = 0; j < c; j++)
		{
			result(i, j) = keep(generator);
		}
	}
	return result;
}


//drop out=0.5, original way
//mat mask(int r, int c, double drop)
//{
//	mat result = mat::Random(r, c);
//	for (int i = 0; i < r; i++)
//	{
//		for (int j = 0; j < c; j++)
//		{
//			result(i, j) = ((int)(result(i, j) + 1) % 2);
//		}
//	}
//	return result;
//}


//in .h file
//template <class T, class ...Args>
//void mymodel::combine_template(const T& value, Args... args)
//{
//	/*if (value.flag == 0)
//		layers[++layer_cnt] = new layer(value);
//	else if (value.flag == 1)
//		layers[++layer_cnt] = new cnn_layer(value);*/
//	combine_template(args...);
//}
//


// Base case terminating the variadic combine_template recursion declared in
// the header (the recursive overload is currently commented out above).
// Intentionally empty.
void mymodel::combine_template()
{

}


// Resets the topology to exactly two dense layers (deep copies of a and b).
// NOTE(review): previously owned layers, if any, are not freed here.
void mymodel::combine(const layer& a, const layer& b)
{
	layer_cnt = 0;
	layers[++layer_cnt] = new layer(a);
	layers[++layer_cnt] = new layer(b);
}

// Resets the topology to exactly three dense layers (deep copies).
// NOTE(review): previously owned layers, if any, are not freed here.
void mymodel::combine(const layer& a, const layer& b, const layer& c)
{
	layer_cnt = 0;
	layers[++layer_cnt] = new layer(a);
	layers[++layer_cnt] = new layer(b);
	layers[++layer_cnt] = new layer(c);
}

// Resets the topology to a CNN layer followed by one dense layer.
// NOTE(review): previously owned layers, if any, are not freed here.
void mymodel::combine(const cnn_layer& a, const layer& b)
{
	layer_cnt = 0;
	layers[++layer_cnt] = new cnn_layer(a);
	layers[++layer_cnt] = new layer(b);
}

// Resets the topology to a CNN layer followed by two dense layers.
// NOTE(review): previously owned layers, if any, are not freed here.
void mymodel::combine(const cnn_layer& a, const layer& b, const layer& c)
{
	layer_cnt = 0;
	layers[++layer_cnt] = new cnn_layer(a);
	layers[++layer_cnt] = new layer(b);
	layers[++layer_cnt] = new layer(c);
}

void mymodel::test()
{
	Eigen::MatrixXd matinuse;
	wchar_t wptr[20]=L"jpgs/";
	std::cout << "input name of jpg in folder jpgs with suffix(example: test1.jpg):";
	while (std::wcin >> (wptr+5))
	{
		std::cin.get();
		if (wptr[5] == 'e') break;
		std::ifstream in(wptr);
		if (!in.is_open())
		{
			in.close();
			std::cout << "no such jpg" << std::endl;
			std::cout << "input name of jpg in folder jpgs with suffix(input end to break):";
			continue;
		}
		in.close();
		showjpg(wptr);
		readjpg(wptr, matinuse);
		matinuse = matinuse.reshaped<Eigen::RowMajor>().transpose().eval();
		std::cout << "model predict: " << predict(matinuse) << std::endl;
		std::cout << "input name of jpg in folder jpgs with suffix(input end to break):";
	}
}

// Serializes every layer to its own file, keyed by the layer's position.
void mymodel::file_export()
{
	for (int i = 1; i <= layer_cnt; ++i)
	{
		const auto& slot = layers[i];
		if (slot.index() == 0)
			std::get<0>(slot)->layer_export(i);
		else if (slot.index() == 1)
			std::get<1>(slot)->cnnlayer_export(i);
	}
	std::cout << "data has been exported into files";
}

void mymodel::file_input(const char* ch1, const char* ch2)
{
	if (ch1[0] == 'f')
	{
		layer* ptr = new layer;
		ptr->layer_input(ch1);
		layers[++layer_cnt] = ptr;
	}
	else if (ch1[0] == 'c')
	{
		cnn_layer* ptr = new cnn_layer;
		ptr->layer_input(ch1);
		layers[++layer_cnt] = ptr;
	}
	if (ch2[0] == 'f')
	{
		layer* ptr = new layer;
		ptr->layer_input(ch2);
		layers[++layer_cnt] = ptr;
	}
	else if (ch2[0] == 'c')
	{
		cnn_layer* ptr = new cnn_layer;
		ptr->layer_input(ch2);
		layers[++layer_cnt] = ptr;
	}
}