#include "MLP.h"

#include <cmath>    // std::abs(double)
#include <cstdlib>  // std::rand, std::srand, std::abs
#include <iostream> // std::cout, std::ostream

#define Print(x) std::cout<<(x)<<std::endl
// Rectified linear unit: returns x for positive inputs, 0 otherwise.
double ReLU(double x)
{
	if (x > 0.0)
		return x;
	return 0.0;
}
// Numerical derivative of ACT at x via central difference with step `delta`
// (ACT and delta are provided by MLP.h).
double soild_delta(double x)
{
	const double forward_val = ACT(x + delta);
	const double backward_val = ACT(x - delta);
	return (forward_val - backward_val) / (2 * delta);
}
// Sum of squared errors between target y and prediction y_hat.
// (Not divided by the vector length, so this is SSE rather than a true mean.)
double MSE_Loss(Eigen::VectorXd const& y, Eigen::VectorXd const& y_hat)
{
	return (y - y_hat).squaredNorm();
}

// Relative error of the first component: |y[0] - y_hat[0]| / |y[0]|.
// NOTE(review): divides by |y[0]| — returns inf/nan if the label is 0; confirm
// labels are never zero for the data sets this is used with.
double abs_acc(Eigen::VectorXd const& y, Eigen::VectorXd const& y_hat)
{
	// BUG fixed: plain `abs` is the C int overload and truncated the doubles
	// to integers; std::abs selects the double overload.
	double acc = std::abs(y[0] - y_hat[0]) / std::abs(y[0]);
	return acc;
}

std::ostream& operator<<(std::ostream& out, MLP& mlp)
{
	auto temp = mlp.first();
	for (int i = 0; i < mlp._size; i++, temp = temp->succ)
		out<<"layer ["<<i + 1<<"] :"
		   <<temp->data;

	return out;
}

// Print one layer: its dimensions, weight matrix W and bias vector B.
std::ostream& operator<<(std::ostream& out, Layer& layer)
{
	out<<"input size = "<<layer.in_size<<"\n";
	out<<"output size = "<<layer.out_size<<"\n";
	out<<"weight W = \n"<<layer.W;
	out<<"\nbias B = \n"<<layer.B<<"\n\n";
	return out;
}

// Construct a fully-connected layer mapping input_size -> output_size.
// A is the activation function, G its (numerical) derivative.
Layer::Layer(int input_size, int output_size,
			 double (*A)(double X),
			 double (*G)(double X)
			 ):
	// FIX: W and B were default-constructed here and then immediately
	// overwritten with Random in the body — initialize them directly instead.
	// NOTE(review): Random draws from the global rand() stream in member
	// declaration order; assumes W is declared before B in MLP.h so seeded
	// runs consume the stream in the same order as before — confirm.
	W(Eigen::MatrixXd::Random(output_size, input_size)),
	B(Eigen::VectorXd::Random(output_size)),
	activate(A), gradient(G),
	in_size(input_size), out_size(output_size)
{
}

// Forward pass through the layer. Caches the input X, the pre-activation
// z = W*x + B and the activation a = activate(z), then returns the layer's
// output.
// BUG fixed: this previously returned the pre-activation z, so the next layer
// received un-activated values and the activation only ever affected the very
// last output (MLP::forward returns .a) — the layer's output is `a`.
Eigen::VectorXd& Layer::operator()(Eigen::VectorXd const& x)
{
	X = x;
	z = W * x + B;
	a = z.unaryExpr(activate);
	return a;
}

// Build n-1 layers from the shape array: layer i maps shape[i] -> shape[i+1],
// each using the ACT activation and its numerical derivative.
MLP::MLP(int shape[], int n)
{
	for (int layer_idx = 0; layer_idx + 1 < n; ++layer_idx)
		insert_back(Layer(shape[layer_idx], shape[layer_idx + 1], ACT, soild_delta));
}

// Propagate x through every layer and return the last layer's activation.
Eigen::VectorXd MLP::forward(Eigen::VectorXd const& x)
{
	auto temp = first();
	auto last = temp;              // last layer actually visited
	Eigen::VectorXd y = x;
	for (int i = 0; i < _size; i++, temp = temp->succ) {
		last = temp;
		y = temp->data(y);
	}

	// BUG fixed: previously dereferenced the past-the-end node via
	// temp->pred after the loop — UB if the tail's succ is null.
	return last->data.a;
}
//if (i < _size - 1)
//y.binaryExpr(x, [](double x1, double x2){if (x1 == 0.0 or x2 == 0.0) return 0.0; else return x1 * x2;});
// Reset optimizer state after a completed backward pass so the next sample
// starts fresh (should == true makes the next layer visit call set()).
void SGD::step_zero()
{
	should = true;
	// Random(1) is kept (rather than Zero(1)) so the global rand() stream is
	// consumed exactly as before — training is seeded in MLP::train.
	error = Eigen::VectorXd::Random(1);
	error[0] = 0.0;
	// BUG fixed: unaryExpr returns a new expression and does NOT modify its
	// operand, so the two old calls were no-ops; setZero clears in place.
	last_W.setZero();
	Y.setZero();
}

// Seed the backprop error for the output layer: numerically differentiate the
// loss w.r.t. each pre-activation z[i] (central difference with step `delta`).
void SGD::set(Layer& layer)
{
	last_W = layer.W;  // remember this layer's weights for the next backward step
	// Random is used only to size the vector — every entry is overwritten in
	// the loop below. It also advances the global rand() stream; keep as-is so
	// seeded runs stay reproducible.
	error = Eigen::VectorXd::Random(layer.z.size());
	Eigen::VectorXd delta_z = layer.z;
	Eigen::VectorXd delta_z2 = layer.z;

	for (int i = 0; i < error.size(); i++) {
		delta_z[i] += delta;   // z + delta * e_i
		delta_z2[i] -= delta;  // z - delta * e_i
		// dL/dz[i] ≈ (L(z + δe_i) - L(z - δe_i)) / (2δ)
		// NOTE(review): differentiates the loss against the raw z, not the
		// activation a = act(z); confirm this is the intended output-layer
		// gradient.
		double L = (loss(delta_z, Y) - loss(delta_z2, Y)) / (2 * delta);
		error[i] = L;
		delta_z[i] -= delta;   // restore coordinate i before the next iteration
		delta_z2[i] += delta;
	}
}

// Visit one layer during the reverse traversal: update `error` for this layer,
// then apply an SGD step (with L2 regularization) to its weights and bias.
void SGD::operator()(Layer &layer)
{
	if (should) {
		// First (i.e. output) layer of the backward pass: seed the error.
		set(layer);
		should = false;
	}
	else {
		// Hidden layer: da/dz via central difference of the activation.
		Eigen::VectorXd delta_z = layer.z;
		Eigen::VectorXd delta_z2 = layer.z;
		for (auto& i: delta_z) i += delta;
		for (auto& i: delta_z2) i -= delta;
		// BUG fixed: unaryExpr does not modify its operand in place, so the
		// activation was never applied and da/dz evaluated to 1 everywhere;
		// assign the results back.
		delta_z = delta_z.unaryExpr(layer.activate);
		delta_z2 = delta_z2.unaryExpr(layer.activate);

		// da/dz ≈ (act(z+δ) - act(z-δ)) / (2δ), elementwise.
		Eigen::VectorXd dadz = (delta_z - delta_z2).unaryExpr([](double x){return x / (2 * delta);});
		// error_l = (W_{l+1}^T · error_{l+1}) ⊙ da/dz
		error = dadz.binaryExpr(last_W.transpose() * error, [](double x1, double x2){return x1 * x2;});
	}

	// Keep the pre-update weights for the next (earlier) layer's backprop.
	last_W = layer.W;
	layer.W -= LR * (error * layer.X.transpose() + regular_rate * layer.W);
	layer.B -= LR * error;
}

// Run one optimization step on a single (x, y) sample and return the absolute
// loss of the prediction made before the update.
template<typename VST>
double MLP::step(
		VST& optmizer,
		double (*lossfuc)(const Eigen::VectorXd &, const Eigen::VectorXd &),
		const Eigen::VectorXd& x,
		const Eigen::VectorXd& y
		)
{
	const Eigen::VectorXd prediction = forward(x);
	optmizer.ground_truth(y);    // hand the target to the optimizer
	reverse_traverse(optmizer);  // backward pass: visit layers back-to-front
	optmizer.step_zero();        // reset optimizer state for the next sample
	return std::abs(lossfuc(y, prediction));
}

// Train the network for `epoch` epochs on randomly drawn samples, printing
// running loss every show_interval steps and validation stats per epoch.
// 90% of size_ counts as training steps, the rest as validation steps.
template<typename VST>
void MLP::train(
		int epoch, VST &optmizer, int show_interval, int save_interval,
		double (*lossfuc)(const Eigen::VectorXd &, const Eigen::VectorXd &),
		Eigen::VectorXd* features, Eigen::VectorXd* labels, int size_,
		double (*accfuc)(const Eigen::VectorXd &, const Eigen::VectorXd &),
		void (*save)(MLP const& mlp)
		)
{
	srand(114514);  // fixed seed for reproducible runs

	int train_size = (size_ / 10) * 9;
	int val_size = size_ - train_size;
	double avg_loss = 0.0, avg_acc = 0.0;

	for (int i = 0; i < epoch; i++) {
		for (int j = 0; j < train_size; j++) {
			// BUG fixed: `% (size_ + 1)` could yield index == size_, reading
			// one past the end of features/labels.
			int index = abs(rand()) % size_;

			double l = step<VST>(optmizer, lossfuc, features[index], labels[index]);
			// Guard against show_interval == 0 (division by zero is UB).
			if (show_interval > 0 && (j + 1) % show_interval == 0)
				std::cout<<" epoch:"<<i + 1<<"/"<<epoch
						 <<" step:"<<j + 1<<"/"<<train_size
						 <<" loss:"<<l<<std::endl;
		}

		for (int j = 0; j < val_size; j++) {
			int index = abs(rand()) % size_;  // same out-of-bounds fix as above

			// NOTE(review): validation samples are drawn from the whole data
			// set rather than a held-out split — confirm this is intended.
			// forward() is evaluated once per sample instead of 2-3 times.
			Eigen::VectorXd prediction = forward(features[index]);
			avg_loss += lossfuc(labels[index], prediction);
			avg_acc += accfuc(labels[index], prediction);
			if (i == epoch - 1) std::cout<<"truth:"<<labels[index]<<" predict:"<<prediction<<"\n";
		}

		std::cout<<" epoch:"<<i + 1<<"/"<<epoch<<" is over"
		         <<" avg_loss:"<<avg_loss / val_size
		         <<" avg_error:"<<avg_acc / val_size<<"\n\n";
		//if ((i + 1) % save_interval == 0) save(*this);
		avg_loss = 0.0;
		avg_acc = 0.0;
	}
}
