#include "Layer.hpp"
#include "Function.hpp"

namespace minnet
{
	namespace model
	{
		// Call operator: lets a model be invoked like a function, e.g. `out = layer(x)`.
		// Simply forwards to the virtual Forward() implementation.
		Tensor  Model::operator()(Tensor input) {
			Tensor result = Forward(input);
			return result;
		}

		// Put this model and every registered sub-model into training mode
		// (e.g. enables dropout; see DropOut::Forward).
		void Model::train() {
			*_train = 1;
			for (auto it = model_list.begin(); it != model_list.end(); ++it) {
				(*it)->train();
			}
		}

		// Put this model and every registered sub-model into evaluation mode
		// (e.g. disables dropout; see DropOut::Forward).
		void Model::eval() {
			*_train = 0;
			for (auto it = model_list.begin(); it != model_list.end(); ++it) {
				(*it)->eval();
			}
		}

		std::list<Tensor*> Model::parameters() {
			std::list<Tensor*> ret;
			for (auto model : model_list) {
				std::list<Tensor*> param_list = model->parameters();
				for (auto* tensor : param_list) {
					ret.push_back(tensor);
				}
			}
			return ret;
		}

		// Fully-connected layer: weight matrix is (input x output), bias row is (1 x output).
		Linear::Linear(int input, int output, bool use_bias) :use_bias(use_bias) {
			param = Tensor(input, output);
			param.rand();               // random weight initialization
			// NOTE(review): bias is never randomized — presumably Tensor(1, output)
			// zero-initializes, which is the conventional bias init; confirm.
			bias = Tensor(1, output);
		}

		// y = x . W, plus the bias row when enabled.
		Tensor Linear::Forward(Tensor& tensor) {
			if (!use_bias) {
				return tensor.dot2d(param);
			}
			return tensor.dot2d(param) + bias;
		}
		
		std::list<Tensor*> Linear::parameters() {
			return std::list<Tensor*>{&param, &bias};
		}

		// 2-D convolution layer; the kernel tensor has shape
		// (out_ch, in_ch, kernel_size, kernel_size). Note the out_ch-first
		// parameter order.
		Conv2d::Conv2d(int out_ch, int in_ch, int kernel_size, int padding, int stride_x, int stride_y)
		: padding(padding), stride_x(stride_x), stride_y(stride_y) {
			param = Tensor(out_ch, in_ch, kernel_size, kernel_size);
			param.rand();  // random kernel initialization
			// Attach a per-output-channel bias to the kernel tensor itself
			// (kept after rand() — presumably so the bias is not randomized; confirm).
			param.set_conv_bias(out_ch);
		}

		// Apply the convolution with this layer's padding and strides.
		Tensor Conv2d::Forward(Tensor& tensor) {
			Tensor out = tensor.conv2d(param, padding, stride_x, stride_y);
			return out;
		}

		std::list<Tensor*> Conv2d::parameters() {
			return std::list<Tensor*>{&param};
		}

		// Dropout layer; `proportion` is the fraction of activations to zero
		// out while the model is in training mode.
		DropOut::DropOut(float proportion) : proportion(proportion) {
			
		}

		// Dropout is active only in training mode; in eval mode this layer is
		// the identity (the shared _train flag is toggled by Model::train/eval).
		Tensor DropOut::Forward(Tensor& tensor) {
			if (!*_train) {
				return tensor;
			}
			return tensor.dropout(proportion);
		}

		// Stateless activation: delegate to the free ReLU function.
		Tensor Relu::Forward(Tensor& tensor) {
			Tensor activated = minnet::function::Relu(tensor);
			return activated;
		}

		// Stateless activation: delegate to the free softmax function.
		Tensor SoftMax::Forward(Tensor& tensor) {
			Tensor activated = minnet::function::SoftMax(tensor);
			return activated;
		}

		// Stateless activation: delegate to the free sigmoid function.
		// Fully qualified as minnet::function:: for consistency with the other
		// activation layers in this file (resolves to the same function either way).
		Tensor Sigmoid::Forward(Tensor& tensor) {
			return minnet::function::Sigmoid(tensor);
		}

		// Stateless activation: delegate to the free tanh function.
		// Fully qualified as minnet::function:: for consistency with the other
		// activation layers in this file (resolves to the same function either way).
		Tensor Tanh::Forward(Tensor& tensor) {
			return minnet::function::Tanh(tensor);
		}

		// Max pooling layer with a square window of side `size`.
		MaxPool2d::MaxPool2d(int size): size(size) {

		}

		// Apply max pooling with the configured window size.
		Tensor MaxPool2d::Forward(Tensor& tensor) {
			Tensor pooled = tensor.maxpool2d(size);
			return pooled;
		}

		// Feed the input through every registered layer, in order, chaining the
		// output of each layer into the next.
		Tensor Sequence::Forward(Tensor& input) {
			Tensor out = input;
			for (auto& layer : model_list) {
				out = layer->Forward(out);
			}
			return out;
		}

		// Stacked RNN: an input projection, `num_layer` recurrent blocks (each a
		// Linear(2*hidden -> hidden) followed by Tanh), and an output head.
		RNN::RNN(int input_size, int hidden_size, int out_size, int num_layer) :hidden_size(hidden_size),
			num_layer(num_layer), input_size(input_size), output_size(out_size)
		{
			rnn_in = Linear(input_size, hidden_size);
			rnn_hidden = std::vector<Sequence>(num_layer);
			for (int i = 0; i < num_layer; i++) {
				// Each block consumes concat([h_prev, x]) along dim 1, hence 2*hidden_size inputs.
				rnn_hidden[i] = Sequence(Linear(hidden_size + hidden_size, hidden_size), Tanh());
			}
			rnn_out = Linear(hidden_size, out_size);
			// Register sub-layers so Model::train/eval/parameters reach them.
			// NOTE(review): register_layer presumably stores references/pointers to
			// these members — rnn_hidden must not be resized after this point; confirm.
			register_layer(rnn_in, rnn_out);
			for (auto& hidden : rnn_hidden) register_layer(hidden);
		}

		// Run the stacked RNN over a sequence and return the output projection of
		// the top layer's final hidden state.
		Tensor RNN::Forward(Tensor& rnn_input) {
			// One tensor per time step; assumes dim 0 of rnn_input is the sequence axis — TODO confirm.
			std::vector<Tensor> input = minnet::Tensor::splite(0, rnn_input);
			// hidden_state[0] holds the projected current input; [1..num_layer] hold each
			// layer's state, carried across time steps. Initial states are Tensor(1, hidden_size)
			// — presumably zero-initialized; confirm Tensor's default fill.
			std::vector<Tensor> hidden_state(num_layer + 1, Tensor(1, hidden_size));
			int seq_len = input.size();
			for (int i = 0; i < seq_len; i++) {
				hidden_state[0] = rnn_in(input[i]);
				for (int j = 1; j <= num_layer; j++) {
					// Concatenate [layer j's previous state, layer j-1's current output] along dim 1;
					// this order must match the Linear(2*hidden, hidden) built in the constructor.
					hidden_state[j] = rnn_hidden[j - 1](Tensor::concat(1, { hidden_state[j], hidden_state[j-1] }));
				}
			}
			return rnn_out(hidden_state[num_layer]);
		}

		// One LSTM cell: four gates, each Linear(2*hidden -> hidden) over
		// concat([h_{t-1}, x_t]) with a sigmoid (tanh for the candidate update).
		LSTMBasicBlock::LSTMBasicBlock(int hidden_size) {
			forget_gate = Sequence(Linear(hidden_size + hidden_size, hidden_size), Sigmoid());
			output_gate = Sequence(Linear(hidden_size + hidden_size, hidden_size), Sigmoid());
			input_gate = Sequence(Linear(hidden_size + hidden_size, hidden_size), Sigmoid());
			update_gate = Sequence(Linear(hidden_size + hidden_size, hidden_size), Tanh());
			// Registration order determines the order in parameters(); kept as-is.
			register_layer(forget_gate, input_gate, update_gate, output_gate);
		}

		// One LSTM time step. `input` is expected to be concat([h_{t-1}, x_t])
		// along dim 1 (see LSTM::Forward); C is the cell state carried between
		// steps and reset per sequence by LSTM::Forward.
		Tensor LSTMBasicBlock::Forward(Tensor& input) {
			// Forget gate scales down the old cell state.
			C = C * forget_gate(input);
			// Input gate admits the tanh candidate update into the cell state.
			C = C + input_gate(input) * update_gate(input);
			// Hidden state h_t = tanh(C) * o_t.
			// NOTE(review): `tanh` here is unqualified — presumably a minnet
			// elementwise op found by ADL, not std::tanh; confirm.
			return tanh(C) * output_gate(input);
		}
		

		// Stacked LSTM: an input projection, `num_layer` LSTM cells, and an
		// output head. Mirrors the RNN constructor above.
		LSTM::LSTM(int input_size, int hidden_size, int out_size, int num_layer) :hidden_size(hidden_size),
			num_layer(num_layer), input_size(input_size), output_size(out_size)
		{
			lstm_in = Linear(input_size, hidden_size);
			lstm_hidden = std::vector<LSTMBasicBlock>(num_layer);
			for (int i = 0; i < num_layer; i++) {
				lstm_hidden[i] = LSTMBasicBlock(hidden_size);
			}
			lstm_out = Linear(hidden_size, output_size);
			// Register sub-layers so Model::train/eval/parameters reach them.
			// NOTE(review): register_layer presumably stores references/pointers to
			// these members — lstm_hidden must not be resized after this point; confirm.
			register_layer(lstm_in, lstm_out);
			for (auto& hidden : lstm_hidden) register_layer(hidden);
		}

		// Run the stacked LSTM over a sequence and return the output projection
		// of the top layer's final hidden state.
		Tensor LSTM::Forward(Tensor& _input) {
			// One tensor per time step; assumes dim 0 of _input is the sequence axis — TODO confirm.
			std::vector<Tensor> input = minnet::Tensor::splite(0, _input);
			// hidden_state[0] holds the projected current input; [1..num_layer] hold each
			// layer's hidden state carried across time steps.
			std::vector<Tensor> hidden_state(num_layer + 1, Tensor(1, hidden_size));
			int seq_len = input.size();
			// Reset every cell state at the start of the sequence (C persists inside
			// each block between Forward calls otherwise).
			for (auto& h : lstm_hidden) h.C = Tensor(1, hidden_size);
			for (int i = 0; i < seq_len; i++) {
				hidden_state[0] = lstm_in(input[i]);
				for (int j = 1; j <= num_layer; j++) {
					// Concatenate [layer j's previous hidden, layer j-1's current output] along dim 1;
					// order must match the gate Linears built in LSTMBasicBlock.
					hidden_state[j] = lstm_hidden[j - 1](Tensor::concat(1, { hidden_state[j], hidden_state[j - 1] }));
				}
			}
			return lstm_out(hidden_state[num_layer]);
		}
	} // namespace model
} // namespace minnet
