#ifndef LAYER_H_
#define LAYER_H_
#include "Tensor.hpp"
#include "Function.hpp"

#include <cmath>
#include <list>
#include <memory>
#include <vector>

namespace minnet
{
	namespace model
	{
		// Abstract base for every layer. Owns registered sub-layers and a shared
		// training flag so train()/eval() can propagate through the whole tree.
		class Model {
		public:
			// Polymorphic base: deleting a derived layer through Model* must be safe.
			virtual ~Model() = default;

			// Runs the layer on `input` and returns its output tensor.
			virtual Tensor Forward(Tensor& input) = 0;
			// Trainable tensors of this layer; overridden by layers that own
			// parameters. Defined out of line.
			virtual std::list<Tensor*> parameters();

			// Registers one or more sub-layers by copy; the copies are owned by
			// model_list so parameters()/train()/eval() can reach them.
			template<typename T, typename... Args>
			void register_layer(const T& _model, const Args&... _models) {
				// make_shared instead of raw new: exception-safe, one allocation.
				model_list.push_back(std::make_shared<T>(_model));
				register_layer(_models...);
			}

			// Recursion terminator for the variadic register_layer above.
			void register_layer() {

			}

			void train();  // switch to training mode (affects e.g. DropOut)
			void eval();   // switch to inference mode

			// Convenience call operator; defined out of line.
			Tensor operator()(Tensor input);

			// Shared so copies of a layer observe the same train/eval state.
			std::shared_ptr<bool> _train = std::make_shared<bool>(true);
			std::list<std::shared_ptr<Model>> model_list = {};
		};

		// Fully connected layer: output = input * param (+ bias when enabled).
		class Linear : public Model {
		public:
			Linear() {}
			// input/output: feature counts; use_bias: add a learnable bias term.
			Linear(int input, int output, bool use_bias = true);
			Tensor Forward(Tensor& tensor) override;
			std::list<Tensor*> parameters() override;
		private:
			Tensor param, bias;
			// In-class initializer: the default constructor previously left this
			// flag uninitialized, so reading it was undefined behavior.
			bool use_bias = true;
		};

		// 2D convolution layer.
		class Conv2d : public Model {
		public:
			Conv2d() = default;
			// out_ch/in_ch: channel counts; kernel is square of kernel_size.
			Conv2d(int out_ch, int in_ch, int kernel_size = 3, int padding = 1, int stride_x = 1, int stride_y = 1);
			Tensor Forward(Tensor& tensor) override;
			std::list<Tensor*> parameters() override;
		private:
			Tensor param;
			// In-class defaults mirror the original default constructor (all 0).
			int padding = 0, stride_x = 0, stride_y = 0;
		};

		// Zeroes a fraction of activations while training (no-op presumed in eval;
		// Forward is defined out of line).
		class DropOut : public Model {
		public:
			DropOut() = default;
			// proportion: fraction of values to drop.
			DropOut(float proportion);
			Tensor Forward(Tensor& tensor) override;
		private:
			float proportion = 0.5f;  // mirrors the original default constructor
		};

		// Rectified linear activation layer; Forward is defined out of line.
		class Relu : public Model {
		public:
			Relu() = default;
			Tensor Forward(Tensor& tensor) override;
		};

		// Softmax activation layer; Forward is defined out of line.
		class SoftMax : public Model {
		public:
			SoftMax() = default;
			Tensor Forward(Tensor& tensor) override;
		};

		// Logistic sigmoid activation layer; Forward is defined out of line.
		class Sigmoid : public Model {
		public:
			Sigmoid() = default;
			Tensor Forward(Tensor& tensor) override;
		};

		// Hyperbolic tangent activation layer; Forward is defined out of line.
		class Tanh : public Model {
		public:
			Tanh() = default;
			Tensor Forward(Tensor& tensor) override;
		};

		// Max pooling over a square window.
		class MaxPool2d : public Model {
		public:
			MaxPool2d() = default;
			// size: edge length of the pooling window.
			MaxPool2d(int size);
			Tensor Forward(Tensor& tensor) override;
		private:
			int size = 2;  // mirrors the original default constructor
		};

		// Container layer that runs its registered layers in order.
		class Sequence :public Model {
		public:
			Sequence() {}
			// Copies each layer argument into model_list, preserving order.
			template<typename... Args>
			Sequence(const Args&... args) {				
				Init(args...);
			}

			// Stores a copy of `arg` and recurses on the remaining layers.
			template<typename T, typename... Args>
			void Init(const T& arg, const Args&... args) {
				// make_shared instead of raw new: exception-safe, one allocation
				// (the old `new decltype(T(arg))(arg)` was just `new T(arg)`).
				model_list.push_back(std::make_shared<T>(arg));
				Init(args...);
			}

			// Recursion terminator for the variadic Init above.
			void Init() {
				return;
			}

			Tensor Forward(Tensor& tensor) override;
		};

		// Simple recurrent network: input projection, a stack of hidden-layer
		// sequences, and an output projection.
		class RNN :public Model {
		public:
			RNN() = default;
			RNN(int input_size, int hidden_size, int out_size, int num_hidden_layer = 1);
			Tensor Forward(Tensor& input) override;
		private:
			Sequence rnn_in;                   // input -> hidden projection
			Sequence rnn_out;                  // hidden -> output projection
			std::vector<Sequence> rnn_hidden;  // one entry per hidden layer
			int input_size = 0;
			int output_size = 0;
			int hidden_size = 0;
			int num_layer = 0;
		};

		// One LSTM cell: forget/input/output/update gates plus the cell state C.
		class LSTMBasicBlock :public Model {
		public:
			LSTMBasicBlock() = default;
			LSTMBasicBlock(int hidden_size);
			Tensor Forward(Tensor& input) override;
			Tensor C;  // cell state, public so it can be carried across steps
		private:
			Sequence forget_gate;
			Sequence output_gate;
			Sequence input_gate;
			Sequence update_gate;
			Tanh tanh;
		};

		// Stacked LSTM: Linear layers around a stack of LSTMBasicBlock cells.
		class LSTM :public Model {
		public:
			LSTM() = default;
			LSTM(int input_size, int hidden_size, int out_size, int num_hidden_layer = 1);
			Tensor Forward(Tensor& input) override;
		private:
			Linear lstm_in;                            // input -> hidden
			Linear lstm_out;                           // hidden -> output
			std::vector<LSTMBasicBlock> lstm_hidden;   // one cell per hidden layer
			int input_size = 0;
			int output_size = 0;
			int hidden_size = 0;
			int num_layer = 0;
		};

		class SelfAttention :public Model {
		public:
			SelfAttention() {}
			SelfAttention(int input_size, int output_size):input_size(input_size), output_size(output_size) {
				K_M = Linear(input_size, output_size, 0);
				Q_M = Linear(input_size, output_size, 0);
				V_M = Linear(input_size, output_size, 0);
			}
			Tensor Forward(Tensor& input) override {
				float scale = std::pow(output_size, 0.5);
				Tensor K = K_M(input);
				K.transpose(1, 0);
				Tensor Q = Q_M(input);
				Tensor V = V_M(input);
				return minnet::function::SoftMax(Q.dot2d(K) / scale).dot2d(V);
			}
		private:
			Linear K_M;
			Linear Q_M;
			Linear V_M;
			int input_size, output_size;
		};

		// Minimal transformer encoder block: additive sinusoidal positional
		// encoding, multi-head self-attention, then a two-layer feed-forward
		// projection back to the input width.
		class TransformerEncoder :public Model {
		public:
			TransformerEncoder() {}
			// input_size: model width; output_size: per-head attention width.
			TransformerEncoder(int input_size, int output_size, int head_num = 1) :head_num(head_num) {
				QKV = std::vector <SelfAttention>(head_num, SelfAttention(input_size, output_size));
				transformer_out = Sequence(
					Linear(head_num * output_size, input_size, 0),
					Relu(),
					Linear(input_size, input_size, 0)
					);
			}
			Tensor Forward(Tensor& input) override {
				// Assumes input is 2-D: rows are positions, columns are features
				// (one-hot-ish: `index` is the last non-zero column per row).
				const int rows = input.shape()[0];
				const int cols = input.shape()[1];
				Tensor pos = Tensor(input.shape());
				for (int i = 0; i < rows; i++) {
					int index = 0;
					for (int k = 0; k < cols; k++) {
						if (input.at(i, k) != 0) index = k;
					}
					// BUG FIX: `index / cols` was integer division, which is 0 for
					// every index < cols, so pow(10000, ·) was always 1 and the
					// frequency term vanished. Cast to float for pos/10000^(idx/d).
					float freq = std::pow(10000.f, static_cast<float>(index) / cols);
					float word_pos = (index % 2 == 0) ? std::sin(i / freq)
					                                  : std::cos(i / freq);
					for (int j = 0; j < cols; j++) {
						pos.at(i, j) = word_pos;
					}
				}
				Tensor in = input + pos;
				// One attention pass per head, concatenated along the feature axis.
				std::vector<Tensor> hidden(head_num);
				for (int i = 0; i < head_num; i++) {
					hidden[i] = QKV[i](in);
				}
				Tensor out = transformer_out(Tensor::concat(1, hidden));
				return out;
			}
		private:
			std::vector<SelfAttention> QKV;
			Sequence transformer_out;
			int head_num = 0;
		};
	} // namespace model
} // namespace minnet
#endif // LAYER_H_


