#pragma once
#include <LibDL/utils.h>
#include <LibDL/optim/optimizer.h>

namespace optim {
	/*
	 * Optimizer wrappers: each struct below adapts a torch::optim
	 * optimizer (SGD, Adagrad, Adam, RMSprop) to this library's
	 * Optimizer base class, converting the parameter list via map_core
	 * and forwarding hyperparameters into the corresponding Options.
	 */
	using namespace std;
	using Parameters = std::vector<Tensor>;
	struct SGD :public Optimizer {
		SGD(const Parameters& parameters,
			double learning_rate,
			double momentum = 0, double dampening = 0, double weight_decay = 0, bool nesterov = false)
			:Optimizer(make_shared<torch::optim::SGD>(map_core(parameters),
				torch::optim::SGDOptions(learning_rate)
					.momentum(momentum)
					.dampening(dampening)
					.weight_decay(weight_decay)
					.nesterov(nesterov))) {}
	};

	struct Adagrad :public Optimizer {
		Adagrad(const Parameters& parameters,
			double learning_rate,
			double lr_decay = 0, double weight_decay = 0)
			:Optimizer(make_shared<torch::optim::Adagrad>(map_core(parameters),
				torch::optim::AdagradOptions(learning_rate)
				.lr_decay(lr_decay)
				.weight_decay(weight_decay))) {}
	};

	struct Adam :public Optimizer {
#ifdef Torch_Version_Less_15
		Adam(const Parameters& parameters,
			double learning_rate,
			double beta1 = 0.9, double beta2 = 0.999, double weight_decay = 0, double eps = 1e-8, bool amsgrad = false)
			:Optimizer(make_shared<torch::optim::Adam>(map_core(parameters),
				torch::optim::AdamOptions(learning_rate)
				.beta1(beta1).beta2(beta2).eps(eps).amsgrad(amsgrad)
				.weight_decay(weight_decay))) {}
#else
        Adam(const Parameters& parameters,
             double learning_rate,
             double beta1 = 0.9, double beta2 = 0.999, double weight_decay = 0, double eps = 1e-8, bool amsgrad = false)
            :Optimizer(make_shared<torch::optim::Adam>(map_core(parameters),
                 torch::optim::AdamOptions(learning_rate)
                 .betas(std::make_tuple(beta1, beta2)).eps(eps).amsgrad(amsgrad)
                 .weight_decay(weight_decay))) {}
#endif
	};

//	struct LBFGS :public Optimizer {
//		LBFGS(const Parameters& parameters,
//			double learning_rate,
//			int64_t max_iter = 20, int64_t max_eval = 25,
//			float tolerance_grad = 1e-5, float tolerance_change = 1e-9,
//			float history_size = 100)
//			:Optimizer(std::forward<torch::optim::LBFGS>(torch::optim::LBFGS(map_core(parameters),
//				torch::optim::LBFGSOptions(learning_rate)
//				.history_size(history_size)
//				.max_eval(max_eval)
//				.max_iter(max_iter)
//				.tolerance_change(tolerance_change)
//				.tolerance_grad(tolerance_grad)))) {}
//	};

	struct RMSprop :public Optimizer {
		RMSprop(const Parameters& parameters,
			double learning_rate,
			double alpha = 0.99, double eps = 1e-8,
			double weight_decay = 0, double momentum = 0, bool centered = false)
			:Optimizer(make_shared<torch::optim::RMSprop>(map_core(parameters),
				torch::optim::RMSpropOptions(learning_rate)
				.alpha(alpha)
				.eps(eps)
				.weight_decay(weight_decay)
				.centered(centered)
				.momentum(momentum))) {}
	};
}
