#pragma once

#include "..\\ExternalTools\\ExternalTools.h"

#include "..\\Loirey\\loirey_GLOBAL.h"

#include "..\\Loirey\\loirey_BasicStructure.h"
#include "..\\Loirey\\loirey_Configuration.h"
#include "..\\Loirey\\loirey_MyMath.h"

#include "..\\Classification.Base\\cl_base_Data.h"
#include "..\\Classification.Base\\cl_base_BinaryClassification.h"

#include <cstdarg>

// NOTE(review): `using namespace` at header scope injects loirey into every
// translation unit that includes this file; consider qualifying names instead.
using namespace loirey;

// Values for svm_parameter::svm_type / kernel_type. Kept as plain enums (not
// enum class) because the integer values are written to / read from model
// files via svm_type_table / kernel_type_table and must match LIBSVM's.
enum { C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR };	/* svm_type */
enum { LINEAR, POLY, RBF, SIGMOID, PRECOMPUTED }; /* kernel_type */

// Dense-feature port of LIBSVM (Chang & Lin) gathered into one tools class:
// core data structures, kernel cache, SMO solvers, Q-matrix implementations,
// and the public train / predict / save / load API.
class CLibSVMdense_Tools
{
public:
	// One dense example: `values` points at an array of `dim` doubles.
	// The struct never allocates or frees `values`; the caller owns it.
	struct svm_node
	{
		int dim;
		double* values;
	};
	// A training set: l examples, targets y[0..l-1], inputs x[0..l-1].
	struct svm_problem
	{
		int l;
		double* y;
		svm_node* x;
	};

	// Textual names used by svm_save_model / svm_load_model; indexed by the
	// svm_type / kernel_type enums above, so order must not change.
	static const char* svm_type_table[];
	static const char* kernel_type_table[];

	// All tunable knobs for training and prediction (mirrors LIBSVM's
	// svm_parameter, extended with RAII release of the weight arrays).
	struct svm_parameter
	{
		int svm_type;
		int kernel_type;
		int degree;	/* for poly */
		double gamma;	/* for poly/rbf/sigmoid */
		double coef0;	/* for poly/sigmoid */

		/* these are for training only */
		double cache_size; /* in MB */
		double eps;	/* stopping criteria */
		double C;	/* for C_SVC, EPSILON_SVR and NU_SVR */
		int nr_weight;		/* for C_SVC */
		int *weight_label;	/* for C_SVC */
		double* weight;		/* for C_SVC */
		double nu;	/* for NU_SVC, ONE_CLASS, and NU_SVR */
		double p;	/* for EPSILON_SVR */
		int shrinking;	/* use the shrinking heuristics */
		int probability; /* do probability estimates */

		svm_parameter()
		{
			weight_label = NULL;
			weight = NULL;
		}
		// NOTE(review): the destructor releases weight_label/weight, but the
		// implicitly-generated copy ctor/assignment perform a shallow copy
		// (rule of three violation) — copying an svm_parameter that owns
		// these arrays would double-free them. svm_model below holds a
		// svm_parameter by value; confirm how svm_train populates it.
		~svm_parameter()
		{
			CDataStructureTools::MyRelease_List(weight_label);
			CDataStructureTools::MyRelease_List(weight);
		}
	};

	//
	// svm_model
	//
	// Trained model. Pointer members are owned by the model and released by
	// svm_destroy_model (SV ownership depends on free_sv, see below).
	class svm_model
	{
	public:
		svm_parameter param;	// parameter
		int nr_class;		// number of classes, = 2 in regression/one class svm
		int l;			// total #SV
		svm_node *SV;		// SVs (SV[l])
		double** sv_coef;	// coefficients for SVs in decision functions (sv_coef[k-1][l])
		double* rho;		// constants in decision functions (rho[k*(k-1)/2])
		double* probA;          // pairwise probability information
		double* probB;

		// for classification only

		int *label;		// label of each class (label[k])
		int *nSV;		// number of SVs for each class (nSV[k])
		// nSV[0] + nSV[1] + ... + nSV[k-1] = l
		// XXX
		int free_sv;		// 1 if svm_model is created by svm_load_model
		// 0 if svm_model is created by svm_train
	};

	// --- Public LIBSVM API (implemented in the corresponding .cpp) ---
	static svm_model* svm_train(const svm_problem *prob, const struct svm_parameter *param);
	static void svm_cross_validation(const svm_problem *prob, const struct svm_parameter *param, int nr_fold, double* target);

	static int svm_save_model(const char *model_file_name, const svm_model *model);
	static svm_model *svm_load_model(const char *model_file_name);

	static int svm_get_svm_type(const svm_model *model);
	static int svm_get_nr_class(const svm_model *model);
	static void svm_get_labels(const svm_model *model, int *label);
	static double svm_get_svr_probability(const svm_model *model);

	static void svm_predict_values(const svm_model *model, const svm_node *x, double* dec_values);
	static double svm_predict(const svm_model *model, const svm_node *x);
	static double svm_predict_probability(const svm_model *model, const svm_node *x, double* prob_estimates);

	static void svm_destroy_model(svm_model *model);
	static void svm_destroy_param(struct svm_parameter *param);

	// Returns NULL when the parameter set is valid, otherwise an error string.
	static const char *svm_check_parameter(const svm_problem *prob, const struct svm_parameter *param);
	static int svm_check_probability_model(const svm_model *model);

	//#ifndef min
	//template <class T> static inline T min(T x,T y) { return (x<y)?x:y; }
	//#endif
	//#ifndef max
	//template <class T> static inline T max(T x,T y) { return (x>y)?x:y; }
	//#endif
	//template <class T> static inline void swap(T& x, T& y) { T t=x; x=y; y=t; }
	// Allocate dst[n] and bitwise-copy from src. Only safe for trivially
	// copyable element types (used here for schar/int/double arrays).
	template <class S, class T> static inline void clone(T*& dst, S* src, int n)
	{
		dst = new T[n];
		memcpy((void *)dst,(void *)src,sizeof(T)*n);
	}
	// base^times by exponentiation-by-squaring; times <= 0 yields 1.0.
	static inline double powi(double base, int times)
	{
		double tmp = base, ret = 1.0;

		for(int t=times; t>0; t/=2)
		{
			if(t%2==1) ret*=tmp;
			tmp = tmp * tmp;
		}
		return ret;
	}
#define INF HUGE_VAL
#define TAU 1e-12
// Raw malloc (no constructors run) — used for POD-style LIBSVM allocations.
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
#if 1
	// Progress logging to stdout; flip the #if to silence training output.
	static void info(const char *fmt,...)
	{
		va_list ap;
		va_start(ap,fmt);
		vprintf(fmt,ap);
		va_end(ap);
	}
	static void info_flush()
	{
		fflush(stdout);
	}
#else
	static void info(char *fmt,...) {}
	static void info_flush() {}
#endif

	typedef float Qfloat;	// single precision keeps the kernel cache compact
	typedef signed char schar;

	//
	// Kernel Cache
	//
	// l is the number of total data items
	// size is the cache size limit in bytes
	//
	class Cache
	{
	public:
		Cache(int l,long int size);
		~Cache();

		// request data [0,len)
		// return some position p where [p,len) need to be filled
		// (p >= len if nothing needs to be filled)
		int get_data(const int index, Qfloat **data, int len);
		void swap_index(int i, int j);	// future_option
	private:
		int l;
		long int size;
		struct head_t
		{
			head_t *prev, *next;	// a circular list
			Qfloat *data;
			int len;		// data[0,len) is cached in this entry
		};

		head_t *head;
		head_t lru_head;	// sentinel of the circular LRU list
		void lru_delete(head_t *h);
		void lru_insert(head_t *h);
	};

	//
	// Kernel evaluation
	//
	// the static method k_function is for doing single kernel evaluation
	// the constructor of Kernel prepares to calculate the l*l kernel matrix
	// the member function get_Q is for getting one column from the Q Matrix
	//
	class QMatrix {
	public:
		virtual Qfloat *get_Q(int column, int len) const = 0;
		virtual Qfloat *get_QD() const = 0;
		virtual void swap_index(int i, int j) const = 0;
		virtual ~QMatrix() {}
	};

	class Kernel: public QMatrix {
	public:
		Kernel(int l, svm_node * x, const svm_parameter& param);
		virtual ~Kernel();

		static double k_function(const svm_node *x, const svm_node *y,
			const svm_parameter& param);
		virtual Qfloat *get_Q(int column, int len) const = 0;
		virtual Qfloat *get_QD() const = 0;
		virtual void swap_index(int i, int j) const	// not so const...
		{
			swap(x[i],x[j]);
			if(x_square) swap(x_square[i],x_square[j]);
		}
	protected:

		// Bound at construction to one of the kernel_* members below,
		// according to param.kernel_type — avoids a switch per evaluation.
		double (Kernel::*kernel_function)(int i, int j) const;

	private:
		svm_node *x;
		double* x_square;	// precomputed dot(x[i],x[i]); only for RBF

		// svm_parameter
		const int kernel_type;
		const int degree;
		const double gamma;
		const double coef0;

		static double dot(const svm_node *px, const svm_node *py);
		static double dot(const svm_node &px, const svm_node &py);

		double kernel_linear(int i, int j) const
		{
			return dot(x[i],x[j]);
		}
		double kernel_poly(int i, int j) const
		{
			return powi(gamma*dot(x[i],x[j])+coef0,degree);
		}
		double kernel_rbf(int i, int j) const
		{
			return exp(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j])));
		}
		double kernel_sigmoid(int i, int j) const
		{
			return tanh(gamma*dot(x[i],x[j])+coef0);
		}
		// PRECOMPUTED: values[0] of each node holds its 1-based kernel index.
		double kernel_precomputed(int i, int j) const
		{
			return (x+i)->values[(int)((x+j)->values[0])];
		}
	};

	// An SMO algorithm in Fan et al., JMLR 6(2005), p. 1889--1918
	// Solves:
	//
	//	min 0.5(\alpha^T Q \alpha) + p^T \alpha
	//
	//		y^T \alpha = \delta
	//		y_i = +1 or -1
	//		0 <= alpha_i <= Cp for y_i = 1
	//		0 <= alpha_i <= Cn for y_i = -1
	//
	// Given:
	//
	//	Q, p, y, Cp, Cn, and an initial feasible point \alpha
	//	l is the size of vectors and matrices
	//	eps is the stopping tolerance
	//
	// solution will be put in \alpha, objective value will be put in obj
	//
	class Solver {
	public:
		Solver() {};
		virtual ~Solver() {};

		struct SolutionInfo {
			double obj;
			double rho;
			double upper_bound_p;
			double upper_bound_n;
			double r;	// for Solver_NU
		};

		void Solve(int l, const QMatrix& Q, const double* p_, const schar *y_,
			double* alpha_, double Cp, double Cn, double eps,
			SolutionInfo* si, int shrinking);
	protected:
		int active_size;	// size of the not-yet-shrunken working set
		schar *y;
		double* G;		// gradient of objective function
		enum { LOWER_BOUND, UPPER_BOUND, FREE };
		char *alpha_status;	// LOWER_BOUND, UPPER_BOUND, FREE
		double* alpha;
		const QMatrix *Q;
		const Qfloat *QD;	// diagonal of Q, fetched once via get_QD()
		double eps;
		double Cp,Cn;
		double* p;
		int *active_set;
		double* G_bar;		// gradient, if we treat free variables as 0
		int l;
		bool unshrinked;	// XXX

		// Per-example box constraint: Cp for positive, Cn for negative labels.
		double get_C(int i)
		{
			return (y[i] > 0)? Cp : Cn;
		}
		void update_alpha_status(int i)
		{
			if(alpha[i] >= get_C(i))
				alpha_status[i] = UPPER_BOUND;
			else if(alpha[i] <= 0)
				alpha_status[i] = LOWER_BOUND;
			else alpha_status[i] = FREE;
		}
		bool is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; }
		bool is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; }
		bool is_free(int i) { return alpha_status[i] == FREE; }
		void swap_index(int i, int j);
		void reconstruct_gradient();
		virtual int select_working_set(int &i, int &j);
		virtual double calculate_rho();
		virtual void do_shrinking();
	private:
		bool be_shrunken(int i, double Gmax1, double Gmax2);	
	};

	//
	// Solver for nu-svm classification and regression
	//
	// additional constraint: e^T \alpha = constant
	//
	class Solver_NU : public Solver
	{
	public:
		Solver_NU() {}
		void Solve(int l, const QMatrix& Q, const double* p, const schar *y,
			double* alpha, double Cp, double Cn, double eps,
			SolutionInfo* si, int shrinking)
		{
			this->si = si;	// stash for calculate_rho(), which also fills si->r
			Solver::Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking);
		}
	private:
		SolutionInfo *si;
		int select_working_set(int &i, int &j);
		double calculate_rho();
		bool be_shrunken(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4);
		void do_shrinking();
	};

	//
	// Q matrices for various formulations
	//
	// C-SVC: Q[i][j] = y_i * y_j * K(i,j), served column-wise via the LRU cache.
	class SVC_Q: public Kernel
	{ 
	public:
		SVC_Q(const svm_problem& prob, const svm_parameter& param, const schar *y_)
			:Kernel(prob.l, prob.x, param)
		{
			clone(y,y_,prob.l);
			cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)));
			QD = new Qfloat[prob.l];
			for(int i=0;i<prob.l;i++)
				QD[i]= (Qfloat)(this->*kernel_function)(i,i);
		}

		// Returns column i of Q; entries [start,len) missed the cache and are
		// recomputed in place. The pointer is owned by the cache.
		Qfloat *get_Q(int i, int len) const
		{
			Qfloat *data;
			int start;
			if((start = cache->get_data(i,&data,len)) < len)
			{
				for(int j=start;j<len;j++)
					data[j] = (Qfloat)(y[i]*y[j]*(this->*kernel_function)(i,j));
			}
			return data;
		}

		Qfloat *get_QD() const
		{
			return QD;
		}

		void swap_index(int i, int j) const
		{
			cache->swap_index(i,j);
			Kernel::swap_index(i,j);
			swap(y[i],y[j]);
			swap(QD[i],QD[j]);
		}

		~SVC_Q()
		{
			delete[] y;
			delete cache;
			delete[] QD;
		}
	private:
		schar *y;
		Cache *cache;
		Qfloat *QD;
	};

	// One-class SVM: Q[i][j] = K(i,j) (no label signs).
	class ONE_CLASS_Q: public Kernel
	{
	public:
		ONE_CLASS_Q(const svm_problem& prob, const svm_parameter& param)
			:Kernel(prob.l, prob.x, param)
		{
			cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)));
			QD = new Qfloat[prob.l];
			for(int i=0;i<prob.l;i++)
				QD[i]= (Qfloat)(this->*kernel_function)(i,i);
		}

		Qfloat *get_Q(int i, int len) const
		{
			Qfloat *data;
			int start;
			if((start = cache->get_data(i,&data,len)) < len)
			{
				for(int j=start;j<len;j++)
					data[j] = (Qfloat)(this->*kernel_function)(i,j);
			}
			return data;
		}

		Qfloat *get_QD() const
		{
			return QD;
		}

		void swap_index(int i, int j) const
		{
			cache->swap_index(i,j);
			Kernel::swap_index(i,j);
			swap(QD[i],QD[j]);
		}

		~ONE_CLASS_Q()
		{
			delete cache;
			delete[] QD;
		}
	private:
		Cache *cache;
		Qfloat *QD;
	};

	// SVR: the 2l-variable reformulation. Indices [0,l) carry sign +1 and
	// [l,2l) sign -1; index[] maps each back to the original example so the
	// l*l kernel cache is shared between the two halves.
	class SVR_Q: public Kernel
	{ 
	public:
		SVR_Q(const svm_problem& prob, const svm_parameter& param)
			:Kernel(prob.l, prob.x, param)
		{
			l = prob.l;
			cache = new Cache(l,(long int)(param.cache_size*(1<<20)));
			QD = new Qfloat[2*l];
			sign = new schar[2*l];
			index = new int[2*l];
			for(int k=0;k<l;k++)
			{
				sign[k] = 1;
				sign[k+l] = -1;
				index[k] = k;
				index[k+l] = k;
				QD[k]= (Qfloat)(this->*kernel_function)(k,k);
				QD[k+l]=QD[k];
			}
			buffer[0] = new Qfloat[2*l];
			buffer[1] = new Qfloat[2*l];
			next_buffer = 0;
		}

		void swap_index(int i, int j) const
		{
			swap(sign[i],sign[j]);
			swap(index[i],index[j]);
			swap(QD[i],QD[j]);
		}

		Qfloat *get_Q(int i, int len) const
		{
			Qfloat *data;
			int real_i = index[i];
			if(cache->get_data(real_i,&data,l) < l)
			{
				for(int j=0;j<l;j++)
					data[j] = (Qfloat)(this->*kernel_function)(real_i,j);
			}

			// reorder and copy
			// Two buffers alternate so the solver can hold the previous
			// column while requesting the next; a third request invalidates
			// the first pointer.
			Qfloat *buf = buffer[next_buffer];
			next_buffer = 1 - next_buffer;
			schar si = sign[i];
			for(int j=0;j<len;j++)
				buf[j] = si * sign[j] * data[index[j]];
			return buf;
		}

		Qfloat *get_QD() const
		{
			return QD;
		}

		~SVR_Q()
		{
			delete cache;
			delete[] sign;
			delete[] index;
			delete[] buffer[0];
			delete[] buffer[1];
			delete[] QD;
		}
	private:
		int l;
		Cache *cache;
		schar *sign;
		int *index;
		mutable int next_buffer;	// flips in const get_Q (double buffering)
		Qfloat *buffer[2];
		Qfloat *QD;
	};

	//
	// construct and solve various formulations
	//

	static void solve_c_svc(
		const svm_problem *prob, const svm_parameter* param,
		double* alpha, Solver::SolutionInfo* si, double Cp, double Cn
		);

	static void solve_nu_svc(
		const svm_problem *prob, const svm_parameter *param,
		double* alpha, Solver::SolutionInfo* si
		);

	static void solve_one_class(
		const svm_problem *prob, const svm_parameter *param,
		double* alpha, Solver::SolutionInfo* si
		);

	static void solve_epsilon_svr(
		const svm_problem *prob, const svm_parameter *param,
		double* alpha, Solver::SolutionInfo* si
		);

	static void solve_nu_svr(
		const svm_problem *prob, const svm_parameter *param,
		double* alpha, Solver::SolutionInfo* si
		);

	//
	// decision_function
	//
	struct decision_function
	{
		double* alpha;
		double rho;	
	};

	static decision_function svm_train_one(
		const svm_problem *prob, const svm_parameter *param,
		double Cp, double Cn
		);

	// Platt's binary SVM Probabilistic Output: an improvement from Lin et al.
	static void sigmoid_train(
		int l, const double* dec_values, const double* labels, 
		double& A, double& B
		);

	static double sigmoid_predict(double decision_value, double A, double B);

	// Method 2 from the multiclass_prob paper by Wu, Lin, and Weng
	static void multiclass_probability(int k, double** r, double* p);

	static void svm_binary_svc_probability(
		const svm_problem *prob, const svm_parameter *param,
		double Cp, double Cn, double& probA, double& probB
		);

	static double svm_svr_probability(
		const svm_problem *prob, const svm_parameter *param
		);
};

// Project-side convenience layer around CLibSVMdense_Tools: command-line
// parsing, feature scaling, RAII wrappers for parameters/models, and
// CBinaryClassifier adapters (all bodies live in the .cpp).
class CLibSVMdense_Helper
{
public:
	// Usage strings shown for the train / predict command lines.
	static const char* HelpMsg_Train;
	static const char* HelpMsg_Predict;

public:
	// Fill dstParameters from an svm-train style command line (string or argv).
	static void parse_training_parameters(CLibSVMdense_Tools::svm_parameter& dstParameters, const char* srcCommandLine);
	static void parse_training_parameters(CLibSVMdense_Tools::svm_parameter& dstParameters, int nArgs, const char** sArgs);
	static void parse_predicting_parameters(bool& fPredictProbability, const char* srcCommandLine);
	static void parse_predicting_parameters(bool& fPredictProbability, int nArgs, const char** sArgs);

public:
	// One example's feature vector, with value-semantics wrappers around the
	// heap-held pFeatures array.
	class CSVMNode
	{
	public:
		CSimpleTypeArray<double>* pFeatures;
	public:
		CSVMNode();
		CSVMNode(const CSVMNode& b);
		// NOTE(review): takes CSimpleTypeArray<double> by value (a full copy);
		// likely intended to be `const CSimpleTypeArray<double>&` — confirm.
		CSVMNode(const CSimpleTypeArray<double> Features);
		CSVMNode& operator = (const CSVMNode& b);
		// NOTE(review): same by-value copy as the constructor above.
		CSVMNode& operator = (const CSimpleTypeArray<double> Features);
		virtual ~CSVMNode();
		// Export into the raw LIBSVM dense node representation.
		void Generate_tnode(CLibSVMdense_Tools::svm_node& dstNode) const;
		void FromDataSet(CDataSetForClassification* pDataSet, int NodeIndex);
		// Apply per-feature linear scaling: (x - base) * range_factor.
		void DoScaling(const CSimpleTypeArray<double>& srcBaseValues, const CSimpleTypeArray<double>& srcRangeFactors);
	};

	// Parallel arrays of labels (yList) and feature vectors (xList) that can
	// be exported to a raw CLibSVMdense_Tools::svm_problem.
	class CSVMProblem
	{
	public:
		CSimpleTypeArray<double> yList;
		CSimpleTypeArray<CSVMNode> xList;
	public:
		void Clear();
		CSVMProblem();
		void PushBack(double y, const CSVMNode& x);
		void PushBack(double y, const CSimpleTypeArray<double>& xFeatureList);
		// tnodeList receives the svm_node storage that dstProb.x points into,
		// so it must outlive dstProb.
		void Generate_tprob(CLibSVMdense_Tools::svm_problem& dstProb, CSimpleTypeArray<CLibSVMdense_Tools::svm_node>& tnodeList) const;
		// Rescale all features into [targetLowerBound, targetUpperBound] and
		// return the per-feature base values / range factors for reuse at
		// prediction time.
		void DoScaling(
			CSimpleTypeArray<double>& dstBaseValues, CSimpleTypeArray<double>& dstRangeFactors,
			double targetLowerBound = -1.0, double targetUpperBound = 1.0
			);
	};

	// Persist / restore the scaling coefficients produced by DoScaling.
	static void SaveScalingData(const char* strFN_dstScalingData, const CSimpleTypeArray<double>& srcBaseValues, const CSimpleTypeArray<double>& srcRangeFactors);
	static void LoadScalingData(CSimpleTypeArray<double>& dstBaseValues, CSimpleTypeArray<double>& dstRangeFactors, const char* strFN_srcScalingData);

	// RAII owner of a heap-allocated svm_parameter, initialized from a
	// command line. NOTE(review): no copy ctor/assignment declared while the
	// destructor presumably frees pParameters — copying would double-free.
	class CParameters
	{
	public:
		CLibSVMdense_Tools::svm_parameter* pParameters;
	protected:
		void myRelease();
	public:
		CParameters();
		~CParameters();
		void myInit(const char* srcCommandLine);
		void myInit(int nArgs, const char** sArgs);
	};

	// RAII owner of a trained svm_model with explicit Clone/copy support and
	// file (de)serialization.
	class CModel
	{
	public:
		CLibSVMdense_Tools::svm_model* pModel;
		bool fPredictProbability;
	protected:
		void myRelease();
	public:
		CModel();
		CModel(const CModel& Another);
		CModel& operator= (const CModel& Another);
		virtual void Clone(const CModel& SrcModel);
		virtual ~CModel();
		virtual void SetPredictingMode(bool fPredictProbability);
		virtual void SaveToFile(const char* strFN_dstModel) const;
		virtual void LoadFromFile(const char* strFN_srcModel);
		virtual void Train(const CSVMProblem& Problem, const CParameters& Parameters);
		virtual double Predict(CSimpleTypeArray<double>& dstPredictionList, const CSVMNode& x);
	};

	// CBinaryClassifier adapter: trains with feature scaling and exposes the
	// framework's save/load/classify interface. (Spelling "Classfier" is kept
	// for source compatibility.)
	class CSVMBinaryClassfier : public CBinaryClassifier
	{
	private:
		CSVMProblem __prob;
	public:
		CSimpleTypeArray<double> ScalingBaseValues;
		CSimpleTypeArray<double> ScalingRangeFactors;
		CModel model;

	public:
		virtual ~CSVMBinaryClassfier() { }
		virtual void Train(
			CDataSetForBinaryClassification* pTrainDataSet, CBinaryClassificationExampleList& TrainNodeList,
			const CParameters& Parameters, double ScalingLowerBound = -1.0, double ScalingUpperBound = +1.0
			);
		virtual void SaveToFile(const string& strFN_dstModel);
		virtual void SaveToFile(const char* strFN_dstModel);
		virtual bool LoadFromFile(const string& strFN_srcModel);
		virtual bool LoadFromFile(const char* strFN_srcModel);
		virtual void SaveScalingData(const string& strFN_ScalingData);
		virtual bool LoadScalingData(const string& strFN_ScalingData);
		virtual void SaveScalingData(const char* FN_ScalingData);
		virtual bool LoadScalingData(const char* FN_ScalingData);
		virtual void OutputToStream(ostream& outStream);
		virtual bool InputFromStream(istream& inStream);
		virtual void Classify(CDataSetForClassification* pDataSet, int NodeIndex, double& DstConfidence, int& DstPrediction);
	};

	// Linear-kernel specialization that flattens the trained model into an
	// explicit bias + coefficient vector for fast dot-product classification.
	class CLinearSVMBinaryClassfier : public CSVMBinaryClassfier
	{
	public:
		double bias;
		CSimpleTypeArray<double> coefficient;

	protected:
		// Collapse the SV expansion into bias/coefficient after training/loading.
		void _BuildSimpleModel();
	public:
		virtual void Train(
			CDataSetForBinaryClassification* pTrainDataSet, CBinaryClassificationExampleList& TrainNodeList,
			const CParameters& Parameters, double ScalingLowerBound = -1.0, double ScalingUpperBound = +1.0
			);
		virtual bool LoadFromFile(const string& strFN_srcModel);
		virtual bool LoadFromFile(const char* strFN_srcModel);
		virtual void Classify(CDataSetForClassification* pDataSet, int NodeIndex, double& DstConfidence, int& DstPrediction);
	};
};

