#pragma once

# include <stdio.h>
# include <ctype.h>
# include <math.h>
# include <string.h>
# include <stdlib.h>
# include <time.h> 
# include <float.h>

#include "..\\ExternalTools\\ExternalTools.h"

#include "..\\Loirey\\loirey_GLOBAL.h"

#include "..\\Loirey\\loirey_BasicStructure.h"
#include "..\\Loirey\\loirey_Configuration.h"
#include "..\\Loirey\\loirey_MyMath.h"

#include "..\\Classification.Base\\cl_base_Data.h"
#include "..\\Classification.Base\\cl_base_BinaryClassification.h"

//#include <cstdarg>

using namespace loirey;

// CSVMLightTools: static wrapper around the SVM-light package (T. Joachims).
// The nested typedefs/structs mirror SVM-light's svm_common.h data model
// (SVECTOR, DOC, LEARN_PARM, KERNEL_PARM, MODEL, ...) and the static member
// functions mirror the C API of svm_common.c / svm_learn.c / svm_hideo.c.
// All definitions live in the corresponding .cpp file.
class CSVMLightTools
{
public:
	// File name used for the training log (defined in the .cpp).
	static const char* FN_TrainLog;

	static const int MAXSHRINK			=	50000;		/* maximum number of shrinking rounds */
	static const int MAXFEATNUM			=	99999999;	/* maximum feature number (must be in
													valid range of long int type!) */
	static const char* VERSION;
	static const char* VERSION_DATE;

// When defined, SVECTOR stores features densely (FVAL array indexed by feature
// id) instead of as a sparse CWORD_SVMLIGHT list; see ts_svector below.
# define SVMLIGHT_DENSE	1

	typedef double CFLOAT;	/* the type of float to use for caching */
							/* kernel evaluations. Using float saves */
							/* us some memory, but you can use double, too */
	typedef long FNUM;		/* the type used for storing feature ids */
	typedef double FVAL;		/* the type used for storing feature values */

	/* kernel types, stored in KERNEL_PARM::kernel_type */
	static const int LINEAR				=	0;           /* linear kernel type */
	static const int POLY				=	1;           /* polynomial kernel type */
	static const int RBF				=	2;           /* rbf kernel type */
	static const int SIGMOID			=	3;           /* sigmoid kernel type */

	/* learning-task types, stored in LEARN_PARM::m_type */
	static const int CLASSIFICATION		=	1;    /* train classification model */
	static const int REGRESSION			=	2;    /* train regression model */
	static const int RANKING			=	3;    /* train ranking model */
	static const int OPTIMIZATION		=	4;    /* train on general set of constraints */

	static long   verbosity;					/* verbosity level (0-4) */
	static long   format;						/* data format type (0-1) */
	static long   kernel_cache_statistic;

	//////////////////////////////////////////////////

	/* one sparse feature: (feature id, feature value) */
	typedef struct ts_word {
		FNUM    wnum;               /* word number */
		FVAL    weight;              /* word weight */
	} CWORD_SVMLIGHT;

	/* a single feature vector (possibly one element of a linked list) */
	typedef struct ts_svector {
		FNUM    n_words;	       /* length of feature vector */
#ifdef SVMLIGHT_DENSE
		FVAL    *words;	       /* Dense representation of feature vectors */
#else
		CWORD_SVMLIGHT    *words;              /* The features/values in the vector by
									 increasing feature-number. Feature
									 numbers that are skipped are
									 interpreted as having value zero. */
#endif
		double  twonorm_sq;          /* The squared Euclidean length of the
									 vector. Used to speed up the RBF kernel. */
		char    *userdefined;        /* You can put additional information
									 here. This can be useful, if you are
									 implementing your own kernel that
									 does not work with feature/values
									 representations (for example a
									 string kernel). By default,
									 svm-light will put here the string
									 after the # sign from each line of
									 the input file. */
		long    kernel_id;           /* Feature vectors with different
									 kernel_id's are orthogonal (ie. the
									 feature number do not match). This
									 is used for computing component
									 kernels for linear constraints which
									 are a sum of several different
									 weight vectors. (currently not
									 implemented). */
		struct ts_svector *next;        /* Lets you set up a list of SVECTOR's
									 for linear constraints which are a
									 sum of multiple feature
									 vectors. List is terminated by
									 NULL. */
		double  factor;              /* Factor by which this feature vector
									 is multiplied in the sum. */
	} SVECTOR;

	/* one training/test example: label-independent part of a document */
	typedef struct ts_doc {
		long    docnum;              /* Document ID. This has to be the position of 
									 the document in the training set array. */
		long    queryid;             /* for learning rankings, constraints are 
									 generated for documents with the same 
									 queryID. */
		double  costfactor;          /* Scales the cost of misclassifying this
									 document by this factor. The effect of this
									 value is, that the upper bound on the alpha
									 for this example is scaled by this factor.
									 The factors are set by the feature 
									 'cost:<val>' in the training data. */
		long    slackid;             /* Index of the slack variable
									 corresponding to this
									 constraint. All constraints with the
									 same slackid share the same slack
									 variable. This can only be used for
									 svm_learn_optimization. */
		SVECTOR *fvec;               /* Feature vector of the example. The
									 feature vector can actually be a
									 list of feature vectors. For
									 example, the list will have two
									 elements, if this DOC is a
									 preference constraint. The one
									 vector that is supposed to be ranked
									 higher, will have a factor of +1,
									 the lower ranked one should have a
									 factor of -1. */
	} DOC;

	/* all user-settable (and internal) parameters of the learning algorithm */
	typedef struct ts_learn_parm {
		long   m_type;                 /* selects between regression and
									 classification */
		double svm_c;                /* upper bound C on alphas */
		double eps;                  /* regression epsilon (eps=1.0 for
									 classification) */
		double svm_costratio;        /* factor to multiply C for positive examples */
		double transduction_posratio;/* fraction of unlabeled examples to be */
		/* classified as positives */
		long   biased_hyperplane;    /* if nonzero, use hyperplane w*x+b=0 
									 otherwise w*x=0 */
		long   sharedslack;          /* if nonzero, it will use the shared
									 slack variable mode in
									 svm_learn_optimization. It requires
									 that the slackid is set for every
									 training example */
		long   svm_maxqpsize;        /* size q of working set */
		long   svm_newvarsinqp;      /* new variables to enter the working set 
									 in each iteration */
		long   kernel_cache_size;    /* size of kernel cache in megabytes */
		double epsilon_crit;         /* tolerable error for distances used 
									 in stopping criterion */
		double epsilon_shrink;       /* how much a multiplier should be above 
									 zero for shrinking */
		long   svm_iter_to_shrink;   /* iterations h after which an example can
									 be removed by shrinking */
		long   maxiter;              /* number of iterations after which the
									 optimizer terminates, if there was
									 no progress in maxdiff */
		long   remove_inconsistent;  /* exclude examples with alpha at C and 
									 retrain */
		long   skip_final_opt_check; /* do not check KT-Conditions at the end of
									 optimization for examples removed by 
									 shrinking. WARNING: This might lead to 
									 sub-optimal solutions! */
		long   compute_loo;          /* if nonzero, computes leave-one-out
									 estimates */
		double rho;                  /* parameter in xi/alpha-estimates and for
									 pruning leave-one-out range [1..2] */
		long   xa_depth;             /* parameter in xi/alpha-estimates upper
									 bounding the number of SV the current
									 alpha_t is distributed over */
		char predfile[200];          /* file for predictions on unlabeled examples
									 in transduction */
		char alphafile[200];         /* file to store optimal alphas in. use  
									 empty string if alphas should not be 
									 output */

		/* you probably do not want to touch the following */
		double epsilon_const;        /* tolerable error on eq-constraint */
		double epsilon_a;            /* tolerable error on alphas at bounds */
		double opt_precision;        /* precision of solver, set to e.g. 1e-21 
									 if you get convergence problems */

		/* the following are only for internal use */
		long   svm_c_steps;          /* do so many steps for finding optimal C */
		double svm_c_factor;         /* increase C by this factor every step */
		double svm_costratio_unlab;
		double svm_unlabbound;
		double* svm_cost;            /* individual upper bounds for each var */
		long   totwords;             /* number of features */
	} LEARN_PARM;

	/* kernel choice and its hyper-parameters */
	typedef struct ts_kernel_parm {
		long    kernel_type;   /* 0=linear, 1=poly, 2=rbf, 3=sigmoid, 4=custom */
		long    poly_degree;
		double  rbf_gamma;
		double  coef_lin;
		double  coef_const;
		char    custom[50];    /* for user supplied kernel */
	} KERNEL_PARM;

	/* a trained SVM model: support vectors, alphas, bias and bookkeeping */
	typedef struct ts_model {
		long    sv_num;	
		long    at_upper_bound;
		double  b;
		DOC     **supvec;
		double  *alpha;
		long    *index;       /* index from docnum to position in model */
		long    totwords;     /* number of features */
		long    totdoc;       /* number of training documents */
		KERNEL_PARM kernel_parm; /* kernel */

		/* the following values are not written to file */
		double  loo_error,loo_recall,loo_precision; /* leave-one-out estimates */
		double  xa_error,xa_recall,xa_precision;    /* xi/alpha estimates */
		double  *lin_weights;                       /* weights for linear case using
													folding */
		double  maxdiff;                            /* precision, up to which this 
													model is accurate */
	} MODEL;

	/* one quadratic subproblem handed to the QP solver (CHideo) */
	typedef struct ts_quadratic_program {
		long   opt_n;            /* number of variables */
		long   opt_m;            /* number of linear equality constraints */
		double* opt_ce,*opt_ce0; /* linear equality constraints */
		double* opt_g;           /* hessian of objective */
		double* opt_g0;          /* linear part of objective */
		double* opt_xinit;       /* initial value for variables */
		double* opt_low,*opt_up; /* box constraints */
	} QP;

	/* LRU cache of kernel rows, sized by LEARN_PARM::kernel_cache_size */
	typedef struct ts_kernel_cache {
		long   *index;  /* cache some kernel evaluations */
		CFLOAT *buffer; /* to improve speed */
		long   *invindex;
		long   *active2totdoc;
		long   *totdoc2active;
		long   *lru;
		long   *occu;
		long   elems;
		long   max_elems;
		long   time;
		long   activenum;
		long   buffsize;
	} KERNEL_CACHE;


	/* accumulated per-phase timing statistics of a training run */
	typedef struct ts_timing_profile {
		long   time_kernel;
		long   time_opti;
		long   time_shrink;
		long   time_update;
		long   time_model;
		long   time_check;
		long   time_select;
	} TIMING;

	/* state of the working-set shrinking heuristic during optimization */
	typedef struct ts_shrink_state {
		long   *active;
		long   *inactive_since;
		long   deactnum;
		double** a_history;  /* for shrinking with non-linear kernel */
		long   maxhistory;
		double* last_a;      /* for shrinking with linear kernel */
		double* last_lin;    /* for shrinking with linear kernel */
	} SHRINK_STATE;

	/* memory management for the structs above */
	static void   free_svector(SVECTOR *);
	static void   free_example(DOC *, long);
	static void   free_model(MODEL *, int);

	/* classification, kernel evaluation, and vector construction/arithmetic
	   (mirrors svm_common.c) */
	static double classify_example(MODEL *, DOC *);
	static double classify_example_linear(MODEL *, DOC *);
	static CFLOAT kernel(KERNEL_PARM *, DOC *, DOC *); 
	static CFLOAT single_kernel(KERNEL_PARM *, SVECTOR *, SVECTOR *); 
	static double custom_kernel(KERNEL_PARM *, SVECTOR *, SVECTOR *); 
	static SVECTOR *create_svector(CWORD_SVMLIGHT *, FNUM, char *, double);
#ifdef SVMLIGHT_DENSE
	static SVECTOR *create_ns_svector(const FVAL*,FNUM,char *,double);
#endif
	static SVECTOR *copy_svector(SVECTOR *);
	static double    sprod_ss(SVECTOR *, SVECTOR *);
	static SVECTOR*  sub_ss(SVECTOR *, SVECTOR *); 
	static SVECTOR*  add_ss(SVECTOR *, SVECTOR *); 
	static SVECTOR*  add_list_ss(SVECTOR *); 
	static void      append_svector_list(SVECTOR *a, SVECTOR *b);
	static SVECTOR*  smult_s(SVECTOR *, double);
	static int       featvec_eq(SVECTOR *, SVECTOR *); 
	static double model_length_s(MODEL *, KERNEL_PARM *);
	static void   clear_vector_n(double* , long);
	static void   add_vector_ns(double* , SVECTOR *, double);
	static double sprod_ns(double* , SVECTOR *);
	static void   add_weight_vector_to_linear_model(MODEL *);
	static DOC    *create_example(long, long, long, double, SVECTOR *);
	/* model/document I/O (text and binary formats) and small utilities */
	static MODEL  *read_model(const char *);
	static MODEL *read_binary_model(const char *modelfile);
	static void   write_binary_model(const char *modelfile, MODEL* model);
	static MODEL  *copy_model(MODEL *);
	static void   read_documents(char *, DOC ***, double** , long *, long *);
	static void   read_binary_documents(char *, DOC ***, double** , long *, long *);
	static int    parse_document(char *, CWORD_SVMLIGHT *, double* , long *, long *, double* , long *, long, char **);
	static int read_feature(FILE *docfl, CWORD_SVMLIGHT *words, double* label, 
		int target_typeid, int data_typeid, 
		long *queryid, 
		long *slackid, double* costfactor, long int *numwords, 
		long int max_words_doc, char **comment);
	static double* read_alphas(const char *,long);
	static void   nol_ll(const char *, long *, long *, long *);
	static long   minl(long, long);
	static long   maxl(long, long);
	static long   get_runtime(void);
	static int    space_or_null(int);
	static void   *my_malloc(size_t); 
	static void   copyright_notice(void);
# ifdef _MSC_VER
	static int isnan(double);
# endif

	/* the learning entry points and the inner optimization loop
	   (mirrors svm_learn.c) */
	static void   svm_learn_classification(DOC **, double* , long, long, LEARN_PARM *, 
		KERNEL_PARM *, KERNEL_CACHE *, MODEL *,
		double* );
	static void   svm_learn_regression(DOC **, double* , long, long, LEARN_PARM *, 
		KERNEL_PARM *, KERNEL_CACHE **, MODEL *);
	static void   svm_learn_ranking(DOC **, double* , long, long, LEARN_PARM *, 
		KERNEL_PARM *, KERNEL_CACHE **, MODEL *);
	static void   svm_learn_optimization(DOC **, double* , long, long, LEARN_PARM *, 
		KERNEL_PARM *, KERNEL_CACHE *, MODEL *,
		double* );
	static long   optimize_to_convergence(DOC **, long *, long, long, LEARN_PARM *,
		KERNEL_PARM *, KERNEL_CACHE *, SHRINK_STATE *,
		MODEL *, long *, long *, double* ,
		double* , double* ,
		TIMING *, double* , long, long);
	static long   optimize_to_convergence_sharedslack(DOC **, long *, long, long, 
		LEARN_PARM *,
		KERNEL_PARM *, KERNEL_CACHE *, SHRINK_STATE *,
		MODEL *, double* , double* , double* ,
		TIMING *, double* );
	static double compute_objective_function(double* , double* , double* , double,
		long *, long *);
	static void   clear_index(long *);
	static void   add_to_index(long *, long);
	static long   compute_index(long *,long, long *);
	static void   optimize_svm(DOC **, long *, long *, long *, double, long *, long *, 
		MODEL *, 
		long, long *, long, double* , double* , double* , 
		LEARN_PARM *, CFLOAT *, KERNEL_PARM *, QP *, double* );
	static void   compute_matrices_for_optimization(DOC **, long *, long *, long *, double,
		long *,
		long *, long *, MODEL *, double* , 
		double* , double* , long, long, LEARN_PARM *, 
		CFLOAT *, KERNEL_PARM *, QP *);
	static long   calculate_svm_model(DOC **, long *, long *, double* , double* , 
		double* , double* , LEARN_PARM *, long *,
		long *, MODEL *);
	static long   check_optimality(MODEL *, long *, long *, double* , double* ,
		double* , long, 
		LEARN_PARM *,double* , double, long *, long *, long *,
		long *, long, KERNEL_PARM *);
	static long   check_optimality_sharedslack(DOC **docs, MODEL *model, long int *label,
		double* a, double* lin, double* c, double* slack,
		double* alphaslack, long int totdoc, 
		LEARN_PARM *learn_parm, double* maxdiff, 
		double epsilon_crit_org, long int *misclassified, 
		long int *active2dnum,
		long int *last_suboptimal_at, 
		long int iteration, KERNEL_PARM *kernel_parm);
	static void   compute_shared_slacks(DOC **docs, long int *label, double* a, 
		double* lin, double* c, long int *active2dnum, 
		LEARN_PARM *learn_parm,
		double* slack, double* alphaslack);
	static long   identify_inconsistent(double* , long *, long *, long, LEARN_PARM *, 
		long *, long *);
	static long   identify_misclassified(double* , long *, long *, long,
		MODEL *, long *, long *);
	static long   identify_one_misclassified(double* , long *, long *, long,
		MODEL *, long *, long *);
	static long   incorporate_unlabeled_examples(MODEL *, long *,long *, long *,
		double* , double* , long, double* ,
		long *, long *, long, KERNEL_PARM *,
		LEARN_PARM *);
	static void   update_linear_component(DOC **, long *, long *, double* , double* , 
		long *, long, long, KERNEL_PARM *, 
		KERNEL_CACHE *, double* ,
		CFLOAT *, double* );
	static long   select_next_qp_subproblem_grad(long *, long *, double* , 
		double* , double* , long,
		long, LEARN_PARM *, long *, long *, 
		long *, double* , long *, KERNEL_CACHE *,
		long, long *, long *);
	static long   select_next_qp_subproblem_rand(long *, long *, double* , 
		double* , double* , long,
		long, LEARN_PARM *, long *, long *, 
		long *, double* , long *, KERNEL_CACHE *,
		long *, long *, long);
	static long   select_next_qp_slackset(DOC **docs, long int *label, double* a, 
		double* lin, double* slack, double* alphaslack, 
		double* c, LEARN_PARM *learn_parm, 
		long int *active2dnum, double* maxviol);
	static void   select_top_n(double* , long, long *, long);
	static void   init_shrink_state(SHRINK_STATE *, long, long);
	static void   shrink_state_cleanup(SHRINK_STATE *);
	static long   shrink_problem(DOC **, LEARN_PARM *, SHRINK_STATE *, KERNEL_PARM *, 
		long *, long *, long, long, long, double* , long *);
	static void   reactivate_inactive_examples(long *, long *, double* , SHRINK_STATE *,
		double* , double*, long, long, long, LEARN_PARM *, 
		long *, DOC **, KERNEL_PARM *,
		KERNEL_CACHE *, MODEL *, CFLOAT *, 
		double* , double* );

	/* cache kernel evaluations to improve speed */
	static KERNEL_CACHE *kernel_cache_init(long, long);
	static void   kernel_cache_cleanup(KERNEL_CACHE *);
	static void   get_kernel_row(KERNEL_CACHE *,DOC **, long, long, long *, CFLOAT *, 
		KERNEL_PARM *);
	static void   cache_kernel_row(KERNEL_CACHE *,DOC **, long, KERNEL_PARM *);
	static void   cache_multiple_kernel_rows(KERNEL_CACHE *,DOC **, long *, long, 
		KERNEL_PARM *);
	static void   kernel_cache_shrink(KERNEL_CACHE *,long, long, long *);
	static void   kernel_cache_reset_lru(KERNEL_CACHE *);
	static long   kernel_cache_malloc(KERNEL_CACHE *);
	static void   kernel_cache_free(KERNEL_CACHE *,long);
	static long   kernel_cache_free_lru(KERNEL_CACHE *);
	static CFLOAT *kernel_cache_clean_and_malloc(KERNEL_CACHE *,long);
	static long   kernel_cache_touch(KERNEL_CACHE *,long);
	static long   kernel_cache_check(KERNEL_CACHE *,long);
	static long   kernel_cache_space_available(KERNEL_CACHE *);

	/* leave-one-out / xi-alpha error estimation and transduction helpers */
	static void compute_xa_estimates(MODEL *, long *, long *, long, DOC **, 
		double* , double* , KERNEL_PARM *, 
		LEARN_PARM *, double* , double* , double* );
	static double xa_estimate_error(MODEL *, long *, long *, long, DOC **, 
		double* , double* , KERNEL_PARM *, 
		LEARN_PARM *);
	static double xa_estimate_recall(MODEL *, long *, long *, long, DOC **, 
		double* , double* , KERNEL_PARM *, 
		LEARN_PARM *);
	static double xa_estimate_precision(MODEL *, long *, long *, long, DOC **, 
		double* , double* , KERNEL_PARM *, 
		LEARN_PARM *);
	static void avg_similarity_of_sv_of_one_class(MODEL *, DOC **, double* , long *, KERNEL_PARM *, double* , double* );
	static double most_similar_sv_of_same_class(MODEL *, DOC **, double* , long, long *, KERNEL_PARM *, LEARN_PARM *);
	static double distribute_alpha_t_greedily(long *, long, DOC **, double* , long, long *, KERNEL_PARM *, LEARN_PARM *, double);
	static double distribute_alpha_t_greedily_noindex(MODEL *, DOC **, double* , long, long *, KERNEL_PARM *, LEARN_PARM *, double); 
	static void estimate_transduction_quality(MODEL *, long *, long *, long, DOC **, double* );
	static double estimate_margin_vcdim(MODEL *, double, double, KERNEL_PARM *);
	static double estimate_sphere(MODEL *, KERNEL_PARM *);
	static double estimate_r_delta_average(DOC **, long, KERNEL_PARM *); 
	static double estimate_r_delta(DOC **, long, KERNEL_PARM *); 
	static double length_of_longest_document_vector(DOC **, long, KERNEL_PARM *); 

	/* text-format output of models, predictions and alphas */
	static void   write_model(char *, MODEL *);
	static void   write_prediction(char *, MODEL *, double* , double* , long *, long *,
		long, LEARN_PARM *);
	static void   write_alphas(char *, double* , long *, long);

	/* bundle of arguments for (multi-threaded) kernel-cache filling */
	typedef struct ts_cache_parm_s {
		KERNEL_CACHE *kernel_cache;
		CFLOAT *cache;
		DOC **docs; 
		long m;
		KERNEL_PARM *kernel_parm;
		long offset,stepsize;
	} cache_parm_t;

	//static double* optimize_qp(QP *, double* , long, double* , LEARN_PARM *);

	/** 
	* @author Navneet Dalal (Navneet.Dalal@inrialpes.fr)
	* Support for binary data format added to SVM Light, as it takes too 
	* long to read supported text format.
	*/

	static void typeid_verbose(int __typeid);
	//static void read_binary_documents(char *docfile, DOC ***docs, double** label, 
	//	long int *totwords, long int *totdoc);
	//static int read_feature(FILE *docfl, CWORD_SVMLIGHT *words, double* label,
	//	int target_typeid, int data_typeid,
	//	long *queryid, long *slackid, double* costfactor,
	//	long int *numwords, long int max_words_doc,
	//	char **comment);
	//static void write_binary_model(const char *modelfile, MODEL *model);
	//static MODEL* read_binary_model(const char *modelfile);

	// Hideo QP solver (mirrors SVM-light's svm_hideo.c): solves the small
	// quadratic subproblems (QP) produced by the working-set optimizer.
	// NOTE(review): members are static, so solver state is shared globally —
	// presumably not safe for concurrent training runs; confirm before
	// using from multiple threads.
	class CHideo
	{
	public:
		static long verbosity_sh;

		/* result codes of the solver */
		static const int PRIMAL_OPTIMAL      =	1;
		static const int DUAL_OPTIMAL        =	2;
		static const int MAXITER_EXCEEDED    =	3;
		static const int NAN_SOLUTION        =	4;
		static const int ONLY_ONE_VARIABLE   =	5;

		static const int LARGEROUND          =	0;
		static const int SMALLROUND          =	1;

		///////////////////////////////////////////////////////////////

		/* solver tolerances/defaults (defined in the .cpp) */
		static const double DEF_PRECISION;
		static const int DEF_MAX_ITERATIONS;
		static const double DEF_LINDEP_SENSITIVITY;
		static const double EPSILON_HIDEO;
		static const double EPSILON_EQ;

		/* scratch buffers and state, allocated per problem size */
		static double* primal;
		static double* dual;
		static long   precision_violations;
		static double opt_precision;
		static long   maxiter;
		static double lindep_sensitivity;
		static double* buffer;
		static long   *nonoptimal;

		static long  smallroundcount;
		static long  roundnumber;

		/* main entry point: solve one QP subproblem */
		static double* optimize_qp(QP *, double* , long, double* , LEARN_PARM *);

		static int optimize_hildreth_despo(long,long,double,double,double,long,long,long,double,double* ,
			double* ,double* ,double* ,double* ,double* ,
			double* ,double* ,double* ,long *,double* ,double* );
		static int solve_dual(long,long,double,double,long,double* ,double* ,double* ,
			double* ,double* ,double* ,double* ,double* ,double* ,
			double* ,double* ,double* ,double* ,long);

		/* dense linear-algebra helpers on row-major square matrices */
		static void linvert_matrix(double* , long, double* , double, long *);
		static void lprint_matrix(double* , long);
		static void ladd_matrix(double* , long, double);
		static void lcopy_matrix(double* , long, double* );
		static void lswitch_rows_matrix(double* , long, long, long);
		static void lswitchrk_matrix(double* , long, long, long);

		static double calculate_qp_objective(long, double* , double* , double* );
	};
};

// CSVMLightHelper: high-level interface on top of CSVMLightTools — parameter
// parsing (CParameters) and a trainable/serializable binary-classification
// model (CModel) that plugs into the project's CBinaryClassifier framework.
class CSVMLightHelper
{
public:
	// Holds all training settings: SVM-light learn/kernel parameters plus
	// logging and restart-file options. Populated via myInit() from a
	// command-line style argument string or argv array.
	class CParameters
	{
	public:
		long verbosity;
		long format;
		CSVMLightTools::LEARN_PARM learn_parm;
		CSVMLightTools::KERNEL_PARM kernel_parm;
		string restartfile;
	public:
		string fn_trainlog;   // path of the training log file

	public:
		CParameters();
		// Sets fn_trainlog.
		void SetFN_TrainLog(string strFN_TrainLog);
		// Parse options from a single command-line style string.
		void myInit(const char* srcCommandLine);
		// Parse options from an argc/argv-style array.
		void myInit(int nArgs, const char** sArgs);

		//void read_input_parameters(int argc,char *argv[],char *docfile,char *modelfile,
		//	char *restartfile,long *verbosity, long* format,
		//	LEARN_PARM *learn_parm,KERNEL_PARM *kernel_parm)
	};
	// Binary classifier backed by an SVM-light MODEL. Owns pModel
	// (released in myRelease()); copy/assign/Clone deep-copy it.
	class CModel : public CBinaryClassifier
	{
	public:
		CSVMLightTools::MODEL* pModel;   // underlying SVM-light model (owned)

	protected:
		void myRelease();
		// Core training on raw SVM-light documents; called by the public
		// Train() overloads after example conversion.
		void _Train(CSVMLightTools::DOC** DocList, double* LabelList, int DocAmount, int Dim, CParameters& Parameters);
	public:
		CModel();
		CModel(const CModel& Another);
		CModel& operator= (const CModel& Another);
		virtual void Clone(const CModel& SrcModel);
		virtual ~CModel();

		// Train from weighted / plain binary-classification example lists.
		virtual void Train(
			CDataSetForBinaryClassification* pTrainDataSet, CWeightedClassificationExampleList& TrainExampleList,
			CParameters& Parameters
			);
		virtual void Train(
			CDataSetForClassification* pTrainDataSet, CBinaryClassificationExampleList& TrainExampleList,
			CParameters& Parameters
			);
		// Classify one example (NodeIndex) of pDataSet; outputs confidence
		// and the predicted label.
		virtual void _Classify(CDataSetForClassification* pDataSet, int NodeIndex, double& DstConfidence, int& DstPrediction);
		// Persistence: plain text by default; the *_F variants select
		// text vs binary via fBinaryFormat.
		virtual void SaveToFile(string strFN_dstModel);
		virtual bool LoadFromFile(string strFN_srcModel);
		virtual void SaveToFile_F(string strFN_dstModel, bool fBinaryFormat);
		virtual bool LoadFromFile_F(string strFN_srcModel, bool fBinaryFormat);
		virtual void OutputToStream(ostream& outStream);
		virtual bool InputFromStream(istream& inStream);
	};

	// Print command-line usage/help for the parameter options.
	static void print_help();
};

