#include "all.h"
#include "svm_light/svm_common.h"
#include "svm_light/svm_learn.h"
#include "svm_struct/svm_struct_common.h"
#include "svm_struct/svm_struct_learn.h"
#include "svm_struct_api.h"

namespace classifier_test
{

using namespace svm_light;

namespace svm_light
{

// Convert a single record into an svm_light DOC.  The last attribute of
// the domain (the class label) is skipped; continuous attributes map to
// one feature each and discrete attributes to a one-hot feature.  When
// AddBias is true an extra always-on feature is appended.
DOC* record_to_doc(int ID, const record& Record, const record_domain& Domain, bool AddBias)
{
	// Worst case: one WORD per non-class attribute, one for the bias,
	// plus the zero-wnum terminator expected by create_svector.
	WORD* Words = new WORD[Domain.size() + (int)AddBias];
	int Count = 0;       // WORDs filled so far
	int FeatureNum = 1;  // svm_light feature ids start at 1
	for (int Attr = 0; Attr < Domain.size() - 1; Attr++)
	{
		if (Domain[Attr].type == cont_attr)
		{
			// Sparse representation: zero-valued features are omitted.
			if (Record[Attr].cont != 0)
			{
				Words[Count].wnum = FeatureNum;
				Words[Count].weight = Record[Attr].cont;
				Count++;
			}
			FeatureNum++;
		}
		else if (Domain[Attr].type == discr_attr)
		{
			// One-hot encoding: one feature id reserved per category.
			Words[Count].wnum = FeatureNum + Record[Attr].discr;
			Words[Count].weight = 1;
			Count++;
			FeatureNum += Domain[Attr].discr.count;
		}
	}
	if (AddBias)
	{
		Words[Count].wnum = FeatureNum;
		Words[Count].weight = 1;
		Count++;
		FeatureNum++;
	}
	Words[Count++].wnum = 0;  // terminator entry
	DOC* Doc = create_example(ID, 0, 0, 1, create_svector(Words, "", 1.0));
	delete[] Words;  // create_svector takes a copy, so the buffer can go
	return Doc;
}

// Convert a set of records to an array of svm_light DOCs, also reporting
// the total number of features (totwords) implied by the domain.
DOC** records_to_docs(subarray<const record* const> Data, const record_domain& Domain, int& totwords, bool AddBias)
{
	// Feature count: one per continuous attribute, one per category of
	// each discrete attribute, plus an optional bias feature.
	totwords = (int)AddBias;
	for (int Attr = 0; Attr < Domain.size() - 1; Attr++)
	{
		if (Domain[Attr].type == cont_attr)
			totwords += 1;
		else if (Domain[Attr].type == discr_attr)
			totwords += Domain[Attr].discr.count;
	}
	DOC** Docs = new DOC*[Data.size()];
	for (int Row = 0; Row < Data.size(); Row++)
		Docs[Row] = record_to_doc(Row, *Data[Row], Domain, AddBias);
	return Docs;
}

// Deep-copy a STRUCTMODEL: the underlying svm_light model is cloned and
// the cached weight-vector pointer is redirected into the clone.
STRUCTMODEL* copy_struct_model(STRUCTMODEL* model)
{
	// Value-initialize so any STRUCTMODEL members not copied explicitly
	// below are zeroed instead of left indeterminate (the original
	// `new STRUCTMODEL` left them uninitialized).
	STRUCTMODEL* newmodel = new STRUCTMODEL();
	newmodel->svm_model = copy_model(model->svm_model);
	// w must alias the copy's own weight cache, not the source's.
	// NOTE(review): lin_weights may be NULL until
	// add_weight_vector_to_linear_model has run — callers rely on that.
	newmodel->w = newmodel->svm_model->lin_weights;
	newmodel->sizePsi = model->sizePsi;
	return newmodel;
}

// Create a fresh STRUCT_LEARN_PARM carrying only the fields the
// classifier needs at prediction time (class and feature counts).
STRUCT_LEARN_PARM* copy_struct_parm(STRUCT_LEARN_PARM* struct_parm)
{
	// Value-initialize: STRUCT_LEARN_PARM has many more members and the
	// original `new STRUCT_LEARN_PARM` left them indeterminate, which is
	// undefined behavior if anything reads them later.
	STRUCT_LEARN_PARM* new_struct_parm = new STRUCT_LEARN_PARM();
	new_struct_parm->num_classes = struct_parm->num_classes;
	new_struct_parm->num_features = struct_parm->num_features;
	return new_struct_parm;
}

// (De)serialize the kernel parameters that define a trained model.
void serialize(serializer& Serializer, KERNEL_PARM& kernel_parm)
{
	Serializer("kernel_type", kernel_parm.kernel_type);
	Serializer("poly_degree", kernel_parm.poly_degree);
	Serializer("rbf_gamma", kernel_parm.rbf_gamma);
	Serializer("coef_lin", kernel_parm.coef_lin);
	Serializer("coef_const", kernel_parm.coef_const);
}

// (De)serialize one sparse-vector entry: feature id then weight.
void serialize(serializer& Serializer, word& theword)
{
	Serializer(theword.wnum);
	Serializer(theword.weight);
}

// (De)serialize an svm_light MODEL.
// Saving flattens each support vector's chained SVECTORs into independent
// entries (alpha scaled by the chain's factor); loading therefore always
// reconstructs single-svector support vectors.  Index 0 of supvec/alpha is
// unused, matching svm_light's 1-based convention.
void serialize(serializer& Serializer, MODEL& model)
{
	Serializer
		("kernel_parm", model.kernel_parm)
		("totwords", model.totwords)
		("totdoc", model.totdoc)
		("b", model.b);
	Serializer.begin_property("support_vectors");
	if (Serializer.is_loading())
	{
		Serializer("count", model.sv_num);
		// Allocate with malloc: svm_light's free_model releases these with free().
		model.supvec = (DOC **)malloc(sizeof(DOC *)*model.sv_num);
		model.alpha = (double *)malloc(sizeof(double)*model.sv_num);
		model.index = NULL;
		model.lin_weights = NULL;
		for(int I = 1; I < model.sv_num; I++)
		{
			Serializer("alpha", model.alpha[I]);
			int word_count;
			Serializer("word_count", word_count);
			// +1 for the zero-wnum terminator create_svector expects.
			word* words = new word[word_count + 1];
			Serializer("words", to_nonconst(make_subarray(words, word_count)));
			words[word_count].wnum = 0;
			// create_svector copies the words, so the local buffer is freed below.
			model.supvec[I] = create_example(-1,0,0,0.0,create_svector(words,"",1.0));
			delete[] words;
		}
		// Rebuild the dense weight-vector cache used by linear classification.
		if (model.kernel_parm.kernel_type == LINEAR) add_weight_vector_to_linear_model(&model);
	}
	else
	{
		// Count one serialized entry per SVECTOR in every support vector's chain.
		int sv_num=1;
		for(int I = 1; I < model.sv_num; I++)
			for(SVECTOR *v = model.supvec[I]->fvec; v; v=v->next) 
				sv_num++;
		Serializer("count", sv_num);
		for(int I = 1; I < model.sv_num; I++)
			for(SVECTOR* v = model.supvec[I]->fvec; v; v=v->next)
			{
				// Fold the chain factor into alpha so each entry stands alone.
				Serializer["alpha"].put(model.alpha[I] * v->factor);
				int word_count = 0;
				for (int J = 0; (v->words[J]).wnum; J++) word_count++;
				Serializer["word_count"].put(word_count);
				Serializer("words", to_nonconst(make_subarray(v->words, word_count)));
			}
	}
	Serializer.end_property();
}

// (De)serialize a STRUCTMODEL.  The raw svm_model pointer is reinterpreted
// in place as a smart_ptr<MODEL> so the serializer can allocate it on load.
// NOTE(review): this assumes smart_ptr<T> is layout-compatible with a plain
// T* — confirm against smart_ptr's definition.
void serialize(serializer& Serializer, STRUCTMODEL& model)
{
	if (Serializer.is_loading()) model.svm_model = 0;
	Serializer
		("svm_model", *(smart_ptr<MODEL>*)(void*)&model.svm_model)
		("sizePsi", model.sizePsi);
	// Re-point the cached weight vector at the (possibly just loaded) model.
	model.w = model.svm_model->lin_weights;
}

// (De)serialize the subset of learn parameters kept after training.
void serialize(serializer& Serializer, STRUCT_LEARN_PARM& struct_parm)
{
	Serializer("num_classes", struct_parm.num_classes);
	Serializer("num_features", struct_parm.num_features);
}

}

// Construct an untrained classifier: no svm_light model attached yet.
svm_light_classifier::svm_light_classifier()
{
	ModelPtr = nullptr;
}

// Copy-construct: deep-copies the trained model when one exists.
svm_light_classifier::svm_light_classifier(const svm_light_classifier& Val)
	: classifier(Val)
{
	ModelPtr = Val.ModelPtr ? copy_model((MODEL*)Val.ModelPtr) : 0;
}

// Release the owned svm_light model, including its support vectors (deep=1).
svm_light_classifier::~svm_light_classifier()
{
	if (ModelPtr)
		free_model((MODEL*)ModelPtr, 1);
}

// Polymorphic copy of this classifier.
smart_ptr<classifier> svm_light_classifier::clone() const
{
	svm_light_classifier* Copy = new svm_light_classifier(*this);
	return make_smart_ptr(Copy);
}

// (De)serialize the classifier.  ModelPtr (a raw pointer slot) is
// reinterpreted in place as a smart_ptr<MODEL> so the serializer can
// allocate/traverse it.
// NOTE(review): type-punning via cast — relies on smart_ptr<T> being
// layout-compatible with a plain T*; confirm against smart_ptr's definition.
void svm_light_classifier::serialize(serializer& Serializer)
{
	classifier::serialize(Serializer);
	Serializer("Model", *(smart_ptr<MODEL>*)(void*)&ModelPtr);
}

// Train a binary svm_light classifier on Data.  The last domain attribute
// must be a 2-class discrete attribute; class index 0 maps to label +1
// and class index 1 to label -1.
// Throws runtime_error when the domain is not binary.
void svm_light_classifier::train(subarray<const record* const> Data)
{
	assert(ModelPtr == 0);
	if (Domain.back().type != discr_attr || Domain.back().discr.count != 2)
		throw runtime_error("svm_light supports only 2 classes");

	double *alpha_in=NULL;
	KERNEL_CACHE *kernel_cache = 0;
	LEARN_PARM learn_parm;
	KERNEL_PARM kernel_parm;
	// Allocate with my_malloc, not new: free_model() below releases the
	// struct with free(), and mixing new with free() is undefined behavior.
	MODEL *model=(MODEL *)my_malloc(sizeof(MODEL));

	verbosity=0;
	set_learning_defaults(&learn_parm, &kernel_parm);
	// -9999 is svm_light's "unset" sentinel for the shrink interval.
	if(learn_parm.svm_iter_to_shrink == -9999)
	{
		if(kernel_parm.kernel_type == LINEAR) learn_parm.svm_iter_to_shrink=2;
		else learn_parm.svm_iter_to_shrink=100;
	}

	int totwords = 0;
	DOC** docs = records_to_docs(Data, Domain, totwords, false);
	// svm_light expects +1/-1 labels.
	double* label = new double[Data.size()];
	for (int I = 0; I < Data.size(); I++) label[I] = Data[I]->back().discr == 0 ? 1 : -1;
	// A kernel cache only pays off for non-linear kernels.
	if(kernel_parm.kernel_type == LINEAR) kernel_cache=NULL;
	else kernel_cache=kernel_cache_init(Data.size(),learn_parm.kernel_cache_size);
	if(learn_parm.type == CLASSIFICATION)
		svm_learn_classification(docs,label,Data.size(),totwords,&learn_parm,&kernel_parm,kernel_cache,model,alpha_in);

	if(kernel_cache) kernel_cache_cleanup(kernel_cache);
	// Cache the dense weight vector so estimate() can use the linear path.
	if (model->kernel_parm.kernel_type == LINEAR) add_weight_vector_to_linear_model(model);
	// Keep a copy and free the local model shallowly (deep=0 leaves the
	// support-vector DOCs alone).
	// NOTE(review): copy_model copies supvec pointers shallowly, and those
	// point at the training docs freed below — safe for LINEAR kernels
	// (classification uses lin_weights only), but verify before enabling
	// non-linear kernels here.
	ModelPtr = copy_model(model);
	free_model(model, 0);
	for(int i=0;i<Data.size();i++) free_example(docs[i],1);
	delete[] label;
	delete[] docs;
}

void svm_light_classifier::estimate(const record& Record, array<double>& Probs) const
{
	MODEL* model = (MODEL*)ModelPtr;
	DOC* doc = record_to_doc(-1, Record, Domain, false);
	double dist;
	if (model->kernel_parm.kernel_type == LINEAR) dist = classify_example_linear(model,doc);
	else dist = classify_example(model,doc);
	free_example(doc, 1);
	Probs.assign(2, 0);
	if (dist >= 0) Probs[0] = 1;
	else Probs[1] = 1;
}


// Construct an untrained classifier: no model or learn parameters yet.
svm_struct_classifier::svm_struct_classifier()
{
	ModelPtr = nullptr;
	ParmPtr = nullptr;
}

// Copy-construct: deep-copies the trained model and its parameters when present.
svm_struct_classifier::svm_struct_classifier(const svm_struct_classifier& Val)
	: classifier(Val)
{
	if (Val.ModelPtr == 0)
	{
		ModelPtr = 0;
		ParmPtr = 0;
	}
	else
	{
		ModelPtr = copy_struct_model((STRUCTMODEL*)Val.ModelPtr);
		ParmPtr = copy_struct_parm((STRUCT_LEARN_PARM*)Val.ParmPtr);
	}
}

// Release the owned structured model and learn parameters.
svm_struct_classifier::~svm_struct_classifier()
{
	if (ModelPtr != 0)
	{
		// The embedded svm_light model owns its support vectors (deep=1);
		// free_model releases with free(), matching svm_light's allocation.
		free_model(((STRUCTMODEL*)ModelPtr)->svm_model, 1);
		// ModelPtr/ParmPtr are allocated with new (see copy_struct_model /
		// copy_struct_parm), so they must be released with delete — the
		// original free() calls mismatched the allocator, which is UB.
		delete (STRUCTMODEL*)ModelPtr;
		delete (STRUCT_LEARN_PARM*)ParmPtr;
	}
}

// Polymorphic copy of this classifier.
smart_ptr<classifier> svm_struct_classifier::clone() const
{
	svm_struct_classifier* Copy = new svm_struct_classifier(*this);
	return make_smart_ptr(Copy);
}

// (De)serialize the structured classifier.  ModelPtr and ParmPtr are raw
// pointer slots reinterpreted in place as smart_ptrs so the serializer can
// allocate them on load.
// NOTE(review): relies on smart_ptr<T> being layout-compatible with a plain
// T* — confirm against smart_ptr's definition.
void svm_struct_classifier::serialize(serializer& Serializer)
{
	classifier::serialize(Serializer);
	Serializer
		("Model", *(smart_ptr<STRUCTMODEL>*)(void*)&ModelPtr)
		("Parm", *(smart_ptr<STRUCT_LEARN_PARM>*)(void*)&ParmPtr);
}

// Train a multi-class SVM^struct classifier on Data.  The last domain
// attribute supplies the class label (stored 1-based for svm_struct); the
// remaining attributes are encoded as features with a bias term.
void svm_struct_classifier::train(subarray<const record* const> Data)
{
	assert(ModelPtr == 0);

	LEARN_PARM learn_parm;
	KERNEL_PARM kernel_parm;
	STRUCT_LEARN_PARM struct_parm;
	STRUCTMODEL structmodel;
	int alg_type;

	// Silence svm_light / svm_struct console output.
	verbosity=0;
	struct_verbosity=-1;
	// Fixed settings mirroring svm_struct's command-line defaults.
	alg_type=DEFAULT_ALG_TYPE;
	struct_parm.C=Data.size();
	struct_parm.slack_norm=1;
	struct_parm.epsilon=DEFAULT_EPS;
	struct_parm.custom_argc=0;
	struct_parm.loss_function=DEFAULT_LOSS_FCT;
	struct_parm.loss_type=DEFAULT_RESCALING;
	struct_parm.newconstretrain=100;
	struct_parm.ccache_size=5;
	strcpy(learn_parm.predfile, "trans_predictions");
	strcpy(learn_parm.alphafile, "");
	learn_parm.biased_hyperplane=1;
	learn_parm.remove_inconsistent=0;
	learn_parm.skip_final_opt_check=0;
	learn_parm.svm_maxqpsize=10;
	learn_parm.svm_newvarsinqp=0;
	learn_parm.svm_iter_to_shrink=-9999;
	learn_parm.maxiter=100000;
	learn_parm.kernel_cache_size=40;
	learn_parm.svm_c=99999999;
	learn_parm.eps=0.001;
	learn_parm.transduction_posratio=-1.0;
	learn_parm.svm_costratio=1.0;
	learn_parm.svm_costratio_unlab=1.0;
	learn_parm.svm_unlabbound=1E-5;
	learn_parm.epsilon_crit=0.001;
	learn_parm.epsilon_a=1E-10;
	learn_parm.compute_loo=0;
	learn_parm.rho=1.0;
	learn_parm.xa_depth=0;
	kernel_parm.kernel_type=0;
	kernel_parm.poly_degree=3;
	kernel_parm.rbf_gamma=1.0;
	kernel_parm.coef_lin=1;
	kernel_parm.coef_const=1;
	strcpy(kernel_parm.custom,"empty");
	// -9999 is the "unset" sentinel; fall back to the default shrink interval.
	if(learn_parm.svm_iter_to_shrink == -9999) learn_parm.svm_iter_to_shrink=100;

	// Build the svm_struct SAMPLE: one EXAMPLE per record, taking over the
	// DOCs produced by records_to_docs.
	SAMPLE sample;
	EXAMPLE  *examples;
	int totwords;
	DOC** docs = records_to_docs(Data, Domain, totwords, true);
	examples=(EXAMPLE *)malloc(sizeof(EXAMPLE)*Data.size());
	for(int i=0;i<Data.size();i++)
	{
		examples[i].x.doc=docs[i];
		examples[i].y.class_=Data[i]->back().discr+1;  // svm_struct classes are 1-based
		examples[i].y.scores=NULL;
		examples[i].y.num_classes=Domain.back().discr.count;
	}
	delete[] docs;  // only the outer array; the DOCs now belong to the sample
	sample.n=Data.size();
	sample.examples=examples;
	if(alg_type == 1) svm_learn_struct(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel);
	else if(alg_type == 2) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,PRIMAL_ALG);
	else if(alg_type == 3) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,DUAL_ALG);
	else if(alg_type == 4) svm_learn_struct_joint(sample,&struct_parm,&learn_parm,&kernel_parm,&structmodel,DUAL_CACHE_ALG);

	// Cache the dense weight vector before copying the model.
	if (structmodel.svm_model->kernel_parm.kernel_type == LINEAR) add_weight_vector_to_linear_model(structmodel.svm_model);
	ModelPtr = copy_struct_model(&structmodel);
	ParmPtr = copy_struct_parm(&struct_parm);
	// Free the training sample and the shallow parts of the local model
	// (deep=0); the deep copies stored above keep what prediction needs.
	free_struct_sample(sample);
	free_model(structmodel.svm_model, 0);
}

void svm_struct_classifier::estimate(const record& Record, array<double>& Probs) const
{
	STRUCTMODEL* model = (STRUCTMODEL*)ModelPtr;
	STRUCT_LEARN_PARM* struct_parm = (STRUCT_LEARN_PARM*)ParmPtr;
	PATTERN pattern;
	pattern.doc = record_to_doc(-1, Record, Domain, true);
	Probs.assign(Domain.back().discr.count, 0);
	LABEL y = classify_struct_example(pattern,model,struct_parm);
	Probs[y.class_-1] = 1;
	free_label(y);
	free_example(pattern.doc, 1);
}

}
