#include <cstdlib>
#include <fstream>
#include <iostream>
#include <stdint.h>
#include <vector>

#include <xgboost/c_api.h>

#include "DataFrame.h"
using namespace std;
	

// Training feature matrix: 90000 examples x 33 features. Kept at file scope
// (static storage) because 90000*33 floats (~11 MB) would overflow the stack
// if declared inside main.
float train[90000][33];

int main(int argc, char const *argv[])
{

	text_match::DataFrame* df=new text_match::DataFrame();

	//df->get_jieba()->Cut(s, words, false);
	//cout << limonp::Join(words.begin(), words.end(), "/") << endl;
	
	text_match::TrainTemp train_temp = df->read_csv("data.tsv");
	
	df->compute_tfidf(train_temp);
	df->load_wordvec("word2vec.vec");
	df->load_wordvec_norm("word2vec_norm.vec");
	df->compute_wordvec(train_temp);
	df->compute_wordmatch(train_temp);
	df->compute_len_feature(train_temp);
	df->compute_edit_distance(train_temp);
    int cols=33,rows=90000;//df->get_rownum();
    cout<<"feature cols "<<cols<<endl;
	cout<<"feature rows "<<rows<<endl;
	for (int i=0;i<rows;i++){
		train[i][0] = df->feature_result.word_vec_mean_russellrao[i];
		train[i][1] = df->feature_result.char_vec_mean_russellrao[i];
		train[i][2] = df->feature_result.tfidf_word_sokalmichener[i];
		train[i][3] = df->feature_result.tfidf_word_braycurtis[i];
		train[i][4] = df->feature_result.tfidf_word_canberra[i];
		train[i][5] = df->feature_result.tfidf_word_chebyshev[i];
		train[i][6] = df->feature_result.word_q2_skew[i];
		train[i][7] = df->feature_result.word_q1_skew[i];
		train[i][8] = df->feature_result.char_q2_skew[i];
		train[i][9] = df->feature_result.char_q1_skew[i];
		train[i][10] = df->feature_result.word_q2_kur[i];
		train[i][11] = df->feature_result.word_q1_kur[i];
		train[i][12] = df->feature_result.char_q2_kur[i];
		train[i][13] = df->feature_result.char_q1_kur[i];
		train[i][14] = df->feature_result.len_word_text1[i];
		train[i][15] = df->feature_result.len_word_text2[i];
		train[i][16] = df->feature_result.word_match_list[i];
		train[i][17] = df->feature_result.char_match_list[i];
		train[i][18] = df->feature_result.word_vec_mean_kulsinski[i];
		train[i][19] = df->feature_result.char_vec_mean_kulsinski[i];
		train[i][20] = df->feature_result.word_vec_mean_russellrao_norm[i];
		train[i][21] = df->feature_result.char_vec_mean_russellrao_norm[i];
		train[i][22] = df->feature_result.word_vec_mean_kulsinski_norm[i];
		train[i][23] = df->feature_result.char_vec_mean_kulsinski_norm[i];
		train[i][24] = df->feature_result.word_match_list_stopword[i];
		train[i][25] = df->feature_result.char_match_list_stopword[i];
		train[i][26] = df->feature_result.tfidf_word_yule[i];
		train[i][27] = df->feature_result.word_levenstein[i];
		train[i][28] = df->feature_result.word_jaroWinkler[i];
		train[i][29] = df->feature_result.word_tokenSetRatio[i];
		train[i][30] = df->feature_result.word_partialRatio[i];
		train[i][31] = df->feature_result.word_partialSortRatio[i];
		train[i][32] = df->feature_result.word_sortRatio[i];
	}

	float train_labels[rows];
	for (int i=0;i<rows;i++){
		train_labels[i] = df->label_column[i];
	}
	
	cout<<"start train"<<endl;
	
	// convert to DMatrix
	DMatrixHandle h_train[1];
	XGDMatrixCreateFromMat((float *) train, rows, cols, -1, &h_train[0]);
    cout<<"start train!"<<endl;
	// load the labels
	//XGDMatrixSetUIntInfo(h_train[0], "label", train_labels, rows);
	XGDMatrixSetFloatInfo(h_train[0], "label", train_labels, rows);
    //XGDMatrixSetGroup
	
	// read back the labels, just a sanity check
	//bst_ulong bst_result;
	//const float *out_floats;
	//XGDMatrixGetFloatInfo(h_train[0], "label" , &bst_result, &out_floats);
	//for (unsigned int i=0;i<bst_result;i++)
	//	std::cout << "label[" << i << "]=" << out_floats[i] << std::endl;
    cout<<"start train!!"<<endl;
	// create the booster and load some parameters
	BoosterHandle h_booster;
	XGBoosterCreate(h_train, 1, &h_booster);
	XGBoosterSetParam(h_booster, "booster", "gbtree");
	XGBoosterSetParam(h_booster, "objective", "binary:logistic");
	XGBoosterSetParam(h_booster, "max_depth", "5");
	XGBoosterSetParam(h_booster, "eta", "0.1");
	XGBoosterSetParam(h_booster, "silent", "1");
	XGBoosterSetParam(h_booster, "min_child_weight", "1");
	XGBoosterSetParam(h_booster, "subsample", "0.5");
	XGBoosterSetParam(h_booster, "colsample_bytree", "1");
	XGBoosterSetParam(h_booster, "num_parallel_tree", "1");
    cout<<"start train!!!"<<endl;
	// perform 200 learning iterations
	for (int iter=0; iter<500; iter++)
		XGBoosterUpdateOneIter(h_booster, iter, h_train[0]);
    cout<<"train end!!!!"<<endl;
	
	XGBoosterSaveModel(h_booster,"cpp_model");
	
	// predict
	const int sample_rows = 2900;
	float test[sample_rows][cols];
	for (int i=rows;i<rows+sample_rows;i++){
		test[i-rows][0] = df->feature_result.word_vec_mean_russellrao[i];
		test[i-rows][1] = df->feature_result.char_vec_mean_russellrao[i];
		
		test[i-rows][2] = df->feature_result.tfidf_word_sokalmichener[i];
		test[i-rows][3] = df->feature_result.tfidf_word_braycurtis[i];
		test[i-rows][4] = df->feature_result.tfidf_word_canberra[i];
		test[i-rows][5] = df->feature_result.tfidf_word_chebyshev[i];
	
		test[i-rows][6] = df->feature_result.word_q2_skew[i];
		test[i-rows][7] = df->feature_result.word_q1_skew[i];
		test[i-rows][8] = df->feature_result.char_q2_skew[i];
		test[i-rows][9] = df->feature_result.char_q1_skew[i];
		test[i-rows][10] = df->feature_result.word_q2_kur[i];
		test[i-rows][11] = df->feature_result.word_q1_kur[i];
		test[i-rows][12] = df->feature_result.char_q2_kur[i];
		test[i-rows][13] = df->feature_result.char_q1_kur[i];
		
		test[i-rows][14] = df->feature_result.len_word_text1[i];
		test[i-rows][15] = df->feature_result.len_word_text2[i];
		
		test[i-rows][16] = df->feature_result.word_match_list[i];
		test[i-rows][17] = df->feature_result.char_match_list[i];
		
		test[i-rows][18] = df->feature_result.word_vec_mean_kulsinski[i];
		test[i-rows][19] = df->feature_result.char_vec_mean_kulsinski[i];
		test[i-rows][20] = df->feature_result.word_vec_mean_russellrao_norm[i];
		test[i-rows][21] = df->feature_result.char_vec_mean_russellrao_norm[i];
		test[i-rows][22] = df->feature_result.word_vec_mean_kulsinski_norm[i];
		test[i-rows][23] = df->feature_result.char_vec_mean_kulsinski_norm[i];
		
		test[i-rows][24] = df->feature_result.word_match_list_stopword[i];
		test[i-rows][25] = df->feature_result.char_match_list_stopword[i];
		test[i-rows][26] = df->feature_result.tfidf_word_yule[i];
		
		test[i-rows][27] = df->feature_result.word_levenstein[i];
		test[i-rows][28] = df->feature_result.word_jaroWinkler[i];
		test[i-rows][29] = df->feature_result.word_tokenSetRatio[i];
		test[i-rows][30] = df->feature_result.word_partialRatio[i];
		test[i-rows][31] = df->feature_result.word_partialSortRatio[i];
		test[i-rows][32] = df->feature_result.word_sortRatio[i];	

	}
	DMatrixHandle h_test;
	
	cout<<"start test!!!!"<<endl;
	
	XGDMatrixCreateFromMat((float *) test, sample_rows, cols, -1, &h_test);
	
	bst_ulong out_len=sample_rows;
	const float *f;
	XGBoosterPredict(h_booster, h_test, 0,0,&out_len,&f);
    
	cout<<"test end!!!!"<<endl;
	
	//for (unsigned int i=0;i<out_len;i++){
	//	std::cout << "prediction[" << i << "]=" << f[i] << std::endl;
	//}
	
	ofstream out("out.txt");  
    if (out.is_open())   
    {   
        for (unsigned int i=0;i<sample_rows;i++){
		  out << i << "\t" << f[i] << endl;
		}
    }  
	out.close(); 
	
	// free xgboost internal structures
	XGDMatrixFree(h_train[0]);
	XGDMatrixFree(h_test);
	XGBoosterFree(h_booster);
		

    return 0;
}
