#include <fstream>
#include <bitset>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <list>
#include <algorithm>
#include <math.h>
#include <time.h>
#include <limits.h>
#include <errno.h>
#include <stdio.h>
#include <pthread.h>
#include <string.h>
#include <ext/hash_map>
#include <ext/hash_set>

#include "slog.h"
#include "Config.h"
#include "gbk.h"
#include "svd.h"
#include "word.h"
#include "wordprocess.h"

// ---- Pipeline tuning constants ----------------------------------------
// Converted from #defines to typed consts: same values and call sites,
// but type-safe, scoped, and visible in a debugger (C++98-compatible).
const int    wordlen     = 4;     // word length limit passed to the extraction passes
const double entro_thres = 0.1;   // entropy threshold (PushAllNeighborAndCalEntropy)
const double inde_thres  = 10.0;  // independence threshold (IndependenceCheck)
const int    THREADS     = 24;    // worker-thread count for the parallel passes
const int    GROUPS      = 10;    // group count for PushAllNeighborAndCalEntropy
const double sim_thres   = 0.2;   // similarity cutoff passed to CalSim
const int    sim_count   = 30;    // max similar entries per item passed to CalSim

using namespace d_utils;

// Global lookup tables shared with the wordprocess helpers.
// NOTE(review): presumably i2w maps item id -> word ids and w2i the
// reverse — confirm against LoadItems/LoadWords before relying on it.
hash_map<int, vector<int> > i2w, w2i;
hash_map<int, string> id_word;                   // word id   -> word text
hash_map<string, int, wp_string_hash> word_id;   // word text -> word id


/*
 * Daily word-extraction / clustering pipeline driver.
 *
 * Reads paths from the "tag_config" config file, derives yesterday's date
 * stamp (YYYYMMDD), builds all input/output paths under <root_dir>/data and
 * <root_dir>/logs, then runs the currently enabled stages:
 * LoadSimtable -> LoadWords -> LoadItems -> Clustering.
 * The large commented sections are earlier/optional stages kept for
 * reference and possible re-enabling.
 *
 * argc/argv are unused: all configuration comes from the config file.
 * Returns 0 on completion.
 */
int main( int argc, char ** argv )
{
	char outfile[1000];
	char outfile1[1000];
	char outfile2[1000];
	char outfile3[1000];
	char outfile4[1000];
	char infile[1000];
	char infile1[1000];
	char datebuff[1000];
	char filename[1000];
	const char *conf_file = "tag_config";
	Config conf(conf_file);

	// BUG FIX: the original stored conf.get(...).c_str() into raw
	// const char* locals.  If Config::get returns std::string by value,
	// those pointers dangle as soon as each temporary is destroyed (end of
	// the full expression), making every path below read freed memory.
	// Holding named std::string copies for the whole of main() is correct
	// whether get() returns by value or by reference.
	const std::string dir = conf.get("root_dir");//argv[1];
	const std::string itemfile = conf.get("item_file");
	const std::string wordfile = conf.get("word_file");
	const std::string simfile = conf.get("sim_file");

	// Stamp all generated files with yesterday's date.
	time_t t;
	time(&t);
	t -= 3600*24*1;
	struct tm tmbuf;
	// localtime_r instead of localtime: this program already links pthreads,
	// and the _r variant avoids the shared static tm buffer.
	struct tm* tt = localtime_r(&t, &tmbuf);
	strftime( datebuff, sizeof(datebuff), "%Y%m%d", tt );

	// snprintf instead of sprintf: root_dir comes from an external config
	// file, so cap every write at the 1000-byte buffer size.
	snprintf(filename, sizeof(filename), "%s/logs/word_%s.log", dir.c_str(), datebuff);
	snprintf(infile, sizeof(infile), "%s/data/%s", dir.c_str(), itemfile.c_str());
	snprintf(infile1, sizeof(infile1), "%s/data/%s", dir.c_str(), simfile.c_str());
	snprintf(outfile, sizeof(outfile), "%s/data/sim_new_%s.txt", dir.c_str(), datebuff);
	snprintf(outfile1, sizeof(outfile1), "%s/data/%s", dir.c_str(), wordfile.c_str());
	snprintf(outfile2, sizeof(outfile2), "%s/data/item_word_%s.txt", dir.c_str(), datebuff);
	snprintf(outfile3, sizeof(outfile3), "%s/data/sim_word_%s.txt", dir.c_str(), datebuff);
	snprintf(outfile4, sizeof(outfile4), "%s/data/cluster_%s.txt", dir.c_str(), datebuff);
	InitialLog(filename);
	Info("%s %s %s", filename, infile, outfile);

	// Working maps for the extraction stages.  Several are used only by the
	// commented-out code below; they are kept so that code can be
	// re-enabled without further edits.
	ite_rank_map_t frequency,independence;
	neighbor_map_t neighbor;
	str_bucket_t str_chn, str_eng, str_short;
	entro_rank_map_t entro_rank_map;

	nbr_rank_map_t newmap;
/*
	PushAllNeighborAndCalEntropy(infile, entro_rank_map, wordlen, THREADS, GROUPS, entro_thres);
	{
		str_bucket_t new_buck;
		new_buck.clear();
		str_chn.swap(new_buck);
	}

	CalFrequency1(infile, frequency, wordlen, THREADS);
	{
		str_bucket_t new_buck;
		new_buck.clear();
		str_short.swap(new_buck);
	}

	IndependenceCheck(entro_rank_map, frequency, independence, inde_thres, 20);

	ofstream fout(outfile1);
	for(entro_rank_map_t::iterator it = entro_rank_map.begin();it!=entro_rank_map.end();it++)
	{
		fout<<(it->item)<<','<<(it->entrol)<<' '<<(it->entror)<<' '<<independence[(it->item)]<<' '<<frequency[(it->item)]<<endl;
	}
	fout.close();
*/

	// Active pipeline: load precomputed similarity table + word list,
	// load items, then cluster.
	LoadSimtable(infile1);
	LoadWords(outfile1);
	LoadItems(infile, THREADS, wordlen);
	//CalWordSim(THREADS, 30);
	//Emitcfscore(outfile3);
	//EmitItemWord(outfile2);
	//CalSim(1, sim_thres, sim_count, outfile);
	Clustering(outfile4, 0, 10, 20);

	/*
	for(__gnu_cxx::hash_map<int, vector<int> >::iterator it = i2w.begin();it!=i2w.end();it++)
	{
		fout<<(it->first)<<":\t";
		for(vector<int>::iterator itt = (it->second).begin(); itt!=(it->second).end(); itt++)
			fout<<id_word[*itt]<<'\t';
		fout<<endl;
	}
	fout.close();

	SVDTrainer trainer(i2w, 10, 0.01, 0.01);
	trainer.init(i2w, w2i);
	trainer.train(i2w, 0.001, INT_MAX);
	ofstream fout1(outfile3);
	for(__gnu_cxx::hash_map<int, vector<int> >::iterator it = i2w.begin();it!=i2w.end();it++)
	{
		fout1<<(it->first)<<":\t";
		vector<Itemscore> result = trainer.yield_item(it->first, 20);
		for(vector<Itemscore>::iterator itt = result.begin(); itt!=result.end(); itt++)
			fout1<<id_word[itt->item]<<','<<(itt->score)<<'\t';
		fout1<<endl;
	}
	fout1.close();
	*/
	Info("end");
	return 0;
}

