#include "ExtractKeyword.h"
#include "Utility.h"
#include "SharedFileName.h"
#include <fstream>
#include <iostream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <set>
#include <sstream>
#include <unordered_map>
#include <map>
#include <vector>
#include <queue>  
#include <time.h>
#include <math.h>
using namespace std;

#define TIME_PARTITION 0 // this value may need to change later

// A keyword together with its document frequency (named "idf" throughout
// this file), ordered by that count so a priority_queue yields the
// most-frequent keyword first.
struct KeywordStruct{
	string keyword; // the keyword text
	int idf;        // number of tweets the keyword appears in

	// Takes the key by const reference (the original non-const string&
	// rejected temporaries and still paid for a copy in the body) and
	// initializes members directly instead of default-constructing first.
	KeywordStruct(const string& key_str, int count) : keyword(key_str), idf(count) {}

	// Orders by document frequency; ties compare as equivalent.
	bool operator<(const KeywordStruct& key_struct) const {
		return idf < key_struct.idf;
	}
};

// Parse a "YYYY-MM-DD HH:MM:SS"-style timestamp line into a time_t.
// Returns (time_t)-1 when the line does not contain six numeric fields
// (mktime itself also returns -1 for unrepresentable times, which the
// callers treat the same way: the tweet is skipped as invalid).
static time_t ParseTweetTime(const string& publish_time){
	// strtok needs a mutable, NUL-terminated buffer.  The original code
	// either mutated the string's internal buffer through c_str() (UB) or
	// allocated size() bytes with no terminator (out-of-bounds read) and
	// leaked that buffer on early 'continue'.  A local vector fixes all three.
	vector<char> buf(publish_time.begin(), publish_time.end());
	buf.push_back('\0');

	int fields[6];
	char* token = strtok(&buf[0], " -:");
	for(int i = 0; i < 6; i++){
		if(token == NULL) return (time_t)-1; // malformed line; atoi(NULL) would crash
		fields[i] = atoi(token);
		token = strtok(NULL, " -:");
	}

	struct tm tminfo;
	memset(&tminfo, 0, sizeof(tminfo)); // zero tm_isdst/tm_wday etc. so mktime is deterministic
	tminfo.tm_year = fields[0] - 1900;  // tm_year counts from 1900
	tminfo.tm_mon  = fields[1] - 1;     // tm_mon is 0-based
	tminfo.tm_mday = fields[2];
	tminfo.tm_hour = fields[3];
	tminfo.tm_min  = fields[4];
	tminfo.tm_sec  = fields[5];
	return mktime(&tminfo);
}

// Build the keyword vocabulary from the filtered tweet file (pass 1),
// rewrite every surviving tweet as (keyword id, normalized tf) pairs into
// output_dir (pass 2), dump the keyword/idf file and summary statistics,
// then load everything into the sqlite database via WriteToDB().
// Exits the process if the input file cannot be opened.
ExtractKeyword::ExtractKeyword(string input_dir, string output_dir){
	set<string> croase_keyword_set;
	unordered_map<string,int> keyword_freq_count;

	int num_tweets = 0;
	string in_file_name = input_dir;
	ifstream inDataFile(in_file_name.append(sample_filtered_data).c_str());

	// ---- pass 1: count global keyword frequencies -------------------------
	if(inDataFile.is_open()){
		time_t time_tracker = 0;
		int invalid_count = 0;
		while(true){
			string publish_time;
			string user_ID;
			string tweet;

			// records are triples of lines: publish time, user ID, tweet text
			if(!getline(inDataFile,publish_time)) break;
			if(!getline(inDataFile,user_ID)) break;
			if(!getline(inDataFile,tweet)) break;

			// drop tweets that are malformed or go backwards in time
			time_t doc_time = ParseTweetTime(publish_time);
			if(doc_time == (time_t)-1 || doc_time < time_tracker){
				invalid_count++;
				continue;
			}
			time_tracker = doc_time;

			stringstream ss(tweet);
			string token;
			while(ss >> token){
				croase_keyword_set.insert(token);
				keyword_freq_count[token]++; // operator[] value-initializes new keys to 0
			}

			num_tweets++;
			if(num_tweets % 1000000 == 0) cout << "read " << (num_tweets / 1000000) << "M tweets in extract keyword" << endl;
		}
		cout << "number of invalid tweets for time: " << invalid_count << endl;
	}else{
		cout << "input filtered tweet data cannot be read" <<endl;
		exit(1);
	}

	// assign a dense id to every keyword that clears the frequency threshold
	unordered_map<string,int> keyword_map;
	vector<string> keywords_vec;

	int num_keywords = 0;
	int infreq_keyword_count = 0;
	for(auto it = croase_keyword_set.begin(); it != croase_keyword_set.end(); it++){
		if(keyword_freq_count[*it] < KEYWORD_THRESHOLD){
			infreq_keyword_count++;
			continue;
		}
		keyword_map[*it] = num_keywords;
		keywords_vec.push_back(*it);
		num_keywords++;
	}

	// ---- pass 2: re-read the file (the corpus may not fit in memory) ------
	inDataFile.clear();
	inDataFile.seekg(0, inDataFile.beg);

	cout << "re-read the file" << endl;
	string ou_data_file_name(output_dir);
	ofstream outDataFile(ou_data_file_name.append(sample_transformed_data).c_str(),ios::out);
	vector<int> keywords_idf(num_keywords,0);

	num_tweets = 0;
	int total_num_token = 0;
	time_t time_tracker = 0;
	while(true){
		string publish_time;
		string user_ID;
		string tweet;

		if(!getline(inDataFile,publish_time)) break;
		if(!getline(inDataFile,user_ID)) break;
		if(!getline(inDataFile,tweet)) break;

		// same validity filter as pass 1 (the original pass-2 code leaked a
		// heap buffer on this 'continue' path and read past an unterminated one)
		time_t doc_time = ParseTweetTime(publish_time);
		if(doc_time == (time_t)-1 || doc_time < time_tracker) continue;
		time_tracker = doc_time;

		stringstream iss(tweet);
		string token;

		int num_token = 0;
		map<int,int> keyword_count; // per-tweet keyword-id -> occurrence count

		while(iss >> token){
			unordered_map<string,int>::iterator itor = keyword_map.find(token);
			if(itor == keyword_map.end()) continue; // token was below the threshold

			num_token++;
			total_num_token++;

			keyword_count[itor->second]++; // value-initialized to 0 on first sight
		}
		if(num_token == 0) continue; // nothing survived the vocabulary filter

		// write publish time and user ID back
		outDataFile << publish_time << endl;
		outDataFile << user_ID << endl;

		// L2-normalize the per-tweet term counts before writing them out
		double keyword2dsum = 0;
		for(map<int,int>::iterator it = keyword_count.begin(); it != keyword_count.end(); it++){
			keywords_idf[it->first]++; // document frequency: count each keyword once per tweet
			keyword2dsum += (double)it->second * it->second;
		}
		keyword2dsum = sqrt(keyword2dsum);
		for(map<int,int>::iterator it = keyword_count.begin(); it != keyword_count.end(); it++){
			double freq = (double)it->second/keyword2dsum; // normalized term frequency
			outDataFile << it->first << " " << freq << " ";
		}
		outDataFile << endl;

		num_tweets++;
		if(num_tweets % 1000000 == 0) cout << "re-read " << (num_tweets / 1000000) << "M tweets in extract keyword" << endl;
	}
	outDataFile.close();
	inDataFile.close();

	// ---- write keyword/idf file and report statistics ---------------------
	int total_idf = 0;
	priority_queue<KeywordStruct> keyword_queue;

	string keyword_file_name(output_dir);
	ofstream keywordFile(keyword_file_name.append(sample_keyword_file).c_str(),ios::out);
	for(int i = 0; i < num_keywords; i++){
		total_idf += keywords_idf[i];
		KeywordStruct new_key(keywords_vec[i],keywords_idf[i]);
		keyword_queue.push(new_key);
		keywordFile << keywords_vec[i] << " " <<  keywords_idf[i] << endl;
	}
	keywordFile.close();
	cout << "finish reading the keyword file" << endl;
	cout << "number of tweets: " << num_tweets << endl;
	cout << "number of tokens in total: " << total_num_token << endl;
	cout << "average tweet size: " << (double)total_num_token / num_tweets << endl;
	cout << "number of keyword in the sample: " << num_keywords << endl;
	cout << "There are: " << infreq_keyword_count << " which appear at most " << (KEYWORD_THRESHOLD - 1) << " times in the entire dataset" << endl;
	cout << "average idf: " << (double)total_idf / num_keywords << endl;
	// print the 100 highest-idf keywords; the emptiness guard prevents the
	// original's pop-from-empty-queue when the vocabulary is smaller than 100
	for(int i = 0; i < 100 && !keyword_queue.empty(); i++){
		KeywordStruct key_struct = keyword_queue.top();
		cout << "keyword: " << key_struct.keyword << " and idf: " << key_struct.idf << endl;
		keyword_queue.pop();
	}

	WriteToDB(input_dir);
}

void ExtractKeyword::WriteToDB(string input_dir){
	sqlite3 *db;
	char *zErrMsg = 0;
	int rc;

	string db_name(input_dir);
	db_name.append(sample_database_name);

	// remove the db
	string clear_db_cmd("rm ");
	clear_db_cmd.append(db_name);
	system(clear_db_cmd.c_str());

	/* Open database */
	rc = sqlite3_open(db_name.c_str(), &db);
	if( rc ){
		fprintf(stderr, "Can't open database: %s\n", sqlite3_errmsg(db));
		exit(0);
	}else{
		fprintf(stdout, "Opened database successfully\n");
	}

	// drop existing table
	stringstream sql;
	sql << "drop table if exists " << doc_table_name << ";";
	sql << "drop table if exists " << doc_term_table_name << ";";
	rc = sqlite3_exec(db, sql.str().c_str(), NULL, NULL, &zErrMsg);
	SQLerror(rc,zErrMsg);

	// create new tables
	sql.str(string());
	sql << "create table " << doc_table_name << "(";
	sql << doc_id_name << " int primary key not null,";
	sql << person_id_name << " int not null,";
	sql << time_name << " datetime int not null,";
	sql << time_partition_name << " int not null);";

	sql << "create table " << doc_term_table_name << "(";
	sql << doc_id_name << " int not null,";
	sql << term_id_name << " int not null,";
	sql << term_freq_name << " real not null,";
	sql << time_partition_name << " int not null,";
	sql << "primary key(" << doc_id_name << "," << term_id_name << "));";
	rc = sqlite3_exec(db, sql.str().c_str(), NULL, NULL, &zErrMsg);
	SQLerror(rc,zErrMsg);

	// set paramter to speed up the insertion
	char * sErrMsg = 0;
	sqlite3_exec(db, "PRAGMA synchronous = OFF", NULL, NULL, &sErrMsg);

	// open the file
	string file_name = input_dir;
	file_name.append(sample_transformed_data);
	cout << "tweet file name: " << file_name << endl;
	ifstream tweetFile(file_name.c_str());
	if(!tweetFile.is_open()){
		cout << "cannot open transformed tweet file" << endl;
		sqlite3_close(db);
		exit(1);
	}
	
	time_t start = clock();
	// open transction
	sqlite3_exec(db, "BEGIN TRANSACTION", NULL, NULL, &sErrMsg);

	cout << "start to insert data" << endl;
	int tweet_id = 0;
	string empty_line;
	while(true){
		string publish_time;
		string author;
		string tweet;

		getline(tweetFile,publish_time);
		if(publish_time.empty()) break;

		getline(tweetFile,author);
		if(author.empty()) break;

		getline(tweetFile,tweet);
		if(tweet.empty()) break;
		
		// insert into document table
		
		publish_time.insert(0,"\"");
		publish_time.append("\"");

		sql.str(string());
		sql << "insert into " << doc_table_name << " values(";
		sql << tweet_id << "," << author << "," << publish_time << "," << TIME_PARTITION << ")";
		rc = sqlite3_exec(db, sql.str().c_str(), NULL, NULL, &zErrMsg);
		SQLerror(rc,zErrMsg);

		// insert into keyword table
		stringstream ss(tweet);
		while(true){
			int keyword_id;
			double frequency;
			if(!(ss >> keyword_id)) break;
			ss >> frequency;

			sql.str(string());
			sql << "insert into " << doc_term_table_name << " values(";
			sql << tweet_id << "," << keyword_id << "," << frequency << "," << TIME_PARTITION << ")";
			rc = sqlite3_exec(db, sql.str().c_str(), NULL, NULL, &zErrMsg);
			SQLerror(rc,zErrMsg);

			/*if(rc != 0){
				cout << "big sql is wrong" << endl;
				cout << "error code: " << rc << endl;
				exit(1);
			}*/

		}

		tweet_id++;
		if(tweet_id % 100000 == 0){
			cout << "processed :" << (tweet_id/1000) << "k tweets" << endl;
		}

	}
	tweetFile.close();
	cout << "number of tweets: " << tweet_id << endl;

	// close transction
	sqlite3_exec(db, "END TRANSACTION", NULL, NULL, &sErrMsg);

	cout << "time taken to load tweet is: " << ((double)clock()-start)/CLOCKS_PER_SEC << endl;

	// select example
	sql.str(string());
	sql << "select * from " << doc_table_name << " where " << doc_id_name << " < 2;";
	sqlite3_stmt *statement;   
	if (sqlite3_prepare(db, sql.str().c_str(), -1, &statement, 0) == SQLITE_OK ) 
	{
		int ctotal = sqlite3_column_count(statement);
		int res = 0;

		while ( 1 )         
		{
			res = sqlite3_step(statement);

			if ( res == SQLITE_ROW ) 
			{
				for ( int i = 0; i < ctotal; i++ ) 
				{
					string s = (char*)sqlite3_column_text(statement, i);
					// print or format the output as you want 
					cout << s << " " ;
				}
				cout << endl;
			}

			if ( res == SQLITE_DONE || res==SQLITE_ERROR)    
			{
				cout << "done " << endl;
				break;
			}    
		}
	}
	sqlite3_close(db);
}

// Program entry point.
// Usage: <exe> <input_dir> <output_dir>
//   argv[1] - directory holding the filtered tweet file
//   argv[2] - directory for the keyword-ID tweet file and the keyword file
int main(int argc, char *argv[]){
	// exactly two directories are required
	if(argc != 3){
		cout << "wrong arguments for " << argv[0] << endl;
		cout << "1. input directory (filtered tweets)" << endl;
		cout << "2. output directory for tweets that represented by keyword IDs and keyword file" << endl;
		exit(1);
	}

	string in_dir(argv[1]);
	string out_dir(argv[2]);

	// the constructor performs the entire extraction pipeline
	ExtractKeyword extractor(in_dir, out_dir);
	cout << "finish extracting" << endl;
	return 0;
}
