/*
 * CWhiteSpaceParser.cpp
 *
 *  Created on: Sep 6, 2010
 *      Author: data
 */

#include "CWhiteSpaceParser.h"
#include "../DictionaryBuilders/TermDictBuilder.h"
#include "../Indexer/InvertedIndexer.h"
#include "../include/stemmer.h"

// Default constructor: no state to initialize; instances are normally
// obtained through the _white_space_parser singleton pointer.
CWhiteSpaceParser::CWhiteSpaceParser() {

}

// Destructor: the parser owns no resources, so there is nothing to release.
CWhiteSpaceParser::~CWhiteSpaceParser() {

}

CWhiteSpaceParser* CWhiteSpaceParser::_white_space_parser = 0;

/**
 * @brief Tokenizes the document's backing file on whitespace, runs each
 *        token through linguisticProcess(), and adds the surviving terms
 *        to the term dictionary and inverted index.
 * @param doc document whose resource file is read, parsed and indexed
 * @return true on success
 */

bool CWhiteSpaceParser::parse_document(CDocument& doc) {
	cout
			<< "Parsing news or any other document using simple whitespace tokenization"
			<< endl;

	string filename = doc.get_resource_name();

	ifstream ifs(filename.c_str());
	if (!ifs.is_open()) {
		cout << "Couldn't open file " << filename << endl;
		return false; // previously reported success even on open failure
	}

	// Read the whole file into a string. Reading directly into the string's
	// buffer fixes two bugs in the old code: the char buffer was never
	// null-terminated (so "file_contents = buffer" read past the allocation)
	// and was never delete[]'d (memory leak).
	ifs.seekg(0, ios::end);
	int length = ifs.tellg();
	ifs.seekg(0, ios::beg);
	string file_contents;
	if (length > 0) {
		file_contents.resize(length);
		ifs.read(&file_contents[0], length);
	}
	ifs.close();

	vector<string> token_vec;
	string delim = "\r\n\t ";
	CUtilities::tokenize(file_contents, token_vec, delim);

	// Singletons are loop-invariant; fetch them once.
	TermDictBuilder *termDictBuilder = TermDictBuilder::getInstance();
	InvertedIndexer *invertedIndex = InvertedIndexer::getInstance();

	for (size_t i = 0; i < token_vec.size(); i++) {
		string term = linguisticProcess(token_vec.at(i));
		if (!term.empty()) {
			size_t tid = termDictBuilder->addTermToMap(term);
			invertedIndex->addPostingList(tid, doc.getDocId(), term);
		}
	}
	cout << "Total number of raw tokens = " << token_vec.size() << endl;

	return true;
}

/**
 * @brief Normalizes one raw token: lower-cases it, strips surrounding
 *        punctuation/whitespace, drops stop words, and stems the remainder.
 * @param token raw whitespace-delimited token from the input text
 * @return the stemmed term, or "" if the token is empty after cleanup or
 *         is a stop word
 */
string CWhiteSpaceParser::linguisticProcess(string token) {
	// lower case and delete punctuation
	string term = token;
	CUtilities::to_lower(term);
	CUtilities::delete_non_alphanum_left_right(term);
	CUtilities::trim_left_right(term);
	// Guard the empty string: term.length() - 1 on an empty term would
	// underflow (size_t) before reaching stem().
	if (term.empty() || CUtilities::isStopWord(term)) {
		return "";
	}
	CUtilities::normalize(term);
	// stem() rewrites the buffer in place and returns the index of the last
	// character of the stem. Stemming through &term[0] and then resizing
	// keeps the string object consistent; the old code cast away const on
	// c_str() and wrote a NUL into it, which is undefined behavior and left
	// the string's recorded length wrong.
	int stemEnd = stem(&term[0], 0, (int) term.length() - 1);
	term.resize(stemEnd + 1);
	return term;
}

/**
 * @brief Tokenizes the document's in-memory contents on whitespace, indexes
 *        each surviving term, and records the category/term overlap counts
 *        used for the Jaccard-coefficient specificity measure.
 * @param doc document providing the text and receiving the Jaccard counts
 */
void CWhiteSpaceParser::parseText(CDocument & doc) {
	string file_contents = doc.getFile_contents();
	vector<string> token_vec;
	string delim = "\r\n\t ";

	size_t termId;
	CUtilities::tokenize(file_contents, token_vec, delim);

	// Singletons are loop-invariant; fetch them once instead of per token.
	TermDictBuilder *termDictBuilder = TermDictBuilder::getInstance();
	InvertedIndexer *invertedIndex = InvertedIndexer::getInstance();

	// For Specificity: category term ids used for the Jaccard coefficient.
	vector<size_t> catTermIds = doc.getCatTermIds();
	bool catOp = !catTermIds.empty();
	size_t JCIntersectCount = 0;
	size_t allTermsCount = 0;

	for (size_t i = 0; i < token_vec.size(); i++) {
		string term = linguisticProcess(token_vec.at(i));
		if (!term.empty()) {
			termId = termDictBuilder->addTermToMap(term);
			// inverted index
			invertedIndex->addPostingList(termId, doc.getDocId(), term);

			// Cat & Term count for Jaccard Coeff
			if (catOp) {
				allTermsCount++;
				if (find(catTermIds.begin(), catTermIds.end(), termId)
						!= catTermIds.end()) {
					JCIntersectCount++;
				}
			}
		}
	}
	doc.setJcIntersectCount(JCIntersectCount);
	doc.setAllTermsCount(allTermsCount);
}

