#include <cctype>
#include <math.h>
#include "file_info.h++"
using namespace std;

// Stop-word list shared by every FileInfo. It is currently empty, so
// filter() only applies its minimum-length rule; filter() derives the
// element count from sizeof, so adding entries here (and bumping the
// declared size in the header) requires no other change.
// NOTE(review): zero-length arrays are a compiler extension, not
// standard C++ — confirm this matches the declaration in file_info.h++.
const string FileInfo::wordFilter[0] = {};

// Builds the word statistics for one file: opens the stream, lets
// read() index every word it contains, and relies on RAII to close
// the stream when it goes out of scope.
FileInfo::FileInfo(string const& filePath) : wordsNumber(), numberOfWords(0), fileName(filePath)
{
	ifstream input(filePath.c_str(), ifstream::in);
	this->read(input);
}

FileInfo::~FileInfo()
{
	// Nothing to release manually: every member seen in this file
	// (the word-count map, the counter, the file name) cleans up
	// through its own destructor.
}


// Decides whether a word is significant enough to be counted.
// Returns false for words of 3 characters or fewer, and for any word
// present in the wordFilter stop-word list; true otherwise.
bool FileInfo::filter(const string& word)
{
	// We don't keep words which are composed of 3 characters or less
	// (not significant enough)
	if(word.length() <= 3)
		return false;
	// We don't keep words which are stop-words, that are not important,
	// and also too frequent (conjunctions, verb "be" in all its forms...)
	// sizeof yields a size_t, so the index must be unsigned too —
	// the original 'int i' triggered a signed/unsigned comparison.
	const size_t filterSize = sizeof(wordFilter)/sizeof(string);
	for(size_t i = 0; i < filterSize; ++i)
	{
		if(word == wordFilter[i])
			return false;
	}
	return true;
}

// Tells whether c is one of the characters treated as word
// separators: space, newline, tab, carriage return or form feed.
// (Deliberately narrower than isspace(): '\v' is not included.)
bool isWhiteSpace(char c)
{
	switch(c)
	{
		case ' ':
		case '\n':
		case '\t':
		case '\r':
		case '\f':
			return true;
		default:
			return false;
	}
}

void FileInfo::read(std::ifstream& stream)
{
	//while the file isn't at the end
	while(!stream.eof())
	{
		char wordChars[30];
		int i = 0;

		//read a word ignoring white space
		while(!stream.eof())
		{
			char c;
			stream.read(&c, 1);
			if(!isWhiteSpace(c))
			{
                // We put each character in lower case, so we can easily
                // manage it
				wordChars[i] = tolowercase(c);
				i++;
			}
			else
			{
                // If there's a space encountered, we consider that we reached 
                // the end of a word
                
				if(i != 0)
					break;
			}
		}
		// If there's no significant character in the stream...
		if( i == 0) return;
		wordChars[i] = '\0';
		
		// we reconstruct a string, with all characters we just read
		string s(wordChars);

        // if this is not a word filtered...
		if(this->filter(s))
		{
			numberOfWords ++;
			if(wordsNumber.find(s) == wordsNumber.end())
				wordsNumber[s] = 0;
			wordsNumber[s] ++;
		}
	}
}


//CorpusInfo

// Builds the corpus from a list of file paths: one FileInfo is
// created per file and handed to filesInfo, which takes ownership of
// the pointer (it is indexed with '.' elsewhere, so it is presumably
// a pointer-owning container that deletes its elements itself).
CorpusInfo::CorpusInfo(vector<string> files) : filesInfo()
{
	for(vector<string>::size_type idx = 0; idx != files.size(); ++idx)
		filesInfo.push_back(new FileInfo(files[idx]));
}

// Same as the vector constructor, but for a raw array + length pair:
// every path in files[0..size) gets its own FileInfo stored in
// filesInfo.
CorpusInfo::CorpusInfo(string files[], unsigned int size) : filesInfo()
{
	for(string* it = files; it != files + size; ++it)
		filesInfo.push_back(new FileInfo(*it));
}

// Creates an empty corpus; documents are added later via addDocument().
CorpusInfo::CorpusInfo() {}

void CorpusInfo::addDocument(const std::string& file)
{
     // add another file information in the filesInfo list
	filesInfo.push_back(new FileInfo(file));
}

CorpusInfo::~CorpusInfo()
{
	// Nothing to do here: filesInfo is filled with raw 'new' pointers
	// yet indexed with '.', which suggests a pointer-owning container
	// (e.g. boost::ptr_vector) that deletes the FileInfo objects
	// itself — TODO confirm against the declaration in file_info.h++.
}

// tf-idf score of a word for one document: the term frequency inside
// that document multiplied by the inverse document frequency over the
// whole corpus.
double CorpusInfo::tfIdf(string const& word, int fileIndex)
{
	const double termFrequency = this->tf(word, fileIndex);

	// A zero term frequency forces a zero product whatever the idf is,
	// so skip the corpus-wide idf computation entirely.
	if(termFrequency == 0)
		return 0;

	return termFrequency * this->idf(word);
}

// Term frequency of a word in one document: occurrences of the word
// divided by the total number of (kept) words in that document.
// Returns 0 for a document with no words, to avoid dividing by zero.
double CorpusInfo::tf(string const& word, int fileIndex)
{
	// occurrences of this word in the document...
	double occurrences = filesInfo[fileIndex].getOccurences(word);
	// ...over the total number of words in the same document
	double total = filesInfo[fileIndex].getNumberOfWords();
	return (total == 0) ? 0 : occurrences / total;
}

// Inverse document frequency of a word over the corpus:
// log(total documents / documents containing the word).
// Returns 0 when no document contains the word at all, which avoids
// both a division by zero and log(0).
double CorpusInfo::idf(string const& word)
{
	// Count the documents in which the word appears at least once.
	double documentsWithWord = 0;
	for(unsigned int idx = 0; idx < filesInfo.size(); ++idx)
		if(filesInfo[idx].getOccurences(word) != 0)
			++documentsWithWord;

	if(documentsWithWord == 0)
		return 0;

	return log(filesInfo.size() / documentsWithWord);
}
