#include <iterator>

#include "indexer.h++"
#include "serializer.h++"
#include "word_tree.h++"
#include "word_filter.h++"

// Builds an indexer that emits its results through the given
// serializer; the reference is kept for the indexer's whole lifetime.
Indexer::Indexer(Serializer& serializer_to_use)
		: serializer(serializer_to_use) {}

using namespace boost::filesystem;

// Recursively walks `dir` and indexes every regular file found under
// it. Symlinked directories are deliberately not descended into, which
// also protects against symlink cycles.
void Indexer::index_dir(const path& dir) {
	recursive_directory_iterator end;
	recursive_directory_iterator iter(dir);


	current_basepath = dir;

	// sadly, boost_foreach cannot iterate over iterators references.
	for (; iter != end; ++iter) {

		// BUGFIX: status() follows symlinks, so the old
		// is_symlink(iter->status()) test could never fire and
		// directory symlinks were silently traversed. The link itself
		// must be examined with symlink_status().
		if (is_symlink(iter->symlink_status()))
			iter.no_push(); // don't traverse directory symlinks.

		if (!is_regular_file(iter->status()))
			continue;

		index_file(iter->path());
	}
}


// Callback fired (through the word filter proxy) for every word kept
// from the file currently being read: records the word in the word
// tree, feeds the tf statistics, and remembers the resulting posting
// so it gets serialized once the whole document has been read.
void Indexer::found_word(UnserializedDocument& doc,
		const Word& word) {
	// FIXME : filter the word here.
	WordTree::UnserializedNode& word_node =
			root_node->create_get_word(word.lemma);
	calculator.addWord(&doc, word.lemma);

	postings_to_serialize.insert(
			&word_node.addDocumentMatch(&doc, word.word_index, word.offset));
}

// Strips `_base` from the front of `base_and_suffix` and returns the
// remainder as a path rooted at ".". Throws std::runtime_error when
// base_and_suffix does not actually start with _base.
static path relativize_path(const path& _base, const path& base_and_suffix) {
	// Appending "." then taking parent_path() normalizes away a
	// possible trailing separator on the base.
	const path real_base((_base/".").parent_path());

	// First, walk both paths in lockstep and make sure every component
	// of the base matches the front of the full path.
	path::const_iterator full_it = base_and_suffix.begin();
	path::const_iterator base_it = real_base.begin();
	for (; base_it != real_base.end(); ++base_it, ++full_it) {
		if (full_it == base_and_suffix.end())
			throw std::runtime_error("long path is shortest ?!?");
		if (*base_it != *full_it)
			throw std::runtime_error("diverging path");
	}

	// Then rebuild whatever is left as a "."-rooted relative path.
	path relative_path(".");
	for (; full_it != base_and_suffix.end(); ++full_it)
		relative_path /= *full_it;
	return relative_path;
}

// Indexes one file: registers a new document entry for it, streams its
// words through the word filter into found_word(), then serializes the
// document and every posting list that was touched while reading it.
void Indexer::index_file(const boost::filesystem::path& filefullpath) {

	// i need the relative path. boost doesn't have this yet.
	path relative_path(relativize_path(current_basepath, filefullpath));

    // adds a new entry in the documents list, according to the given file path
	documents.push_back(new UnserializedDocument(relative_path));

	// Clean slate: found_word() fills this set while the file is read.
	postings_to_serialize.clear();

	// create a word filter proxy that will filter words before
	// they hit our "found_word" callback.
	typedef WordFilter::Proxy<Indexer> word_filter_type;
	word_filter_type word_filter(WordFilter::get_for(*this));

	//StupidFileReader reader;
	TreeTaggerFileReader reader;

	// basically, reader reads the file, pass the found words to
	// word_filter, which will pass those words to us (if they aren't filtered).
	// NOTE(review): the base directory is passed here, not filefullpath —
	// presumably read_file recombines it with the document's relative
	// path; confirm against TreeTaggerFileReader::read_file.
	reader.read_file<word_filter_type>(current_basepath, documents.back(),
			word_filter);

	// Persist the document first, then every posting list touched while
	// reading it; now_serialized() records where each posting landed.
	serializer.serialize_document(documents.back());
	BOOST_FOREACH(mdoc* matchingdoc, postings_to_serialize) {
		matchingdoc->now_serialized(
				serializer.serialize_postings(*matchingdoc));
	}
	postings_to_serialize.clear();
}


void Indexer::init() {
	root_node.reset(new WordTree::UnserializedNode('\0'));
}

// Walks the whole word tree and stores, on every (word, document)
// pair, the tf.idf weight of that word in that document.
void Indexer::calculate_tfidfs(WordTree::UnserializedNode& node) {

	// The idf of the word this node stands for depends only on how
	// many documents contain it, so compute it once per node.
	double idf = calculator.idf(node.get_document_count());

	// For each document containing this word, combine the
	// document-local tf with the node-wide idf.
	typedef WordTree::UnserializedNode::document_content_type per_document;
	BOOST_FOREACH(per_document& doc_entry, node.get_documents()) {
		size_t occurences_in_doc = doc_entry.second.get_nb_occurences();
		double tf = calculator.tf(occurences_in_doc, doc_entry.first);
		doc_entry.second.tf_idf = tf * idf;
	}

	// Recurse into every child so the whole tree gets weighted.
	typedef WordTree::UnserializedNode::childs_type tree_child;
	BOOST_FOREACH(tree_child& child, node.get_childs())
		calculate_tfidfs(*child.second);
}
// Final pass once every file has been indexed: the tf.idf weights are
// computed over the whole tree first, then the tree is written out, so
// the serialized nodes carry the finished weights.
void Indexer::finish_and_serialize() {
	calculate_tfidfs(*root_node);
	serializer.serialize_node_header(*root_node);
}
