#include <algorithm>
#include <cmath>
#include <cstdint>
#include <map>
#include <set>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

#include <boost/foreach.hpp>

#include "setutility.h++"
#include "index_accessor.h++"
#include "word_tree.h++"
#include "mapped_file.h++"
#include "file_reader.h++"

/**
 * Searcher: runs a query against the mapped index files exposed by
 * IndexAccessor (word tree, per-word detailed data, posting lists).
 *
 * A query is a vector of (lemmatized) words plus a list of groups;
 * each group is a half-open [start, end) range of word indices whose
 * words must appear at consecutive positions (phrase search).
 * Results are ranked by the normalized TF-IDF term vector of each
 * document, dotted with the constant request vector (1, 1, ..., 1).
 */
class Searcher : public IndexAccessor<ReadOnlyMappedMemory> {
public:
	typedef std::vector<Word> vector_word_type;
	// [start, end) range of indices into the search words (a phrase).
	typedef std::pair<size_t, size_t> group_type;
	typedef std::vector<group_type> vector_group_type;
private:
	vector_word_type current_search;
	vector_group_type current_search_group;

	// One entry per search word: NULL when the word is absent from
	// the index, the tree root when the word was suppressed
	// (filtered/stop word, i.e. empty lemma).
	std::vector<WordTree::NodeHeader*> word_nodes;

	// Sparse TF-IDF vector of one candidate document
	// (search-word index -> accumulated weight).
	// reminds me of Lucene ...
	class TermVector {
		PointerTo<Document> document;
		float cached_sum;	// last result of sum(); see operator<
		typedef std::map<size_t, float> coord_map_type;
		coord_map_type coords;
		typedef coord_map_type::value_type base_val;

		public:
		// cached_sum is explicitly zeroed: get_cached_sum() may be
		// called before sum() and must not return garbage.
		TermVector(const PointerTo<Document>& doc)
			: document(doc), cached_sum(0) {}
		TermVector() : document(), cached_sum(0) {}
		float get_cached_sum() const {
			return cached_sum;
		}
		PointerTo<Document>& get_document() { return document; }
		// Accumulate a weight for one word (map operator[]
		// value-initializes missing coordinates to 0).
		void add_term(size_t word, float tfidf) {
			coords[word] += tfidf;
		}
		// Recompute and cache the sum of the coordinates: the dot
		// product with the all-ones request vector.
		float sum() {
			cached_sum = 0;
			BOOST_FOREACH(const base_val& v, coords)
				cached_sum += v.second;
			return cached_sum;
		}
		// Deliberately inverted: "smaller" means a HIGHER score,
		// so an ascending sort puts the best documents first.
		bool operator <(const TermVector& o) const {
			return cached_sum > o.cached_sum;
		}
		// Scale the vector to unit euclidean length.  Vectors of
		// (almost) zero length are left untouched to avoid a
		// division by zero.
		void normalize() {
			float squaredist = 0;
			BOOST_FOREACH(const base_val& v, coords)
				squaredist += v.second * v.second;
			squaredist = sqrt(squaredist);
			if (squaredist <= 0.0001)
				return;
			BOOST_FOREACH(base_val& v, coords)
				v.second /= squaredist;
		}
	};

	// Resolve each word of current_search to its word-tree node and
	// store the result in word_nodes (same indices as current_search).
	void find_the_words() {
		WordTree::NodeHeader& root = wordtree_file->get_header(
			).get_object_after<WordTree::NodeHeader>();

		// search() may be called several times on the same object;
		// drop the nodes of the previous query first, otherwise the
		// new nodes are appended after stale ones and every
		// word_nodes[i] lookup uses the wrong query.
		word_nodes.clear();

		size_t i;
		for (i = 0; i < current_search.size(); ++i) {
			if (current_search[i].lemma.empty()) {
				// suppressed (filtered) word.
				word_nodes.push_back(&root);
				continue;
			}
			// we push NULL if the word doesn't exist in the index;
			// words without detailed data count as absent too.
			WordTree::NodeHeader* node = root.getWord(
						*wordtree_file,
						current_search[i].lemma);
			if (node && !node->get_detailed_data().pointer.address)
				node = NULL;
			word_nodes.push_back(node);
		}
	}
	// this could be greatly optimized, but i think we don't need it.
	typedef std::set<PointerTo<Document> > documents_type;

	// All documents containing search word #index; empty set when the
	// word is not in the index.
	documents_type get_word_documents(size_t index) {
		if (!word_nodes[index])
			return documents_type();

		PointerTo<DetailedDataHeader> detail_ptr =
				word_nodes[index]->get_detailed_data();

		DetailedDataHeader& detail = moreinfo_file->get(detail_ptr);
		DetailedDataHeader::document_table_type docs(
				detail.getDocumentTable());
		return documents_type(docs.begin(), docs.end());
	}

	// True when some position p of `smallest` satisfies
	// p + wanted_offset present in `biggest`.  Both position lists
	// are sorted ascending, so one merge-like scan is enough.
	bool has_match(PointerTo<PostingData> smallest,
			PointerTo<PostingData> biggest,
			int wanted_offset) {

		PostingData::posting_table_type bigtable = postings_file->get(
				biggest).getPostingTable();

		PostingData::posting_table_type smalltable = postings_file->get(
				smallest).getPostingTable();

		// ahah, it's just a uint32_t* ;)
		PostingData::posting_table_type::iterator iter;
		iter = bigtable.begin();

		BOOST_FOREACH(uint32_t pos, smalltable) {
			// a phrase cannot extend before position 0: a
			// negative target would underflow uint32_t and,
			// besides being wrong, break the monotonic scan of
			// bigtable by exhausting the iterator.
			if (wanted_offset < 0 &&
					pos < (uint32_t)(-wanted_offset))
				continue;
			uint32_t wanted = pos + wanted_offset;
			while (iter != bigtable.end()) {
				if (*iter == wanted)
					return true;
				else if (*iter > wanted)
					break;
				++iter;
			}
		}
		return false;
	}

	// Position of `wanted` inside the document table of word #index.
	// Throws std::out_of_range when the document is not listed there.
	size_t index_of_doc_in_node(size_t index, PointerTo<Document> wanted) {
		DetailedDataHeader::document_table_type table(
			moreinfo_file->get(
				word_nodes[index]->get_detailed_data()
			).getDocumentTable());
		size_t ret = 0;
		BOOST_FOREACH(PointerTo<Document>& doc, table) {
			if (doc == wanted)
				return ret;
			ret++;
		}
		throw std::out_of_range("no such document on that word");
	}

	// Documents containing every word of [start, end) at consecutive
	// positions (phrase match).  start/end are absolute indices into
	// current_search / word_nodes.  NOTE: the previous version mixed
	// absolute and relative indices (word_nodes[w+start], absolute w
	// compared against relative min_w) and only worked for start == 0.
	documents_type find_group(size_t start, size_t end) {
		// nb_docs_for_each[w - start] = number of documents of word
		// w, or 0 when the word is filtered (no detailed data).
		std::vector<size_t> nb_docs_for_each;
		size_t w;
		for (w = start; w < end; ++w) {
			// one of the words doesn't even exist: no phrase match.
			if (!word_nodes[w])
				return documents_type();

			if (!word_nodes[w]->get_detailed_data(
					).pointer.address) {
				nb_docs_for_each.push_back(0);
				continue;
			}

			DetailedDataHeader& detailed = moreinfo_file->get(
				word_nodes[w]->get_detailed_data());
			if (!detailed.nb_docs)
				return documents_type();
			nb_docs_for_each.push_back(detailed.nb_docs);
		}
		// pick the rarest non-filtered word: it yields the smallest
		// initial candidate set.  A filtered slot (count 0) must
		// never win, even when it is the first one.
		size_t min_rel = 0;
		size_t curr_index;
		for (curr_index = 0; curr_index < nb_docs_for_each.size();
				++curr_index) {
			size_t nb = nb_docs_for_each[curr_index];
			if (!nb)
				continue;
			if (!nb_docs_for_each[min_rel] ||
					nb < nb_docs_for_each[min_rel])
				min_rel = curr_index;
		}

		if (nb_docs_for_each[min_rel] == 0) {
			// every word of the group is filtered out.
			return documents_type();
		}
		// absolute index of the rarest word.
		size_t min_w = start + min_rel;

		documents_type docs_candidates(get_word_documents(min_w));

		// a candidate must contain every non-filtered word.
		for (w = start; w < end; ++w) {
			if (w == min_w || !nb_docs_for_each[w - start])
				continue;
			docs_candidates = SetUtility::intersection(
				docs_candidates,
				get_word_documents(w));
		}

		// posting list of the rarest word, per candidate document.
		typedef std::map<PointerTo<Document>, PointerTo<PostingData> >
							doc_postings_map;
		doc_postings_map min_doc_posts;

		DetailedDataHeader& min_detail = moreinfo_file->get(
			word_nodes[min_w]->get_detailed_data());
		DetailedDataHeader::document_table_type min_doctable(
				min_detail.getDocumentTable());
		DetailedDataHeader::posting_table_type min_postings(
				min_detail.getPostingTable());
		for (size_t ind = 0; ind < min_detail.nb_docs; ind++) {
			PointerTo<Document> doc(*min_doctable.getElement(ind));
			if (docs_candidates.find(doc) == docs_candidates.end())
				continue;
			min_doc_posts[doc] = *min_postings.getElement(ind);
		}
		// keep only documents where word w appears exactly
		// (w - min_w) positions away from the rarest word.
		for (w = start; w < end; ++w) {
			if (w == min_w || !nb_docs_for_each[w - start])
				continue;
			// may be negative when w precedes the rarest word.
			int offset = (int)w - (int)min_w;
			DetailedDataHeader& detailed = moreinfo_file->get(
				word_nodes[w]->get_detailed_data());
			DetailedDataHeader::document_table_type table =
					detailed.getDocumentTable();
			DetailedDataHeader::posting_table_type postingtable =
					detailed.getPostingTable();

			size_t index = 0;
			BOOST_FOREACH(PointerTo<Document>& doc, table) {
				if (docs_candidates.find(doc) !=
						docs_candidates.end() &&
					!has_match(min_doc_posts[doc],
						*postingtable.getElement(index),
						offset))
					docs_candidates.erase(doc);
				index++;
			}
		}

		return docs_candidates;
	}

	// Documents matching the whole query: the intersection of all the
	// group (phrase) matches when groups are present, otherwise the
	// union of the documents of every individual indexed word.
	documents_type find_all_matching_docs() {
		documents_type matching;
		bool only_matching = false;
		BOOST_FOREACH(group_type& v, current_search_group) {
			if (!only_matching) {
				matching = find_group(v.first, v.second);
				only_matching = true;
			} else {
				matching = SetUtility::intersection(matching,
					find_group(v.first, v.second));
			}
		}
		if (only_matching)
			return matching;

		size_t i;
		for (i = 0; i < current_search.size(); ++i) {
			if (!word_nodes[i] || current_search[i].lemma.empty())
				continue;
			matching = SetUtility::_union(matching,
				get_word_documents(i));
		}

		return matching;
	}
	typedef std::vector<TermVector> termvectors_type;

	// Build a TF-IDF term vector for each document of `docs` and
	// return the nb_max best ones, best first.  `docs` is cleared as
	// a side effect (saves memory).
	termvectors_type get_sorted_n_best_documents(
				documents_type& docs, size_t nb_max) {
		std::map<PointerTo<Document>, size_t> doc_to_index;

		std::vector<TermVector> /* the irony */ termvectors;
		BOOST_FOREACH(const PointerTo<Document>& doc, docs) {
			doc_to_index[doc] = termvectors.size();
			termvectors.push_back(TermVector(doc));
		}
		docs.clear(); // saves memory.

		// accumulate, for every indexed search word, its TF-IDF
		// weight into the vectors of the matching documents.
		for (size_t w = 0; w < current_search.size(); ++w) {
			if (!word_nodes[w] || current_search[w].lemma.empty())
				continue;
			PointerTo<DetailedDataHeader> detail_ptr =
					word_nodes[w]->get_detailed_data();
			DetailedDataHeader& detail = moreinfo_file->get(detail_ptr);
			DetailedDataHeader::document_table_type doctable(
				detail.getDocumentTable());
			DetailedDataHeader::tfidf_table_type tfidfs(
				detail.getTFIDFTable());

			for (size_t idoc = 0; idoc < detail.nb_docs; ++idoc) {
				PointerTo<Document> doc(*doctable.getElement(
									idoc));
				if (doc_to_index.find(doc) == doc_to_index.end())
					continue;

				termvectors[doc_to_index[doc]].add_term(w,
					tfidfs.getElement(idoc)->value);
			}
		}
		doc_to_index.clear();
		// my request document will always be (1,1,1,1,1,1,1)
		// so a dot product with a termvector is just its sum.
		// note that we don't divide by sqrt(number of words) here.
		BOOST_FOREACH(TermVector& tv, termvectors) {
			tv.normalize();
			tv.sum();
		}
		size_t nb_results = termvectors.size();
		if (nb_max < nb_results)
			nb_results = nb_max;

		// operator< is inverted, so this keeps the HIGHEST scores.
		termvectors_type bests(nb_results);
		std::partial_sort_copy(termvectors.begin(), termvectors.end(),
			bests.begin(), bests.end());
		return bests;
	}
public:

	// Not used yet -- presumably a placeholder for a richer result
	// type (file name, score, highlighted excerpt); confirm before
	// relying on it.
	class Result {
		std::string filename;
		float score;
		std::string hl;
	};
	typedef std::pair<PointerTo<Document>, float> doc_score_pair;
	typedef std::vector<doc_score_pair> doc_score_vector_type;

	// Run a search.
	//   words:          the lemmatized query words (an empty lemma
	//                   marks a filtered/stop word).
	//   groups:         [start, end) phrase ranges over `words`.
	//   nb_max_results: in:  maximum number of results wanted;
	//                   out: total number of matching documents.
	// Returns at most nb_max_results (document, score) pairs, best
	// score first; scores are normalized by sqrt(query length).
	doc_score_vector_type search(
		const vector_word_type& words, vector_group_type& groups,
			size_t& nb_max_results) {
		current_search = words;
		current_search_group = groups;
		find_the_words();

		documents_type docs(find_all_matching_docs());
		size_t really_found = docs.size();
		termvectors_type bestdocs(get_sorted_n_best_documents(docs,
				nb_max_results));

		nb_max_results = really_found;
		doc_score_vector_type ret;
		BOOST_FOREACH(TermVector& tv, bestdocs) {
			ret.push_back(std::make_pair(tv.get_document(),
						tv.get_cached_sum()/
				sqrt(current_search.size())));
		}
		bestdocs.clear();
		current_search.clear();
		current_search_group.clear();

		return ret;
	}
};
