/*-
 * Copyright (c) 2006, 2007 FTS Team
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the FTS Team nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      Retriever.cpp
 *
 * $FTS$
 */

#include <Retriever.hpp>

// C Includes
#ifdef _DEBUG
  #include <stdio.h>
#endif

// Local Includes
#include <DefaultRanker.hpp>
#include <DocumentEntryIterator.hpp>
#include <DocumentInfoStorage.hpp>
//#include <InvIndexStorage.hpp>
#include <InvIndexBPlusStorage.hpp>
#include <IRConfig.hpp>
#include <LemmatizerInterface.hpp>
#include <OutputDataCollector.hpp>
#include <QueryParser.hpp>
#include <Ranker.hpp>
#include <SafePtr.hpp>
#include <TimeProfiler.hpp>
namespace FTSS // FullTextSearchSystem
{

//
// Constructor
//
// Binds references to all collaborating subsystems; no other work is
// performed here, the retriever merely aggregates its collaborators.
//
Retriever::Retriever(IRConfig            & oIConfig,
                     LemmatizerInterface & oILemmatizer,
                     //InvIndexStorage     * pIStorage,
                     InvIndexBPlusStorage     * pIStorage,
                     DocumentInfoStorage * pIDocumentInfoStorage,
                     DocumentStorage     * pIDocumentStorage,
                     StopWords           & oIStopWords,
                     TimeProfiler        & oITimeProfiler):
	oConfig              (oIConfig),
	oLemmatizer          (oILemmatizer),
	pStorage             (pIStorage),
	pDocumentInfoStorage (pIDocumentInfoStorage),
	pDocumentStorage     (pIDocumentStorage),
	oStopWords           (oIStopWords),
	oTimeProfiler        (oITimeProfiler)
{
	// Nothing to do: all members are initialized in the init list.
}

//
// Destructor
//
// No owned resources to release; storages and profiler are borrowed.
//
Retriever::~Retriever() throw()
{
}

//
// Merge two lists with quorums
//
// Computes the union of two doc_id-sorted posting lists.  On return
// vFirst holds the merged list and vFirstWeight the accumulated weight
// of every entry: entries coming only from vSecond contribute dWeight,
// entries present in both lists get their weights summed.
//
void Retriever::MergeQuorumLists(STLW::vector<DocumentEntry *>  & vFirst,
                                 STLW::vector<W_FLOAT>          & vFirstWeight,
                                 STLW::vector<DocumentEntry *>  & vSecond,
                                 W_FLOAT                          dWeight)
{
	using STLW::vector;

	vector<DocumentEntry *> vMerged;
	vector<W_FLOAT>         vMergedWeight;

	// Result holds at least max(|first|, |second|) entries
	UINT_32 iReserve = vFirst.size();
	if (iReserve < vSecond.size()) { iReserve = vSecond.size(); }
	vMerged.reserve(iReserve);
	vMergedWeight.reserve(iReserve);

	UINT_64 iPos1 = 0;
	UINT_64 iPos2 = 0;

	// Two-pointer merge while both lists still have entries
	while (iPos1 != vFirst.size() && iPos2 != vSecond.size())
	{
		if (vFirst[iPos1] -> doc_id < vSecond[iPos2] -> doc_id)
		{
			// Only in the first list: keep its existing weight
			vMerged.push_back(vFirst[iPos1]);
			vMergedWeight.push_back(vFirstWeight[iPos1]);
			++iPos1;
		}
		else if (vFirst[iPos1] -> doc_id > vSecond[iPos2] -> doc_id)
		{
			// Only in the second list: weight is dWeight
			vMerged.push_back(vSecond[iPos2]);
			vMergedWeight.push_back(dWeight);
			++iPos2;
		}
		else
		{
			// Present in both lists: weights add up
			vMerged.push_back(vFirst[iPos1]);
			vMergedWeight.push_back(vFirstWeight[iPos1] + dWeight);
			++iPos1;
			++iPos2;
		}
	}

	// Drain whichever list still has entries left
	while (iPos1 != vFirst.size())
	{
		vMerged.push_back(vFirst[iPos1]);
		vMergedWeight.push_back(vFirstWeight[iPos1]);
		++iPos1;
	}
	while (iPos2 != vSecond.size())
	{
		vMerged.push_back(vSecond[iPos2]);
		vMergedWeight.push_back(dWeight);
		++iPos2;
	}

	vFirst.swap(vMerged);
	vFirstWeight.swap(vMergedWeight);
}

//
// Merge two lists
//
// Union of two doc_id-sorted posting lists; a document present in both
// lists is emitted once.  The merged result replaces vFirst.
//
void Retriever::MergeLists(STLW::vector<DocumentEntry *> & vFirst,
                           STLW::vector<DocumentEntry *> & vSecond)
{
	STLW::vector<DocumentEntry *> vMerged;

	// Result holds at least max(|first|, |second|) entries
	UINT_32 iReserve = vFirst.size();
	if (iReserve < vSecond.size()) { iReserve = vSecond.size(); }
	vMerged.reserve(iReserve);

	UINT_64 iPos1 = 0;
	UINT_64 iPos2 = 0;

	// Two-pointer merge while both lists still have entries
	while (iPos1 != vFirst.size() && iPos2 != vSecond.size())
	{
		if (vFirst[iPos1] -> doc_id < vSecond[iPos2] -> doc_id)
		{
			vMerged.push_back(vFirst[iPos1]);
			++iPos1;
		}
		else if (vFirst[iPos1] -> doc_id > vSecond[iPos2] -> doc_id)
		{
			vMerged.push_back(vSecond[iPos2]);
			++iPos2;
		}
		else
		{
			// Same document in both lists: emit once
			vMerged.push_back(vFirst[iPos1]);
			++iPos1;
			++iPos2;
		}
	}

	// Append whatever is left of either list
	while (iPos1 != vFirst.size())
	{
		vMerged.push_back(vFirst[iPos1]);
		++iPos1;
	}
	while (iPos2 != vSecond.size())
	{
		vMerged.push_back(vSecond[iPos2]);
		++iPos2;
	}

	vFirst.swap(vMerged);
}

//
// Join two lists
//
// Intersection of two doc_id-sorted posting lists: only documents found
// in both lists are kept.  The result replaces vFirst.
//
void Retriever::JoinLists(STLW::vector<DocumentEntry *> & vFirst,
                          STLW::vector<DocumentEntry *> & vSecond)
{
	STLW::vector<DocumentEntry *> vJoined;

	// Intersection can not be larger than the smaller input
	UINT_32 iReserve = vFirst.size();
	if (iReserve > vSecond.size()) { iReserve = vSecond.size(); }
	vJoined.reserve(iReserve);

	UINT_64 iPos1 = 0;
	UINT_64 iPos2 = 0;
	while (iPos1 != vFirst.size() && iPos2 != vSecond.size())
	{
		if (vFirst[iPos1] -> doc_id == vSecond[iPos2] -> doc_id)
		{
			// Common document: keep it and advance both cursors
			vJoined.push_back(vFirst[iPos1]);
			++iPos1;
			++iPos2;
		}
		else if (vFirst[iPos1] -> doc_id < vSecond[iPos2] -> doc_id)
		{
			++iPos1;
		}
		else
		{
			++iPos2;
		}
	}

	vFirst.swap(vJoined);
}

//
// OR Query
//
// Disjunction: unions the posting lists of all children of the operator
// into pOperator -> doc_entries.
//
void Retriever::ORQuery(QueryOperator * pOperator)
{
#ifdef _DEBUG
	fprintf(stderr, "ORQuery with %d child(s).\n", INT_32(pOperator -> child_count));
#endif // _DEBUG

	QueryTokenList * pChild = pOperator -> children;

	// Seed the result with the first child's postings, then union in the rest
	pOperator -> doc_entries = pChild -> token -> doc_entries;
	for (pChild = pChild -> next; pChild != NULL; pChild = pChild -> next)
	{
		MergeLists(pOperator -> doc_entries, pChild -> token -> doc_entries);
	}
}

//
// NOT Query
//
// Implemented as the AND of the children with the operator's weight
// negated, so the ranker can treat the result as an exclusion.
//
void Retriever::NOTQuery(QueryOperator * pOperator)
{
#ifdef _DEBUG
	fprintf(stderr, "NOTQuery with %d child(s).\n", INT_32(pOperator -> child_count));
#endif // _DEBUG

	ANDQuery(pOperator);
	pOperator -> idf = -pOperator -> idf;
}

//
// AND Query
//
// Strict conjunction: intersects the posting lists of all children of
// the operator into pOperator -> doc_entries.
//
void Retriever::ANDQuery(QueryOperator * pOperator)
{
#ifdef _DEBUG
	fprintf(stderr, "ANDQuery with %d child(s).\n", INT_32(pOperator -> child_count));
#endif // _DEBUG

	QueryTokenList * pChild = pOperator -> children;

	// Seed the result with the first child's postings, then intersect the rest
	pOperator -> doc_entries = pChild -> token -> doc_entries;
	for (pChild = pChild -> next; pChild != NULL; pChild = pChild -> next)
	{
		JoinLists(pOperator -> doc_entries, pChild -> token -> doc_entries);
	}
}

//
// AND Query with quorum
//
// Soft conjunction: merges the children's posting lists while summing the
// IDF weight of every term a document matches, then keeps only documents
// whose accumulated weight exceeds the operator's quorum weight.
//
void Retriever::ANDQuorumQuery(QueryOperator * pOperator,
                               const W_FLOAT & dSoftness,
                               const W_FLOAT & dStopWordWeight)
{
	using STLW::vector;

#ifdef _DEBUG
	fprintf(stderr, "ANDQuorumQuery with %d child(s), softness %f, stopword weight %f.\n", INT_32(pOperator -> child_count), dSoftness, dStopWordWeight);
#endif // _DEBUG

	QueryTokenList * pList = pOperator -> children;
	pOperator -> doc_entries = pList -> token -> doc_entries;

	// Per-document accumulated weight; every entry of the first child
	// starts with that child's IDF.
	// FIX: the previous code called reserve() and then wrote through
	// operator[], past the vector's logical end (size() stayed 0) --
	// undefined behavior, and the quorum filter below saw an empty
	// weight vector.  The fill constructor sets both size and values.
	vector<W_FLOAT> vWeight(pOperator -> doc_entries.size(), pList -> token -> idf);

	pList = pList -> next;
	while (pList != NULL)
	{
		MergeQuorumLists(pOperator -> doc_entries,  vWeight, pList -> token -> doc_entries, pList -> token -> idf);
		pList = pList -> next;
	}

	// Keep only documents whose accumulated weight beats the quorum
	vector<DocumentEntry *> vTMP;
	UINT_32 iI;
	for (iI = 0; iI < vWeight.size(); ++iI)
	{
		if (vWeight[iI] > pOperator -> quorum_weight)
		{
			vTMP.push_back(pOperator -> doc_entries[iI]);
		}
	}
	pOperator -> doc_entries.swap(vTMP);
}

//
// Recursive tree descending
//
// Depth-first evaluation of the parsed query tree.  For leaf words the
// posting list is fetched from the inverted index storage; for operator
// nodes the children are evaluated first and then combined according to
// the operator type (OR / AND / AND-with-quorum / NOT).  iLevel tracks
// the current nesting depth; dSoftness and dStopWordWeight are forwarded
// to the quorum evaluation.
//
DocumentId_t Retriever::DescendTree(QueryToken * pToken, INT_32 & iLevel, const W_FLOAT & dSoftness, const W_FLOAT & dStopWordWeight)
{
	++iLevel;
	if (pToken -> token_type == Q_WORD)
	{
		// Static downcast is safe here: token_type == Q_WORD guarantees
		// the dynamic type, so dynamic_cast is not required.
		QueryWord * pTMP = (QueryWord *)pToken;
//
// Removed from QueryPreprocessor to let this operations be made
// on a different levels of search cluster.
//
		// Get begin() and end() of the posting sequence for this word
		Pair<DocumentEntryIterator, DocumentEntryIterator> oDocEntryPair = pStorage -> GetDocumentEntry(pTMP -> word_hash);
		if (oDocEntryPair.first != oDocEntryPair.second)
		{
			// Copy every posting for this word into the token's list
			DocumentEntryIterator oIt = oDocEntryPair.first;
			while(oIt != oDocEntryPair.second)
			{
				pTMP -> doc_entries.push_back(oIt());
				++oIt;
			}
		}
#ifdef _DEBUG
		fprintf(stderr, "DescendTree: '");
		fwrite(pTMP -> begin_token, pTMP -> end_token - pTMP -> begin_token, 1, stderr);
		fprintf(stderr, "' documents %d\n", INT_32(pTMP -> doc_entries.size()));
#endif // _DEBUG

	}
	else if (pToken -> token_type != Q_END)
	{
		// Static downcast is safe: any non-word, non-end token is an operator.
		QueryOperator * pTMP = (QueryOperator *)pToken;

		// Evaluate all children before combining their results
		QueryTokenList * pList = pTMP -> children;
		while (pList != NULL)
		{
			DescendTree(pList -> token, iLevel, dSoftness, dStopWordWeight);
			pList = pList -> next;
		}

		switch(pTMP -> token_type)
		{
			case Q_OR:
				{
					ORQuery(pTMP);
				}
				break;
			case Q_AND:
				{
					// A non-zero quorum weight selects the "soft" AND
					if (pTMP -> quorum_weight != 0.0) { ANDQuorumQuery(pTMP, dSoftness, dStopWordWeight); }
					else                              { ANDQuery(pTMP);                                   }
				}
				break;
			case Q_NOT:
				{
					NOTQuery(pTMP);
				}
				break;
			default:
				// Unknown operator type: report in debug builds, otherwise ignore
#ifdef _DEBUG
fprintf(stderr, "OUCH!\n");
#endif
				;;
		}
	}
	--iLevel;

	// The return value is ignored by all callers in this file;
	// -1 is a placeholder.
	return (DocumentId_t)-1;
}

//
// Process query tree
//
// Evaluates the whole query tree rooted at pToken (via DescendTree) and,
// in debug builds, dumps the tree beforehand and the matched document
// ids afterwards.
//
void Retriever::ProcessTree(QueryToken           * pToken,
                            const W_FLOAT        & dSoftness,
                            const W_FLOAT        & dStopWordWeight)
{
#ifdef _DEBUG
	PrintTreeDebug(pToken);
#endif

	// Process query
	INT_32 iLevel = 0;
	DescendTree(pToken, iLevel, dSoftness, dStopWordWeight);

#ifdef _DEBUG
	const STLW::vector<DocumentEntry *> & vFound = pToken -> doc_entries;
	fprintf(stderr, "Documents %d with rank > %f: ", INT_32(vFound.size()), pToken -> quorum_weight);
	for (UINT_32 iPos = 0; iPos < vFound.size(); ++iPos)
	{
		// Break the id listing into rows of ten
		if (iPos % 10 == 0) { fprintf(stderr, "\n"); }
		fprintf(stderr, "%8lu ", long(vFound[iPos] -> doc_id));
	}
	fprintf(stderr, "\n");
#endif // _DEBUG
}

//
// Search query
//
// Full retrieval pipeline for one query:
//   1. parse the query text,
//   2. preprocess it (quorum weights, stopword handling),
//   3. evaluate the operator tree against the inverted index,
//   4. rank the matched documents and collect snippets.
// Per-stage timings are recorded in oTimeProfiler and reported to stderr.
//
void Retriever::Search(CCHAR_P sQuery, Size_t iQueryLength, SnippetCollector & oSnippetCollector)
{
	// Clear all timings
	oTimeProfiler.Clear();

	// Execution time
	oTimeProfiler.StartPoint("Retriever::Search");
	// Parse query
	oTimeProfiler.StartPoint("Retriever::QueryParser");
	QueryParser oParser(oLemmatizer, (UCCHAR_8 *)sQuery, iQueryLength);
	oTimeProfiler.EndPoint("Retriever::QueryParser");

	// FIXME! Single refinement pass for now; see the note below.
	INT_32 iMaxIterations = 1;
	/*
	  Double refinement might be needed for weight recalculation
	  in case of large number of removed documents and/or small number of documents found.
	*/

	// Quorum tuning knobs, overridable through the configuration
	W_FLOAT dSoftness       = oConfig.GetFloat("QuorumSoftness", 0.05);
	W_FLOAT dStopWordWeight = oConfig.GetFloat("StopWordWeight", 0.00);
	QueryToken * pRootNode = NULL;
	INT_32 iI;
	for (iI = 0; iI < iMaxIterations; ++iI)
	{
		// Pre-Process query (calculate quorum weights, check stopwords, etc)
		oTimeProfiler.StartPoint("Retriever::QueryPreprocessor");
		QueryPreprocessor oPreprocessor(dStopWordWeight, dSoftness, pStorage, oStopWords);
		pRootNode = oPreprocessor.ProcessQuery(oParser.GetParsed());
		oTimeProfiler.EndPoint("Retriever::QueryPreprocessor");

		oTimeProfiler.StartPoint("Retriever::ProcessTree");
		// Process query tree
		ProcessTree(pRootNode, dSoftness, dStopWordWeight);
		oTimeProfiler.EndPoint("Retriever::ProcessTree");
	}

	// Get array of terms
	QueryWord * aAllWords   = oParser.GetAllWords();
	// Get number of terms
	INT_32      iTermsCount = oParser.GetWordsNum();
	// Get maximum word position
	INT_32      iMaxWordPos = oParser.GetMaxWordPos();

#ifdef _DEBUG
	fprintf(stderr, "Retriever::Search: Max. word position: %d\n", iMaxWordPos);
#endif // _DEBUG


#ifdef _DEBUG
	for (iI = 0; iI < iTermsCount; ++iI)
	{
		fprintf(stderr, "WORD: ");
		fwrite(aAllWords[iI].begin_token, aAllWords[iI].end_token - aAllWords[iI].begin_token, 1, stderr);
		fprintf(stderr, " ENDOFWORDS\n");
//		fprintf(stderr, " Pos: %d\n", aAllWords[iI].word_pos);
	}
#endif // _DEBUG

	// Create ranker; SafePtr releases it when this scope is left
	SafePtr<Ranker> oSafeRanker(new DefaultRanker);
	Ranker * pRanker = oSafeRanker();

	oTimeProfiler.StartPoint("Ranker::SetData");
	pRanker -> SetData(&oConfig, pStorage, pDocumentInfoStorage);
	oTimeProfiler.EndPoint("Ranker::SetData");

	// Construct per-term statistics handed to the ranker
	SafeArrayPtr<TermStatistic> aSafeTerms(new TermStatistic[iTermsCount]);
	TermStatistic * aTerms = aSafeTerms();

	for (iI = 0; iI < iTermsCount; ++iI)
	{
		aTerms[iI].term            = (CCHAR_8 *)aAllWords[iI].begin_token;
		aTerms[iI].term_length     = aAllWords[iI].end_token - aAllWords[iI].begin_token;
		aTerms[iI].doc_count       = 0;
		aTerms[iI].doc_group_count = 0 ;
	}

	// Rank documents (skipped when nothing matched the query)
	oTimeProfiler.StartPoint("Ranker::Rank");
	if (pRootNode -> doc_entries.size() != 0)
	{
		pRanker -> Rank(&pRootNode -> doc_entries[0],
		                 pRootNode -> doc_entries.size(),
		                 pRootNode -> quorum_weight,
		                 aAllWords,
		                 iTermsCount,
		                 iMaxWordPos,
		                 oSnippetCollector,
		                 oTimeProfiler);
	}
#ifdef _DEBUG
	else { fprintf(stderr, "*** Empty result vector ***\n"); }
#endif
	oTimeProfiler.EndPoint("Ranker::Rank");

	// All Done
	oTimeProfiler.EndPoint("Retriever::Search");
	// NOTE(review): the _DEBUG guard below is commented out, so this timing
	// report is printed to stderr in ALL builds -- confirm this is intended
	// before shipping.
//#ifdef _DEBUG
	fprintf(stderr, "Retriever::Search                                |\n"
	                "  Retriever::QueryParser:                %f|\n"
	                "  Retriever::QueryPreprocessor:          %f|\n"
	                "  Retriever::ProcessTree:                %f|\n"


	                "  Ranker::SetData                        %f|\n"
	                "  Ranker::Rank                                            |\n"
	                "    Ranker::Rank::CreateBasePoints:               %f|\n"
	                "    Ranker::Rank::ConstructPassages                                |\n"
	                "      Ranker::Rank::RankByPassageWeight:                   %f|\n"
	                "      Ranker::Rank::ClearPassage:                          %f|\n"
	                "    Ranker::Rank::ConstructPassages               %f|\n"
	                "    Ranker::Rank::RankByTFIDF:                    %f|\n"
	                "  Ranker::Rank                           %f|\n"
	                "-------------------------------------------------|\n"
	                "Retriever::Search                        %f|\n",

	                oTimeProfiler.CheckPoint("Retriever::QueryParser")            / 1000,
	                oTimeProfiler.CheckPoint("Retriever::QueryPreprocessor")      / 1000,
	                oTimeProfiler.CheckPoint("Retriever::ProcessTree")            / 1000,

	                oTimeProfiler.CheckPoint("Ranker::SetData")                   / 1000,

	                oTimeProfiler.CheckPoint("Ranker::Rank::CreateBasePoints")    / 1000,

	                oTimeProfiler.CheckPoint("Ranker::Rank::RankByPassageWeight") / 1000,
	                oTimeProfiler.CheckPoint("Ranker::Rank::ClearPassage")        / 1000,
	                oTimeProfiler.CheckPoint("Ranker::Rank::ConstructPassages")   / 1000,

	                oTimeProfiler.CheckPoint("Ranker::Rank::RankByTFIDF")         / 1000,
	                oTimeProfiler.CheckPoint("Ranker::Rank")                      / 1000,

	                oTimeProfiler.CheckPoint("Retriever::Search")                 / 1000);
//#endif
}

#ifdef _DEBUG
//
// Print contents of tree if debugging mode is enabled
//
void Retriever::PrintTreeDebug(QueryToken * pToken)
{
	// Start the recursive dump at nesting depth zero
	INT_32 iDepth = 0;
	DescendTreeDebug(pToken, iDepth);
}

//
// Recursive tree descending
//
// Debug-only pretty-printer: dumps the query tree to stderr, one token
// per line, indented six spaces per nesting level.  Word leaves show the
// word-form id, classification flags, query position, weight and the
// adjusted form; operator nodes print their kind and recurse into their
// children.
//
void Retriever::DescendTreeDebug(QueryToken * pToken, INT_32 & iLevel)
{
	if (pToken == NULL) { fprintf(stderr, "pToken == NULL !!!\n"); return ; }

	++iLevel;
	if (pToken -> token_type == Q_WORD)
	{
		// Indent according to the current nesting depth
		for (INT_32 iI = 0; iI < iLevel; iI++) { fprintf(stderr, "      "); }

		// Static downcast is safe: token_type == Q_WORD guarantees the type
		QueryWord * pTMP = (QueryWord *)(pToken);

		fwrite(pTMP -> begin_token, pTMP -> end_token - pTMP -> begin_token, 1, stderr);
		// NOTE(review): %llu is paired with a signed (long long) cast --
		// harmless for non-negative word forms, but the cast should
		// arguably be (unsigned long long).
		fprintf(stderr, "(%llu ", (long long)pTMP -> word_form);
		// Classification flags; the MIX/CYR/LAT chain is order-dependent:
		// MIXED_LANGUAGE suppresses the individual CYR/LAT tags
		if       (pTMP -> word_type & C_FIRST_CAPITAL)                       { fprintf(stderr, "Fc  "); }
		if       (pTMP -> word_type & C_ALL_CAPITAL)                         { fprintf(stderr, "AC  "); }
		if      ((pTMP -> word_type & C_MIXED_LANGUAGE) == C_MIXED_LANGUAGE) { fprintf(stderr, "MIX "); }
		else if  (pTMP -> word_type & C_CYRILLIC)                            { fprintf(stderr, "CYR "); }
		else if  (pTMP -> word_type & C_LATIN)                               { fprintf(stderr, "LAT "); }
		if       (pTMP -> word_type & C_NUMBER)                              { fprintf(stderr, "NUM "); }
		if       (pTMP -> word_type & C_EXACT_MATCH)                         { fprintf(stderr, "!   "); }
		fprintf(stderr, ") pos: %d, weight %f, form %d\n", pTMP -> query_pos, pTMP -> idf, WordEntry::AdjustForm(pTMP -> word_form));

	}
	else if (pToken -> token_type != Q_END)
	{
		// Static downcast is safe: any non-word, non-end token is an operator
		QueryOperator * pTMP = (QueryOperator *)(pToken);

		for (INT_32 iI = 0; iI < iLevel; iI++) { fprintf(stderr, "      "); }
		switch(pTMP -> token_type)
		{
			case Q_OR:
				fprintf(stderr, "OR(%d)\n", 0/* pTMP -> child_count */);
				break;
			case Q_AND:
				fprintf(stderr, "AND(%d)\n", 0/* pTMP -> child_count */);
				break;
			case Q_NOT:
				fprintf(stderr, "NOT(%d)\n", 0/* pTMP -> child_count */);
				break;
			default:
				fprintf(stderr, "ERROR!");
		}
		// Recurse into all children at the increased depth
		QueryTokenList * pList = pTMP -> children;
		while (pList != NULL)
		{
			DescendTreeDebug(pList -> token, iLevel);
			pList = pList -> next;
		}
	}
	--iLevel;
}
#endif // _DEBUG

} // namespace FTSS
// End.
