package Searcher;

import index.MyAnalyzer;

import java.io.IOException;
import java.math.MathContext;
import java.util.ArrayList;


import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;

import Utils.VerseInfoEncoder;


/**
 * Searches a Lucene index of verses and renders each hit as an HTML snippet,
 * with the individual query words highlighted in per-word colors and the
 * chapter title turned into a link encoded by {@link VerseInfoEncoder}.
 *
 * NOTE(review): this uses the legacy Lucene 2.x API (Hits, IndexReader.open(String)).
 * Neither the IndexReader nor the IndexSearcher is ever closed here — confirm the
 * intended lifecycle with callers before adding close() calls.
 */
public class TestSearcher {

	/** Filesystem path of the Lucene index directory. */
	private final String index;
	private Searcher searcher;
	/** Encodes a hit Document into the URL used as the chapter-link target. */
	private final VerseInfoEncoder verseInfoEncoder;

	/**
	 * Highlight colors: query word i is wrapped in color i; words beyond the
	 * end of the table reuse the last color. All entries must be valid CSS
	 * hex colors (the last six were previously missing the leading '#').
	 */
	private static final String[] HIGHLIGHTS_COLOR = {
		"#FFFF33", //yellow
		"#3366FF", //blue
		"#FF0033", //red
		"#00CCFF", //light_blue
		"#FF00CC", //pink
		"#999999", //grey
		"#00ff00", //green
		"#CCFFFF", //cyan
		"#FF6633", //orange
		"#CC00CC", //violet
		"#fffafa", //snow
		"#8470ff", //light slate blue
		"#00ff7f", //spring green
		"#eedd82", //light goldenrod
		"#ff69b4", //hot pink
		"#ff6347"  //tomato
	};

	/**
	 * @param index       path of the Lucene index directory to search
	 * @param hitsPerPage unused here; the page size is passed to search()
	 *                    on every call instead (kept for interface compatibility)
	 */
	public TestSearcher(String index, int hitsPerPage) {
		this.index = index;
		verseInfoEncoder = new VerseInfoEncoder();
	}

	/*
	 * It will be better to pass an array of fields, but for now it's ok to test
	 */
	/**
	 * Runs {@code toSearch} against {@code field} and returns one page of results.
	 *
	 * @param toSearch    raw user query (words separated by spaces)
	 * @param field       field to search and to read hit contents from
	 * @param field2      field holding the chapter title used for the link text
	 * @param hitsPerPage maximum number of results per page
	 * @param offset      zero-based page number
	 * @return the page of results, or {@code null} when the index cannot be
	 *         opened, the query cannot be parsed, or there are no hits
	 */
	public SearchResults search(String toSearch, String field, String field2, int hitsPerPage, int offset) {

		String contentsField = field;
		ArrayList<ArrayList<String>> topScored = new ArrayList<ArrayList<String>>();

		// Open the index. Bail out with null ("no results") instead of
		// continuing with a null reader, which used to NPE a few lines later.
		IndexReader reader = null;
		try {
			reader = IndexReader.open(index);
		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
		if (reader == null) {
			return null;
		}
		searcher = new IndexSearcher(reader);

		// Parse the query with the same analyzer used at indexing time.
		Analyzer analyzer = new MyAnalyzer();
		QueryParser parser = new QueryParser(field, analyzer);
		Query query = null;
		try {
			query = parser.parse(toSearch);
		} catch (ParseException e) {
			e.printStackTrace();
		}
		if (query == null) {
			return null;
		}

		Hits hits = null;
		try {
			hits = searcher.search(query);
		} catch (IOException e) {
			e.printStackTrace();
		}
		if (hits == null || hits.length() == 0) {
			return null;
		}

		int matches = hits.length();
		int end = Math.min(matches, hitsPerPage);          // results on this page at most
		String[] results = new String[end];
		int docs_in_collection = reader.numDocs();
		System.out.println(docs_in_collection + " verses indexed");				//DBG

		for (int i = 0; i < end; i++) {
			// Absolute rank of this result across all pages.
			int pointer = i + offset * hitsPerPage;
			if (pointer >= matches) {
				// Last page may contain fewer than hitsPerPage hits;
				// without this guard hits.doc(pointer) would throw.
				break;
			}

			Document doc = null;
			try {
				doc = hits.doc(pointer);
				int verseID = Integer.parseInt(doc.getField("absoluteID").stringValue());
				// May be null when no term vector was stored for this doc/field;
				// getTopScored handles that case.
				TermFreqVector tv = reader.getTermFreqVector(verseID, contentsField);
				topScored.add(getTopScored(tv, docs_in_collection, matches));
			} catch (CorruptIndexException e) {
				e.printStackTrace();
			} catch (IOException e) {
				e.printStackTrace();
			}
			if (doc == null) {
				// Retrieval failed above; record a placeholder instead of NPE-ing.
				results[i] = "No path for this document<br><br>"; //write
				continue;
			}

			String currResult = "";
			String content = doc.get(contentsField);
			if (content != null) {
				// Highlight only after the null check (highlighting null used to NPE).
				content = highlightQueryWords(toSearch, content);					//highlight
				String encodedUrl = verseInfoEncoder.encode(doc);
				currResult += (pointer + 1) + ") ";
				String chapter = doc.get(field2);
				if (chapter != null) {
					currResult += "   Chapter title: " + "<a href='" + encodedUrl + "'>" + chapter + "</a> <br>";		//write
				}
				currResult += content + "<br>";					//write
			} else {
				currResult += "No path for this document<br><br>"; //write
			}
			results[i] = currResult;
		}

		SearchResults sResults = new SearchResults(matches, results, topScored);
		System.out.println("T O T A L  R E S U L T S:" + sResults.getMatches());
		return sResults;
	}

	/**
	 * Returns the terms of {@code tv} with the highest tf-idf-like score,
	 * using the simplified formula {@code freq * log(N / matches)}.
	 *
	 * @param tv                 term-frequency vector of one hit (may be null)
	 * @param docs_in_collection total number of documents in the index
	 * @param matches            number of documents matching the query
	 * @return the top-scored terms; empty when tv is null/empty or matches is 0
	 */
	private ArrayList<String> getTopScored(TermFreqVector tv, int docs_in_collection, int matches) {
		ArrayList<String> topScored = new ArrayList<String>();
		// Lucene returns null when no term vector is stored for the field.
		if (tv == null || tv.size() == 0 || matches == 0) {
			return topScored;
		}

		String[] terms = tv.getTerms();											//get the terms
		int[] freqs = tv.getTermFrequencies();									//get the frequencies

		// Cast to double: the old integer division truncated N/matches
		// (often to 0, making every score log(0) = -Infinity).
		double idf = Math.log((double) docs_in_collection / matches);			//simplified formula
		double[] scores = new double[tv.size()];
		for (int i = 0; i < tv.size(); i++) {
			scores[i] = freqs[i] * idf;
		}

		// Find the maximum score.
		double maxScore = scores[0];
		for (int i = 1; i < scores.length; i++) {
			if (scores[i] > maxScore) {
				maxScore = scores[i];
			}
		}

		// Collect every term whose score equals the maximum.
		for (int i = 0; i < scores.length; i++) {
			if (scores[i] == maxScore) {
				topScored.add(terms[i]);
			}
		}
		return topScored;
	}

	/**
	 * Wraps each whole-word occurrence of each query word in {@code resultsText}
	 * with a colored {@code <font>} tag (one color per query word).
	 *
	 * Query words are regex-quoted before matching: previously a query containing
	 * regex metacharacters such as '(' or a leading '*' threw
	 * PatternSyntaxException, and '$' or '\' in the replacement corrupted it.
	 *
	 * @param query       raw user query, words separated by spaces
	 * @param resultsText text to highlight (must not be null)
	 * @return the highlighted HTML text
	 */
	private String highlightQueryWords(String query, String resultsText) {
		String[] queryArray = query.split(" ");
		String highlightedResultsText = resultsText;
		for (int i = 0; i < queryArray.length; i++) {
			int colorID = Math.min(HIGHLIGHTS_COLOR.length - 1, i);
			// Strip wildcard characters so the displayed/matched word is literal.
			String word = queryArray[i].replaceAll("(\\*)|(\\?)", "");
			if (word.isEmpty()) {
				continue;
			}
			String highlightedWord = " <font style=background-color:'" + HIGHLIGHTS_COLOR[colorID] + ";'>" + word + "</font>";
			// Match at start of text or after a space; quote the literal word.
			String regex = "(^|\\ )" + java.util.regex.Pattern.quote(word);
			highlightedResultsText = highlightedResultsText.replaceAll(regex,
					java.util.regex.Matcher.quoteReplacement(highlightedWord));
		}
		return highlightedResultsText;
	}

}
