package com.webmining.sherlock.search;

import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.ru.RussianAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.StringTokenizer;

public class Search {

    /**
     * The Apache Lucene index directory shared by all {@code Search} instances.
     * Must be assigned via {@link #setIndex(Directory)} before the first search.
     */
    public static Directory index;

    private Logger log;

    /** Hits of the executed query, ordered by descending score. */
    protected ScoreDoc[] hits = null;

    /** Total number of documents in the index at the time the search ran. */
    protected int totalDocumentsInIndex = 0;

    /** The raw query string supplied by the caller. */
    protected String queryString;

    // The number of characters to collect for
    // dynamic summaries when a query keyword
    // is detected in the document content.
    protected int summaryKeywordRatio = 50;

    /** Upper bound, in characters, for a generated summary. */
    protected int maxSummarySize = 250;

    private IndexSearcher indexSearcher;

    // Reader backing indexSearcher; retained so close() can release it
    // (previously it was opened and leaked).
    private IndexReader indexReader;

    /**
     * Creates a search and immediately executes it against the shared index.
     *
     * @param queryString a query in Lucene query syntax
     * @throws SearchException if the index cannot be accessed or the query
     *                         cannot be parsed
     */
    public Search(String queryString) throws SearchException {
        log = Logger.getLogger(Search.class);
        // register the queryString
        this.queryString = queryString;
        // perform search based on the query string
        search();
    }

    /** Registers the index directory used by subsequently created searches. */
    public static void setIndex(Directory index) {
        Search.index = index;
    }

    /**
     * Opens the shared index, parses {@link #queryString} against the
     * {@code contents} field and collects the top 100 scoring documents
     * into {@link #hits}.
     *
     * @throws SearchException if the index is unreachable, the query has a
     *                         syntax error, or the search itself fails
     */
    protected void search() throws SearchException {
        // Open access to the Apache Lucene index. The broad catch is
        // deliberate: it also wraps an NPE raised when setIndex() was
        // never called (index == null).
        try {
            indexReader = IndexReader.open(index);
            indexSearcher = new IndexSearcher(indexReader);
            // register the total number of documents in the index
            totalDocumentsInIndex = indexReader.numDocs();
        } catch (Exception ioe) {
            log.error("Unable to access the index", ioe);
            throw new SearchException("Unable to access the index", ioe);
        }
        // TODO: research about analyzers — RussianAnalyzer is hard-coded here.
        Analyzer analyzer = new RussianAnalyzer(Version.LUCENE_45);
        // Create and assemble an Apache Lucene query.
        Query query;
        try {
            query = new QueryParser(Version.LUCENE_45, "contents", analyzer)
                    .parse(queryString);
        } catch (ParseException pe) {
            log.error("Query syntax error", pe);
            throw new SearchException("Query syntax error", pe);
        }
        TopScoreDocCollector collector = TopScoreDocCollector.create(100, true);
        try {
            indexSearcher.search(query, collector);
            // register the hits found
            hits = collector.topDocs().scoreDocs;
        } catch (IOException ioe) {
            log.error("Error searching the index", ioe);
            throw new SearchException("Error searching the index", ioe);
        }
    }

    /**
     * Releases the index reader opened by {@link #search()}.
     * Safe to call more than once; errors are logged, not thrown.
     */
    public void close() {
        if (indexReader != null) {
            try {
                indexReader.close();
            } catch (IOException ioe) {
                log.error("Error closing the index reader", ioe);
            } finally {
                indexReader = null;
            }
        }
    }

    /** @return the raw query string this search was built from */
    public String getQueryString() {
        return queryString;
    }

    /** @return the total number of documents in the index at search time */
    public int getTotalDocumentsInIndex() throws SearchException {
        return totalDocumentsInIndex;
    }

    /** @return the number of hits collected (at most 100) */
    public int getTotalResults() throws SearchException {
        // Guard against a search that never completed successfully.
        return hits == null ? 0 : hits.length;
    }

    /**
     * Returns the HTML-escaped title of the result at the given position,
     * or {@code "No title"} when the document stores none.
     *
     * @param index zero-based position within the hit list
     */
    public String getTitle(int index) throws SearchException {
        String title = get(index, "title");
        if (title == null) {
            title = "No title";
        }
        return escape(title);
    }

    /**
     * Splits the query string on whitespace into its distinct keywords,
     * preserving first-seen order.
     *
     * @param inLowerCase whether to lowercase each keyword first
     * @return the distinct query keywords
     */
    public ArrayList<String> getQueryKeywords(boolean inLowerCase) {
        ArrayList<String> keywords = new ArrayList<String>();
        StringTokenizer queryTokens = new StringTokenizer(getQueryString());
        while (queryTokens.hasMoreTokens()) {
            String token = queryTokens.nextToken();
            if (inLowerCase) {
                token = token.toLowerCase();
            }
            if (!keywords.contains(token)) {
                keywords.add(token);
            }
        }
        return keywords;
    }

    /**
     * Looks for a word in a list of query keywords. NOTE: this method is
     * case sensitive.
     *
     * @param queryKeywords the keywords used in the query, normally lowercase
     * @param word the word to test, usually lowercase
     * @return whether {@code word} is one of the query keywords
     */
    public boolean isQueryKeyword(ArrayList<String> queryKeywords, String word) {
        // contains() performs the same equals() comparison as the previous
        // manual loop, but stops at the first match.
        return queryKeywords.contains(word);
    }

    /**
     * Builds a dynamic, HTML-formatted summary of the result at the given
     * position: chunks of up to {@link #summaryKeywordRatio} characters around
     * each query keyword are concatenated (separated by "... ") up to
     * {@link #maxSummarySize} characters, and keywords are wrapped in
     * {@code <b>} tags. All tokens are HTML-escaped.
     *
     * @param index zero-based position within the hit list
     * @return the formatted summary, or "" when the document has no content
     */
    public String getSummary(int index) throws SearchException {
        String content = get(index, "contents");
        if (content == null || content.isEmpty()) {
            return "";
        }
        boolean firstChunk = true;
        boolean recentTokensChunked = false;
        StringBuilder summary = new StringBuilder(maxSummarySize);
        ArrayList<String> queryKeywords = getQueryKeywords(true);
        StringTokenizer contentTokens = new StringTokenizer(content);
        // Sliding window of the most recent non-keyword text, used as the
        // leading context when the next keyword is found.
        StringBuilder recentTokens = new StringBuilder();
        while (contentTokens.hasMoreTokens() && summary.length() < maxSummarySize) {
            String contentToken = contentTokens.nextToken();
            if (isQueryKeyword(queryKeywords, contentToken.toLowerCase())) {
                if (firstChunk && recentTokensChunked) {
                    summary.append("... ");
                }
                summary.append(recentTokens);
                summary.append(contentToken).append(' ');
                // Collect up to summaryKeywordRatio trailing characters of
                // context after the keyword.
                int added = 0, initialLength = summary.length();
                while (contentTokens.hasMoreTokens()
                        && summary.length() < maxSummarySize
                        && added < summaryKeywordRatio) {
                    contentToken = contentTokens.nextToken();
                    summary.append(contentToken).append(' ');
                    added = summary.length() - initialLength;
                }
                if (contentTokens.hasMoreTokens()) {
                    summary.append("... ");
                }
                firstChunk = false;
                // reset the recent-token window
                recentTokensChunked = false;
                recentTokens.delete(0, recentTokens.length());
            } else {
                recentTokens.append(contentToken).append(' ');
                if (recentTokens.length() > summaryKeywordRatio) {
                    // Keep only the trailing summaryKeywordRatio characters.
                    int newStart = recentTokens.length() - summaryKeywordRatio;
                    recentTokens.delete(0, newStart);
                    recentTokensChunked = true;
                }
            }
        }
        // Second pass: escape every token and bold the query keywords.
        StringBuilder formattedSummary = new StringBuilder();
        StringTokenizer summaryTokens = new StringTokenizer(summary.toString());
        while (summaryTokens.hasMoreTokens()) {
            String token = summaryTokens.nextToken();
            if (isQueryKeyword(queryKeywords, token.toLowerCase())) {
                formattedSummary.append("<b>").append(escape(token)).append("</b> ");
            } else {
                formattedSummary.append(escape(token)).append(' ');
            }
        }
        return formattedSummary.toString();
    }

    /** @return the HTML-escaped {@code url} field of the result at {@code index} */
    public String getUrl(int index) throws SearchException {
        return escape(get(index, "url"));
    }

    /** @return the HTML-escaped {@code contents} field of the result at {@code index} */
    public String getContent(int index) throws SearchException {
        return escape(get(index, "contents"));
    }

    /**
     * Escapes HTML metacharacters so the value can be embedded in markup.
     * Ampersands are escaped first (previously they were not escaped at all,
     * yielding invalid HTML for content containing {@code &}).
     *
     * @param value the text to escape; may be {@code null}
     * @return the escaped text, or {@code null} when {@code value} is null
     */
    public String escape(String value) {
        if (value != null) {
            value = value.replace("&", "&amp;")
                    .replace("<", "&lt;")
                    .replace(">", "&gt;");
        }
        return value;
    }

    /**
     * Reads a stored field of the result at the given position.
     *
     * @param index zero-based position within the hit list
     * @param field the stored field name, e.g. {@code "title"}
     * @return the field value, or {@code null} when the document lacks it
     * @throws SearchException if the document cannot be loaded
     */
    public String get(int index, String field) throws SearchException {
        try {
            int docId = hits[index].doc;
            Document d = indexSearcher.doc(docId);
            return d.get(field);
        } catch (IOException ioe) {
            log.error("Unable to get " + field + " for document #" + index, ioe);
            throw new SearchException("Unable to get " + field + " for document #"
                    + index, ioe);
        }
    }

}
