/*
 * @(#)LuceneSearchEngine.java	0.1 12/28/04
 *
 * Copyright 2004 Fabio Gasparetti. All rights reserved.
 */

package org.tabularium.text.se;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;
import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;
import org.apache.lucene.document.*;
import org.apache.lucene.analysis.*;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.queryParser.*;
import org.tabularium.text.TermWeightVector;
import org.tabularium.text.TermFreq;

/**
 * Note: if you use the same index for indexing/searching and raw storing pay
 * attention to concurrent access issues.
 */
/**
 * Lucene-backed implementation of {@link SearchEngine}.
 *
 * <p>Documents are stored with two fields: {@code "idse"} (the document id,
 * e.g. a URL; stored and indexed, not tokenized by the main analyzer) and
 * {@code "text"} (stored, indexed, tokenized, with term vectors).
 *
 * <p>Note: if you use the same index for indexing/searching and raw storing pay
 * attention to concurrent access issues.
 */
public class LuceneSearchEngine implements SearchEngine {

	/** Filesystem path of the Lucene index directory. */
	protected String dir;

	/** Analyzer applied to the "text" field. */
	protected org.apache.lucene.analysis.Analyzer analyzer;

	/** Wraps {@link #analyzer}; routes the "idse" field to a URL analyzer. */
	protected PerFieldAnalyzerWrapper fieldAnalyzer;

	protected IndexWriter writer = null;

	protected Logger logger = Logger
			.getLogger("org.tabularium.text.se.LuceneSearchEngine");

	/**
	 * Creates a search engine over the index stored in {@code dir}. If no
	 * index exists there yet, an empty one is created.
	 *
	 * @param dir      index directory path
	 * @param analyzer analyzer for the "text" field
	 * @throws IOException if the index cannot be created/opened
	 */
	public LuceneSearchEngine(String dir, Analyzer analyzer) throws IOException {
		this.dir = dir;
		// creates analyzer instances
		this.analyzer = analyzer;
		fieldAnalyzer = new PerFieldAnalyzerWrapper(analyzer);
		fieldAnalyzer.addAnalyzer("idse", new UrlFieldAnalyzer());

		// bootstrap an empty index on first use
		if (!IndexReader.indexExists(dir)) {
			clear();
		}
	}

	/**
	 * Index the text of a document. The key field idse must be escaped by
	 * calling {@link #escape(String)}.
	 *
	 * @param id   document id (may be null; then only the text is indexed)
	 * @param text document body, tokenized by the configured analyzer
	 * @throws IOException on index write failure
	 */
	public void index(String id, String text) throws IOException {
		Document lucenedoc = new Document();
		// id field: stored and indexed, but not tokenized
		if (id != null) {
			lucenedoc.add(new org.apache.lucene.document.Field("idse", id,
					true, true, false));
		}
		/*
		 * Constructs a String-valued Field that is: stored, indexed, tokenized
		 * and termvector will be stored as well.
		 */
		lucenedoc.add(new org.apache.lucene.document.Field("text", text, true,
				true, true, true));

		// no one must delete while new docs are inserted
		synchronized (this) {
			writer = new IndexWriter(dir, fieldAnalyzer, false);
			try {
				writer.addDocument(lucenedoc);
			} finally {
				// FIX: close the writer even if addDocument fails,
				// otherwise the index write lock is leaked
				writer.close();
			}
		}
	}

	/**
	 * Remember to call {@link LuceneHitsIterator#interrupt()} at the end of the
	 * iteration over the results. By default, the document's fields "idse" and
	 * "text" contain the id (i.e., url) and the text used for matching,
	 * respectively.
	 *
	 * @param q query string, parsed against the "text" field
	 * @return iterator over the matching documents
	 * @throws Exception on parse or search failure
	 */
	public HitsIterator query(String q) throws Exception {
		Searcher searcher = new IndexSearcher(dir);
		try {
			Query query = QueryParser.parse(q, "text", fieldAnalyzer);
			Hits hits = searcher.search(query);
			// ownership of the searcher passes to the iterator
			return new LuceneHitsIterator(hits, searcher);
		} catch (Exception ex) {
			// FIX: don't leak the searcher when parsing/searching fails
			searcher.close();
			throw ex;
		}
	}

	/**
	 * Deletes the whole index and recreates an empty one.
	 *
	 * @throws IOException on filesystem or index failure
	 */
	public void clear() throws IOException {
		FileUtils.cleanDirectory(new File(dir));
		// third argument 'true' forces creation of a fresh index
		writer = new IndexWriter(dir, fieldAnalyzer, true);
		writer.close();
	}

	/**
	 * @return the number of (non-deleted) documents in the index
	 * @throws IOException if the index cannot be read
	 */
	public int numDocs() throws IOException {
		int n = 0;
		IndexReader reader = IndexReader.open(dir);
		try {
			n = reader.numDocs();
		} finally {
			reader.close();
		}
		return n;
	}

	/**
	 * Checks whether a document with the given id is present in the index.
	 *
	 * @param id document id (un-escaped; it is escaped internally)
	 * @return true if at least one document matches the id
	 * @throws IOException on query or index failure
	 */
	public boolean isIndexed(String id) throws IOException {
		Searcher searcher = new IndexSearcher(dir);
		try {
			Query q;
			try {
				q = QueryParser.parse("+idse:\"" + escape(id) + "\"", "idse",
						fieldAnalyzer);
			} catch (Exception ex) {
				// FIX: keep the original exception as the cause
				throw new IOException("Query exception: " + ex.toString(), ex);
			}
			Hits hits = searcher.search(q);
			return hits.length() > 0;
		} finally {
			searcher.close();
		}
	}

	/**
	 * Collects the document frequency of every term of the "text" field.
	 *
	 * @return terms with their document frequencies, sorted
	 * @throws IOException if the index cannot be read
	 */
	public TermFreq[] docFreqs() throws IOException {
		ArrayList termFreqs = new ArrayList();
		IndexReader reader = IndexReader.open(dir);
		try {
			TermEnum terms = reader.terms();
			while (terms.next()) {
				// ignoring IDs of documents
				if ("text".equals(terms.term().field()))
					termFreqs.add(new TermFreq(terms.term().text(), terms
							.docFreq()));
			}
		} finally {
			reader.close();
		}
		Collections.sort(termFreqs);
		return (TermFreq[]) termFreqs.toArray(new TermFreq[] {});
	}

	/**
	 * @return the sorted list of distinct terms of the "text" field
	 * @throws Exception if the index cannot be read
	 */
	@Override
	public String[] lexicon() throws Exception {
		ArrayList termTexts = new ArrayList();
		IndexReader reader = IndexReader.open(dir);
		try {
			TermEnum terms = reader.terms();
			while (terms.next()) {
				// ignoring IDs of documents
				if ("text".equals(terms.term().field()))
					termTexts.add(terms.term().text());
			}
		} finally {
			reader.close();
		}
		Collections.sort(termTexts);
		return (String[]) termTexts.toArray(new String[] {});
	}

	/**
	 * Builds the term-frequency vector of the document with the given id.
	 *
	 * @param id document id (un-escaped; it is escaped internally)
	 * @return the term/weight vector, or null if the document is not indexed
	 *         or has no stored term vector
	 * @throws Exception on query or index failure
	 */
	public TermWeightVector termFreqs(String id) throws Exception {
		int docid = -1;
		TermFreqVector[] fv = null;
		TermWeightVector wv = null;

		// first locate the internal Lucene doc id via the "idse" field
		Searcher searcher = new IndexSearcher(dir);
		try {
			Query q;
			try {
				q = QueryParser.parse("+idse:\"" + escape(id) + "\"", "idse",
						fieldAnalyzer);
			} catch (Exception ex) {
				// FIX: keep the original exception as the cause
				throw new IOException("Query exception: " + ex.toString(), ex);
			}
			Hits hits = searcher.search(q);
			if (hits.length() > 0) {
				docid = hits.id(0);
			}
		} finally {
			searcher.close();
		}
		if (docid < 0)
			return null;

		IndexReader reader = IndexReader.open(dir);
		try {
			/** @todo new lucene versions have enhanced getTermFreqVectors */
			fv = reader.getTermFreqVectors(docid);
		} finally {
			reader.close();
		}
		// convert TermFreqVector to TermWeightVector
		if ((fv != null) && (fv[0] != null)) {
			String[] terms = fv[0].getTerms();
			int[] freqs = fv[0].getTermFrequencies();
			double[] dfreqs = new double[freqs.length];
			for (int i = 0; i < freqs.length; i++)
				dfreqs[i] = freqs[i];
			wv = TermWeightVector.build(terms, dfreqs);
		}
		return wv;
	}

	/** @return the maximum number of clauses a BooleanQuery may have */
	@Override
	public int getMaxClauseCount() {
		return BooleanQuery.getMaxClauseCount();
	}

	/**
	 * Basically the QueryParseUtil#escape() in later Lucene library versions.
	 * Must be used to escape special characters before querying or indexing
	 * fields that must be queried.
	 *
	 * @param s string to escape (query-syntax characters get a '\' prefix)
	 * @return the escaped string
	 */
	public static String escape(String s) {
		StringBuilder sb = new StringBuilder();
		for (int i = 0; i < s.length(); i++) {
			char c = s.charAt(i);
			// These characters are part of the query syntax and must be escaped
			if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '('
					|| c == ')' || c == ':' || c == '^' || c == '[' || c == ']'
					|| c == '\"' || c == '{' || c == '}' || c == '~'
					|| c == '*' || c == '?' || c == '|' || c == '&' || c == '/') {
				sb.append('\\');
			}
			sb.append(c);
		}
		return sb.toString();
	}

	/**
	 * Like {@link #escape(String)} but prefixes each special character with a
	 * double backslash (useful when the string will be unescaped twice).
	 *
	 * @param s string to escape
	 * @return the double-escaped string
	 */
	public static String doubleEscape(String s) {
		StringBuilder sb = new StringBuilder();
		for (int i = 0; i < s.length(); i++) {
			char c = s.charAt(i);
			// These characters are part of the query syntax and must be escaped
			if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '('
					|| c == ')' || c == ':' || c == '^' || c == '[' || c == ']'
					|| c == '\"' || c == '{' || c == '}' || c == '~'
					|| c == '*' || c == '?' || c == '|' || c == '&' || c == '/') {
				sb.append("\\\\");
			}
			sb.append(c);
		}
		return sb.toString();
	}

	/**
	 * Call it if you want to restore to the original version a field that has
	 * been previously escaped before the indexing. Inverse of
	 * {@link #escape(String)}.
	 *
	 * @param s escaped string
	 * @return the un-escaped string
	 */
	public static String unescape(String s) {
		// backslash pairs first, then each escaped syntax character
		s = s.replace("\\\\", "\\");
		s = s.replace("\\+", "+");
		s = s.replace("\\-", "-");
		s = s.replace("\\!", "!");
		s = s.replace("\\(", "(");
		s = s.replace("\\)", ")");
		s = s.replace("\\:", ":");
		s = s.replace("\\^", "^");
		s = s.replace("\\[", "[");
		s = s.replace("\\]", "]");
		s = s.replace("\\\"", "\"");
		s = s.replace("\\{", "{");
		s = s.replace("\\}", "}");
		s = s.replace("\\~", "~");
		s = s.replace("\\*", "*");
		// FIX: '?' and '|' are escaped by escape() but were never unescaped
		// (and '~' was handled twice)
		s = s.replace("\\?", "?");
		s = s.replace("\\|", "|");
		s = s.replace("\\&", "&");
		s = s.replace("\\/", "/");
		return s;
	}

	/**
	 * Renders a term/weight vector as a Lucene query string of boosted phrase
	 * terms, e.g. {@code "a"^1.0 AND "b"^2.0 }.
	 *
	 * @param twv the term/weight vector
	 * @param and if true, terms are joined with the AND operator
	 * @return the query string
	 */
	public static String termWeightVectorToQuery(TermWeightVector twv,
			boolean and) {
		StringBuffer q = new StringBuffer();
		String[] tt = twv.terms();
		double[] ww = twv.weights();
		for (int i = 0; i < tt.length; i++) {
			// FIX: emit AND only between terms; a trailing "AND " made the
			// generated query syntactically invalid
			if (and && i > 0) {
				q.append("AND ");
			}
			q.append("\"" + tt[i] + "\"^" + ww[i] + " ");
		}
		return q.toString();
	}

	/** Small smoke test: indexes two documents and dumps their vectors. */
	public static void main(String[] args) {
		LuceneSearchEngine searchEngine = null;
		try {
			org.tabularium.text.se.StandardAnalyzer a = new org.tabularium.text.se.StandardAnalyzer();
			a.enableLowerCase(true);
			a.enableNumberFilter(true);
			a.enablePorterStemmer(true);
			a.enableStopList(true);
			searchEngine = new LuceneSearchEngine("./lucene-se", a);
			searchEngine
					.index("1",
							"This is the text of the first documents and the first is this one");
			searchEngine
					.index("2",
							"This is the text of the second documents and the first is this second.");
			System.out.println("search engine # docs: "
					+ searchEngine.numDocs());
			System.out.println("search engine # terms: "
					+ searchEngine.docFreqs().length);
			TermWeightVector v = searchEngine.termFreqs("0");

			System.out.println("v0: " + (v != null ? v.toString() : ""));
			v = searchEngine.termFreqs("1");
			System.out.println("v1: " + (v != null ? v.toString() : ""));
			v = searchEngine.termFreqs("2");
			// FIX: missing parentheses made this parse as
			// ("v2: " + v) != null ? v.toString() : null, which always
			// called v.toString() and threw an NPE for an unknown id
			System.out.println("v2: " + (v != null ? v.toString() : null));
		} catch (Exception ex) {
			System.err.println(ex.toString());
			ex.printStackTrace();
			return;
		}
	}
}
