package org.tabularium.text.se;

import java.io.IOException;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.tabularium.net.downloader.DefaultResourceFactory;
import org.tabularium.net.downloader.Resource;

public class FieldLuceneSearchEngine extends LuceneSearchEngine {

	/** Per-field analyzers layered on top of the default analyzer via the inherited PerFieldAnalyzerWrapper. */
	Map<String, Analyzer> fieldAnalyzers;

	/**
	 * Builds an engine whose {@code fieldAnalyzer} wrapper dispatches to a
	 * dedicated analyzer per field name.
	 * The key field's analyzer has been set by LuceneSearchEngine constructor.
	 *
	 * @param dir             index directory path
	 * @param defaultAnalyzer analyzer used for fields with no dedicated entry
	 * @param fieldAnalyzers  map of field name to the analyzer for that field
	 * @throws IOException if the superclass fails to open the index
	 */
	public FieldLuceneSearchEngine(String dir, Analyzer defaultAnalyzer,
			Map<String, Analyzer> fieldAnalyzers) throws IOException {
		super(dir, defaultAnalyzer);

		this.fieldAnalyzers = fieldAnalyzers;

		// Register every per-field analyzer on the inherited wrapper.
		for (Map.Entry<String, Analyzer> entry : fieldAnalyzers.entrySet()) {
			fieldAnalyzer.addAnalyzer(entry.getKey(), entry.getValue());
		}
	}

	/**
	 * Index the text of a document and associated fields.
	 * 
	 * A set of new org.apache.lucene.document.Field objects represents
	 * additional fields.
	 * <p>
	 * e.g. Field(name, value, stored, indexed, tokenized)
	 * <p>
	 * Fields used for storing information but never specifically queried are
	 * usually only stored (not-indexed, not-tokenized). Fields that are
	 * subjected of search are indexed. Note that fields which are not stored
	 * are not available in documents (Document objects) retrieved from the
	 * index. 
	 * <p>
	 * If the field contains Lucene special characters (@see <a href="http://lucene.apache.org/core/2_9_4/queryparsersyntax.html">Query syntax</a>), call {@link #LuceneSearchEngine.escape(String)}.
	 * For querying the index, the same field must be double-escaped by calling {@link #doubleEscape(String)}.
	 *
	 * @param id     document key stored in the "idse" field; skipped when null
	 * @param text   body text, stored, indexed, tokenized, with term vectors
	 * @param fields extra Lucene fields added verbatim to the document
	 * @throws IOException on index write failure
	 */
	public void index(String id, String text,
			Set<org.apache.lucene.document.Field> fields) throws IOException {
		Document lucenedoc = new Document();
		// key field: stored and indexed, but not tokenized
		if (id != null) {
			lucenedoc.add(new org.apache.lucene.document.Field("idse",
					id /*escapeQuotes(id)*/, true, true, false));
		}
		/*
		 * Constructs a String-valued Field that is: stored, indexed, tokenized
		 * and termvector will be stored as well.
		 */
		lucenedoc.add(new org.apache.lucene.document.Field("text", text, true,
				true, true, true));

		for (Field f : fields) {
			lucenedoc.add(f);
		}

		// no one must delete while new docs are inserted
		synchronized (this) {
			writer = new IndexWriter(dir, fieldAnalyzer, false);
			try {
				writer.addDocument(lucenedoc);
			} finally {
				// Always close so the index write lock is released even if
				// addDocument throws; otherwise every later index() call fails.
				writer.close();
			}
		}
	}

	/** 
	 * Runs a single-field query parsed with the per-field analyzer wrapper.
	 *
	 * Remember to call {@link #LuceneSearchEngine.doubleEscape(String)} if the query contains special characters, e.g, URLs.
	 * 
	 * @param query the query string (Lucene query syntax)
	 * @param field the default field the query terms are matched against
	 * @return an iterator over the matching documents
	 * @throws Exception if parsing or searching fails
	 */
	public HitsIterator query(String query, String field) throws Exception {
		Searcher searcher = new IndexSearcher(dir);

		Query q1 = QueryParser.parse(query, field, fieldAnalyzer);
		Hits hits = searcher.search(q1);
		// The iterator takes ownership of the searcher together with the hits.
		return new LuceneHitsIterator(hits, searcher);
	}


	/** 
	 * The query terms will be in AND: each (query, field) pair becomes a
	 * required (+) phrase clause.
	 * 
	 * Remember to call LuceneSearchEngine.escape() if the Resource's url contains special characters.
	 *
	 * @param queries one query phrase per entry, parallel to {@code fields}
	 * @param fields  the field each corresponding phrase must match
	 * @return an iterator over the matching documents
	 * @throws IllegalArgumentException if the two lists differ in size
	 * @throws Exception if parsing or searching fails
	 */
	public HitsIterator query(List<String> queries, List<String> fields) throws Exception {
		if (queries.size() != fields.size()) {
			// A mismatch would otherwise surface as a NoSuchElementException
			// from fieldIter.next() with no hint about the real problem.
			throw new IllegalArgumentException("queries (" + queries.size()
					+ ") and fields (" + fields.size() + ") must have the same size");
		}
		Searcher searcher = new IndexSearcher(dir);

		StringBuilder sb = new StringBuilder();
		Iterator<String> fieldIter = fields.iterator();
		for (String queryTerm : queries) {
			sb.append('+').append(fieldIter.next()).append(":\"")
					.append(queryTerm).append("\" ");
		}

		Query q = null;
		try {
			q = QueryParser.parse(sb.toString(), "idse", fieldAnalyzer);
		} catch (Exception ex) {
			IOException ioe = new IOException("Query exception: " + ex.toString());
			// Preserve the original stack trace for callers/logs.
			ioe.initCause(ex);
			throw ioe;
		}

		Hits hits = searcher.search(q);
		return new LuceneHitsIterator(hits, searcher);
	}

	/** Smoke-test entry point: indexes a few documents and queries them back. */
	public static void main(String[] args) {
		FieldLuceneSearchEngine se;
		try {
			// Typed map instead of a raw HashMap (avoids unchecked warning).
			se = new FieldLuceneSearchEngine("./index/", new SimpleAnalyzer(),
					new HashMap<String, Analyzer>());
			DefaultResourceFactory drf = new DefaultResourceFactory();
			Resource res = drf.emptyResource();
			res.setObject(new byte[] {1, 5, 6});
			res.setValue("key1", "value1");
			res.setValue(Resource.URL, "http://resource.it");
			se.index("1", new String("ciao"));
			se.index(LuceneSearchEngine.escape("http://www.google.it"), new String("ciao3"));
			se.index("http://www.google.it/pluto", new String(
					"ciao1"));
			se.index("http://www.google.it/pluto\"1", new String(
					"ciao2"));
			HitsIterator iter = se.query(LuceneSearchEngine.doubleEscape("http://www.google.it"), "idse");
			if (iter.hasNext()){
				Document doc = (Document)iter.next();
				System.out.println(doc.getField("text").toString());
			}
			iter = se.query("1", "idse");
			if (iter.hasNext()){
				Document doc = (Document)iter.next();
				System.out.println(doc.getField("text").toString());
			}
		} catch (Exception ex) {
			System.err.println(ex.toString());
			return;
		}
	}


}
