package westh.ilib.service.surveyReport.queryGeneration;

import java.io.File;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

/**
 * Generates a Lucene {@link Query} from a free-text input string and expands it
 * with WordNet synonyms looked up in a pre-built synonym index.
 *
 * <p>The synonym index is expected to contain documents with a "word" field
 * (the lookup key) and a "syn" field (one synonym) — presumably built with
 * Lucene's WordNet contrib tools; verify against the index-building code.
 */
public class Generator {

	// raw query input supplied by the caller
	private final String str;

	// expanded query; null until generator(...) has run successfully
	private Query query;

	// StandardAnalyzer uses StandardTokenizer & StandardFilter
	private final Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);

	// fields lists all the fields searching in. fields definition see
	// documentSearch.FileDocument
	private final String[] fields = { "title", "keyword", "abstract", "contents" };

	// occur requirement per field; must stay parallel to fields
	// (fields.length == clauses.length)
	private final BooleanClause.Occur[] clauses = { BooleanClause.Occur.SHOULD,
			BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD,
			BooleanClause.Occur.SHOULD };

	/**
	 * @param input the raw user query string, e.g. "search engine"
	 */
	public Generator(String input) {
		str = input;
	}

	private void tokenizer() {
		// StandardAnalyzer uses StandardTokenizer & StandardFilter as default,
		// so no extra tokenization work is needed here.
	}

	/**
	 * Parses {@link #str} into a multi-field query, then expands it with up to
	 * 10 WordNet synonyms per distinct query word. Synonym sub-queries are
	 * down-weighted with a 0.5 boost relative to the original terms.
	 *
	 * @param wordnetIndexPath filesystem path of the WordNet synonym index
	 * @throws IllegalArgumentException if the index directory does not exist
	 */
	private void expansion(String wordnetIndexPath) {
		File indexDir = new File(wordnetIndexPath);
		// Fail loudly instead of System.exit(1): callers (and tests) must be
		// able to recover from a bad path without the JVM being killed.
		if (!indexDir.exists()) {
			throw new IllegalArgumentException(
					"WordNet index directory not found: " + wordnetIndexPath);
		}

		IndexSearcher searcher = null;
		try {
			// read WordNetIndex into RAM for fast repeated lookups
			RAMDirectory directory = new RAMDirectory(FSDirectory
					.open(indexDir));

			// parse the query str across all searchable fields
			query = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, str,
					fields, clauses, analyzer);

			// extractTerms yields one Term per (field, word) pair, so the same
			// word appears once for each of the 4 fields. Deduplicate by text
			// so each distinct word is looked up exactly once — the old
			// size()/fields.length loop relied on HashSet iteration order and
			// could repeat some words and skip others.
			Set<Term> terms = new HashSet<Term>();
			query.extractTerms(terms);
			Set<String> words = new HashSet<String>();
			for (Term term : terms) {
				words.add(term.text());
			}

			searcher = new IndexSearcher(directory);
			ArrayList<String> syns = new ArrayList<String>();

			// for each distinct query word, fetch its top 10 synonyms
			for (String word : words) {
				TopDocs topdocs = searcher.search(new TermQuery(new Term(
						"word", word)), 10);
				ScoreDoc[] hits = topdocs.scoreDocs;
				for (int i = 0; i < hits.length; i++) {
					syns.add(searcher.doc(hits[i].doc).getField("syn")
							.stringValue());
				}
			}

			// generate the synQueries & combine them into query
			Query[] synQueries = new Query[1 + syns.size()];
			synQueries[0] = query;
			for (int i = 0; i < syns.size(); i++) {
				synQueries[i + 1] = MultiFieldQueryParser.parse(
						Version.LUCENE_CURRENT, syns.get(i), fields, clauses,
						analyzer);
				// boost of synQuery is set to 0.5,
				// while boost of input keyword is 1.0 as default
				synQueries[i + 1].setBoost(0.5f);
			}
			query = synQueries[0].combine(synQueries);
		} catch (Exception e) {
			// boundary catch: keep the parsed (unexpanded) query if synonym
			// expansion fails part-way; callers still get a usable query
			e.printStackTrace();
		} finally {
			// IndexSearcher holds the directory open — always release it
			if (searcher != null) {
				try {
					searcher.close();
				} catch (Exception ignored) {
					// nothing useful to do on close failure
				}
			}
		}
	}

	/**
	 * Builds and returns the synonym-expanded query for the input string.
	 *
	 * @param wordnetIndexPath filesystem path of the WordNet synonym index
	 * @return the expanded query, or null if parsing failed entirely
	 */
	public Query generator(String wordnetIndexPath) {
		tokenizer();
		expansion(wordnetIndexPath);
		return query;
	}

	public static void main(String[] args) {
		// directory of index of wordNet synonyms
		final String wordnetIndexPath = "E:\\zhyx\\SurveyReport\\wordnetIndex";

		Generator gen = new Generator("search engine");
		System.out.println(gen.generator(wordnetIndexPath).toString());
	}
}