package com.ls.fw.data.search.impl.lucene.query.builder;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;

/**
 * Term analysis utilities.
 */
public final class Terms {

	/** Not instantiable. */
	private Terms() {
	}

	/**
	 * Tokenizes the passed string with the given analyzer and returns one
	 * {@link Term} per emitted token, each bound to {@code field}.
	 * 
	 * @param analyzer
	 *            - the analyzer used to tokenize the index
	 * @param field
	 *            - the field to match on
	 * @param query
	 *            - the un-tokenized string; may be {@code null}
	 * @return array of Terms, or {@code null} when {@code query} is
	 *         {@code null} or the analyzer produced no tokens
	 * @throws IOException
	 *             if the analyzer fails while tokenizing
	 */
	public static Term[] termsFor(final Analyzer analyzer, final String field,
			final String query) throws IOException {
		if (query == null) {
			return null;
		}
		final List<Term> terms = new ArrayList<Term>();
		final TokenStream tokens = analyzer.tokenStream(field,
				new StringReader(query));
		try {
			// The attribute instance is stable across increments; fetch it
			// once instead of looking it up on every token.
			final CharTermAttribute termAttr = tokens
					.addAttribute(CharTermAttribute.class);
			tokens.reset();
			while (tokens.incrementToken()) {
				terms.add(new Term(field, termAttr.toString()));
			}
			// TokenStream consumer contract: end() must be called after the
			// last incrementToken() and before close().
			tokens.end();
		} finally {
			tokens.close();
		}
		// NOTE: an IOException from the stream now propagates to the caller
		// (the signature already declares it) instead of being silently
		// swallowed and reported as "no tokens".
		// Preserve the legacy contract: null (not an empty array) when the
		// query yields no tokens.
		if (terms.isEmpty()) {
			return null;
		}
		return terms.toArray(new Term[terms.size()]);
	}

}
