package com.appspot.bibtable.model;

import java.io.IOException;
import java.io.StringReader;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.logging.Logger;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

import com.appspot.bibtable.util.StringUtility;

/**
 * Utility class that extracts search-index tokens from reference text using
 * Lucene's {@link StandardAnalyzer} with a fixed English stop-word list.
 */
public class ReferenceEntityIndex
{
	public static final int MAXIMUM_NUMBER_OF_WORDS_TO_SEARCH = 5;
	public static final int MAXIMUM_NUMBER_OF_WORDS_TO_INDEX = 200;

	private static final Logger logger = Logger.getLogger(ReferenceEntityIndex.class.getName());

	// Immutable English stop-word set passed to the analyzer; tokens matching
	// these words are dropped during tokenization.
	private static final Set<String> stopWords;

	static
	{
		stopWords = Collections.unmodifiableSet(new HashSet<String>(Arrays.asList("a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with")));
	}

	private ReferenceEntityIndex()
	{
		// Cannot be instantiated.
	}

	/**
	 * Tokenizes {@code target} with a {@link StandardAnalyzer} (lower-casing,
	 * stop-word removal) and returns up to {@code maximumTokenCount} distinct
	 * tokens.
	 *
	 * @param target the text to tokenize; {@code null} or empty yields an
	 *               empty set
	 * @param maximumTokenCount upper bound on the number of distinct tokens
	 *                          collected
	 * @return a mutable set of at most {@code maximumTokenCount} tokens;
	 *         never {@code null}
	 */
	public static Set<String> getTokens(String target, int maximumTokenCount)
	{
		Set<String> tokens = new HashSet<String>();

		if (!StringUtility.isNullOrEmpty(target))
		{
			Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30, stopWords);
			TokenStream tokenStream = analyzer.tokenStream("title", new StringReader(target));

			try
			{
				// Fetch the attribute once; the same instance is updated in
				// place by each incrementToken() call. addAttribute() (unlike
				// getAttribute()) never throws if the attribute is absent.
				TermAttribute attribute = tokenStream.addAttribute(TermAttribute.class);

				// TokenStream contract: reset() before consuming, end() after.
				tokenStream.reset();

				// Check the size first so we stop tokenizing as soon as the
				// limit is reached (the original consumed one extra token).
				while (tokens.size() < maximumTokenCount && tokenStream.incrementToken())
				{
					tokens.add(attribute.term());
				}

				tokenStream.end();
			}
			catch (IOException ex)
			{
				logger.severe(ex.getMessage());
			}
			finally
			{
				// Release tokenizer/analyzer resources — the original leaked
				// both.
				try
				{
					tokenStream.close();
				}
				catch (IOException ignored)
				{
					// Best effort: nothing useful to do if close fails.
				}
				analyzer.close();
			}
		}

		return tokens;
	}
}
