import java.io.IOException;
import java.io.Reader;

import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.miscellaneous.LengthFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
import org.apache.lucene.util.Version;


/**
 * Analyzer that tokenizes text with a {@link StandardTokenizer} and normalizes
 * tokens through the following pipeline, in order:
 * standard filtering, English possessive stripping ({@code 's}), lower-casing,
 * stop-word removal, Porter stemming, and a final length filter that keeps only
 * terms between {@link #MIN_TERM_LENGTH} and {@link #MAX_TERM_LENGTH} characters.
 */
class ImprovedAnalyzer extends StopwordAnalyzerBase {

	// We consider a raw term of up to 50 characters significant enough;
	// the tokenizer discards anything longer.
	private static final int MAX_TOKEN_LENGTH = 50;

	// Bounds applied AFTER stemming: terms shorter than 3 or longer than
	// 14 characters are dropped from the index.
	private static final int MIN_TERM_LENGTH = 3;
	private static final int MAX_TERM_LENGTH = 14;

	/**
	 * @param version   Lucene compatibility version used by every pipeline component
	 * @param stopwords the set of stop words to remove (applied before stemming)
	 */
	protected ImprovedAnalyzer(Version version, CharArraySet stopwords) {
		super(version, stopwords);
	}

	/**
	 * Builds the tokenizer/filter chain described in the class Javadoc.
	 *
	 * @param fieldName the field being analyzed (unused; the same chain is built for every field)
	 * @param reader    the source of the text to analyze
	 * @return the tokenizer plus the fully wrapped token stream
	 */
	@Override
	protected TokenStreamComponents createComponents(String fieldName,
			Reader reader) {

		final StandardTokenizer source = new StandardTokenizer(matchVersion, reader);
		source.setMaxTokenLength(MAX_TOKEN_LENGTH);

		TokenStream pipeline = source;
		pipeline = new StandardFilter(matchVersion, pipeline);
		// Remove trailing possessives, e.g. "John's" -> "John".
		pipeline = new EnglishPossessiveFilter(matchVersion, pipeline);
		pipeline = new LowerCaseFilter(matchVersion, pipeline);
		pipeline = new StopFilter(matchVersion, pipeline, stopwords);
		pipeline = new PorterStemFilter(pipeline);
		pipeline = new LengthFilter(matchVersion, pipeline, MIN_TERM_LENGTH, MAX_TERM_LENGTH);
		return new TokenStreamComponents(source, pipeline);
	}
}