package lbd;

import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;


public class CustomAnalyzer extends Analyzer {
	
	private File stopwords;
	private boolean stemming = true;
	public static final int DEFAULT_MAX_TOKEN_LENGTH = 255;
  
	public CustomAnalyzer(File stopwords, boolean stemming) throws IOException {
		this.stopwords = stopwords;
		this.stemming = stemming;
	}

	public TokenStream tokenStream(String fieldName, Reader reader) {
		
	    StandardTokenizer tokenStream = new StandardTokenizer(reader, false);
	    tokenStream.setMaxTokenLength(DEFAULT_MAX_TOKEN_LENGTH);
	    TokenStream result = new StandardFilter(tokenStream);
	    result = new LowerCaseFilter(result);
	    
	    try {
	    	result = new StopFilter(result, WordlistLoader.getWordSet(stopwords));
	    } catch (Exception e) { e.printStackTrace(); }
	    
	    if(stemming)
	    	result = new SnowballFilter(result, "English");
	    return result;
	    
	}

  

}
