package com.qq.BFMRSE.entity;


import java.util.Iterator;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.util.CharArraySet;
//NOTE(review): these quoted names look like column headers of the indexed
//dataset (awards data?) — presumably they leak into the text as noise tokens,
//which is why they are included in the stop-word list below. TODO confirm.
/**
 * Custom {@link Analyzer}: tokenizes with {@link StandardTokenizer},
 * lower-cases each token, removes both a domain-specific stop-word list and
 * Lucene's standard English stop words, then applies Porter stemming.
 */
public class PostStopAndLowCaseAnalyzer extends Analyzer {
	// Kept public for backward compatibility with existing callers; treat as read-only.
	public static final String[] stopWords = { "and", "of", "the", "to", "is",
			"their", "can", "all", "i", "in", "within", "us", "our", "he", "his", "she", "her", "they",
			"those", "on", "into", "Award", "File", "Expected", "Org", "Latest", "Amt", "Estimated",
			"Title", "Type", "Date", "Sponsor", "Abstract", "Investigator", "'", "." };

	// Built once instead of on every createComponents() call (Lucene invokes
	// createComponents per analyzer thread state). ignoreCase=true means the
	// mixed-case entries above still match tokens that have already been
	// lower-cased by the filter chain.
	private static final CharArraySet STOP_SET = buildStopSet();

	/** Combines the custom list above with Lucene's default English stop set. */
	private static CharArraySet buildStopSet() {
		CharArraySet set = new CharArraySet(
				stopWords.length + StandardAnalyzer.STOP_WORDS_SET.size(), true);
		for (String word : stopWords) {
			set.add(word);
		}
		// CharArraySet is a Collection, so the default set copies in directly —
		// no manual Iterator loop needed.
		set.addAll(StandardAnalyzer.STOP_WORDS_SET);
		// Wrap so the shared static set cannot be mutated by accident.
		return CharArraySet.unmodifiableSet(set);
	}

	/**
	 * Builds the token-stream chain:
	 * StandardTokenizer -> LowerCaseFilter -> StopFilter -> PorterStemFilter.
	 *
	 * @param fieldName the field being analyzed (unused; one chain serves all fields)
	 * @return the tokenizer/filter chain for this analyzer
	 */
	@Override
	protected TokenStreamComponents createComponents(String fieldName) {
		Tokenizer tokenizer = new StandardTokenizer();
		TokenStream lowered = new LowerCaseFilter(tokenizer);
		TokenStream stopped = new StopFilter(lowered, STOP_SET);
		TokenStream stemmed = new PorterStemFilter(stopped);
		return new TokenStreamComponents(tokenizer, stemmed);
	}
}
