package analyzer;

import java.util.List;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;

import segmenter.Segmenter;

/**
 * Lucene {@link Analyzer} for Chinese text. The incoming character stream is
 * read fully, segmented into words by the project's {@link Segmenter}, and the
 * resulting words are re-joined with spaces so a {@link LowerCaseTokenizer}
 * can tokenize them; a {@link StopFilter} then drops the configured stop words.
 *
 * <p>Uses the deprecated pre-3.1 Lucene {@code tokenStream} API, matching the
 * Lucene version this project is built against.
 */
public class ChineseAnalyzer extends Analyzer {

	/** Stop words removed from every token stream produced by this analyzer. */
	private final Set<Object> stopWords;

	/** Default Chinese stop words (common pronouns and demonstratives). */
	public static final String[] STOP_WORDS = {
		"我们", "我", "这个", "这", "这些", "那个", "那", "那些"
	};

	/** Creates an analyzer using the built-in {@link #STOP_WORDS}. */
	public ChineseAnalyzer() {
		this(STOP_WORDS);
	}

	/**
	 * Creates an analyzer with a caller-supplied stop-word list.
	 *
	 * @param stopWords words to filter out of the token stream
	 */
	@SuppressWarnings("deprecation")
	public ChineseAnalyzer(String[] stopWords) {
		this.stopWords = StopFilter.makeStopSet(stopWords);
	}

	/**
	 * Builds the token stream: read all input, segment it, then tokenize the
	 * space-joined words and filter stop words.
	 *
	 * @param fieldName the field being analyzed (unused by this analyzer)
	 * @param reader    source of the raw text
	 * @return lower-cased, stop-word-filtered tokens
	 * @throws RuntimeException if the underlying reader fails
	 */
	@SuppressWarnings("deprecation")
	@Override
	public TokenStream tokenStream(String fieldName, Reader reader) {
		BufferedReader br = new BufferedReader(reader);
		// Keep a separator between lines so the last word of one line is not
		// fused with the first word of the next before segmentation.
		StringBuilder text = new StringBuilder();
		String line;
		try {
			while ((line = br.readLine()) != null) {
				text.append(line).append('\n');
			}
		} catch (IOException e) {
			// Fail loudly rather than silently analyzing partial input.
			throw new RuntimeException("Failed to read analyzer input", e);
		}
		List<String> words = Segmenter.getInstance().segment(text.toString());
		// Re-join segmented words with single spaces (no trailing space) so
		// the whitespace-driven LowerCaseTokenizer can split them again.
		StringBuilder joined = new StringBuilder();
		for (String word : words) {
			if (joined.length() > 0) {
				joined.append(' ');
			}
			joined.append(word);
		}
		return new StopFilter(false, new LowerCaseTokenizer(
				new StringReader(joined.toString())), stopWords);
	}

}
