package com.ls.fw.data.search.impl.lucene.util;

import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

import com.ls.fw.data.search.impl.lucene.mapper.TokenMapper;

public class AnalyzerUtils {

	/**
	 * Lazily-created shared analyzer. Declared {@code volatile} because the
	 * double-checked locking in {@link #getInstance()} is broken without it:
	 * a second thread could otherwise observe a non-null but partially
	 * constructed IKAnalyzer (Java Memory Model, JLS 17.4).
	 */
	private static volatile Analyzer analyzer = null;

	/** Utility class — static methods only, not meant to be instantiated. */
	private AnalyzerUtils() {
	}

	/**
	 * Returns the process-wide {@link IKAnalyzer} instance, creating it on
	 * first use. The analyzer is cached because loading the IK segmentation
	 * dictionaries is expensive and should happen only once.
	 *
	 * @return the shared analyzer, never {@code null}
	 */
	public static Analyzer getInstance(){
		if(analyzer == null){
			synchronized (AnalyzerUtils.class) {
				if(analyzer==null){
					// Initialize only once: loading the IK dictionaries takes a long time.
					analyzer = new IKAnalyzer();
				}
			}
		}
		return analyzer;
	}

	/**
	 * Tokenizes the contents of {@code analyzerReader} with the given analyzer
	 * and collects every token term into a list, in stream order.
	 *
	 * @param analyzer       the Lucene analyzer used to split the text
	 * @param analyzerReader source of the text to tokenize
	 * @return the token terms in the order produced by the analyzer
	 * @throws IOException declared for API compatibility; I/O failures during
	 *         tokenization are actually rethrown by {@link #token} as
	 *         {@link IllegalArgumentException}
	 */
	public static List<String> tokenStream(Analyzer analyzer, Reader analyzerReader) throws IOException {
		final List<String> result = new ArrayList<String>();
		token(analyzerReader, analyzer, new TokenMapper() {

			@Override
			public void mapper(String token) {
				result.add(token);
			}
		});
		return result;
	}

	/**
	 * Streams every token produced by {@code analyzer} over {@code reader}
	 * into {@code mapper}, one callback per token term.
	 *
	 * @param reader   source of the text to tokenize
	 * @param analyzer the Lucene analyzer used to split the text
	 * @param mapper   callback receiving each token term
	 * @throws IllegalArgumentException wrapping any {@link IOException}
	 *         raised while reading or tokenizing
	 */
	 public static void token(final Reader reader
	    		, final Analyzer analyzer, final TokenMapper mapper){

	    	TokenStream tokenStream  = null;
	    	try {
				tokenStream = analyzer.tokenStream("", reader);
				tokenStream.reset();
				// addAttribute registers the attribute on demand and never
				// returns null, unlike getAttribute which throws when absent.
				CharTermAttribute term = tokenStream.addAttribute(CharTermAttribute.class);
				// Emit each token term to the caller's mapper.
				while(tokenStream.incrementToken()){
					mapper.mapper(term.toString());
				}
				// Lucene TokenStream contract: end() must be called after the
				// last incrementToken() and before close().
				tokenStream.end();
			} catch (IOException e) {
				throw new IllegalArgumentException(e);
			} finally{
				if(tokenStream != null){
					try {
						tokenStream.close();
					} catch (IOException ignored) {
						// Best-effort close: all tokens were already delivered,
						// so a close failure is not worth surfacing to callers.
					}
				}
			}
	    }
}
