package com.mijie.homi.search.util;

import java.io.Reader;
import java.io.StringReader;
import java.util.regex.Pattern;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.pattern.PatternTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

/**
 * Custom analyzer that splits text using a regular expression: supports
 * single-character splitting, splitting on a single delimiter, or splitting
 * on combinations of delimiters.
 *
 * @author 三劫散仙
 */
public class PatternAnalyzer extends Analyzer {

	// Split pattern, compiled exactly once. The original recompiled the regex on
	// every createComponents() call; Pattern is immutable and safe to cache, and
	// compiling here also fails fast on an invalid regex at construction time.
	private final Pattern pattern;

	/**
	 * Creates an analyzer that splits input on the given regular expression.
	 *
	 * @param regex delimiter regex; an empty string splits into single characters
	 * @throws java.util.regex.PatternSyntaxException if {@code regex} is invalid
	 */
	public PatternAnalyzer(String regex) {
		this.pattern = Pattern.compile(regex);
	}

	/**
	 * Builds the token stream components for a field.
	 *
	 * @param fieldName the field being analyzed (unused by this tokenizer)
	 * @param reader    the text to tokenize
	 * @return components wrapping a {@link PatternTokenizer} in split mode
	 */
	@Override
	protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
		// Group -1 puts PatternTokenizer in "split" mode: the pattern marks
		// token separators rather than the tokens themselves.
		PatternTokenizer tokenizer = new PatternTokenizer(reader, pattern, -1);
		return new TokenStreamComponents(tokenizer);
	}

	/**
	 * Demo: tokenizes {@code "我#你#他"} on '#' and prints each token.
	 */
	public static void main(String[] args) throws Exception {
		PatternAnalyzer analyzer = new PatternAnalyzer("#");
		// try-with-resources guarantees the stream is closed even if
		// incrementToken() throws (the original leaked it on that path).
		try (TokenStream ts = analyzer.tokenStream("field",
				new StringReader("我#你#他"))) {
			CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
			ts.reset();
			while (ts.incrementToken()) {
				System.out.println(term.toString());
			}
			ts.end();
		}
	}

}
