
package com.ossean.match.lucene;

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.HashSet;
import java.util.Set;


//import org.apache.lucene.analysis.Analyzer;
//import org.apache.lucene.analysis.TokenStream;
//import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;
//import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 * Small demonstration of the IK Analyzer smart segmenter.
 *
 * <p>Segments the same sample text twice: the first pass uses the stock
 * dictionary, then {@code addWords} injects "google" and "baidu" so the
 * second pass shows how the extended dictionary changes tokenization.
 */
public class IKTest {

	/**
	 * Runs the two-pass segmentation demo, printing tokens separated by "|".
	 *
	 * @param args unused command-line arguments
	 * @throws IOException if the segmenter fails while reading the text
	 */
	public static void main(String[] args) throws IOException {
		for (int i = 0; i < 2; i++) {
			// try-with-resources ensures the reader is closed each iteration
			try (Reader text = new StringReader("google123 and baidu###")) {
				// true = "smart" (coarse-grained) segmentation mode
				IKSegmenter iks = new IKSegmenter(text, true);

				Lexeme term;
				while ((term = iks.next()) != null) {
					// getLexemeText() already returns a String; no toString() needed
					System.out.print(term.getLexemeText() + "|");
				}
			}
			System.out.println();

			// Extend the dictionary AFTER the first pass so the second pass
			// demonstrates the effect of the newly added words.
			Set<String> words = new HashSet<String>();
			words.add("google");
			words.add("baidu");
			org.wltea.analyzer.dic.Dictionary.getSingleton().addWords(words);
		}
	}

}
