package com.simhash;

import java.io.*;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class FenCi {
	/**
	 * Tokenizes the input string with the IK analyzer (smart mode, per
	 * {@code new IKAnalyzer(true)}) and joins the tokens with '/' separators.
	 *
	 * @param str_ys the raw input string to segment
	 * @return the segmented string with tokens separated by '/', trimmed;
	 *         empty string when no tokens were produced or tokenization failed
	 */
	public static String Set(String str_ys) {
		// StringBuilder avoids O(n^2) string concatenation in the loop.
		StringBuilder str_fch = new StringBuilder();
		// try-with-resources guarantees analyzer, reader and token stream are
		// closed even on exception (the original leaked them on the error
		// path, and never closed the TokenStream at all).
		try (Analyzer anal = new IKAnalyzer(true);
		     StringReader reader = new StringReader(str_ys);
		     TokenStream ts = anal.tokenStream("", reader)) {
			CharTermAttribute term = ts.getAttribute(CharTermAttribute.class);
			// reset() is required before incrementToken() on Lucene 4+;
			// it is a harmless no-op on older 3.x streams.
			ts.reset();
			// Walk the token stream and append each term.
			while (ts.incrementToken()) {
				str_fch.append(term.toString()).append('/');
			}
			ts.end();
		} catch (IOException e) {
			e.printStackTrace();
			System.out.println("分词出现错误！！");
		}
		System.out.println("字符串处理完毕，已生成该字符串的分词后字符串！");
		// Compute the trimmed result once (original called trim() twice).
		String result = str_fch.toString().trim();
		System.out.println(result);
		return result;
	}
}