package com.jieshuibao.util;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

/**
 * Utility methods for Chinese word segmentation backed by the IK analyzer
 * (smart mode). Provides plain segmentation and a variant that filters out
 * single-character tokens, merging adjacent single characters into one token.
 *
 * @author :liuxb@jieshuibao.com
 * @version ：2018年8月21日 上午10:00:56
 */
public class IKAnalyserUtil {

	// Bug fix: the original passed the string literal "IKAnalyserUtil.class"
	// to getLogger, giving the logger a bogus name; pass the class object.
	private static final Logger logger = LoggerFactory
			.getLogger(IKAnalyserUtil.class);

	/** Utility class — not meant to be instantiated. */
	private IKAnalyserUtil() {
	}

	/**
	 * Segments a sentence into words using the IK analyzer in smart mode.
	 * The original text is always kept as the first element of the result,
	 * followed by each token in segmentation order.
	 *
	 * @param text the sentence to segment
	 * @return a mutable list: the original text followed by its tokens
	 * @throws IOException if the underlying segmenter fails while reading
	 */
	public static List<String> segment(String text) throws IOException {
		List<String> list = new ArrayList<>();
		list.add(text);
		// try-with-resources releases the reader even if ik.next() throws
		try (StringReader reader = new StringReader(text)) {
			IKSegmenter ik = new IKSegmenter(reader, true);
			Lexeme lex;
			while ((lex = ik.next()) != null) {
				list.add(lex.getLexemeText());
			}
		}
		return list;
	}

	/**
	 * Segments a sentence, dropping single-character tokens; runs of adjacent
	 * single characters are concatenated and kept as one token when the
	 * concatenation is longer than one character. The original text is always
	 * kept as the first element of the result.
	 *
	 * @param text the sentence to segment
	 * @return a mutable list: the original text followed by the filtered
	 *         tokens; on segmentation failure the partially built list is
	 *         returned (best-effort contract preserved from the original)
	 */
	public static List<String> segmentRemoveSingle(String text) {
		List<String> list = new ArrayList<>();
		list.add(text);
		try (StringReader reader = new StringReader(text)) {
			IKSegmenter ik = new IKSegmenter(reader, true);
			// Buffer of adjacent single-character tokens awaiting a merge.
			StringBuilder pending = new StringBuilder();
			Lexeme lex;
			while ((lex = ik.next()) != null) {
				String token = lex.getLexemeText();
				if (token.length() <= 1) {
					pending.append(token);
				} else {
					// Flush the buffered single characters only if they form
					// a non-blank string longer than one character (same rule
					// as the original StringUtils.isNotBlank + length check,
					// re-expressed in pure stdlib to drop the missing import).
					if (!pending.toString().trim().isEmpty() && pending.length() > 1) {
						list.add(pending.toString());
					}
					pending.setLength(0);
					list.add(token);
				}
			}
			// Trailing run of single characters; the list.size() > 1 guard
			// (kept from the original) skips it when no multi-char token was
			// ever emitted.
			if (!pending.toString().trim().isEmpty() && pending.length() > 1
					&& list.size() > 1) {
				list.add(pending.toString());
			}
			// Parameterized logging instead of concatenation; also removes
			// the unresolved JsonUtil dependency.
			logger.info("分词后数据：{}", list);
		} catch (IOException e) {
			// Preserve the cause instead of printStackTrace + bare message;
			// keep the original best-effort contract of returning what was
			// collected so far rather than propagating.
			logger.error("中文分词异常", e);
		}
		return list;
	}
}
