package com.mairuide.robot.IKAnalyzer;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.StringReader;

/**
 * 中文分词接口
 *
 * @author songRT
 */
@Service
public class WordsSegmentationService {

    @Autowired
    private IKAnalyzer anal;

    /**
     * Segments the given question into words using the IK analyzer.
     * <p>
     * The result is a comma-separated list of single-quoted tokens followed by
     * the original question itself as the final quoted element, e.g.
     * {@code 'tok1','tok2','full question'}. If segmentation fails, only the
     * quoted original question is returned.
     *
     * @param question the text to segment
     * @return the quoted token list with the original question appended
     */
    public String analyzer(String question) {
        StringBuilder strBuilder = new StringBuilder();
        // try-with-resources guarantees the TokenStream is closed even on
        // failure (the original leaked it).
        try (TokenStream ts = anal.tokenStream("", new StringReader(question))) {
            CharTermAttribute term = ts.getAttribute(CharTermAttribute.class);
            // Lucene's TokenStream contract requires reset() before the first
            // incrementToken(); without it modern Lucene throws
            // IllegalStateException and no tokens are ever produced.
            ts.reset();
            // Iterate over the segmented tokens.
            while (ts.incrementToken()) {
                strBuilder.append('\'').append(term.toString()).append("',");
            }
            ts.end();
        } catch (Exception ex) {
            // Best-effort: on analyzer failure fall through and return only the
            // quoted original question.
            // TODO(review): log the exception (e.g. SLF4J) instead of
            // swallowing it silently.
        }
        strBuilder.append('\'').append(question).append('\'');
        return strBuilder.toString();
    }
}
