package com.udf.flink.udf;
/*
 * @author jianglai
 * @version v1.0
 * @description Word-segmentation UDF for Flink SQL (package com.udf.flink.udf)
 * @date 2021/1/27
 */

import org.apache.flink.table.functions.ScalarFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.StringReader;

//import org.wltea.analyzer.core.IKSegmenter;
//import org.wltea.analyzer.core.Lexeme;

/**
 * Word-segmentation (tokenization) UDF.
 *
 * @author jianglai
 * @date 2021/1/27
 */
public class SplitWord extends ScalarFunction {

    private static final Logger LOG = LoggerFactory.getLogger(SplitWord.class);

    /**
     * UDF entry point invoked by Flink: segments the given text into words.
     *
     * @param word  the text to segment; may be {@code null}
     * @param smart whether to use the tokenizer's smart-segmentation mode
     * @return the comma-separated segments, or {@code null} when {@code word} is {@code null}
     */
    public static String eval(String word, boolean smart) {
        return getKeywords(word, smart);
    }

    /**
     * Segments the given text (e.g. an address) into comma-separated tokens.
     *
     * <p>NOTE(review): the IK Analyzer based implementation has been disabled
     * (its dependency is commented out at the top of this file), so for any
     * non-null input this currently returns an empty string. Restore the
     * implementation sketched below once the dependency is available.
     *
     * @param keyword the text to segment; may be {@code null}
     * @param smart   whether to use IK Analyzer's smart-segmentation mode
     * @return the comma-separated segments; {@code null} for {@code null} input,
     *         currently {@code ""} for any non-null input
     */
    public static String getKeywords(String keyword, boolean smart) {
        // Null in, null out — keeps NULL semantics intact for SQL callers.
        if (keyword == null) {
            return null;
        }
        // TODO: re-enable the original IK Analyzer implementation, roughly:
        //   IKSegmenter iks = new IKSegmenter(new StringReader(keyword), smart);
        //   StringBuilder buffer = new StringBuilder();
        //   for (Lexeme lexeme; (lexeme = iks.next()) != null; ) {
        //       buffer.append(lexeme.getLexemeText()).append(',');
        //   }
        //   // trim trailing comma; log IOException via LOG.error(e.getMessage(), e)
        //   if (buffer.length() > 0) buffer.setLength(buffer.length() - 1);
        //   return buffer.toString();
        return "";
    }
}
