package com.atguigu.gmall.realtime.util;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.IOException;
import java.io.StringReader;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.HashSet;
import java.util.Set;

/**
 * Miscellaneous helpers shared by the real-time jobs: epoch-millisecond
 * timestamp / date-string conversions and Chinese keyword segmentation
 * (IK analyzer).
 *
 * <p>All date methods use the JVM's default time zone, matching the
 * behavior of the original {@code SimpleDateFormat}-based implementation.
 *
 * @Author lzc
 * @Date 2022/12/5 10:10
 */
public class AtguiguUtil {

    // Cached formatters: DateTimeFormatter is immutable and thread-safe,
    // unlike SimpleDateFormat, so one shared instance per pattern suffices.
    private static final DateTimeFormatter DATE_FORMATTER =
            DateTimeFormatter.ofPattern("yyyy-MM-dd");
    private static final DateTimeFormatter DATE_TIME_FORMATTER =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    /**
     * Formats an epoch-millisecond timestamp as a {@code yyyy-MM-dd} date
     * string in the default time zone.
     *
     * @param ts epoch milliseconds (boxed; throws NPE if {@code null}, as before)
     * @return the calendar date, e.g. {@code 2022-12-05}
     */
    public static String tsToDate(Long ts) {
        return Instant.ofEpochMilli(ts)
                .atZone(ZoneId.systemDefault())
                .format(DATE_FORMATTER);
    }

    /**
     * Splits a phrase into its distinct tokens using the IK Chinese
     * analyzer in smart (coarse-grained) mode.
     *
     * <p>NOTE: the second {@code IKSegmenter} argument {@code true} selects
     * <em>smart</em> segmentation — the original comment claiming
     * {@code max_word} was incorrect (that would be {@code false}).
     *
     * @param keyword the text to segment
     * @return the set of distinct tokens produced by the analyzer
     */
    public static Set<String> ikAnalyze(String keyword) {
        Set<String> keywords = new HashSet<>();
        // Wrap the in-memory string as a character stream for the segmenter.
        IKSegmenter segmenter = new IKSegmenter(new StringReader(keyword), true);
        try {
            // Pull tokens until the segmenter is exhausted (returns null).
            for (Lexeme lexeme = segmenter.next(); lexeme != null; lexeme = segmenter.next()) {
                keywords.add(lexeme.getLexemeText());
            }
        } catch (IOException e) {
            // A StringReader should never throw, but if it does, surface
            // the failing input instead of a bare wrapped exception.
            throw new RuntimeException("IK segmentation failed for keyword: " + keyword, e);
        }
        return keywords;
    }

    /** Ad-hoc smoke test for {@link #ikAnalyze}. */
    public static void main(String[] args) {
        System.out.println(ikAnalyze("我是中国人"));
    }

    /**
     * Formats an epoch-millisecond timestamp as {@code yyyy-MM-dd HH:mm:ss}
     * in the default time zone.
     *
     * <p>The name is a typo for {@code toDateTime} but is kept unchanged
     * for compatibility with existing callers.
     *
     * @param ts epoch milliseconds
     * @return the formatted date-time, e.g. {@code 2022-12-05 10:10:00}
     */
    public static String toDatTime(long ts) {
        return Instant.ofEpochMilli(ts)
                .atZone(ZoneId.systemDefault())
                .format(DATE_TIME_FORMATTER);
    }

    /**
     * Parses a {@code yyyy-MM-dd} date string to epoch milliseconds at
     * midnight in the default time zone.
     *
     * <p>Parsing is strict (java.time), whereas the old SimpleDateFormat
     * was lenient — malformed dates now fail instead of rolling over.
     *
     * @param date a date string such as {@code 2022-12-05}
     * @return epoch milliseconds of that date's start of day
     * @throws RuntimeException if the string cannot be parsed (cause preserved)
     */
    public static Long dateToTs(String date) {
        try {
            return LocalDate.parse(date, DATE_FORMATTER)
                    .atStartOfDay(ZoneId.systemDefault())
                    .toInstant()
                    .toEpochMilli();
        } catch (DateTimeParseException e) {
            throw new RuntimeException("Cannot parse date: " + date, e);
        }
    }
}
