package org.shj.demo;

import com.alibaba.fastjson.JSON;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.corpus.document.sentence.Sentence;
import com.hankcs.hanlp.model.perceptron.PerceptronLexicalAnalyzer;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;
import com.hankcs.hanlp.tokenizer.lexical.AbstractLexicalAnalyzer;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.List;
import java.util.regex.Pattern;

/**
 * Demo entry point for HanLP word segmentation: normalizes whitespace in
 * mixed Chinese/Latin text and feeds the result to the NLP tokenizer.
 *
 * @author Shen Huang Jian
 * @date 2020-08-31 16:05
 */
public class SegmentDemo {

    /** Matches exactly one character in the CJK Unified Ideographs range. */
    private static final Pattern CHINESE_PATTERN = Pattern.compile("[\u4e00-\u9fa5]");

    /**
     * Returns {@code true} if {@code c} is a Chinese (CJK Unified Ideographs)
     * character.
     *
     * @param c character to test
     */
    public static boolean isChinese(char c) {
        return isChinese(String.valueOf(c));
    }

    /**
     * Returns {@code true} only when {@code s} is a single Chinese character;
     * any longer string (even all-Chinese) yields {@code false} because the
     * pattern matches exactly one character.
     *
     * @param s string to test
     */
    public static boolean isChinese(String s) {
        return CHINESE_PATTERN.matcher(s).matches();
    }

    public static void main(String[] args) {
        String normalized = normalizeSpaces("今天 天word气真好。".trim());
        System.out.println(normalized);
        nlpTokenizer(normalized);
    }

    /**
     * Collapses whitespace in mixed Chinese/Latin text: a space is kept only
     * when it directly follows an ASCII letter, and a kept space is dropped
     * again when the next character is Chinese.
     *
     * @param text raw input; {@code null} or empty yields "" (the previous
     *             inline version indexed {@code charAt(0)} unconditionally and
     *             threw on empty input)
     * @return text with spaces between Chinese characters removed
     */
    private static String normalizeSpaces(String text) {
        if (text == null || text.isEmpty()) {
            return "";
        }
        StringBuilder sb = new StringBuilder(text.length());
        sb.append(text.charAt(0));
        for (int i = 1; i < text.length(); i++) {
            char c = text.charAt(i);
            char last = sb.charAt(sb.length() - 1);
            if (Character.isSpaceChar(c)) {
                // Keep the space only after an ASCII letter, e.g. between English words.
                if ((last >= 'a' && last <= 'z') || (last >= 'A' && last <= 'Z')) {
                    sb.append(c);
                }
            } else {
                // A Chinese character swallows any space that was kept before it.
                if (isChinese(c) && Character.isSpaceChar(last)) {
                    sb.deleteCharAt(sb.length() - 1);
                }
                sb.append(c);
            }
        }
        return sb.toString();
    }

    /**
     * Segments {@code sourceFile} line by line with the HanLP NLP tokenizer and
     * appends the analyzed lines to {@code tokenizedFile}.
     *
     * <p>Both files are read/written as UTF-8. The original implementation used
     * {@link RandomAccessFile#writeChars(String)}, which emits two UTF-16BE
     * bytes per char and produced an output file unreadable by ordinary text
     * tools; it also read the source with the platform default charset.
     *
     * @param sourceFile    path of the UTF-8 text to segment, one sentence per line
     * @param tokenizedFile path the segmented output is appended to (created if absent)
     */
    private static void preTokenFile(String sourceFile, String tokenizedFile) {
        try (BufferedReader reader = Files.newBufferedReader(Paths.get(sourceFile), StandardCharsets.UTF_8);
             BufferedWriter writer = Files.newBufferedWriter(Paths.get(tokenizedFile), StandardCharsets.UTF_8,
                     StandardOpenOption.CREATE, StandardOpenOption.APPEND)) {

            // Segmentation does not require the custom dictionary; disable it.
            NLPTokenizer.ANALYZER.enableCustomDictionary(false);

            String line;
            while ((line = reader.readLine()) != null) {
                writer.write(NLPTokenizer.analyze(line).toString());
                writer.newLine();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Runs the HanLP NLP tokenizer on {@code text} and prints the analyzed
     * sentence to stdout.
     *
     * @param text text to segment
     */
    private static void nlpTokenizer(String text) {
        Sentence sentence = NLPTokenizer.analyze(text);
        System.out.println(sentence);
    }
}
