package nlp;

import cn.hutool.core.util.NumberUtil;
import cn.hutool.extra.tokenizer.Result;
import cn.hutool.extra.tokenizer.TokenizerEngine;
import cn.hutool.extra.tokenizer.TokenizerUtil;
import cn.hutool.extra.tokenizer.engine.word.WordEngine;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary;
import com.hankcs.hanlp.mining.word2vec.DocVectorModel;
import com.hankcs.hanlp.mining.word2vec.Word2VecTrainer;
import com.hankcs.hanlp.mining.word2vec.WordVectorModel;
import com.hankcs.hanlp.suggest.Suggester;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.apdplat.word.WordSegmenter;
import org.apdplat.word.recognition.StopWord;
import org.apdplat.word.segmentation.Word;
import org.junit.Test;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * Exploratory tests for several Chinese NLP toolkits (apdplat word, ansj, HanLP):
 * word segmentation, and word2vec-based document similarity.
 *
 * @author zhanglu
 * @since 2020-12-04
 */
public class Attempt {

    /**
     * Absolute path to the pretrained HanLP wiki word2vec model.
     * Machine-specific; adjust before running {@link #test03()}.
     */
    private static final String WORD2VEC_MODEL_PATH =
            "/Users/zhanglu/Desktop/hanlp-wiki-vec-zh/hanlp-wiki-vec-zh.txt";

    /**
     * Segments a short symptom sentence with the apdplat word segmenter,
     * keeping stop words, and prints the token list.
     */
    @Test
    public void test01() {
        List<Word> words = WordSegmenter.segWithStopWords("我有点咳嗽、发烧");
        System.out.println(words);
    }

    /**
     * Segments a mixed Chinese/ASCII sentence with the ansj {@code ToAnalysis}
     * segmenter and prints the result.
     */
    @Test
    public void test02() {
        // Input string is the data under test — kept exactly as authored.
        String str = "欢迎使用ansj_seg,(ansj中文分词)在这里如果你遇到什么问题都可以联系我.我一定尽我所能.帮助大家.ansj_seg更快,更准,更自由!";
        System.out.println(ToAnalysis.parse(str));
    }

    /**
     * Loads a pretrained word2vec model, indexes a handful of short documents,
     * then prints the documents nearest to the query "体育" (sports), one per
     * line as "index text score".
     *
     * @throws IOException if the model file cannot be read
     */
    @Test
    public void test03() throws IOException {
        // NOTE(review): this method previously carried large commented-out
        // experiments (HanLP.extractKeyword / extractSummary, word-level
        // similarity() and nearest() probes); removed as dead code.
        WordVectorModel wordVectorModel = new WordVectorModel(WORD2VEC_MODEL_PATH);

        DocVectorModel docVectorModel = new DocVectorModel(wordVectorModel);
        String[] documents = {
                "山东苹果丰收",
                "农民在江苏种水稻",
                "奥运会女排夺冠",
                "世界锦标赛胜出",
                "中国足球失败",
        };

        // Index each document under its array position so results map back to text.
        for (int i = 0; i < documents.length; i++) {
            docVectorModel.addDocument(i, documents[i]);
        }

        List<Map.Entry<Integer, Float>> entryList = docVectorModel.nearest("体育");
        for (Map.Entry<Integer, Float> entry : entryList) {
            // %n: platform-independent line separator (was a literal "\n").
            System.out.printf("%d %s %.2f%n", entry.getKey(), documents[entry.getKey()], entry.getValue());
        }
    }

}
