package com.test.test;

import cn.hutool.core.text.TextSimilarity;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.corpus.io.IOUtil;
import com.hankcs.hanlp.dictionary.py.Pinyin;
import com.hankcs.hanlp.mining.word2vec.DocVectorModel;
import com.hankcs.hanlp.mining.word2vec.Word2VecTrainer;
import com.hankcs.hanlp.mining.word2vec.WordVectorModel;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.StandardTokenizer;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;

import java.io.IOException;
import java.util.*;

@SpringBootTest
class NLPTestApp {

    /** Smoke test: verifies that the Spring application context starts. */
    @Test
    void contextLoads() {
    }

    /** Pinyin demo: prints the tone-free pinyin of each character of "北京". */
    @Test
    void test1() {
        List<Pinyin> pinyins = HanLP.convertToPinyinList("北京");
        for (Pinyin pinyin : pinyins) {
            System.out.println(pinyin.getPinyinWithoutTone());
        }
        // Basic segmentation; the result is intentionally ignored here.
        Segment newSegment = HanLP.newSegment();
        newSegment.seg("北京天安门");
    }

    /** Segmentation demo: prints the tokens of a sentence and the distinct-token set. */
    @Test
    void test2() {
        Set<String> set = new HashSet<>();
        Segment newSegment = HanLP.newSegment();
        List<Term> seg = newSegment.seg("资产级联关系表：库-表；库-模式-表");
        for (Term term : seg) {
            System.out.print(term.word);
            set.add(term.word);
        }
        System.out.println(set);
    }

    /** Standard-tokenizer demo. */
    @Test
    void test3() {
        List<Term> termList = StandardTokenizer.segment("商品和服务");
        System.out.println(termList);
    }

    /** Compares Hutool's string similarity with a simple token-overlap ratio. */
    @Test
    void test4() {
        String text1 = "我爱中国";
        String text2 = "我喜欢中国";

        double similar = TextSimilarity.similar(text1, text2);
        System.out.println(similar);

        List<Term> termList1 = StandardTokenizer.segment(text1);
        List<Term> termList2 = StandardTokenizer.segment(text2);

        double similarity = calculateSimilarity(termList1, termList2);
        System.out.println(similarity);
    }

    /**
     * Token-overlap similarity: the number of words of {@code termList1} that
     * also occur in {@code termList2}, divided by the size of the longer list.
     *
     * @param termList1 tokens of the first text
     * @param termList2 tokens of the second text
     * @return a ratio in [0, 1]; 0 when either list is empty
     */
    private static double calculateSimilarity(List<Term> termList1, List<Term> termList2) {
        // Guard: the original returned NaN (0.0 / 0) when both lists were empty.
        if (termList1.isEmpty() || termList2.isEmpty()) {
            return 0.0;
        }
        int count = 0;
        for (Term term1 : termList1) {
            for (Term term2 : termList2) {
                if (term1.word.equals(term2.word)) {
                    count++;
                    break; // each word of termList1 is counted at most once
                }
            }
        }
        return (double) count / Math.max(termList1.size(), termList2.size());
    }

    // NOTE(review): this value looks like a property key, not an actual corpus
    // file path — confirm where the MSR training corpus really lives before
    // running test5.
    private static final String TRAIN_FILE_NAME = "MSR.TRAIN_PATH";
    private static final String MODEL_FILE_NAME = "data/test/word2vec.txt";

    /** word2vec demo: nearest words, word analogy, and document-vector similarity. */
    @Test
    void test5() {
        try {
            WordVectorModel wordVectorModel = trainOrLoadModel();
            printNearest("上海", wordVectorModel);
            printNearest("美丽", wordVectorModel);
            printNearest("购买", wordVectorModel);
            System.out.println(wordVectorModel.similarity("上海", "广州"));
            System.out.println(wordVectorModel.analogy("日本", "自民党", "共和党"));

            // Document vectors built on top of the word vectors.
            DocVectorModel docVectorModel = new DocVectorModel(wordVectorModel);
            String[] documents = new String[]{
                    "山东苹果丰收",
                    "农民在江苏种水稻",
                    "奥运会女排夺冠",
                    "世界锦标赛胜出",
                    "中国足球失败",
            };

            System.out.println(docVectorModel.similarity(documents[0], documents[1]));
            System.out.println(docVectorModel.similarity(documents[0], documents[4]));

            for (int i = 0; i < documents.length; i++) {
                docVectorModel.addDocument(i, documents[i]);
            }

            printNearestDocument("体育", documents, docVectorModel);
            printNearestDocument("农业", documents, docVectorModel);
            printNearestDocument("我要看比赛", documents, docVectorModel);
            printNearestDocument("要不做饭吧", documents, docVectorModel);
        } catch (Exception e) {
            // Rewrap with the cause preserved so the test fails with full context.
            throw new RuntimeException(e);
        }
    }

    /** Prints the words nearest to {@code word} together with their cosine scores. */
    static void printNearest(String word, WordVectorModel model) {
        System.out.printf("\n                                                Word     Cosine\n------------------------------------------------------------------------\n");
        for (Map.Entry<String, Float> entry : model.nearest(word)) {
            System.out.printf("%50s\t\t%f\n", entry.getKey(), entry.getValue());
        }
    }

    /** Prints the documents nearest to {@code document} together with their cosine scores. */
    static void printNearestDocument(String document, String[] documents, DocVectorModel model) {
        printHeader(document);
        for (Map.Entry<Integer, Float> entry : model.nearest(document)) {
            System.out.printf("%50s\t\t%f\n", documents[entry.getKey()], entry.getValue());
        }
    }

    /** Prints the table header for a nearest-document listing. */
    private static void printHeader(String query) {
        System.out.printf("\n%50s          Cosine\n------------------------------------------------------------------------\n", query);
    }

    /**
     * Loads the word2vec model from {@link #MODEL_FILE_NAME}, training it first
     * from {@link #TRAIN_FILE_NAME} when no cached model exists.
     *
     * @throws IOException when neither a cached model nor the training corpus exists
     */
    static WordVectorModel trainOrLoadModel() throws IOException {
        if (!IOUtil.isFileExisted(MODEL_FILE_NAME)) {
            if (!IOUtil.isFileExisted(TRAIN_FILE_NAME)) {
                // Fail this test instead of calling System.exit(1), which would
                // kill the entire test-runner JVM.
                throw new IOException("语料不存在，请阅读文档了解语料获取与格式：https://github.com/hankcs/HanLP/wiki/word2vec");
            }
            Word2VecTrainer trainerBuilder = new Word2VecTrainer();
            return trainerBuilder.train(TRAIN_FILE_NAME, MODEL_FILE_NAME);
        }

        return loadModel();
    }

    /** Loads a previously trained model from {@link #MODEL_FILE_NAME}. */
    static WordVectorModel loadModel() throws IOException {
        return new WordVectorModel(MODEL_FILE_NAME);
    }

    /** Bag-of-words demo: builds a vocabulary and a term-count vector for one sentence. */
    @Test
    void test6() {
        // Input text.
        String text = "李克强总理在北京大学发表演讲。";

        // Tokenize.
        List<Term> termList = StandardTokenizer.segment(text);

        // Build the vocabulary: word -> position in the vector.
        Map<String, Integer> vocabulary = new HashMap<>();
        int index = 0;
        for (Term term : termList) {
            if (!vocabulary.containsKey(term.word)) {
                vocabulary.put(term.word, index++);
            }
        }

        // Build the bag-of-words vector (count per vocabulary position).
        int[] bowVector = new int[vocabulary.size()];
        for (Term term : termList) {
            Integer pos = vocabulary.get(term.word);
            if (pos != null) {
                bowVector[pos]++;
            }
        }

        // Print the result.
        System.out.println("词汇表: " + vocabulary);
        System.out.println("词袋向量: " + Arrays.toString(bowVector));
    }

    /** TF-IDF demo over a tiny three-document corpus. */
    @Test
    void test7() {
        // Sample corpus.
        List<String> texts = Arrays.asList("我爱中国","中国是我家","大家都是好样的");

        // Tokenize each text and build the shared vocabulary.
        Map<String, Integer> vocabulary = new HashMap<>();
        int index = 0;
        List<List<Term>> tokenizedTexts = new ArrayList<>();
        for (String text : texts) {
            List<Term> terms = StandardTokenizer.segment(text);
            tokenizedTexts.add(terms);
            for (Term term : terms) {
                if (!vocabulary.containsKey(term.word)) {
                    vocabulary.put(term.word, index++);
                }
            }
        }
        // Compute the TF-IDF vectors.
        Map<String, Double> idf = calculateIDF(tokenizedTexts);
        Map<Integer, double[]> tfidfVectors = calculateTFIDF(tokenizedTexts, vocabulary, idf);
        // Print one vector per document.
        for (Map.Entry<Integer, double[]> entry : tfidfVectors.entrySet()) {
            System.out.println("文档 " + entry.getKey() + ": " + Arrays.toString(entry.getValue()));
        }
    }

    /**
     * Computes the inverse document frequency for every word of the corpus:
     * {@code idf(w) = ln(totalDocs / docsContaining(w))}.
     *
     * <p>The unused {@code vocabularySize} parameter of the original version
     * has been dropped; this helper's only caller is {@link #test7()}.
     *
     * @param tokenizedTexts the tokenized documents
     * @return word -> IDF value
     */
    private static Map<String, Double> calculateIDF(List<List<Term>> tokenizedTexts) {
        Map<String, Integer> docFreq = new HashMap<>();
        for (List<Term> terms : tokenizedTexts) {
            // Collect the distinct words of this document.
            Set<String> uniqueWords = new HashSet<>();
            for (Term term : terms) {
                uniqueWords.add(term.word);
            }
            // Count in how many documents each word appears (document frequency).
            for (String word : uniqueWords) {
                docFreq.put(word, docFreq.getOrDefault(word, 0) + 1);
            }
        }

        Map<String, Double> idf = new HashMap<>();
        for (Map.Entry<String, Integer> entry : docFreq.entrySet()) {
            idf.put(entry.getKey(), Math.log((double) tokenizedTexts.size() / entry.getValue()));
        }

        return idf;
    }

    /**
     * Computes one TF-IDF vector per document.
     *
     * @param tokenizedTexts the tokenized documents
     * @param vocabulary word -> position in the vector
     * @param idf word -> IDF value (must cover every word of the corpus)
     * @return document index -> TF-IDF vector
     */
    private static Map<Integer, double[]> calculateTFIDF(List<List<Term>> tokenizedTexts, Map<String, Integer> vocabulary, Map<String, Double> idf) {
        Map<Integer, double[]> tfidfVectors = new HashMap<>();

        for (int docIndex = 0; docIndex < tokenizedTexts.size(); docIndex++) {
            List<Term> terms = tokenizedTexts.get(docIndex);
            double[] tfidfVector = new double[vocabulary.size()];

            // Raw term frequency within this document.
            Map<String, Integer> termFreq = new HashMap<>();
            for (Term term : terms) {
                termFreq.put(term.word, termFreq.getOrDefault(term.word, 0) + 1);
            }

            for (Map.Entry<String, Integer> entry : termFreq.entrySet()) {
                String word = entry.getKey();
                Integer pos = vocabulary.get(word);
                double tf = (double) entry.getValue() / terms.size();
                double tfidf = tf * idf.get(word);
                tfidfVector[pos] = tfidf;
            }

            tfidfVectors.put(docIndex, tfidfVector);
        }

        return tfidfVectors;
    }

    /** Placeholder — intentionally empty. */
    @Test
    void test8() {

    }


}
