package org.example.util;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import org.springframework.stereotype.Component;

import java.util.*;

/**
 * @date 2023/11/22
 */
@Component
public class TFIDFTextComparator {

    /**
     * Extracts words from the given text using HanLP segmentation.
     *
     * <p>All segmented terms are kept. If part-of-speech filtering is desired
     * (e.g. keep only nouns/n, verbs/v, adjectives/a, gerunds/vn), filter on
     * {@code term.nature} inside the loop.
     *
     * @param text the text to segment; may be {@code null} or empty
     * @return the list of segmented words, empty when {@code text} is null/empty
     */
    public List<String> extractWordFromText(String text) {
        List<String> resultList = new ArrayList<>();

        // HanLP.segment fails on empty input, so return early.
        // Also guard against null to avoid a NullPointerException.
        if (text == null || text.isEmpty()) {
            return resultList;
        }

        List<Term> termList = HanLP.segment(text);
        for (Term term : termList) {
            resultList.add(term.word);
        }
        return resultList;
    }

    /**
     * Computes the term frequency (TF) of each word: count(word) / totalWords.
     *
     * @param words the tokenized document; may be empty
     * @return a map from word to its relative frequency; empty for an empty input
     */
    public Map<String, Double> calculateTF(List<String> words) {
        Map<String, Double> tf = new HashMap<>();
        int wordCount = words.size();
        if (wordCount == 0) {
            return tf;
        }

        // Count occurrences first, then divide once per word — this avoids the
        // floating-point error that accumulates when repeatedly adding
        // 1.0 / wordCount for each occurrence.
        Map<String, Integer> counts = new HashMap<>();
        for (String word : words) {
            counts.merge(word, 1, Integer::sum);
        }
        for (Map.Entry<String, Integer> entry : counts.entrySet()) {
            tf.put(entry.getKey(), entry.getValue() / (double) wordCount);
        }
        return tf;
    }

    /**
     * Computes the inverse document frequency (IDF) of every word that appears
     * in the corpus, as {@code log(N / (1 + df))} where N is the number of
     * documents and df is the number of documents containing the word.
     *
     * <p>NOTE(review): with this formula a word present in every document gets a
     * negative IDF (log(N / (N + 1)) &lt; 0), which can produce negative TF-IDF
     * weights and negative cosine similarities. A smoothed variant such as
     * {@code log((1 + N) / (1 + df)) + 1} avoids this — confirm before changing,
     * as it alters the scores.
     *
     * @param documents the corpus, one tokenized word list per document
     * @return a map from word to its IDF value
     */
    public Map<String, Double> calculateIDF(List<List<String>> documents) {
        Map<String, Double> idf = new HashMap<>();
        int totalDocuments = documents.size();

        // Document frequency: in how many documents does each word occur?
        // A set per document ensures each word counts at most once per document.
        Map<String, Integer> wordDocumentFrequency = new HashMap<>();
        for (List<String> words : documents) {
            for (String word : new HashSet<>(words)) {
                wordDocumentFrequency.merge(word, 1, Integer::sum);
            }
        }

        for (Map.Entry<String, Integer> entry : wordDocumentFrequency.entrySet()) {
            double frequency = entry.getValue();
            idf.put(entry.getKey(), Math.log(totalDocuments / (1 + frequency)));
        }
        return idf;
    }

    /**
     * Combines TF and IDF into a TF-IDF vector: tfidf(w) = tf(w) * idf(w).
     * Words missing from {@code idf} are weighted 0.
     *
     * @param tf  term frequencies of one document
     * @param idf inverse document frequencies of the corpus
     * @return a map from word to its TF-IDF weight
     */
    public Map<String, Double> calculateTFIDF(Map<String, Double> tf, Map<String, Double> idf) {
        Map<String, Double> tfidf = new HashMap<>();
        for (Map.Entry<String, Double> entry : tf.entrySet()) {
            String word = entry.getKey();
            tfidf.put(word, entry.getValue() * idf.getOrDefault(word, 0.0));
        }
        return tfidf;
    }

    /**
     * Computes the cosine similarity between two sparse word-weight vectors:
     * dot(v1, v2) / (|v1| * |v2|).
     *
     * @param vector1 first TF-IDF vector
     * @param vector2 second TF-IDF vector
     * @return the cosine similarity; 0.0 when either vector has zero magnitude
     */
    public double cosineSimilarity(Map<String, Double> vector1, Map<String, Double> vector2) {
        double dotProduct = 0.0;
        double magnitude1 = 0.0;
        double magnitude2 = 0.0;

        // Only words present in vector1 can contribute to the dot product;
        // absent words in vector2 contribute 0.
        for (Map.Entry<String, Double> entry : vector1.entrySet()) {
            double v1 = entry.getValue();
            dotProduct += v1 * vector2.getOrDefault(entry.getKey(), 0.0);
            magnitude1 += v1 * v1;
        }
        for (double v2 : vector2.values()) {
            magnitude2 += v2 * v2;
        }

        // Guard against division by zero for empty/all-zero vectors.
        if (magnitude1 == 0 || magnitude2 == 0) {
            return 0.0;
        }
        return dotProduct / (Math.sqrt(magnitude1) * Math.sqrt(magnitude2));
    }

    /**
     * Example usage: compares two Java source snippets by TF-IDF cosine similarity.
     *
     * <p>Declared {@code static} so it actually runs as a JVM entry point — the
     * previous instance-method form could never be launched.
     */
    public static void main(String[] args) {
        String text1 = "package org.example.controller;\n" +
                "\n" +
                "import com.hankcs.hanlp.HanLP;\n" +
                "import com.hankcs.hanlp.seg.common.Term;\n" +
                "\n" +
                "import java.util.List;\n" +
                "\n" +
                "public class test {\n" +
                "    public static void test01(){\n" +
                "        String text = \"\";\n" +
                "        List<Term> words= HanLP.segment(text);\n" +
                "        for (Term word : words) {\n" +
                "            System.out.print(word.word + \",\");\n" +
                "        }\n" +
                "    }\n" +
                "\n" +
                "    public static void main(String[] args) {\n" +
                "        test01();\n" +
                "    }\n" +
                "\n" +
                "}\n";
        String text2 = "package org.example;\n" +
                "\n" +
                "import org.springframework.boot.SpringApplication;\n" +
                "import org.springframework.boot.autoconfigure.SpringBootApplication;\n" +
                "\n" +
                "@SpringBootApplication\n" +
                "public class Main {\n" +
                "    public static void main(String[] args) {\n" +
                "        SpringApplication.run(Main.class, args);\n" +
                "\n" +
                "    }\n" +
                "}";

        // All calls go through one instance (main is static, so the previous
        // bare extractWordFromText(...) calls would not compile here).
        TFIDFTextComparator calculator = new TFIDFTextComparator();
        List<String> document1 = calculator.extractWordFromText(text1);
        List<String> document2 = calculator.extractWordFromText(text2);
        List<List<String>> documents = new ArrayList<>();
        documents.add(document1);
        documents.add(document2);

        Map<String, Double> tfDocument1 = calculator.calculateTF(document1);
        Map<String, Double> tfDocument2 = calculator.calculateTF(document2);

        Map<String, Double> idf = calculator.calculateIDF(documents);

        Map<String, Double> tfidfDocument1 = calculator.calculateTFIDF(tfDocument1, idf);
        Map<String, Double> tfidfDocument2 = calculator.calculateTFIDF(tfDocument2, idf);

        double similarity = calculator.cosineSimilarity(tfidfDocument1, tfidfDocument2);
        System.out.println("Cosine Similarity between documents: " + similarity);
    }

}
