package com.xust.oa.utils;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;

import java.util.*;

/**
 * Computes sentence similarity by segmenting text with HanLP and comparing
 * the resulting term-frequency vectors via cosine similarity.
 */
public class SentenceSimilarityNew {

    /** Utility class — not meant to be instantiated. */
    private SentenceSimilarityNew() {
    }

    /**
     * Computes the cosine similarity between two term-frequency vectors.
     *
     * @param vec1 word -> frequency map of the first sentence
     * @param vec2 word -> frequency map of the second sentence
     * @return similarity in [0, 1]; 0.0 when either vector is empty or all-zero
     */
    public static double cosineSimilarity(Map<String, Integer> vec1, Map<String, Integer> vec2) {
        // Union of vocabularies so every word of either sentence is considered once.
        Set<String> words = new HashSet<>(vec1.keySet());
        words.addAll(vec2.keySet());

        double dotProduct = 0.0;
        double norm1 = 0.0;
        double norm2 = 0.0;
        for (String word : words) {
            // Widen to double before multiplying so large counts cannot overflow int.
            double count1 = vec1.getOrDefault(word, 0);
            double count2 = vec2.getOrDefault(word, 0);

            dotProduct += count1 * count2;
            // Plain multiplication instead of Math.pow(x, 2): same value, cheaper.
            norm1 += count1 * count1;
            norm2 += count2 * count2;
        }

        // A zero vector has no direction; define its similarity as 0 (also avoids /0).
        if (norm1 == 0 || norm2 == 0) {
            return 0.0;
        }
        return dotProduct / (Math.sqrt(norm1) * Math.sqrt(norm2));
    }

    /**
     * Segments a sentence with HanLP and builds its word-frequency vector.
     *
     * @param sentence the sentence to segment
     * @return map from each segmented word to its occurrence count
     */
    public static Map<String, Integer> getWordFrequencyVector(String sentence) {
        Map<String, Integer> wordFrequency = new HashMap<>();
        for (Term term : HanLP.segment(sentence)) {
            // merge: insert 1 on first sight, otherwise add 1 to the existing count.
            wordFrequency.merge(term.word, 1, Integer::sum);
        }
        return wordFrequency;
    }

    /** Demo entry point: prints the similarity of two sample sentences. */
    public static void main(String[] args) {
        // Input sentences.
        String sentence1 = "我喜欢用Java编程";
        String sentence2 = "我对Java比较感兴趣";
        // Convert each sentence to a term-frequency vector.
        Map<String, Integer> vec1 = getWordFrequencyVector(sentence1);
        Map<String, Integer> vec2 = getWordFrequencyVector(sentence2);
        // Compare the vectors.
        double similarity = cosineSimilarity(vec1, vec2);
        System.out.println("Cosine similarity: " + similarity);
    }
}