package com.quiz.bis.util.text;

import java.util.*;
import java.util.stream.Collectors;

/**
 * Static text-similarity utilities based on cosine similarity over term
 * frequencies, with an enhanced variant that accounts for keyword weights,
 * synonyms, and word order.
 *
 * <p>All methods are stateless and thread-safe.
 */
public final class CosineSimilarity {

    /** Strength of the word-order (positional) adjustment applied per term. */
    private static final double POSITIONAL_FACTOR = 0.2;
    /** Blend ratio: cosine similarity contributes 70% of the final score. */
    private static final double COSINE_WEIGHT = 0.7;
    /** Blend ratio: semantic (overlap) similarity contributes 30%. */
    private static final double SEMANTIC_WEIGHT = 0.3;

    private CosineSimilarity() {
        // Utility class; not instantiable.
    }

    /**
     * Computes the cosine similarity of two token lists using raw term
     * frequencies (no synonym or word-order handling).
     *
     * @param words1 first token list; {@code null} is treated as empty
     * @param words2 second token list; {@code null} is treated as empty
     * @return similarity in [0, 1]; 0 when either vector is empty/zero
     */
    public static double computeCosineSimilarity(List<String> words1, List<String> words2) {
        Map<String, Integer> freq1 = computeWordFrequency(words1);
        Map<String, Integer> freq2 = computeWordFrequency(words2);

        Set<String> vocabulary = new HashSet<>(freq1.keySet());
        vocabulary.addAll(freq2.keySet());

        double dotProduct = 0;
        double magnitude1 = 0;
        double magnitude2 = 0;

        for (String word : vocabulary) {
            // Accumulate in double to avoid int-overflow on pathological counts.
            double count1 = freq1.getOrDefault(word, 0);
            double count2 = freq2.getOrDefault(word, 0);

            dotProduct += count1 * count2;
            magnitude1 += count1 * count1;
            magnitude2 += count2 * count2;
        }

        magnitude1 = Math.sqrt(magnitude1);
        magnitude2 = Math.sqrt(magnitude2);

        // A zero vector has no direction; define its similarity as 0.
        if (magnitude1 == 0 || magnitude2 == 0) {
            return 0;
        }
        return dotProduct / (magnitude1 * magnitude2);
    }

    /** Counts occurrences of each token; a {@code null} list yields an empty map. */
    private static Map<String, Integer> computeWordFrequency(List<String> words) {
        Map<String, Integer> freq = new HashMap<>();
        if (words != null) {
            for (String word : words) {
                freq.merge(word, 1, Integer::sum);
            }
        }
        return freq;
    }

    /**
     * Enhanced similarity that accounts for synonyms, keyword weights, and
     * word order, blending weighted cosine similarity (70%) with a semantic
     * overlap score (30%).
     *
     * @param words1         first token list
     * @param words2         second token list
     * @param keywordWeights optional per-word weight (default 1.0); may be {@code null}
     * @param synonyms       optional map of standard word to its synonym set; may be {@code null}
     * @return blended similarity score in [0, 1]
     */
    public static double computeEnhancedSimilarity(List<String> words1, List<String> words2,
                                                   Map<String, Double> keywordWeights,
                                                   Map<String, Set<String>> synonyms) {
        // 1. Preprocess: lower-case each token and replace synonyms with their
        //    standard form. (Stemming/stop-word filtering could be added here.)
        List<String> processedWords1 = preprocessWords(words1, synonyms);
        List<String> processedWords2 = preprocessWords(words2, synonyms);

        // 2. Term frequencies scaled by keyword weight.
        Map<String, Double> weightedFreq1 = computeWeightedWordFrequency(processedWords1, keywordWeights);
        Map<String, Double> weightedFreq2 = computeWeightedWordFrequency(processedWords2, keywordWeights);

        // 3. Merged vocabulary of both texts.
        Set<String> vocabulary = new HashSet<>(weightedFreq1.keySet());
        vocabulary.addAll(weightedFreq2.keySet());

        // 4. Cosine similarity with a positional adjustment per term.
        double dotProduct = 0;
        double magnitude1 = 0;
        double magnitude2 = 0;

        for (String word : vocabulary) {
            double count1 = weightedFreq1.getOrDefault(word, 0.0);
            double count2 = weightedFreq2.getOrDefault(word, 0.0);

            // BUGFIX: the positional weight must be computed on the PROCESSED
            // lists — the vocabulary holds lower-cased, synonym-normalized
            // forms, which indexOf would never find in the raw input lists.
            double positionalWeight = computePositionalWeight(word, processedWords1, processedWords2);
            double adjustedCount1 = count1 * (1 + POSITIONAL_FACTOR * positionalWeight);
            double adjustedCount2 = count2 * (1 + POSITIONAL_FACTOR * positionalWeight);

            dotProduct += adjustedCount1 * adjustedCount2;
            magnitude1 += adjustedCount1 * adjustedCount1;
            magnitude2 += adjustedCount2 * adjustedCount2;
        }

        magnitude1 = Math.sqrt(magnitude1);
        magnitude2 = Math.sqrt(magnitude2);

        if (magnitude1 == 0 || magnitude2 == 0) {
            return 0;
        }

        double cosineSimilarity = dotProduct / (magnitude1 * magnitude2);
        // BUGFIX: overlap is measured on the processed lists so that synonyms
        // and case differences count as matches, as the contract promises.
        double semanticSimilarity = computeSemanticSimilarity(processedWords1, processedWords2);

        return COSINE_WEIGHT * cosineSimilarity + SEMANTIC_WEIGHT * semanticSimilarity;
    }

    /**
     * Lower-cases each token and maps synonyms to their standard word.
     * Uses a reverse lookup built once per call instead of scanning every
     * synonym set per token.
     */
    private static List<String> preprocessWords(List<String> words, Map<String, Set<String>> synonyms) {
        Map<String, String> synonymToStandard = buildSynonymLookup(synonyms);
        List<String> processed = new ArrayList<>(words == null ? 0 : words.size());
        if (words != null) {
            for (String word : words) {
                // Locale.ROOT keeps lower-casing stable regardless of the
                // JVM's default locale (e.g. Turkish dotless-i).
                String lowerWord = word.toLowerCase(Locale.ROOT);
                processed.add(synonymToStandard.getOrDefault(lowerWord, lowerWord));
            }
        }
        return processed;
    }

    /**
     * Inverts a {standard word -> synonym set} map into a
     * {synonym -> standard word} lookup. A {@code null} map yields an empty lookup.
     */
    private static Map<String, String> buildSynonymLookup(Map<String, Set<String>> synonyms) {
        Map<String, String> lookup = new HashMap<>();
        if (synonyms != null) {
            for (Map.Entry<String, Set<String>> entry : synonyms.entrySet()) {
                for (String synonym : entry.getValue()) {
                    lookup.put(synonym, entry.getKey());
                }
            }
        }
        return lookup;
    }

    /**
     * Sums the per-occurrence weight of each token. Words absent from
     * {@code keywordWeights} (or when the map is {@code null}) weigh 1.0.
     */
    private static Map<String, Double> computeWeightedWordFrequency(List<String> words,
                                                                    Map<String, Double> keywordWeights) {
        Map<String, Double> freq = new HashMap<>();
        for (String word : words) {
            // Guard against a null weight map (synonyms is already null-guarded;
            // the original threw NPE here).
            double weight = (keywordWeights == null) ? 1.0 : keywordWeights.getOrDefault(word, 1.0);
            freq.merge(word, weight, Double::sum);
        }
        return freq;
    }

    /**
     * Positional agreement of a word between the two lists, in [0, 1]:
     * 1 when the word's first occurrence sits at the same index in both,
     * decreasing linearly with the index gap; 0 if absent from either list.
     * Only the FIRST occurrence in each list is considered.
     */
    private static double computePositionalWeight(String word, List<String> words1, List<String> words2) {
        int position1 = words1.indexOf(word);
        int position2 = words2.indexOf(word);

        if (position1 == -1 || position2 == -1) {
            return 0;
        }

        // Normalize the index gap by the longer list's length.
        int maxLength = Math.max(words1.size(), words2.size());
        double positionDiff = Math.abs(position1 - position2);
        return 1.0 - (positionDiff / maxLength);
    }

    /**
     * Crude semantic similarity: fraction of tokens of the longer list found
     * in the other. Placeholder for a real embedding-based measure
     * (Word2Vec/GloVe); duplicate tokens in {@code words2} are each counted.
     */
    private static double computeSemanticSimilarity(List<String> words1, List<String> words2) {
        if (words1.isEmpty() || words2.isEmpty()) {
            return 0;
        }

        Set<String> set1 = new HashSet<>(words1);
        int commonWords = 0;
        for (String word : words2) {
            if (set1.contains(word)) {
                commonWords++;
            }
        }

        return (double) commonWords / Math.max(words1.size(), words2.size());
    }
}
