package com.textcheck;

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.seg.common.Term;

/**
 * Cosine-similarity text comparison backed by HanLP word segmentation.
 *
 * <p>Terms are weighted by part of speech and word length, synonym matches
 * (via {@code NlpConfig.areSynonyms}) contribute a discounted score, and the
 * raw cosine value is finally adjusted by the two texts' length ratio. If
 * segmentation fails for any reason, the calculator degrades to a plain
 * character-frequency cosine comparison instead of propagating the error.
 *
 * <p>Stateless apart from static lookup tables; safe for concurrent use as
 * long as the underlying HanLP dictionaries are.
 */
public class CosineSimilarity implements SimilarityCalculator {

    /** Weight used for part-of-speech tags not present in {@link #POS_WEIGHT}. */
    private static final double DEFAULT_POS_WEIGHT = 0.5;

    /** Multiplier applied to proper nouns and custom-dictionary entries. */
    private static final double PROPER_NOUN_BONUS = 1.2;

    /** Synonym matches count slightly less than exact term matches. */
    private static final double SYNONYM_DISCOUNT = 0.9;

    /**
     * Part-of-speech tag -> weight. Keys are either full HanLP nature tags
     * ("vn", "nz") or single-letter tag prefixes ("n", "v"); lookup tries the
     * full tag first, then falls back to the first letter.
     */
    private static final Map<String, Double> POS_WEIGHT = new HashMap<>();

    static {
        POS_WEIGHT.put("n", 1.0);   // common noun
        POS_WEIGHT.put("v", 0.9);   // verb
        POS_WEIGHT.put("vn", 1.0);  // verbal noun
        POS_WEIGHT.put("nz", 1.2);  // proper noun
        POS_WEIGHT.put("a", 0.8);   // adjective
        POS_WEIGHT.put("d", 0.5);   // adverb
        POS_WEIGHT.put("p", 0.5);   // preposition
        POS_WEIGHT.put("r", 0.8);   // pronoun
        POS_WEIGHT.put("m", 0.5);   // numeral
        POS_WEIGHT.put("q", 0.5);   // measure word
        POS_WEIGHT.put("c", 0.3);   // conjunction
        POS_WEIGHT.put("w", 0.0);   // punctuation
        POS_WEIGHT.put("u", 0.0);   // auxiliary / particle
    }

    /**
     * Computes the adjusted cosine similarity of two texts.
     *
     * @param text1 first text; {@code null} yields 0.0
     * @param text2 second text; {@code null} yields 0.0
     * @return a similarity score clamped to [0, 1]; 1.0 when both texts are
     *         empty, 0.0 when exactly one is empty
     */
    @Override
    public double calculate(String text1, String text2) {
        if (text1 == null || text2 == null) {
            return 0.0;
        }
        if (text1.isEmpty() && text2.isEmpty()) {
            return 1.0;
        }
        if (text1.isEmpty() || text2.isEmpty()) {
            return 0.0;
        }

        try {
            // Segment both texts and build weighted term-frequency vectors.
            List<Term> terms1 = HanLP.segment(text1);
            List<Term> terms2 = HanLP.segment(text2);

            Map<String, Double> vector1 = getWeightedTermFrequencyVector(terms1);
            Map<String, Double> vector2 = getWeightedTermFrequencyVector(terms2);

            double similarity = calculateSimilarityWithSynonyms(vector1, vector2);

            // Scale by text-length characteristics before returning.
            return adjustSimilarity(similarity, text1, text2);
        } catch (Exception e) {
            // Segmentation may fail (e.g. missing dictionary resources).
            // Degrade to a character-level comparison rather than failing the
            // whole call; log the exception itself, not just its message, so
            // the type is not lost.
            System.err.println("计算过程出错: " + e);
            return calculateCharacterBased(text1, text2);
        }
    }

    /**
     * Builds a term -> weighted-frequency vector from segmented terms.
     * Each occurrence contributes {@code posWeight * lengthWeight}; repeated
     * terms accumulate.
     */
    private Map<String, Double> getWeightedTermFrequencyVector(List<Term> terms) {
        Map<String, Double> vector = new HashMap<>();

        for (Term term : terms) {
            // Locale.ROOT keeps lowercasing deterministic across JVM locales.
            String word = term.word.trim().toLowerCase(Locale.ROOT);
            String nature = term.nature.toString();

            if (word.isEmpty()) {
                continue;
            }

            // BUGFIX: look up the full nature tag first so multi-letter
            // entries such as "vn" and "nz" are honoured. The previous code
            // always truncated to the first letter, which made those table
            // entries unreachable ("vn" -> "v" = 0.9, "nz" -> "n" = 1.0).
            Double exactWeight = POS_WEIGHT.get(nature);
            double posWeight;
            if (exactWeight != null) {
                posWeight = exactWeight;
            } else {
                String prefix = nature.isEmpty() ? nature : nature.substring(0, 1);
                posWeight = POS_WEIGHT.getOrDefault(prefix, DEFAULT_POS_WEIGHT);
            }

            // Proper nouns and custom-dictionary words get extra weight.
            if (CustomDictionary.contains(word) || "nz".equals(nature)) {
                posWeight *= PROPER_NOUN_BONUS;
            }

            double weight = posWeight * calculateLengthWeight(word);
            vector.merge(word, weight, Double::sum);
        }

        return vector;
    }

    /** Longer words are more informative, so they receive a higher weight. */
    private double calculateLengthWeight(String word) {
        if (word.length() <= 1) return 0.8;
        if (word.length() == 2) return 1.0;
        if (word.length() == 3) return 1.2;
        return 1.5; // words longer than 3 characters
    }

    /**
     * Cosine similarity between two weighted term vectors, where a term in
     * {@code vector1} may also match a synonym in {@code vector2} at a
     * discounted weight. An exact term match always takes precedence over any
     * synonym match.
     *
     * @return value in [0, 1]; 0.0 if either vector has zero norm
     */
    private double calculateSimilarityWithSynonyms(Map<String, Double> vector1, Map<String, Double> vector2) {
        double dotProduct = 0.0;

        for (Map.Entry<String, Double> entry : vector1.entrySet()) {
            String term1 = entry.getKey();

            // Exact match wins outright; O(1) lookup instead of scanning.
            Double exact = vector2.get(term1);
            double bestMatch;
            if (exact != null) {
                bestMatch = exact;
            } else {
                // Otherwise take the best discounted synonym match, if any.
                bestMatch = 0.0;
                for (Map.Entry<String, Double> candidate : vector2.entrySet()) {
                    if (NlpConfig.areSynonyms(term1, candidate.getKey())) {
                        bestMatch = Math.max(bestMatch, candidate.getValue() * SYNONYM_DISCOUNT);
                    }
                }
            }
            dotProduct += entry.getValue() * bestMatch;
        }

        double norm1 = calculateNorm(vector1);
        double norm2 = calculateNorm(vector2);

        if (norm1 == 0.0 || norm2 == 0.0) {
            return 0.0;
        }

        return dotProduct / (norm1 * norm2);
    }

    /**
     * Adjusts the raw similarity by the ratio of the two text lengths: short
     * texts are judged more strictly, long texts slightly more leniently.
     * The result is clamped to [0, 1].
     */
    private double adjustSimilarity(double similarity, String text1, String text2) {
        int maxLength = Math.max(text1.length(), text2.length());
        int minLength = Math.min(text1.length(), text2.length());
        double lengthRatio = (double) minLength / maxLength;

        if (maxLength < 10) {
            // Short texts: demand a stricter similarity.
            similarity = similarity * (0.8 + 0.2 * lengthRatio);
        } else {
            // Long texts: mild boost proportional to length balance.
            similarity = similarity * (0.9 + 0.1 * lengthRatio);
        }

        return Math.max(0.0, Math.min(1.0, similarity));
    }

    /** Euclidean (L2) norm of a weighted term vector. */
    private double calculateNorm(Map<String, Double> vector) {
        return Math.sqrt(
            vector.values().stream()
                  .mapToDouble(weight -> weight * weight)
                  .sum()
        );
    }

    /**
     * Fallback: plain cosine similarity over character frequencies, ignoring
     * whitespace. Used when HanLP segmentation throws.
     *
     * @return value in [0, 1]; 0.0 if either text has no non-whitespace chars
     */
    private double calculateCharacterBased(String text1, String text2) {
        Map<Character, Integer> vector1 = new HashMap<>();
        Map<Character, Integer> vector2 = new HashMap<>();

        for (char c : text1.toCharArray()) {
            if (Character.isWhitespace(c)) continue;
            vector1.merge(c, 1, Integer::sum);
        }
        for (char c : text2.toCharArray()) {
            if (Character.isWhitespace(c)) continue;
            vector2.merge(c, 1, Integer::sum);
        }

        // Dot product only needs characters present in both vectors, but
        // iterating the union keeps the logic symmetrical and cheap.
        double dotProduct = 0.0;
        Set<Character> allChars = new HashSet<>(vector1.keySet());
        allChars.addAll(vector2.keySet());

        for (char c : allChars) {
            int freq1 = vector1.getOrDefault(c, 0);
            int freq2 = vector2.getOrDefault(c, 0);
            dotProduct += freq1 * freq2;
        }

        double norm1 = Math.sqrt(vector1.values().stream().mapToInt(i -> i * i).sum());
        double norm2 = Math.sqrt(vector2.values().stream().mapToInt(i -> i * i).sum());

        if (norm1 == 0.0 || norm2 == 0.0) {
            return 0.0;
        }

        return dotProduct / (norm1 * norm2);
    }
}