package org.lc.kcjxzj.textcheck;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import org.springframework.stereotype.Component;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.seg.common.Term;
import lombok.extern.slf4j.Slf4j;

/**
 * Improved cosine-similarity implementation.
 *
 * Improvements over the baseline version:
 * 1. Caches segmentation results and term vectors for performance.
 * 2. Refined term-frequency weighting (part-of-speech and word-length weights).
 * 3. Synonym-aware matching when computing the dot product.
 * 4. Uses SLF4J logging instead of System.out.
 */
@Slf4j
@Component
public class CosineSimilarity implements SimilarityCalculator {

    /** Part-of-speech tag -> weight applied when building term vectors. */
    private static final Map<String, Double> POS_WEIGHT = new HashMap<>();

    /** Weight used for POS tags not present in {@link #POS_WEIGHT}. */
    private static final double DEFAULT_POS_WEIGHT = 0.5;

    /**
     * Upper bound on cached entries. The caches are static and live for the whole
     * application; without a cap they grow without bound as new texts arrive.
     */
    private static final int MAX_CACHE_SIZE = 10_000;

    // Segmentation and vector caches keyed by the raw input text.
    private static final Map<String, List<Term>> segmentationCache = new ConcurrentHashMap<>();
    private static final Map<String, Map<String, Double>> vectorCache = new ConcurrentHashMap<>();

    static {
        // Initialize POS weights (labels follow the original author's comments).
        POS_WEIGHT.put("n", 1.0);   // common noun
        POS_WEIGHT.put("v", 0.9);   // common verb
        POS_WEIGHT.put("vn", 1.0);  // verbal noun
        POS_WEIGHT.put("nz", 1.2);  // proper noun
        POS_WEIGHT.put("nt", 1.1);  // time word (per original comment)
        POS_WEIGHT.put("ns", 1.1);  // place name
        POS_WEIGHT.put("a", 0.8);   // adjective
        POS_WEIGHT.put("d", 0.5);   // adverb
        POS_WEIGHT.put("p", 0.5);   // preposition
        POS_WEIGHT.put("r", 0.8);   // pronoun
        POS_WEIGHT.put("m", 0.5);   // numeral
        POS_WEIGHT.put("q", 0.5);   // measure word
        POS_WEIGHT.put("c", 0.3);   // conjunction
        POS_WEIGHT.put("w", 0.0);   // punctuation
        POS_WEIGHT.put("u", 0.0);   // particle
    }

    /**
     * Computes the similarity of two texts as a value in [0, 1].
     *
     * Null inputs yield 0.0; two empty strings yield 1.0; one empty string yields 0.0.
     * If segmentation fails for any reason, falls back to a character-frequency
     * cosine comparison instead of propagating the exception.
     *
     * @param text1 first text (may be null)
     * @param text2 second text (may be null)
     * @return similarity score in [0, 1]
     */
    @Override
    public double calculate(String text1, String text2) {
        if (text1 == null || text2 == null) {
            return 0.0;
        }
        if (text1.isEmpty() && text2.isEmpty()) {
            return 1.0;
        }
        if (text1.isEmpty() || text2.isEmpty()) {
            return 0.0;
        }

        try {
            Map<String, Double> vector1 = getTermVector(text1);
            Map<String, Double> vector2 = getTermVector(text2);

            // Synonym-aware cosine similarity, then a length-based adjustment.
            double similarity = calculateSimilarityWithSynonyms(vector1, vector2);
            return adjustSimilarity(similarity, text1, text2);
        } catch (Exception e) {
            log.error("计算余弦相似度时发生错误", e);
            // Degraded mode: character-level comparison needs no segmenter.
            return calculateCharacterBased(text1, text2);
        }
    }

    /**
     * Returns the (normalized) term-frequency vector for a text, served from the
     * cache when possible.
     *
     * Fix: the original caches were unbounded; here both caches are flushed once
     * either exceeds {@link #MAX_CACHE_SIZE}, bounding memory use in this
     * long-lived singleton.
     */
    private Map<String, Double> getTermVector(String text) {
        if (vectorCache.size() >= MAX_CACHE_SIZE || segmentationCache.size() >= MAX_CACHE_SIZE) {
            clearAllCache();
        }
        return vectorCache.computeIfAbsent(text, k -> {
            List<Term> terms = segmentationCache.computeIfAbsent(k, HanLP::segment);
            return calculateTermVector(terms);
        });
    }

    /**
     * Builds a normalized weight vector from segmented terms. Each term's weight
     * is (POS weight) * (length weight), boosted for custom-dictionary entries
     * and proper nouns; repeated terms accumulate.
     */
    private Map<String, Double> calculateTermVector(List<Term> terms) {
        Map<String, Double> vector = new HashMap<>();

        for (Term term : terms) {
            String word = term.word.toLowerCase(Locale.ROOT);
            String nature = term.nature.toString();

            if (word.isEmpty()) continue;

            double posWeight = lookupPosWeight(nature);

            // Custom-dictionary entries and proper nouns get a boost.
            if (CustomDictionary.contains(word) || "nz".equals(nature)) {
                posWeight *= 1.2;
            }

            double weight = posWeight * calculateLengthWeight(word);
            vector.merge(word, weight, Double::sum);
        }

        normalizeVector(vector);
        return vector;
    }

    /**
     * Resolves the weight for a POS tag: exact tag first, then the tag's first
     * character, then a neutral default.
     *
     * Fix: the original always truncated multi-character tags to their first
     * character, which made the "vn", "nz", "nt" and "ns" entries in
     * {@link #POS_WEIGHT} unreachable.
     */
    private double lookupPosWeight(String nature) {
        if (nature.isEmpty()) {
            return DEFAULT_POS_WEIGHT;
        }
        Double exact = POS_WEIGHT.get(nature);
        if (exact != null) {
            return exact;
        }
        return POS_WEIGHT.getOrDefault(nature.substring(0, 1), DEFAULT_POS_WEIGHT);
    }

    /**
     * Synonym-aware cosine similarity of two weight vectors.
     *
     * Fix: the original only matched vector1's terms into vector2, making the
     * score asymmetric whenever synonym matches occurred. Averaging both
     * directions restores calculate(a, b) == calculate(b, a); with exact matches
     * only, both directions are equal and the result is unchanged.
     */
    private double calculateSimilarityWithSynonyms(Map<String, Double> vector1, Map<String, Double> vector2) {
        double norm1 = calculateNorm(vector1);
        double norm2 = calculateNorm(vector2);
        if (norm1 == 0.0 || norm2 == 0.0) {
            return 0.0;
        }
        double dotProduct =
            (directionalDotProduct(vector1, vector2) + directionalDotProduct(vector2, vector1)) / 2.0;
        return dotProduct / (norm1 * norm2);
    }

    /**
     * Dot product of {@code from} against {@code to}, where each term of
     * {@code from} is paired with its exact match in {@code to} or, failing that,
     * its best-weighted synonym.
     */
    private double directionalDotProduct(Map<String, Double> from, Map<String, Double> to) {
        double dotProduct = 0.0;
        for (Map.Entry<String, Double> entry : from.entrySet()) {
            String term = entry.getKey();

            // O(1) fast path: the original scanned the whole map even when an
            // exact match existed.
            Double exact = to.get(term);
            double bestMatch;
            if (exact != null) {
                bestMatch = exact;
            } else {
                bestMatch = 0.0;
                for (Map.Entry<String, Double> candidate : to.entrySet()) {
                    if (NlpConfig.areSynonyms(term, candidate.getKey())) {
                        // Scale the candidate's weight by how close the synonym is.
                        double synonymWeight = NlpConfig.getSynonymWeight(term, candidate.getKey());
                        bestMatch = Math.max(bestMatch, candidate.getValue() * synonymWeight);
                    }
                }
            }
            dotProduct += entry.getValue() * bestMatch;
        }
        return dotProduct;
    }

    /**
     * Scales the vector in place to unit length; a zero vector is left unchanged.
     */
    private void normalizeVector(Map<String, Double> vector) {
        double norm = calculateNorm(vector);
        if (norm > 0) {
            vector.replaceAll((k, v) -> v / norm);
        }
    }

    /**
     * Euclidean (L2) norm of a weight vector.
     */
    private double calculateNorm(Map<String, Double> vector) {
        return Math.sqrt(
            vector.values().stream()
                  .mapToDouble(weight -> weight * weight)
                  .sum()
        );
    }

    /**
     * Length-based weight: longer words are considered more informative.
     */
    private double calculateLengthWeight(String word) {
        if (word.length() <= 1) return 0.8;
        if (word.length() == 2) return 1.0;
        if (word.length() == 3) return 1.2;
        return 1.5;
    }

    /**
     * Adjusts the raw similarity by the texts' length ratio: short texts are
     * penalized more aggressively than long ones. Result is clamped to [0, 1].
     */
    private double adjustSimilarity(double similarity, String text1, String text2) {
        int maxLength = Math.max(text1.length(), text2.length());
        int minLength = Math.min(text1.length(), text2.length());
        double lengthRatio = (double) minLength / maxLength;

        if (maxLength < 10) {
            // Short texts: stricter similarity requirement.
            similarity *= 0.8 + 0.2 * lengthRatio;
        } else if (maxLength > 100) {
            // Long texts: mild boost.
            similarity *= 0.9 + 0.1 * lengthRatio;
        } else {
            // Medium-length texts.
            similarity *= 0.85 + 0.15 * lengthRatio;
        }

        return Math.max(0.0, Math.min(1.0, similarity));
    }

    /**
     * Fallback: plain cosine similarity over character frequencies (whitespace
     * ignored). Used when segmentation-based scoring throws.
     */
    private double calculateCharacterBased(String text1, String text2) {
        log.debug("使用字符级别的降级比较方案");

        Map<Character, Integer> freqMap1 = new HashMap<>();
        Map<Character, Integer> freqMap2 = new HashMap<>();

        text1.chars().mapToObj(c -> (char) c)
             .filter(c -> !Character.isWhitespace(c))
             .forEach(c -> freqMap1.merge(c, 1, Integer::sum));

        text2.chars().mapToObj(c -> (char) c)
             .filter(c -> !Character.isWhitespace(c))
             .forEach(c -> freqMap2.merge(c, 1, Integer::sum));

        double dotProduct = freqMap1.entrySet().stream()
            .filter(e -> freqMap2.containsKey(e.getKey()))
            .mapToDouble(e -> e.getValue() * freqMap2.get(e.getKey()))
            .sum();

        double norm1 = Math.sqrt(freqMap1.values().stream().mapToInt(i -> i * i).sum());
        double norm2 = Math.sqrt(freqMap2.values().stream().mapToInt(i -> i * i).sum());

        if (norm1 == 0.0 || norm2 == 0.0) {
            return 0.0;
        }

        return dotProduct / (norm1 * norm2);
    }

    /**
     * Evicts the cached segmentation and vector for a single text. No-op for null.
     */
    public void clearCache(String text) {
        if (text != null) {
            segmentationCache.remove(text);
            vectorCache.remove(text);
        }
    }

    /**
     * Evicts all cached segmentations and vectors.
     */
    public void clearAllCache() {
        segmentationCache.clear();
        vectorCache.clear();
    }
}