package com.sky.service;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.stream.Collectors;

@Service
public class NlpService {

    // Shared across requests. NOTE(review): JiebaSegmenter is presumed safe for
    // concurrent use here — confirm against the jieba-analysis version in use.
    private final JiebaSegmenter segmenter = new JiebaSegmenter();

    /**
     * Segments Chinese text into words using jieba's SEARCH mode.
     *
     * @param text input text; {@code null} or empty yields an empty list
     * @return segmented words in original order (may contain duplicates)
     */
    public List<String> segment(String text) {
        if (text == null || text.isEmpty()) {
            return Collections.emptyList();
        }
        return segmenter.process(text, JiebaSegmenter.SegMode.SEARCH).stream()
                .map(token -> token.word)
                .collect(Collectors.toList());
    }

    /**
     * Extracts up to {@code size} keywords with HanLP's keyword extractor.
     *
     * @param text input text; {@code null} or empty yields an empty list
     * @param size maximum number of keywords; non-positive yields an empty list
     * @return extracted keywords, most relevant first
     */
    public List<String> extractKeywords(String text, int size) {
        if (text == null || text.isEmpty() || size <= 0) {
            return Collections.emptyList();
        }
        return HanLP.extractKeyword(text, size);
    }

    /**
     * Builds a summary of up to {@code size} sentences from the text.
     *
     * <p>Fix: the previous implementation returned only {@code get(0)} — it
     * discarded every sentence after the first (ignoring {@code size}) and
     * threw {@link IndexOutOfBoundsException} when HanLP produced no
     * sentences. All returned sentences are now joined; empty input yields "".
     *
     * @param text input text; {@code null} or empty yields ""
     * @param size maximum number of summary sentences
     * @return summary sentences joined with "。", or "" if none
     */
    public String extractSummary(String text, int size) {
        if (text == null || text.isEmpty() || size <= 0) {
            return "";
        }
        List<String> sentences = HanLP.extractSummary(text, size);
        return String.join("。", sentences);
    }

    /**
     * Computes cosine similarity between two texts over their word-frequency
     * vectors.
     *
     * @param text1 first text ({@code null} treated as empty)
     * @param text2 second text ({@code null} treated as empty)
     * @return similarity in [0, 1]; 0.0 when either text has no words
     *         (previously this case returned {@code NaN})
     */
    public double similarity(String text1, String text2) {
        return calculateCosineSimilarity(segment(text1), segment(text2));
    }

    /**
     * Classifies the user's intent by keyword matching.
     *
     * <p>Fix: triggers are now also matched against the raw text. Multi-word
     * triggers such as "多少钱" are split apart by segmentation and therefore
     * almost never appeared in the extracted keyword list, so the previous
     * keyword-only check could not match them.
     *
     * @param text user utterance ({@code null} treated as empty)
     * @return one of "price_inquiry", "delivery_inquiry", "discount_inquiry",
     *         or "general_inquiry" as the fallback
     */
    public String recognizeIntent(String text) {
        String safeText = text == null ? "" : text;
        List<String> keywords = extractKeywords(safeText, 3);

        if (containsAny(safeText, keywords, "价格", "多少钱")) {
            return "price_inquiry";
        } else if (containsAny(safeText, keywords, "配送", "送达")) {
            return "delivery_inquiry";
        } else if (containsAny(safeText, keywords, "优惠", "折扣")) {
            return "discount_inquiry";
        } else {
            return "general_inquiry";
        }
    }

    /** Returns true if any trigger appears in the keyword list or the raw text. */
    private static boolean containsAny(String text, List<String> keywords, String... triggers) {
        for (String trigger : triggers) {
            if (keywords.contains(trigger) || text.contains(trigger)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Cosine similarity of two bags of words.
     *
     * @return value in [0, 1]; 0.0 when either vector is empty (zero norm),
     *         instead of the previous 0/0 = NaN
     */
    private double calculateCosineSimilarity(List<String> words1, List<String> words2) {
        Map<String, Integer> vector1 = buildWordVector(words1);
        Map<String, Integer> vector2 = buildWordVector(words2);

        // Dot product over the intersection of the two vocabularies.
        // Cast to double before multiplying to avoid int overflow.
        double dotProduct = 0.0;
        for (Map.Entry<String, Integer> entry : vector1.entrySet()) {
            Integer other = vector2.get(entry.getKey());
            if (other != null) {
                dotProduct += (double) entry.getValue() * other;
            }
        }

        double norm1 = calculateVectorNorm(vector1);
        double norm2 = calculateVectorNorm(vector2);

        // Guard the degenerate case: an empty text produces a zero-length
        // vector, and dividing by zero here used to surface NaN to callers.
        if (norm1 == 0.0 || norm2 == 0.0) {
            return 0.0;
        }
        return dotProduct / (norm1 * norm2);
    }

    /** Builds a word → occurrence-count frequency vector. */
    private Map<String, Integer> buildWordVector(List<String> words) {
        Map<String, Integer> vector = new HashMap<>();
        for (String word : words) {
            vector.merge(word, 1, Integer::sum);
        }
        return vector;
    }

    /** Euclidean (L2) norm of a frequency vector. */
    private double calculateVectorNorm(Map<String, Integer> vector) {
        double sum = 0.0;
        for (int value : vector.values()) {
            // Cast before squaring: value * value would overflow int for
            // counts above ~46k.
            sum += (double) value * value;
        }
        return Math.sqrt(sum);
    }
}