package com.ruoyi.web.util;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * Chinese text analysis utility.
 *
 * <p>Wraps the jieba segmentation library to provide keyword extraction from
 * Chinese comment text, word-cloud data generation, and a simple lexicon-based
 * sentiment score.
 *
 * @author ruoyi
 */
@Component
public class ChineseTextAnalyzer {

    /**
     * Precompiled pattern matching any string that contains at least one CJK
     * ideograph. Hoisted so the regex is compiled once instead of on every
     * {@link #isValidWord(String)} call.
     */
    private static final Pattern CONTAINS_CHINESE = Pattern.compile(".*[\u4e00-\u9fa5].*");

    /**
     * Positive sentiment lexicon. Constant data, built once; previously it was
     * rebuilt on every {@link #analyzeSentiment(String)} invocation.
     */
    private static final Set<String> POSITIVE_WORDS = new HashSet<>(Arrays.asList(
        "好", "棒", "赞", "优秀", "满意", "喜欢", "推荐", "值得", "不错", "完美", "惊喜", "超值", "实用", "方便", "快速", "及时", "专业", "贴心", "周到", "细致", "认真", "负责", "热情", "友好", "耐心", "高效", "优质", "精美", "漂亮", "美观", "舒适", "温馨", "温暖", "感动", "开心", "愉快", "轻松", "放心", "安心", "省心", "贴心", "用心", "走心", "暖心", "开心", "舒心", "称心", "顺心", "合心", "得心应手"
    ));

    /** Negative sentiment lexicon. Built once, see {@link #POSITIVE_WORDS}. */
    private static final Set<String> NEGATIVE_WORDS = new HashSet<>(Arrays.asList(
        "差", "烂", "垃圾", "失望", "不满", "讨厌", "后悔", "坑", "骗", "假", "劣质", "粗糙", "难用", "麻烦", "复杂", "困难", "慢", "延迟", "不及时", "不专业", "冷漠", "态度差", "不耐烦", "敷衍", "马虎", "粗心", "不负责", "不认真", "难看", "丑", "不舒服", "不方便", "不实用", "浪费", "贵", "不值", "坑钱", "黑心", "无良", "欺骗", "虚假", "误导", "夸大", "吹牛", "忽悠", "套路", "陷阱"
    ));

    /** jieba segmenter instance; thread-safe usage assumed by Spring singleton scope — TODO confirm. */
    private final JiebaSegmenter segmenter;

    // Stop words filtered out during keyword extraction
    private final Set<String> stopWords;

    public ChineseTextAnalyzer() {
        this.segmenter = new JiebaSegmenter();
        this.stopWords = initStopWords();
    }

    /**
     * Builds the stop-word set used to filter segmentation output.
     *
     * @return set of common Chinese function words to ignore during keyword extraction
     */
    private Set<String> initStopWords() {
        // Common Chinese function/filler words; the HashSet deduplicates repeats
        return new HashSet<>(Arrays.asList(
            "的", "了", "在", "是", "我", "有", "和", "就", "不", "人", "都", "一", "一个", "上", "也", "很", "到", "说", "要", "去", "你", "会", "着", "没有", "看", "好", "自己", "这", "那", "他", "她", "它", "们", "这个", "那个", "这些", "那些", "什么", "怎么", "为什么", "哪里", "哪个", "多少", "几个", "还是", "或者", "但是", "然后", "因为", "所以", "如果", "虽然", "可是", "不过", "而且", "并且", "以及", "以后", "以前", "现在", "今天", "明天", "昨天", "时候", "地方", "东西", "问题", "方法", "办法", "情况", "结果", "开始", "最后", "已经", "正在", "可能", "应该", "需要", "希望", "觉得", "认为", "知道", "听说", "看见", "发现", "变成", "成为", "进行", "继续", "完成", "实现", "得到", "拿到", "收到", "买到", "卖出", "用来", "作为", "关于", "对于", "由于", "根据", "按照", "通过", "经过", "超过", "达到", "来到", "回到", "走到", "跑到", "飞到", "游到", "爬到", "跳到", "坐到", "站到", "躺到", "睡到", "醒来", "起来", "下来", "上来", "进来", "出来", "回来", "过来", "带来", "拿来", "送来", "给", "让", "使", "叫", "请", "告诉", "教", "学", "练", "做", "干", "搞", "弄", "整", "修", "建", "造", "创", "制", "产", "生", "长", "大", "小", "高", "低", "多", "少", "新", "旧", "好", "坏", "美", "丑", "快", "慢", "早", "晚", "前", "后", "左", "右", "东", "南", "西", "北", "中", "内", "外", "里", "面", "边", "旁", "近", "远", "这里", "那里", "哪里", "到处", "处处", "各处", "别处", "某处", "此处", "彼处", "何处", "无处", "有处", "一处", "两处", "三处", "四处", "五处", "六处", "七处", "八处", "九处", "十处", "百处", "千处", "万处", "亿处", "兆处"
        ));
    }

    /**
     * Extracts the most frequent keywords from a piece of text.
     *
     * @param text text to analyze; {@code null}/blank yields an empty list
     * @param topN maximum number of keywords to return; non-positive yields an
     *             empty list (previously a negative value made
     *             {@code Stream.limit} throw {@link IllegalArgumentException})
     * @return mutable list of entries with keys {@code "word"}, {@code "count"}
     *         and {@code "weight"}, sorted by frequency descending
     */
    public List<Map<String, Object>> extractKeywords(String text, int topN) {
        if (text == null || text.trim().isEmpty() || topN <= 0) {
            return new ArrayList<>();
        }

        // NOTE(review): INDEX mode emits overlapping sub-word tokens, which can
        // inflate counts of short words; SEARCH mode may be more appropriate for
        // keyword statistics — confirm before changing, as it alters output.
        List<SegToken> tokens = segmenter.process(text, JiebaSegmenter.SegMode.INDEX);

        // Count frequencies, skipping stop words and single-character tokens
        Map<String, Integer> wordCount = new HashMap<>();
        for (SegToken token : tokens) {
            String word = token.word.trim();
            if (word.length() > 1 && !stopWords.contains(word) && isValidWord(word)) {
                wordCount.merge(word, 1, Integer::sum);
            }
        }

        // Sort by frequency (descending) and keep the top N
        return wordCount.entrySet().stream()
                .sorted(Map.Entry.<String, Integer>comparingByValue().reversed())
                .limit(topN)
                .map(entry -> {
                    Map<String, Object> item = new HashMap<>();
                    item.put("word", entry.getKey());
                    item.put("count", entry.getValue());
                    item.put("weight", entry.getValue() * 10); // weight drives word-cloud sizing
                    return item;
                })
                .collect(Collectors.toList());
    }

    /**
     * Builds word-cloud data from a list of texts.
     *
     * @param texts    texts to merge and analyze; {@code null}/empty yields an empty list
     * @param maxWords maximum number of words in the cloud
     * @return keyword entries additionally decorated with {@code "color"} and
     *         {@code "fontSize"} for display
     */
    public List<Map<String, Object>> generateWordCloudData(List<String> texts, int maxWords) {
        if (texts == null || texts.isEmpty()) {
            return new ArrayList<>();
        }

        // Merge all texts into one document and extract its keywords
        String combinedText = String.join(" ", texts);
        List<Map<String, Object>> keywords = extractKeywords(combinedText, maxWords);

        // Decorate each entry with a display color (cycled) and a rank-based
        // font size. (An unused Random instance was removed here; colors are
        // assigned deterministically by rank.)
        String[] colors = {"#FF6B6B", "#4ECDC4", "#45B7D1", "#96CEB4", "#FFEAA7", "#DDA0DD", "#98D8C8", "#F7DC6F", "#BB8FCE", "#85C1E9"};
        for (int i = 0; i < keywords.size(); i++) {
            Map<String, Object> item = keywords.get(i);
            item.put("color", colors[i % colors.length]);
            item.put("fontSize", Math.max(12, 30 - i * 2)); // shrink with rank, floor at 12
        }

        return keywords;
    }

    /**
     * Scores the sentiment of a comment using a simple lexicon approach:
     * (positive hits - negative hits) / total hits.
     *
     * @param text comment text
     * @return score in [-1, 1]; negative values indicate negative sentiment,
     *         {@code 0.0} when the text is blank or contains no lexicon words
     */
    public double analyzeSentiment(String text) {
        if (text == null || text.trim().isEmpty()) {
            return 0.0;
        }

        List<SegToken> tokens = segmenter.process(text, JiebaSegmenter.SegMode.INDEX);

        int positiveCount = 0;
        int negativeCount = 0;
        for (SegToken token : tokens) {
            String word = token.word.trim();
            if (POSITIVE_WORDS.contains(word)) {
                positiveCount++;
            } else if (NEGATIVE_WORDS.contains(word)) {
                negativeCount++;
            }
        }

        int totalSentimentWords = positiveCount + negativeCount;
        if (totalSentimentWords == 0) {
            return 0.0; // no sentiment-bearing words found
        }

        // Net polarity normalized by the number of sentiment words hit
        return (double) (positiveCount - negativeCount) / totalSentimentWords;
    }

    /**
     * Checks whether a token is a meaningful word for keyword statistics.
     * Rejects pure numbers, pure ASCII and symbol-only tokens by requiring at
     * least one Chinese character.
     */
    private boolean isValidWord(String word) {
        return CONTAINS_CHINESE.matcher(word).matches();
    }

    /**
     * Runs keyword extraction, word-cloud generation and sentiment statistics
     * over a batch of comments.
     *
     * @param comments comment texts; {@code null}/empty yields empty results
     * @param topN     number of keywords to extract from the combined text
     * @return map with {@code "keywords"}, {@code "wordCloud"} and
     *         {@code "sentimentStats"} entries
     */
    public Map<String, Object> batchAnalyzeComments(List<String> comments, int topN) {
        Map<String, Object> result = new HashMap<>();

        if (comments == null || comments.isEmpty()) {
            result.put("keywords", new ArrayList<>());
            result.put("wordCloud", new ArrayList<>());
            result.put("sentimentStats", new HashMap<>());
            return result;
        }

        // Keywords over the combined comment text
        result.put("keywords", extractKeywords(String.join(" ", comments), topN));

        // Word-cloud data capped at 50 words
        result.put("wordCloud", generateWordCloudData(comments, 50));

        // Classify each comment by its sentiment score, using a +/-0.1 band
        // around zero for "neutral"
        int positiveCount = 0;
        int negativeCount = 0;
        int neutralCount = 0;
        for (String comment : comments) {
            double sentiment = analyzeSentiment(comment);
            if (sentiment > 0.1) {
                positiveCount++;
            } else if (sentiment < -0.1) {
                negativeCount++;
            } else {
                neutralCount++;
            }
        }

        // total > 0 here: the empty-list case returned early above, so the
        // former "size() > 0" ternaries were redundant
        int total = comments.size();
        Map<String, Object> sentimentStats = new HashMap<>();
        sentimentStats.put("positive", positiveCount);
        sentimentStats.put("negative", negativeCount);
        sentimentStats.put("neutral", neutralCount);
        sentimentStats.put("total", total);
        sentimentStats.put("positiveRate", (double) positiveCount / total * 100);
        sentimentStats.put("negativeRate", (double) negativeCount / total * 100);
        result.put("sentimentStats", sentimentStats);

        return result;
    }
}