package com.eqs.enterprisequestionnaire.service;

import com.eqs.enterprisequestionnaire.model.enums.QuestionEnum;
import com.eqs.enterprisequestionnaire.model.pojo.*;
import com.huaban.analysis.jieba.SegToken;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.*;
import org.springframework.stereotype.Service;
import com.huaban.analysis.jieba.JiebaSegmenter;

import java.util.*;
import java.util.stream.Collectors;

import static com.eqs.enterprisequestionnaire.model.enums.QuestionEnum.*;

@Service
public class WordCloudService {
    @Autowired
    private HistoryService historyService;
    @Autowired
    private QuestionService questionService;
    @Autowired
    private UserService userService;
    @Autowired
    private AnswerService answerService;

    /** Maximum number of distinct words kept in a generated word cloud. */
    private static final int MAX_CLOUD_WORDS = 20;

    // NOTE(review): both constants below are unused in this class, and AUTH is a
    // credential committed to source control. If they are still needed, move them
    // to externalized configuration (application.yml / environment variable) and
    // rotate the token; otherwise delete them.
    private static final String API_URL = "https://www.hanlp.com/hanlp/v21/redirect";
    private static final String AUTH = "66866611eaf65b7b456a67df";

    // Chinese stopwords (plus punctuation) excluded from word clouds.
    // (The original list contained "一个" twice; Set semantics made the duplicate
    // a no-op, so it has been removed.)
    private static final Set<String> STOPWORDS = new HashSet<>(Arrays.asList(
            "的", "是", "了", "我", "在", "不", "有", "他", "这", "也", "就", "到", "和", "说", "要", "去",
            "你", "会", "吗", "什么", "那", "我们", "可以", "她", "他们", "但", "与", "一个", "而", "人",
            "好", "都", "很", "还是", "没有", "并", "被", "着", "自己", "这个", "下", "中", "上",
            "还", "为", "对", "以", "来", "想", "，", "。", "想起"
    ));

    /**
     * Builds a word cloud (word -> frequency, descending, top {@link #MAX_CLOUD_WORDS})
     * from all answers to the given question.
     *
     * @param questionId id of the question to analyse
     * @return ordered word-frequency map, or {@code null} if the question is not a
     *         free-text type (callers rely on the null contract)
     */
    public Map<String, Integer> generateWordFrequencies(Integer questionId) {
        Question question = questionService.selectByQuestionId(questionId);
        if (!isTextType(question.getType())) {
            return null;
        }
        List<Answer> answers = historyService.getListAnswerByQuestionId(questionId);
        List<String> contents = answers.stream()
                .map(Answer::getContent)
                .collect(Collectors.toList());
        return buildWordCloud(contents);
    }

    /**
     * Builds one word cloud per gender for the given question.
     *
     * @param questionId id of the question to analyse
     * @return map keyed by gender ({@code true} = male, {@code false} = female —
     *         assumption inherited from {@code User.getGender()}, confirm against the
     *         User model) to that gender's word cloud; {@code null} for non-text questions
     */
    public Map<Boolean, Map<String, Integer>> generateGenderSpecificWordCloud(Integer questionId) {
        Question question = questionService.selectByQuestionId(questionId);
        if (!isTextType(question.getType())) {
            return null;
        }
        List<Answer> answers = historyService.getListAnswerByQuestionId(questionId);

        // Group answer texts by the answering user's gender.
        Map<Boolean, List<String>> genderedAnswers = answers.stream()
                .collect(Collectors.groupingBy(
                        a -> answerService.selectUser(a).getGender(),
                        Collectors.mapping(Answer::getContent, Collectors.toList())
                ));

        // Build each gender's cloud independently; a failure in one group no longer
        // aborts the others (error handling lives inside buildWordCloud).
        Map<Boolean, Map<String, Integer>> genderedWordClouds = new HashMap<>();
        for (Map.Entry<Boolean, List<String>> entry : genderedAnswers.entrySet()) {
            genderedWordClouds.put(entry.getKey(), buildWordCloud(entry.getValue()));
        }
        return genderedWordClouds;
    }

    /**
     * Counts, per option of the given question, how many answers came from each gender.
     *
     * @param questionId id of the question
     * @return option -> (gender -> answer count); genders with zero answers are absent
     */
    public Map<Option, Map<Boolean, Integer>> getGenderDifference(Integer questionId) {
        List<Option> options = historyService.getListOptionByQuestionId(questionId);
        Map<Option, Map<Boolean, Integer>> result = new HashMap<>();

        for (Option option : options) {
            Map<Boolean, Integer> difference = new HashMap<>();
            List<Answer> answers = answerService.selectAllAnswer(option.getId());
            for (Answer a : answers) {
                User user = answerService.selectUser(a);
                // Increment this gender's tally (Integer::sum replaces the original
                // (v1, v2) -> v1 + 1 lambda, which ignored its second argument).
                difference.merge(user.getGender(), 1, Integer::sum);
            }
            result.put(option, difference);
        }
        return result;
    }

    /**
     * Computes the average character count of all answers to a free-text question.
     *
     * @param questionId id of the question
     * @return average answer length in characters (0.0 when there are no answers),
     *         or {@code null} if the question is not a free-text type
     */
    public Double getAverageWordOfQuestion(Integer questionId) {
        Question question = questionService.getById(questionId);
        if (!isTextType(question.getType())) {
            return null;
        }
        List<Answer> answers = historyService.getListAnswerByQuestionId(questionId);
        return answers.stream()
                .map(Answer::getContent)
                .filter(Objects::nonNull) // defend against answers with missing content
                .mapToInt(String::length)
                .average()
                .orElse(0.0);
    }

    /**
     * Shared word-cloud pipeline: joins the texts, segments them with jieba,
     * drops stopwords/blank tokens, counts frequencies and returns the top
     * {@link #MAX_CLOUD_WORDS} words in descending frequency order.
     *
     * @param contents raw answer texts (nulls are skipped)
     * @return insertion-ordered map of word -> frequency; on segmentation failure,
     *         falls back to the (possibly partial, unsorted) counts gathered so far,
     *         matching the original best-effort behaviour
     */
    private Map<String, Integer> buildWordCloud(List<String> contents) {
        Map<String, Integer> wordCount = new HashMap<>();
        try {
            String text = contents.stream()
                    .filter(Objects::nonNull)
                    .collect(Collectors.joining(" "));

            JiebaSegmenter segmenter = new JiebaSegmenter();
            for (SegToken token : segmenter.process(text, JiebaSegmenter.SegMode.INDEX)) {
                String word = token.word;
                if (!STOPWORDS.contains(word) && !word.trim().isEmpty()) {
                    wordCount.merge(word, 1, Integer::sum);
                }
            }

            return wordCount.entrySet().stream()
                    .sorted(Map.Entry.<String, Integer>comparingByValue().reversed())
                    .limit(MAX_CLOUD_WORDS)
                    .collect(Collectors.toMap(
                            Map.Entry::getKey,
                            Map.Entry::getValue,
                            (first, second) -> first,
                            LinkedHashMap::new
                    ));
        } catch (Exception e) {
            // No logging framework is imported in this file; keep the original
            // stack-trace dump rather than introduce a new dependency.
            e.printStackTrace();
            return wordCount;
        }
    }
}
