package cn.zust.itcost.utils;

import cn.zust.itcost.config.TextAnalysisConfig;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.stream.Collectors;

@Slf4j
@Component
public class TextAnalyzer {

    private final JiebaSegmenter segmenter;

    public TextAnalyzer() {
        this.segmenter = new JiebaSegmenter();
    }

    /**
     * Segments text into words, dropping stop words and words outside the
     * configured [MIN_WORD_LENGTH, MAX_WORD_LENGTH] range.
     *
     * @param text text to segment; may be null or empty
     * @return filtered word list in token order; never null
     */
    public List<String> segment(String text) {
        if (text == null || text.isEmpty()) {
            return Collections.emptyList();
        }

        // NOTE(review): INDEX mode can emit overlapping tokens; downstream
        // frequency counts treat each token independently — confirm intended.
        List<SegToken> tokens = segmenter.process(text, JiebaSegmenter.SegMode.INDEX);
        List<String> words = new ArrayList<>(tokens.size());

        for (SegToken token : tokens) {
            String word = token.word.trim();
            // Filter stop words and words outside the configured length range.
            if (!TextAnalysisConfig.STOP_WORDS.contains(word)
                    && word.length() >= TextAnalysisConfig.MIN_WORD_LENGTH
                    && word.length() <= TextAnalysisConfig.MAX_WORD_LENGTH) {
                words.add(word);
            }
        }

        return words;
    }

    /**
     * Extracts the top-N keywords ranked by a frequency-based heuristic weight.
     *
     * <p>The weight is {@code tf * log(1 + totalWords/freq) * lengthWeight}.
     * Despite the variable name, the log term is not a corpus IDF (there is no
     * document collection here) — it is a log-damped inverse in-document
     * frequency that softens the dominance of very common words.
     *
     * @param text text to analyze
     * @param topN maximum number of keywords to return; non-positive yields an
     *             empty map (previously a negative value threw from Stream.limit)
     * @return keywords mapped to their weights, highest weight first
     */
    public Map<String, Double> extractKeywords(String text, int topN) {
        // Guard: Stream.limit rejects negative arguments.
        if (topN <= 0) {
            return new LinkedHashMap<>();
        }
        List<String> words = segment(text);
        if (words.isEmpty()) {
            return new LinkedHashMap<>();
        }

        // Term frequency per word.
        Map<String, Integer> wordFreq = new HashMap<>();
        for (String word : words) {
            wordFreq.merge(word, 1, Integer::sum);
        }

        Map<String, Double> wordWeights = new HashMap<>();
        int totalWords = words.size();

        for (Map.Entry<String, Integer> entry : wordFreq.entrySet()) {
            String word = entry.getKey();
            int freq = entry.getValue();

            double tf = (double) freq / totalWords;
            // Log-damped rarity term (see class of formula in the Javadoc).
            double idf = Math.log(1 + (double) totalWords / freq);
            // Longer words tend to carry more meaning; +10% per char above 2.
            double lengthWeight = 1.0 + (word.length() - 2) * 0.1;

            wordWeights.put(word, tf * idf * lengthWeight);
        }

        // Sort by weight descending and keep the top N, preserving order.
        return wordWeights.entrySet().stream()
                .sorted(Map.Entry.<String, Double>comparingByValue().reversed())
                .limit(topN)
                .collect(Collectors.toMap(
                        Map.Entry::getKey,
                        Map.Entry::getValue,
                        (e1, e2) -> e1,
                        LinkedHashMap::new));
    }

    /**
     * Extracts distinct skill keywords present in the text.
     *
     * @param text text to analyze
     * @return distinct skill words in first-occurrence order; never null
     */
    public List<String> extractSkills(String text) {
        List<String> words = segment(text);
        return words.stream()
                .filter(TextAnalysisConfig.SKILL_KEYWORDS::contains)
                .distinct()
                .collect(Collectors.toList());
    }

    /**
     * Extracts education-related phrases from the text.
     *
     * @param text text to analyze
     * @return map with keys {@code "schools"}, {@code "degrees"} and
     *         {@code "majors"}; values are never null
     */
    public Map<String, List<String>> extractEducationInfo(String text) {
        List<String> words = segment(text);
        Map<String, List<String>> result = new HashMap<>();

        // School names: preceding words + the school keyword itself.
        result.put("schools", combineWithPrefix(words, TextAnalysisConfig.SCHOOL_KEYWORDS, true));

        // Degrees: keyword plus its immediate neighbors on both sides.
        result.put("degrees", combineWithNeighbors(words, TextAnalysisConfig.DEGREE_KEYWORDS));

        // Majors: preceding words only — the keyword (e.g. "专业") is dropped.
        // NOTE(review): inconsistent with schools/companies, which append the
        // keyword; looks intentional (keyword is a generic suffix) — confirm.
        result.put("majors", combineWithPrefix(words, TextAnalysisConfig.MAJOR_KEYWORDS, false));

        return result;
    }

    /**
     * Extracts work-experience phrases from the text.
     *
     * @param text text to analyze
     * @return map with keys {@code "companies"} and {@code "positions"};
     *         values are never null
     */
    public Map<String, List<String>> extractWorkExperience(String text) {
        List<String> words = segment(text);
        Map<String, List<String>> result = new HashMap<>();

        // Company names: preceding words + the company keyword itself.
        result.put("companies", combineWithPrefix(words, TextAnalysisConfig.COMPANY_KEYWORDS, true));

        // Positions: keyword plus its immediate neighbors on both sides.
        result.put("positions", combineWithNeighbors(words, TextAnalysisConfig.POSITION_KEYWORDS));

        return result;
    }

    /**
     * For each keyword hit at index {@code i > 0}, concatenates up to
     * {@code MAX_COMBINE_WORDS} preceding words (in original order) and,
     * optionally, the keyword itself.
     *
     * @param words          segmented words
     * @param keywords       trigger keywords to look for
     * @param includeKeyword whether to append the matched keyword to the phrase
     * @return combined phrases, one per keyword hit; never null
     */
    private List<String> combineWithPrefix(List<String> words, Collection<String> keywords,
                                           boolean includeKeyword) {
        List<String> results = new ArrayList<>();
        for (int i = 1; i < words.size(); i++) {
            if (!keywords.contains(words.get(i))) {
                continue;
            }
            StringBuilder phrase = new StringBuilder();
            // Append the window [i - MAX_COMBINE_WORDS, i) in original order.
            for (int j = Math.max(0, i - TextAnalysisConfig.MAX_COMBINE_WORDS); j < i; j++) {
                phrase.append(words.get(j));
            }
            if (includeKeyword) {
                phrase.append(words.get(i));
            }
            results.add(phrase.toString());
        }
        return results;
    }

    /**
     * For each keyword hit, concatenates the previous word (if any), the
     * keyword, and the next word (if any) into one phrase.
     *
     * @param words    segmented words
     * @param keywords trigger keywords to look for
     * @return combined phrases, one per keyword hit; never null
     */
    private List<String> combineWithNeighbors(List<String> words, Collection<String> keywords) {
        List<String> results = new ArrayList<>();
        for (int i = 0; i < words.size(); i++) {
            if (!keywords.contains(words.get(i))) {
                continue;
            }
            StringBuilder phrase = new StringBuilder();
            if (i > 0) {
                phrase.append(words.get(i - 1));
            }
            phrase.append(words.get(i));
            if (i < words.size() - 1) {
                phrase.append(words.get(i + 1));
            }
            results.add(phrase.toString());
        }
        return results;
    }
}