package com.thinvent.recommend.manager.manager.impl;

import com.thinvent.recommend.manager.dto.KbFileContentDTO;
import com.thinvent.recommend.manager.manager.TfIdfVectorManager;
import com.thinvent.recommend.manager.manager.JiebaManager;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.*;

/**
 * TF–IDF implementation backed by the custom {@link JiebaManager} segmenter and
 * plain-Java math. Each document's sparse TF–IDF vector is L2-normalized so that
 * a dot product between two vectors equals their cosine similarity.
 *
 * <p>Weighting scheme (matches scikit-learn's smoothed variant):
 * <ul>
 *   <li>TF  (augmented): {@code 0.5 + 0.5 * tf / maxTf} per document</li>
 *   <li>IDF (smoothed):  {@code log((N + 1) / (df + 1)) + 1}</li>
 * </ul>
 */
@Component
public class TfIdfVectorManagerImpl implements TfIdfVectorManager {

    private final JiebaManager jiebaManager;

    @Autowired
    public TfIdfVectorManagerImpl(JiebaManager jiebaManager) {
        // Fail fast on a missing required dependency instead of NPE-ing later.
        this.jiebaManager = Objects.requireNonNull(jiebaManager, "jiebaManager");
    }

    /**
     * Computes an L2-normalized TF–IDF vector for every document in the corpus.
     *
     * @param docs corpus documents; each contributes its id as the result key and
     *             its content (segmented by jieba) as the term source
     * @return map of document id to sparse term-weight vector; empty map for a
     *         {@code null} or empty corpus
     */
    @Override
    public Map<String, Map<String, Double>> computeTfIdfVectors(List<KbFileContentDTO> docs) {
        if (docs == null || docs.isEmpty()) {
            return new HashMap<>();
        }
        int n = docs.size();

        // 1. Tokenize each document once; docIds stays index-aligned with allTokens.
        List<List<String>> allTokens = new ArrayList<>(n);
        List<String> docIds = new ArrayList<>(n);
        for (KbFileContentDTO dto : docs) {
            docIds.add(dto.getId());
            List<String> tokens = jiebaManager.cutPrecise(dto.getContent());
            // Guard against a null token list so empty/unsegmentable content
            // yields an empty vector rather than an NPE downstream.
            allTokens.add(tokens == null ? Collections.<String>emptyList() : tokens);
        }

        // 2–3. Corpus-wide document frequencies, then smoothed IDF.
        Map<String, Double> idf = computeIdf(computeDf(allTokens), n);

        // 4. Per-document TF–IDF, L2-normalized.
        Map<String, Map<String, Double>> result = new HashMap<>(n);
        for (int i = 0; i < n; i++) {
            result.put(docIds.get(i), computeDocVector(allTokens.get(i), idf));
        }
        return result;
    }

    /** Document frequency: for each term, the number of documents containing it at least once. */
    private static Map<String, Integer> computeDf(List<List<String>> allTokens) {
        Map<String, Integer> df = new HashMap<>();
        for (List<String> tokens : allTokens) {
            // Count each term at most once per document.
            Set<String> seen = new HashSet<>();
            for (String t : tokens) {
                if (seen.add(t)) {
                    df.merge(t, 1, Integer::sum);
                }
            }
        }
        return df;
    }

    /** Smoothed inverse document frequency: {@code log((N + 1) / (df + 1)) + 1}. */
    private static Map<String, Double> computeIdf(Map<String, Integer> df, int totalDocs) {
        Map<String, Double> idf = new HashMap<>(df.size());
        for (Map.Entry<String, Integer> e : df.entrySet()) {
            // +1 smoothing keeps the IDF finite and strictly positive even when
            // a term occurs in every document.
            double v = Math.log((double) (totalDocs + 1) / (e.getValue() + 1)) + 1.0;
            idf.put(e.getKey(), v);
        }
        return idf;
    }

    /**
     * Builds one document's TF–IDF vector: augmented TF ({@code 0.5 + 0.5 * tf / maxTf})
     * times IDF, then L2-normalized so dot products equal cosine similarity.
     */
    private static Map<String, Double> computeDocVector(List<String> tokens, Map<String, Double> idf) {
        // Raw term counts.
        Map<String, Integer> tf = new HashMap<>();
        for (String t : tokens) {
            tf.merge(t, 1, Integer::sum);
        }
        // orElse(1) avoids division by zero on an empty document.
        int maxTf = tf.values().stream().max(Integer::compareTo).orElse(1);

        Map<String, Double> vector = new HashMap<>(tf.size());
        for (Map.Entry<String, Integer> te : tf.entrySet()) {
            String term = te.getKey();
            // 0.5 * int promotes to double, so this is floating-point division.
            double tfNorm = 0.5 + 0.5 * te.getValue() / maxTf;
            // Every term came from the corpus, so idf should contain it;
            // getOrDefault is purely defensive.
            vector.put(term, tfNorm * idf.getOrDefault(term, 0.0));
        }

        // L2 normalization: v = v / ||v||₂; skip for the all-zero/empty vector.
        double sumSq = 0.0;
        for (double v : vector.values()) {
            sumSq += v * v;
        }
        double norm = Math.sqrt(sumSq);
        if (norm > 0) {
            for (Map.Entry<String, Double> ve : vector.entrySet()) {
                ve.setValue(ve.getValue() / norm);
            }
        }
        return vector;
    }
}
