package org.qengine.utils.Deduplicator;

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

import java.util.*;
/**
 * Extracts TF-IDF feature vectors from pre-processed, whitespace-tokenized text.
 */
public class Vectorizer {
    // Corpus: each document is a space-separated token string.
    private final List<String> documents;
    // All distinct tokens across the corpus. NOTE: HashSet iteration order
    // fixes the vector index of each term; it is stable within one instance
    // but not across JVM runs.
    private final Set<String> vocabulary;
    // Per-document raw term frequencies (TF).
    private final List<Map<String, Integer>> tfVectors;
    // Inverse document frequency (IDF) per vocabulary term.
    private final Map<String, Double> idfMap;

    public Vectorizer(List<String> documents){
        this.documents = documents;
        this.vocabulary = buildVocabulary(documents);
        this.tfVectors = calculateTF(documents);
        this.idfMap = calculateIDF(documents);
    }

    // Build the vocabulary: the union of tokens over all documents.
    private Set<String> buildVocabulary(List<String> documents) {
        Set<String> vocab = new HashSet<>();
        for (String document : documents) {
            vocab.addAll(Arrays.asList(document.split(" ")));
        }
        return vocab;
    }

    // Compute raw term counts for each document.
    private List<Map<String, Integer>> calculateTF(List<String> documents) {
        List<Map<String, Integer>> vectors = new ArrayList<>(documents.size());
        for (String document : documents) {
            Map<String, Integer> tfMap = new HashMap<>();
            for (String word : document.split(" ")) {
                tfMap.merge(word, 1, Integer::sum);
            }
            vectors.add(tfMap);
        }
        return vectors;
    }

    // Compute IDF for every vocabulary term.
    // BUG FIX: the original used doc.contains(word) — a SUBSTRING match, so
    // e.g. "cat" was counted as present in a document containing only
    // "category", inflating document frequencies. We now count whole-token
    // occurrences, in a single pass over the corpus (O(total tokens) instead
    // of O(|vocabulary| * |documents|)).
    private Map<String, Double> calculateIDF(List<String> documents) {
        int totalDocs = documents.size();
        // Document frequency: number of documents containing each token.
        Map<String, Integer> docFreq = new HashMap<>();
        for (String document : documents) {
            // De-duplicate tokens so each document counts at most once per term.
            for (String word : new HashSet<>(Arrays.asList(document.split(" ")))) {
                docFreq.merge(word, 1, Integer::sum);
            }
        }
        Map<String, Double> idf = new HashMap<>();
        for (String word : vocabulary) {
            int df = docFreq.getOrDefault(word, 0);
            // +1 smoothing keeps the denominator non-zero (original behavior).
            idf.put(word, Math.log((double) totalDocs / (df + 1)));
        }
        return idf;
    }


    // Transform a document into its TF-IDF vector over the corpus vocabulary.
    // Tokens absent from the vocabulary are ignored; vocabulary terms absent
    // from the document get weight 0.
    public RealVector transform(String document) {
        Map<String, Double> tfMap = new HashMap<>();
        for (String word : document.split(" ")) {
            tfMap.merge(word, 1.0, Double::sum);
        }

        RealVector vector = new ArrayRealVector(vocabulary.size());
        int index = 0;
        for (String word : vocabulary) {
            double tf = tfMap.getOrDefault(word, 0.0);
            double idf = idfMap.getOrDefault(word, 0.0);
            vector.setEntry(index++, tf * idf);
        }
        return vector;
    }
}
