package ai.mspbots.poc.es.service;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;

import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class TextVectorization {

    /** Pre-trained Word2Vec model used to look up per-word embedding vectors. */
    private final WordVectors wordVectors;
    /** Stanford CoreNLP pipeline used to tokenize input text. */
    private final StanfordCoreNLP pipeline;

    /**
     * Loads a Word2Vec model from disk and configures the Stanford CoreNLP
     * tokenization pipeline.
     *
     * @param modelPath filesystem path to a serialized Word2Vec model
     * @throws Exception if the model file cannot be read or parsed
     */
    public TextVectorization(String modelPath) throws Exception {
        // Load the Word2Vec model
        wordVectors = WordVectorSerializer.readWord2VecModel(new File(modelPath));

        // BUG FIX: StanfordCoreNLP's String constructor expects a *properties
        // file name*, not a comma-separated annotator list. The annotator list
        // must be supplied via a Properties object under the "annotators" key.
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize,ssplit,pos,lemma");
        pipeline = new StanfordCoreNLP(props);
    }

    /**
     * Converts text into a single embedding: the element-wise mean of the
     * vectors of all tokens present in the Word2Vec vocabulary.
     * Out-of-vocabulary tokens are skipped; if no token is known (or the
     * input is null/empty) a zero vector of the model's layer size is
     * returned.
     *
     * @param text raw input text; may be null or empty
     * @return mean word vector, or a zero vector when nothing matched
     */
    public INDArray vectorize(String text) {
        INDArray sum = Nd4j.zeros(wordVectors.lookupTable().layerSize());
        if (text == null || text.isEmpty()) {
            return sum; // nothing to tokenize — avoid feeding CoreNLP empty input
        }

        int count = 0;
        for (String word : processText(text)) {
            if (wordVectors.hasWord(word)) {
                sum.addi(wordVectors.getWordVectorMatrix(word));
                count++;
            }
        }

        if (count > 0) {
            sum.divi(count); // compute the average vector
        }
        return sum;
    }

    /**
     * Tokenizes the given text with the CoreNLP pipeline and returns the
     * surface-form tokens in order. Collecting into a list (instead of
     * joining with spaces and re-splitting) avoids the bogus single empty
     * token that {@code "".split(" ")} would produce for empty input.
     *
     * @param text non-null text to tokenize
     * @return array of tokens; empty if no sentences were produced
     */
    private String[] processText(String text) {
        Annotation annotation = new Annotation(text);
        pipeline.annotate(annotation);
        List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);

        List<String> tokens = new ArrayList<>();
        if (sentences != null) { // ssplit may yield no sentence annotation
            for (CoreMap sentence : sentences) {
                for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
                    tokens.add(token.word());
                }
            }
        }
        return tokens.toArray(new String[0]);
    }
}