package ai.mspbots.poc.help;

import ai.mspbots.poc.db.util.INDArrayUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.extra.spring.SpringUtil;
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.embeddings.wordvectors.WordVectors;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

import java.io.File;
import java.util.Locale;
import java.util.regex.Pattern;

public class VectorHelper {

    private static final PocEnv pocEnv = SpringUtil.getBean(PocEnv.class);

    /** Matches a single punctuation character; compiled once instead of on every replaceAll call. */
    private static final Pattern PUNCTUATION = Pattern.compile("\\p{Punct}");

    /**
     * Loads the GloVe word-vector model, caching one instance per thread.
     *
     * <p>NOTE(review): the thread-local cache re-loads the model file once per thread;
     * if the model is effectively read-only it could probably be shared process-wide —
     * confirm with the owners of {@code WordVectorsThreadLocal} before changing.
     *
     * @return the loaded {@link WordVectors} model for the current thread
     */
    public static WordVectors loadWordVectors() {
        WordVectors cached = WordVectorsThreadLocal.getWordVectors();
        if (cached != null) {
            return cached;
        }
        WordVectors wordVectors = WordVectorSerializer.loadStaticModel(new File(pocEnv.getGlove().getModel()));
        WordVectorsThreadLocal.setWordVectors(wordVectors);
        return wordVectors;
    }

    /**
     * Vectorizes a text using the (thread-local) word-vector model.
     *
     * @param text input text; punctuation is treated as a word separator
     * @return the averaged word vector for the text (zero vector if no word matched)
     */
    public static INDArray vectorizedText(String text) {
        return vectorizedText(text, loadWordVectors());
    }

    /**
     * Vectorizes a text and returns the result as a primitive double array.
     *
     * @param text input text
     * @return the averaged word vector as a {@code double[]}
     */
    public static double[] toDoubleVector(String text) {
        return vectorizedText(text).toDoubleVector();
    }

    /**
     * Vectorizes a text: replaces punctuation with spaces, splits on spaces,
     * looks each lower-cased word up first in the DB cache and then in the
     * model, and returns the average of all vectors found. Words absent from
     * both cache and model are skipped; when nothing matches, a zero vector of
     * {@code pocEnv.getDims()} dimensions is returned.
     *
     * @param text        input text
     * @param wordVectors loaded word-vector model
     * @return the averaged word vector
     */
    private static INDArray vectorizedText(String text, WordVectors wordVectors) {
        String[] words = replacePunctuationWithWord(text, " ").split(" ");
        // `sum` is created non-null here, so the null checks the original
        // carried further down were dead code and have been removed.
        INDArray sum = Nd4j.zeros(pocEnv.getDims());
        int count = 0;

        for (String word : words) {
            if (StrUtil.isEmpty(word)) {
                continue;
            }
            // Locale.ROOT keeps lookups stable regardless of the JVM default locale.
            String token = word.toLowerCase(Locale.ROOT);

            // Query the DB cache once per word (the original issued the same query twice).
            int cacheHits = INDArrayUtil.checkSql(token);
            if (cacheHits > 0) {
                INDArray cachedVector = INDArrayUtil.retrieveINDArray(token);
                // Guard: a row deleted between check and retrieve would have NPE'd before.
                if (cachedVector != null) {
                    sum.addi(cachedVector); // accumulate the cached vector
                    count++;
                }
                continue;
            }

            if (wordVectors.hasWord(token)) {
                INDArray wordVectorMatrix = wordVectors.getWordVectorMatrix(token);
                // Flatten to rank 1 so it can be accumulated into `sum`.
                if (wordVectorMatrix.rank() > 1) {
                    wordVectorMatrix = Nd4j.create(wordVectorMatrix.toDoubleVector());
                }
                // Persist newly seen vectors; skip if checkSql reported something other than 0.
                if (cacheHits == 0) {
                    INDArrayUtil.saveToDb(token, wordVectorMatrix);
                }
                sum.addi(wordVectorMatrix); // accumulate the model vector
                count++;                    // count words that contributed
            }
        }

        // Average over the words that actually contributed a vector.
        if (count > 0) {
            sum.divi(count);
        }
        return sum; // zero vector when no word matched
    }

    /**
     * Replaces every punctuation character in {@code input} with {@code replacement}.
     *
     * @param input       text to clean
     * @param replacement string substituted for each punctuation character
     * @return the cleaned text
     */
    public static String replacePunctuationWithWord(String input, String replacement) {
        return PUNCTUATION.matcher(input).replaceAll(replacement);
    }

    /**
     * Model-only vectorization (no DB cache): averages the vectors of the
     * space-separated words found in the model, sized from the model's layer
     * size rather than {@code pocEnv}. Currently unreferenced; kept for parity
     * with the original file.
     *
     * @param text        input text, split on single spaces (no punctuation handling)
     * @param wordVectors loaded word-vector model
     * @return the averaged word vector (zero vector if no word matched)
     */
    private static INDArray vectorizedTextByNd4j(String text, WordVectors wordVectors) {
        String[] words = text.split(" ");
        INDArray sum = Nd4j.zeros(wordVectors.lookupTable().layerSize());
        int count = 0;

        for (String word : words) {
            if (wordVectors.hasWord(word)) {
                sum.addi(wordVectors.getWordVectorMatrix(word));
                count++;
            }
        }

        if (count > 0) {
            sum.divi(count);
        }
        return sum;
    }
}
