package edu.ycu.aladdin.calculator;

import org.datavec.api.util.ClassPathResource;
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.paragraphvectors.ParagraphVectors;
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor;
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.ops.transforms.Transforms;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This is example code demonstrating the use of dl4j ParagraphVectors inference.
 * 这是dl4j 图向量接口使用的示例代码。
 *
 * In this example we load previously built model, and pass raw sentences,
 * probably never seen before, to get their vector representation.
 * 在这个例子中，我们加载以前构建的模型，并传递可能从未见过的原始句子来获得它们的向量表示。
 *
 * *************************************************************************************************
 * PLEASE NOTE: THIS EXAMPLE REQUIRES DL4J/ND4J VERSIONS >= 0.6.0 TO COMPILE SUCCESSFULLY
 * *************************************************************************************************
 *
 * @author raver119@gmail.com
 */
public class ParagraphVectorsInferenceExample {

    private static final Logger log = LoggerFactory.getLogger(ParagraphVectorsInferenceExample.class);

    /**
     * Restores a previously built ParagraphVectors model from the classpath, infers
     * vectors for raw (possibly never-seen) sentences, and logs their cosine similarities.
     *
     * @param args unused
     * @throws Exception if the model resource cannot be located or deserialized
     */
    public static void main(String[] args) throws Exception {
        ClassPathResource resource = new ClassPathResource("/paravec/simple.pv");

        // Restore the externally originated model.
        ParagraphVectors vectors = WordVectorSerializer.readParagraphVectors(resource.getFile());

        // The tokenizer must be attached here: it is not part of the serialized model,
        // so the restored model has no idea which tokenization to use.
        TokenizerFactory tokenizerFactory = new DefaultTokenizerFactory();
        tokenizerFactory.setTokenPreProcessor(new CommonPreprocessor());
        vectors.setTokenizerFactory(tokenizerFactory);

        // A single iteration is enough for this example; we only want fast inference.
        vectors.getConfiguration().setIterations(1);

        /*
        // Alternative approach: a word2vec model can be used directly.
        // PLEASE NOTE: a Google-like model cannot be used here, since it ships
        // without any Huffman tree information.
        ParagraphVectors vectors = new ParagraphVectors.Builder()
            .useExistingWordVectors(word2vec)
            .build();
        */

        INDArray inferredVectorA = vectors.inferVector("This is my world .");
        INDArray inferredVectorA2 = vectors.inferVector("This is my world .");
        INDArray inferredVectorB = vectors.inferVector("This is my way .");

        // High similarity expected: in the underlying corpus the words WAY and WORLD
        // occur in really close contexts.
        final double similarityA_B = Transforms.cosineSim(inferredVectorA, inferredVectorB);
        log.info("Cosine similarity A/B: {}", similarityA_B);

        // Near-equality expected: inference runs twice over the identical sentence.
        final double similarityA_A2 = Transforms.cosineSim(inferredVectorA, inferredVectorA2);
        log.info("Cosine similarity A/A2: {}", similarityA_A2);
    }
}
