package com.tensquare.ai.service;

import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.word2vec.Word2Vec;
import org.deeplearning4j.text.sentenceiterator.LineSentenceIterator;
import org.deeplearning4j.text.sentenceiterator.SentenceIterator;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import util.FileUtil;

import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.List;

/**
 * Builds word vectors from a pre-tokenized text corpus.
 *
 * <p>Typical flow: {@link #merge()} concatenates the per-article tokenized
 * files under {@code ai.savePath} into the single corpus file
 * {@code ai.wordLib}, then {@link #build()} trains a Word2Vec model on that
 * corpus and serializes the resulting vectors to {@code ai.vecModel}.
 */
@Service
public class Word2VecService {

    // Word2Vec training hyperparameters (values preserved from the original
    // configuration; tune together, not in isolation).
    /** Minimum occurrences for a token to enter the vocabulary. */
    private static final int MIN_WORD_FREQUENCY = 5;
    /** Number of training iterations over each mini-batch. */
    private static final int ITERATIONS = 1;
    /** Dimensionality of the learned word vectors. */
    private static final int LAYER_SIZE = 100;
    /** RNG seed for reproducible training. */
    private static final long SEED = 40;
    /** Context window size (max distance between the current and predicted word). */
    private static final int WINDOW_SIZE = 5;

    /** Merged, tokenized corpus file (one sentence per line). */
    @Value("${ai.wordLib}")
    private String wordLib;

    /** Directory holding the individual tokenized corpus files before merging. */
    @Value("${ai.savePath}")
    private String savePath;

    /** Output path for the serialized word-vector model. */
    @Value("${ai.vecModel}")
    private String vecModel;

    /**
     * Merges all tokenized corpus files under {@code savePath} into the single
     * corpus file {@code wordLib}.
     *
     * @throws UncheckedIOException if reading the source files or writing the
     *         merged corpus fails
     */
    public void merge() {
        // 1. Collect the names of all files in the corpus directory.
        List<String> files = FileUtil.getFiles(savePath);

        // 2. Concatenate them into the single corpus file. Propagate failure
        //    instead of swallowing it, so callers don't proceed with a
        //    stale or missing corpus.
        try {
            FileUtil.merge(wordLib, files);
        } catch (IOException e) {
            throw new UncheckedIOException(
                    "Failed to merge corpus files from " + savePath + " into " + wordLib, e);
        }
    }

    /**
     * Trains a Word2Vec model on the merged corpus and writes the word
     * vectors to {@code vecModel}.
     *
     * @throws UncheckedIOException if writing the word-vector file fails
     */
    public void build() {
        // Iterates the corpus line by line; each line is one sentence.
        SentenceIterator sentenceIterator = new LineSentenceIterator(new File(wordLib));

        // 1. Configure and create the Word2Vec model.
        Word2Vec word2Vec = new Word2Vec.Builder()
                .minWordFrequency(MIN_WORD_FREQUENCY)
                .iterations(ITERATIONS)
                .layerSize(LAYER_SIZE)
                .seed(SEED)
                .windowSize(WINDOW_SIZE)
                .iterate(sentenceIterator)
                .build();

        // 2. Train the model on the corpus.
        word2Vec.fit();

        // 3. Persist the learned word vectors; fail loudly if the write fails
        //    so downstream consumers never read a stale model file.
        try {
            WordVectorSerializer.writeWordVectors(word2Vec, vecModel);
        } catch (IOException e) {
            throw new UncheckedIOException(
                    "Failed to write word vectors to " + vecModel, e);
        }
    }
}
