package sky.demo.ml.word2vector;

import java.io.IOException;
import java.util.Collection;

import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer;
import org.deeplearning4j.models.word2vec.Word2Vec;
import org.deeplearning4j.text.sentenceiterator.BasicLineIterator;
import org.deeplearning4j.text.sentenceiterator.SentenceIterator;
import org.deeplearning4j.text.tokenization.tokenizer.TokenPreProcess;
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
import org.fnlp.nlp.cn.tag.CWSTagger;
import org.fnlp.nlp.corpus.StopWords;
import org.fnlp.util.exception.LoadModelException;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Demo comparing two ways of training a Word2Vec model over a Chinese corpus:
 * (1) pre-segmenting the corpus into a file with FNLP and tokenizing on
 * whitespace, versus (2) tokenizing on the fly with an FNLP-backed tokenizer.
 * Stop words are filtered in both pipelines.
 */
public class FNLPWord2Vector {
	private static final Logger log = LoggerFactory.getLogger(FNLPWord2Vector.class);

	// Stop-word list used to filter tokens in both pipelines.
	private static final String stopWordFile = "text/stopwords.txt";
	private static final StopWords stopWords = new StopWords(stopWordFile);
	// FNLP Chinese word-segmentation model (seg.m).
	private static final String modelFile = "D:/third_code/fnlp/models/seg.m";
	// Raw (unsegmented) input corpus.
	private static final String filePath = "text/src.txt";
	// Number of Word2Vec training iterations; set before calling the test methods.
	private static int iterations = 1;

	/**
	 * Shared token pre-processor that drops stop words.
	 * NOTE(review): returning null for a stop word assumes the tokenizer
	 * tolerates null tokens — confirm against the DL4J version in use; some
	 * versions expect "" for a discarded token instead.
	 */
	private static final TokenPreProcess STOP_WORD_FILTER = new TokenPreProcess() {

		@Override
		public String preProcess(String token) {
			return stopWords.isStopWord(token) ? null : token;
		}
	};

	/**
	 * Segments {@code filePath} with FNLP into an intermediate file, then
	 * trains a Word2Vec model over the pre-segmented lines using the default
	 * whitespace tokenizer.
	 *
	 * @param filePath path of the raw (unsegmented) input text
	 * @return the fitted Word2Vec model
	 * @throws LoadModelException if the FNLP segmentation model cannot be loaded
	 * @throws IOException if reading or writing the text files fails
	 */
	public static Word2Vec testByFile(String filePath) throws LoadModelException, IOException {
		CWSTagger tag = new CWSTagger(modelFile);

		// Pre-segment the whole corpus: one segmented sentence per line.
		String fnlpOutfile = "out2.txt";
		tag.tagFile(filePath, fnlpOutfile, "");

		log.info("Load & Vectorize Sentences....");
		// Each line becomes one sentence, stripped of surrounding whitespace.
		SentenceIterator iter = new BasicLineIterator(fnlpOutfile);

		// Split on whitespace (inserted by the FNLP segmentation above) to get words.
		DefaultTokenizerFactory t = new DefaultTokenizerFactory();
		t.setTokenPreProcessor(STOP_WORD_FILTER);

		log.info("Building model....");
		Word2Vec vec = new Word2Vec.Builder().minWordFrequency(1).iterations(iterations).layerSize(100).seed(42)
				.windowSize(5).tokenizerFactory(t).iterate(iter).build();

		log.info("Fitting Word2Vec model....");
		vec.fit();

		log.info("Writing word vectors to text file....");
		WordVectorSerializer.writeWordVectors(vec, "wordVector_file.txt");

		return vec;
	}

	/**
	 * Trains a Word2Vec model directly over the raw corpus, segmenting each
	 * sentence on the fly with an FNLP-backed tokenizer.
	 *
	 * @param filePath path of the raw (unsegmented) input text
	 * @return the fitted Word2Vec model
	 * @throws LoadModelException if the FNLP segmentation model cannot be loaded
	 * @throws IOException if reading the corpus or writing the vectors fails
	 */
	public static Word2Vec testByTokenizer(String filePath) throws LoadModelException, IOException {
		log.info("Load & Vectorize Sentences....");
		SentenceIterator iter = new BasicLineIterator(filePath);

		// Segment each line with FNLP instead of splitting on whitespace.
		FNLPTokenizerFactory t = new FNLPTokenizerFactory(modelFile);
		t.setTokenPreProcessor(STOP_WORD_FILTER);

		log.info("Building model....");
		Word2Vec vec = new Word2Vec.Builder().minWordFrequency(1).iterations(iterations).layerSize(100).seed(42)
				.windowSize(5).tokenizerFactory(t).iterate(iter).build();

		log.info("Fitting Word2Vec model....");
		vec.fit();

		log.info("Writing word vectors to text file....");
		WordVectorSerializer.writeWordVectors(vec, "wordVector_tokenizer.txt");

		return vec;
	}

	/**
	 * Trains the model with both pipelines and prints the ten nearest
	 * neighbours of a sample word from each. Original author's open question:
	 * why do pre-segmenting to a file and tokenizing with the FNLP tokenizer
	 * produce different results?
	 *
	 * @param args unused
	 * @throws IOException if corpus or vector files cannot be read/written
	 * @throws LoadModelException if the FNLP segmentation model cannot be loaded
	 */
	public static void main(String[] args) throws IOException, LoadModelException {
		iterations = 1;
		String word = "计划";

		System.out.println("------------testByFile----------------------------------------------");
		Word2Vec vec = testByFile(filePath);

		System.out.println("------------testByTokenizer----------------------------------------------");
		Word2Vec vec1 = testByTokenizer(filePath);

		INDArray wordVectorMatrix = vec.getWordVectorMatrix(word);
		System.out.println(wordVectorMatrix);
		System.out.println(word + " 10 Words closest  : " + vec.wordsNearest(word, 10));
		System.out.println(word + " 10 Words closest  : " + vec1.wordsNearest(word, 10));
	}
}
