# -*- coding: utf-8 -*-
# @file: word2vec.py
# @author: ZhuJiahui1991
# @time: 2018/1/28 22:31
# @version: v1.0

from gensim.models import Word2Vec, KeyedVectors
from gensim.models.word2vec import LineSentence
from utils.file_reader import read_to_2d_list


def gensim_word2vec_train(sentences, model_filename, size=100, window=5, min_count=5):
    """Train a skip-gram word2vec model on in-memory sentences and save the vectors.

    Args:
        sentences: iterable of token lists (one list of words per sentence).
        model_filename: output path for the text-format word2vec file.
        size: dimensionality of the word vectors (default 100, the previous
            hard-coded value, so existing callers are unaffected).
        window: maximum distance between the current and predicted word.
        min_count: ignore words whose total frequency is below this threshold.
    """
    # sg=1 selects the skip-gram algorithm, which handles rare words better;
    # the gensim default sg=0 would use CBOW instead.
    model = Word2Vec(sentences, sg=1, size=size, window=window, min_count=min_count,
                     negative=3, sample=0.001, hs=1, workers=3)
    # Persist only the word vectors (not the full model) in plain-text format.
    model.wv.save_word2vec_format(model_filename, binary=False)


def gensim_word2vec_file_train(train_filename, model_filename, size=50, window=5, min_count=2):
    """Train a skip-gram word2vec model by streaming a pre-segmented text file.

    The corpus file must contain one whitespace-tokenized sentence per line
    (gensim's LineSentence format), so arbitrarily large corpora can be
    trained without loading them into memory.

    Args:
        train_filename: path to the segmented corpus file.
        model_filename: output path for the text-format word2vec file.
        size: dimensionality of the word vectors (default 50, the previous
            hard-coded value, so existing callers are unaffected).
        window: maximum distance between the current and predicted word.
        min_count: ignore words whose total frequency is below this threshold.
    """
    # sg=1 selects the skip-gram algorithm, which handles rare words better;
    # the gensim default sg=0 would use CBOW instead.
    model = Word2Vec(LineSentence(train_filename), sg=1, size=size, window=window,
                     min_count=min_count, negative=3, sample=0.001, hs=1, workers=3)
    # Persist only the word vectors (not the full model) in plain-text format.
    model.wv.save_word2vec_format(model_filename, binary=False)


def gensim_word2vec_train_test():
    """Load the segmented Sogou corpus into memory and train word vectors from it."""
    corpus_path = '../data/sogou-segment.txt'
    embeddings_path = '../data/sogou-word2vec.txt'
    # Each corpus line is split on single spaces into a list of tokens.
    tokenized = read_to_2d_list(corpus_path, ' ')
    gensim_word2vec_train(tokenized, embeddings_path)


def gensim_word2vec_file_train_test():
    """Train Sogou word vectors by streaming the segmented corpus file directly."""
    corpus_path = '../data/sogou-segment.txt'
    embeddings_path = '../data/sogou-word2vec.txt'
    gensim_word2vec_file_train(corpus_path, embeddings_path)


def gensim_word2vec_baike_train_test():
    """Train word vectors from the pre-segmented Baike corpus."""
    # NOTE(review): the output filename says "64" but gensim_word2vec_file_train
    # produces 50-dimensional vectors — confirm which dimension is intended.
    corpus_path = 'D:/Share/Dataset/filtered-processed-baike-segment.txt'
    embeddings_path = 'D:/Share/Dataset/embeddings/baike-embeddings-64.txt'
    gensim_word2vec_file_train(corpus_path, embeddings_path)

def gensim_word2vec_all_train_test():
    """Train word vectors from the combined ("all") segmented corpus."""
    corpus_path = 'D:/Share/Dataset/all-segment.txt'
    embeddings_path = 'D:/Share/Dataset/embeddings/all-embeddings-50.txt'
    gensim_word2vec_file_train(corpus_path, embeddings_path)


def gensim_word2vec_load_test():
    """Load saved text-format word vectors and print a few similarity queries."""
    vectors_path = '../data/sogou-word2vec.txt'
    vectors = KeyedVectors.load_word2vec_format(vectors_path, binary=False)
    # Top-5 nearest neighbours of a single word.
    print(vectors.most_similar([u"武汉"], topn=5))
    # Cosine similarity between two words.
    print(vectors.similarity(u"北京", u"上海"))
    # Analogy query: positive terms minus the negative term, top-5 results.
    print(vectors.most_similar([u'北京', u'纽约'], [u'中国'], topn=5))


if __name__ == "__main__":
    # Other entry points available in this module:
    # gensim_word2vec_train_test(), gensim_word2vec_file_train_test(),
    # gensim_word2vec_load_test(), gensim_word2vec_baike_train_test().
    # (The previously commented-out names "test_gensim_word2vec_*" did not
    # match any function defined here.)
    gensim_word2vec_all_train_test()