# -*- coding: utf-8 -*-
'''
Created on 2017-03-20

@author: ZhuJiahui
'''
import os
import time
import numpy as np
from global_info.global_nlp import GlobalNLP
from word_embedding.word2vec import word2vec_skipgram

if __name__ == '__main__':

    # Train skip-gram word embeddings on the segmented 20newsgroup training
    # corpus, save the embedding matrix, and print a nearest-neighbour sanity
    # check for a few probe words.
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    now_directory = os.getcwd()
    root_directory = os.path.dirname(now_directory) + '/'
    read_filename = root_directory + u'dataset/20newsgroup/segment_pos_train.txt'
    write_filename = root_directory + u'dataset/20newsgroup/20ng_word_embedding.txt'

    # Each line holds whitespace-separated tokens; every token looks like
    # "word<EN_WORD_INNER_DELIMITER>tag" — keep only the word part.
    # Explicit encoding: the corpus is text; relying on the locale default
    # breaks on non-UTF-8 systems.
    text_corpus = []
    with open(read_filename, 'r', encoding='utf-8') as f:
        for each_line in f:
            split_line = each_line.strip().split()
            text_corpus.append([x.split(GlobalNLP.EN_WORD_INNER_DELIMITER)[0]
                                for x in split_line])

    # 200, 512, 3 presumably mean embedding size, batch size and context
    # window — TODO confirm against word2vec_skipgram's signature.
    word_embeddings, text_vocabulary, text_words = word2vec_skipgram(text_corpus, 200, 512, 3)

    np.savetxt(write_filename, word_embeddings)

    # Sanity check: for each probe word present in the vocabulary, print the
    # 10 most similar words by dot product (slot 0 is the word itself, hence
    # taking the top 11 indices).
    test_words = ['america', 'computer', 'graph']
    for word in test_words:
        if word not in text_vocabulary:
            continue
        word_vec = word_embeddings[text_vocabulary.get(word), :]
        sim_mat = np.matmul(word_embeddings, word_vec)
        nearest = (-sim_mat).argsort()[:11]
        nearest_words = [text_words[idx] for idx in nearest]  # idx: avoid shadowing builtin `id`
        print('与词<{0}>最相似的前10个词为：'.format(word) + ','.join(nearest_words))

    # `start` was previously captured but never reported — surface the timing.
    print('elapsed: {0:.2f}s'.format(time.perf_counter() - start))
                