#!/usr/bin/env python3
# coding: utf-8
# File: train.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-10-26

import os
import gensim
from gensim.models import word2vec
from gensim import utils
from sklearn.decomposition import PCA
import numpy as np

import logging
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO)

class TrainVector:
    """Train character-level word2vec embeddings with gensim and query them."""

    def __init__(self):
        # Path to the tokenized training corpus (whitespace-separated tokens,
        # consumed via Text8Corpus below).
        self.token_filepath = 'word_data/token_train.txt'
        # self.token_filepath = './/data//train2.txt'

        # Output path for the trained vectors. NOTE: despite the .bin suffix
        # the file is written in word2vec TEXT format (binary=False below).
        self.token_embedding = 'word_data/new_token_vec_300.bin'

        # Dimensionality of the trained vectors.
        self.token_size = 300

    def train_vector(self, train_path, embedding_path, embedding_size):
        '''
        Train a word2vec model on a tokenized corpus and save its vectors.

        Uses gensim defaults apart from window/min_count/epochs, so this
        trains CBOW (sg=0), not skip-gram.
        :param train_path: path of the tokenized training corpus
        :param embedding_path: path where the vectors are saved (text format)
        :param embedding_size: dimensionality of the vectors
        :return: None
        '''
        sentences = word2vec.Text8Corpus(train_path)  # load tokenized corpus
        # gensim 4.x renamed size -> vector_size and iter -> epochs; the old
        # names raise TypeError there. Try the modern names first and fall
        # back so the code runs on both gensim >= 4 and gensim < 4.
        # https://radimrehurek.com/gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec
        try:
            model = word2vec.Word2Vec(sentences, vector_size=embedding_size,
                                      window=5, min_count=5, epochs=5)
        except TypeError:
            model = word2vec.Word2Vec(sentences, size=embedding_size,
                                      window=5, min_count=5, iter=5)
        model.wv.save_word2vec_format(embedding_path, binary=False)

    def train(self):
        # Train the Chinese character vectors with the configured paths/size.
        self.train_vector(self.token_filepath, self.token_embedding, self.token_size)

    def test_model(self, embedding_path):
        '''
        Interactively query a trained model for the most similar words.

        Loops forever reading words from stdin; exit with Ctrl-C/EOF.
        :param embedding_path: path of the saved vectors (text format)
        :return: None
        '''
        model = gensim.models.KeyedVectors.load_word2vec_format(embedding_path, binary=False)
        while True:
            wd = input('enter an word to search:')
            try:
                result = model.most_similar(wd)
            except KeyError:
                # Out-of-vocabulary query: report it and keep the session
                # alive instead of crashing with an uncaught KeyError.
                print('word "%s" not in vocabulary' % wd)
                continue
            for res in result:
                print(res)


if __name__ == '__main__':
    # Script entry point: build the trainer and run the full training pass.
    trainer = TrainVector()
    trainer.train()
