import numpy as np
from gensim.models.word2vec import Word2Vec
import os
import pickle

# 切分数据集
def split_poetry(file = "poetry_7.txt"):
    # 读取诗词
    all_data = open(file,"r",encoding="utf-8").read()
    # 逐字分割
    all_data_split = " ".join(all_data)
    # 写入文件
    with open("split.txt","w",encoding = 'utf-8') as f:
        f.write(all_data_split)
    # 返回读取的诗词数据
    return all_data.split("\n")

# 使用Word2Vec训练模型
# Train (or load) character embeddings with Word2Vec.
def train_vec(split_file="split.txt", org_file="poetry_7.txt"):
    """Train a Word2Vec model on the character-split corpus, caching the result.

    Args:
        split_file: path of the character-separated corpus (created on demand).
        org_file: path of the original corpus, one poem per line.

    Returns:
        tuple: (poems, (w1, key_to_index, index_to_key)) where poems is the
        original corpus split into lines, w1 is the trained output weight
        matrix (model.syn1neg) and the two mappings convert between
        characters and embedding-row indices.
    """
    vec_params_file = "vec_params.pkl"
    # Read the original corpus (needed on both the cached and trained paths).
    with open(org_file, "r", encoding="utf-8") as f:
        org_data = f.read().split("\n")
    # A cached model short-circuits everything else, including reading the
    # split file — no point doing that I/O when the parameters already exist.
    if os.path.exists(vec_params_file):
        with open(vec_params_file, "rb") as f:
            return org_data, pickle.load(f)
    # Build the character-split corpus if it is missing. Pass org_file so a
    # caller-supplied corpus path is actually the one that gets split.
    if not os.path.exists(split_file):
        split_poetry(org_file)
    with open(split_file, "r", encoding="utf-8") as f:
        split_all_data = f.read().split("\n")
    # workers     : number of worker threads used for training
    # min_count   : keep only characters occurring at least this many times
    # vector_size : dimensionality of the embedding vectors
    model = Word2Vec(split_all_data, vector_size=107, min_count=1, workers=6)
    # model.syn1neg : output weight matrix after training
    # key_to_index / index_to_key : character <-> row-index mappings
    params = (model.syn1neg, model.wv.key_to_index, model.wv.index_to_key)
    with open(vec_params_file, "wb") as f:
        pickle.dump(params, f)
    return org_data, params



if __name__ == "__main__":
    # Train (or load from cache) the character embeddings, then unpack the
    # weight matrix and the character/index mappings for inspection.
    all_data, (w1, word_2_index, index_2_word) = train_vec()