'''
使用gensim库进行适配于本项目的预训练词向量(.npz)与词表(.pkl)的构建
'''

import gensim
from gensim.models import Word2Vec
import numpy as np

import pickle as pkl

import jieba
import re

####### Parameters ########

# Replace DATAPATH with the path to your own corpus file (one document per line).

DATAPATH = "./in.txt"

# Minimum word frequency (words below this are dropped from the vocabulary)
# and number of negative samples used during Word2Vec training.
MINCOUNT = 5
NEGATIVE = 8

# Special tokens: unknown-word symbol and padding symbol.
UNK = "<UNK>"
PAD = "<PAD>"

# Output path for the vocabulary (word -> index dict, pickled).
vocab_path = "./vocab.pkl"

# Output path for the pretrained embedding matrix (.npz).
embedding_path = "./our_embedding.npz"

####### Code ########


# Read the corpus and tokenize it: one inner list of Chinese words per
# non-empty line. Non-Chinese characters are stripped before segmentation.
# Fix: the original opened the file without ever closing it — use a
# context manager so the handle is released even on error.
lines = []
with open(DATAPATH, "r", encoding="utf-8") as f:
    for line in f:
        # Keep only CJK Unified Ideographs (raw string for the regex).
        line = re.sub(r'[^\u4e00-\u9fa5]+', '', line)
        # jieba.lcut returns a list of segmented words; drop empty tokens.
        words = [w for w in jieba.lcut(line) if w]
        if words:
            lines.append(words)

# Append one synthetic sentence repeating <UNK> and <PAD> MINCOUNT times
# each, so both special tokens clear the min_count cutoff and are kept
# in the trained vocabulary.
line = [UNK, PAD] * MINCOUNT

lines.append(line)

# Train the embeddings. Default sg=0 uses CBOW; pass sg=1 for Skip-gram.
model = Word2Vec(lines, vector_size=300, window=5, min_count=MINCOUNT,
                 epochs=9, negative=NEGATIVE)

# Build word -> row-index mapping; index i corresponds to row i of the
# embedding matrix below.
vocab_dic = {word: i for i, word in enumerate(model.wv.index_to_key)}

# Fix: keep the embedding matrix in its own variable instead of
# clobbering `model`, so the trained model object is not shadowed.
embeddings = np.array(model.wv.vectors)

# Fix: the original passed an anonymous open() handle to pkl.dump,
# leaking the file descriptor; a context manager guarantees flush/close.
with open(vocab_path, 'wb') as vf:
    pkl.dump(vocab_dic, vf)
np.savez(embedding_path, embeddings=embeddings)