# -*- coding: utf-8 -*-
# @Time : 2021/4/19 22:44
# @Author : husongjiang
# @File : word2vec.py


import gensim
import numpy as np
import torch
from torch import nn


EMBEDDING_DIM = 128  # dimensionality of each pretrained word vector

# Path to the pretrained embedding file (Tencent AILab Chinese embeddings).
# Raw string: a plain '...' literal would treat '\m', '\w', etc. as escape
# sequences (SyntaxWarning on modern Python).
VECTOR_DIR = r'D:\model\wordembedding\Tencent_AILab_ChineseEmbedding_Min.txt'  # word-vector model file

# The target is a plain-text word2vec dump (.txt), so it must be parsed with
# load_word2vec_format(binary=False). KeyedVectors.load() reads only gensim's
# own native save format and would fail on this file.
# NOTE(review): if the file was actually re-saved via KeyedVectors.save(),
# revert to gensim.models.KeyedVectors.load(VECTOR_DIR).
w2v_model = gensim.models.KeyedVectors.load_word2vec_format(VECTOR_DIR, binary=False)


def build_embedding_matrix(word_index, model, embedding_dim=EMBEDDING_DIM):
    """Build a ``(len(word_index) + 1, embedding_dim)`` float32 matrix.

    Row ``i`` holds the pretrained vector of the word whose index is ``i``;
    row 0 and rows for words missing from *model* remain all-zero.

    Args:
        word_index: mapping of word -> positive integer index (1-based,
            as produced by a tokenizer).
        model: object supporting ``word in model`` and ``model[word]``,
            e.g. gensim ``KeyedVectors``.
        embedding_dim: length of each word vector.

    Returns:
        ``np.ndarray`` of shape ``(len(word_index) + 1, embedding_dim)``,
        dtype float32.
    """
    matrix = np.zeros((len(word_index) + 1, embedding_dim), dtype='float32')
    for word, i in word_index.items():
        if word in model:
            matrix[i] = np.asarray(model[word], dtype='float32')
    return matrix


def build_embedding_layer(embedding_matrix):
    """Create a frozen ``nn.Embedding`` initialised from *embedding_matrix*.

    Args:
        embedding_matrix: 2-D float32 ``np.ndarray`` as returned by
            :func:`build_embedding_matrix`.

    Returns:
        ``nn.Embedding`` whose weights are the pretrained vectors and are
        excluded from gradient updates (``freeze=True``).
    """
    return nn.Embedding.from_pretrained(
        torch.from_numpy(embedding_matrix), freeze=True
    )

