import json
import gensim
import numpy as np
# Train a Word2Vec model on the tokenised training corpus, then round-trip the
# vectors through a word2vec-format text file so they are reloaded as plain
# KeyedVectors (mirroring how an external pre-trained .vec file would be used).
with open("data/tokens_train.json", encoding="utf-8") as f:  # was open() without close: leaked handle
    tokens = json.load(f)
# NOTE(review): for a *ragged* list of token lists this np round-trip is a
# no-op (object array -> same nested list), but for a perfectly regular nested
# list it flattens all the way down to individual strings, and numpy >= 1.24
# raises on ragged input without dtype=object — confirm the shape of tokens.
tokens_ = np.array(tokens).reshape(-1).tolist()
from gensim.models import word2vec

# size/iter are the gensim < 4 parameter names (vector_size/epochs in gensim >= 4).
model = word2vec.Word2Vec(tokens_, size=100, min_count=1, iter=10)
word_vectors = model.wv
# Persist in textual word2vec format, then reload as a standalone KeyedVectors.
model.wv.save_word2vec_format("temp.vec", binary=False)
embedding_model = gensim.models.KeyedVectors.load_word2vec_format("temp.vec", binary=False)
# Out-of-vocabulary audit: count token occurrences and unique token types that
# are missing from the trained embedding vocabulary, then print a summary.
all_count = 0          # total token occurrences
oov_count = 0          # occurrences whose token is not in the embedding vocab
all_count_set = set()  # unique token types seen
oov_count_set = set()  # unique OOV token types
for word_list in tokens_:  # reuse the flattened corpus already computed above
    for word in word_list:
        all_count += 1
        all_count_set.add(word)
        # Membership test instead of the original try / bare `except:` around
        # embedding_model[word]: the bare except swallowed *every* error (not
        # just KeyError), and raising per token is needlessly expensive here.
        if word not in embedding_model:
            oov_count += 1
            oov_count_set.add(word)
print(all_count)
print(oov_count)
print(oov_count / all_count if all_count else 0.0)  # guard empty corpus (was ZeroDivisionError)
print(len(all_count_set))
print(len(oov_count_set))
print(embedding_model.most_similar(["手机"]))