import json
import gensim
import numpy as np
# Load pre-tokenized training data: word-level and character-level tokens.
# Use context managers so the file handles are closed deterministically
# (the original json.load(open(...)) leaked them), and read as UTF-8
# explicitly since JSON is UTF-8 by specification.
with open("data/tokens_train.json", encoding="utf-8") as f:
    tokens_word = json.load(f)
# Flatten the nested token structure into a flat list of sentences.
# NOTE(review): np.array(...).reshape(-1) assumes a rectangular outer
# shape — confirm against the producer of tokens_train.json.
tokens_word_flat = np.array(tokens_word).reshape(-1).tolist()

with open("data/tokens2_train.json", encoding="utf-8") as f:
    tokens_char = json.load(f)

from gensim.models import word2vec

# Train a word2vec model on the flattened token lists.
# NOTE(review): this uses the gensim < 4.0 API (`size`, `iter`);
# in gensim >= 4.0 these parameters are `vector_size` and `epochs`.
model = word2vec.Word2Vec(tokens_word_flat, size=100, min_count=1, iter=10)
word_vectors = model.wv
# Persist the freshly trained vectors in text word2vec format.
model.wv.save_word2vec_format("temp.vec", binary=False)

# The lookups below use externally trained fastText vectors.
# (The original code also loaded "temp.vec" into embedding_model here,
# but that result was immediately overwritten — the dead load is removed.)
embedding_model = gensim.models.KeyedVectors.load_word2vec_format(
    "C:\\Users\\gt\\Desktop\\rokid\\wordvec\\fasttext.vec", binary=False)
embedding_model_norm = gensim.models.KeyedVectors.load_word2vec_format(
    "C:\\Users\\gt\\Desktop\\rokid\\wordvec\\fasttext.vec", binary=False)
# L2-normalize the second copy in place so its lookups return unit vectors.
embedding_model_norm.init_sims(replace=True)

# Count out-of-vocabulary (OOV) statistics for the word-level tokens and
# build word -> vector maps, one raw and one L2-normalized. Words absent
# from the pretrained vocabulary get a zero vector of matching dimension.
all_count = 0            # total token occurrences seen
oov_count = 0            # token occurrences not found in the embeddings
all_count_set = set()    # distinct tokens seen (type-level)
oov_count_set = set()    # distinct OOV tokens (type-level)
word2vector = {}
word2vector_norm = {}
for word_list in np.array(tokens_word).reshape(-1):
    for word in word_list:
        all_count += 1
        all_count_set.add(word)
        try:
            word2vector[word] = embedding_model[word]
            word2vector_norm[word] = embedding_model_norm[word]
        except KeyError:
            # gensim KeyedVectors raises KeyError for unknown words;
            # fall back to a zero vector (embedding dimension is 100).
            word2vector[word] = [0] * 100
            word2vector_norm[word] = [0] * 100
            oov_count += 1
            oov_count_set.add(word)

# Token-level and type-level OOV statistics for the word tokens.
print(all_count)
print(oov_count)
print(oov_count / all_count)
print(len(all_count_set))
print(len(oov_count_set))
print()

# Repeat the OOV accounting for the character-level tokens. The counters
# and the word2vector maps are shared with the word-level pass above, so
# the numbers printed here are CUMULATIVE over both passes.
for word_list in np.array(tokens_char).reshape(-1):
    for word in word_list:
        all_count += 1
        all_count_set.add(word)
        try:
            word2vector[word] = embedding_model[word]
            word2vector_norm[word] = embedding_model_norm[word]
        except KeyError:
            # gensim KeyedVectors raises KeyError for unknown tokens;
            # fall back to a zero vector (embedding dimension is 100).
            word2vector[word] = [0] * 100
            word2vector_norm[word] = [0] * 100
            oov_count += 1
            oov_count_set.add(word)

print(all_count)
print(oov_count)
print(oov_count / all_count)
print(len(all_count_set))
print(len(oov_count_set))
# Sanity check: nearest neighbours of "手机" ("mobile phone").
print(embedding_model.most_similar(["手机"]))

# Dump the raw vectors in word2vec text format:
# header line "<vocab_size> <dim>", then one "<word> v1 v2 ..." line per
# entry. `with` guarantees the buffer is flushed and the handle closed
# (the original never closed the file); " ".join replaces the quadratic
# string += accumulation.
with open("data/word2vec.vec", mode="w", encoding="utf-8") as f:
    f.write(f"{len(word2vector)} 100\n")
    for word, vector in word2vector.items():
        f.write(word)
        f.write(" ")
        f.write(" ".join(str(component) for component in vector))
        f.write("\n")

# Dump the L2-normalized vectors in the same word2vec text format.
# Same fixes as the raw dump: context-managed file handle so the output
# is flushed and closed, and linear-time " ".join instead of string +=.
with open("data/word2vec_norm.vec", mode="w", encoding="utf-8") as f:
    f.write(f"{len(word2vector_norm)} 100\n")
    for word, vector in word2vector_norm.items():
        f.write(word)
        f.write(" ")
        f.write(" ".join(str(component) for component in vector))
        f.write("\n")