# coding: UTF-8
import os
import numpy as np
import pickle as pkl
from gensim.models import Word2Vec
from data_tran import DataTransformer
def get_wiki_word2vec(word_to_id, pretrain_dir,
                      emd_dim=300,
                      filename_trimmed_dir='../output/pre_w2c/embedding_wiki_w2c'):
    """Build an embedding matrix for the vocabulary from a pretrained wiki
    word2vec model and save it as a compressed ``.npz`` file.

    Args:
        word_to_id: dict mapping token -> integer row index of the matrix.
        pretrain_dir: path to a saved gensim ``Word2Vec`` model file.
        emd_dim: embedding dimensionality; assumed to match the pretrained
            model's vector size — TODO confirm against the model.
        filename_trimmed_dir: output path (extension added by numpy) for the
            trimmed embedding matrix, stored under the key ``embeddings``.
    """
    # Start from a random matrix so tokens missing from the pretrained
    # model still get a (random) vector instead of zeros.
    embeddings = np.random.rand(len(word_to_id), emd_dim)
    w2c_model = Word2Vec.load(pretrain_dir)
    # NOTE(review): ``wv.vocab`` exists only in gensim < 4.0; newer versions
    # expose ``wv.key_to_index`` instead — confirm the installed version.
    vocab = w2c_model.wv.vocab
    oov = []  # tokens not covered by the pretrained model (kept for debugging)
    cnt = 0
    for k, v in word_to_id.items():
        if k in vocab:
            embeddings[v] = w2c_model.wv[k]  # copy the pretrained vector
            cnt += 1
        else:
            oov.append(k)
    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
    # e.g. "读取wiki w2c词向量完成，共获取 4576 个词向量"
    print("读取wiki w2c词向量完成，共获取 {} 个词向量".format(cnt))


def get_wiki_cw2vec(word_to_id, pretrain_dir,
                    emd_dim=300,
                    filename_trimmed_dir='../output/pre_w2c/embedding_wiki_cw2c'
                    ):
    """Build an embedding matrix for the vocabulary from pretrained cw2vec
    vectors and save it as a compressed ``.npz`` file.

    Args:
        word_to_id: dict mapping token -> integer row index of the matrix.
        pretrain_dir: root directory of the cw2vec artifacts; expects
            ``processed/idx2word.pkl`` and
            ``embedding_300d/gensim_word_vector.bin`` underneath it.
        emd_dim: embedding dimensionality; assumed to match the pretrained
            vectors — TODO confirm.
        filename_trimmed_dir: output path (extension added by numpy) for the
            trimmed embedding matrix, stored under the key ``embeddings``.
    """
    data_tran = DataTransformer(
        stroke2word_path=os.path.join(pretrain_dir, "processed/idx2word.pkl"),
        embedding_path=os.path.join(pretrain_dir, "embedding_300d/gensim_word_vector.bin")
    )
    # Random init so out-of-vocabulary tokens still get a usable vector.
    embeddings = np.random.rand(len(word_to_id), emd_dim)
    oov = []  # tokens not covered by the pretrained vectors
    cnt = 0
    for k, v in word_to_id.items():
        if k in data_tran.embeddings_index:
            embeddings[v] = data_tran.embeddings_index[k]  # pretrained vector
            cnt += 1
        else:
            oov.append(k)
    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
    # e.g. "读取wiki w2c词向量完成，共获取 4006 个词向量"
    print("读取wiki w2c词向量完成，共获取 {} 个词向量 {}".format(cnt,embeddings.shape))

if __name__ == '__main__':
    vocab_dir = "../output/processed/word2index.pkl" # 训练集的词表
    wiki_w2c = "/home/stu/Documents/dataset/wiki/w2c_300d/wiki.model"
    wiki_cw2c = "/home/stu/Documents/dataset/wiki/cw2vec_300d"
    emb_dim = 300

    # Use a context manager so the vocab file handle is closed deterministically
    # (the previous open() call leaked the handle).
    with open(vocab_dir, 'rb') as f:
        word_to_id = pkl.load(f)
    #get_wiki_word2vec(
    #    word_to_id=word_to_id,
    #    pretrain_dir=wiki_w2c
    #)
    # 读取wiki w2c词向量完成，共获取 283964 个词向量
    # get_wiki_cw2vec(word_to_id=word_to_id, pretrain_dir=wiki_cw2c)
    # # 读取wiki w2c词向量完成，共获取 171934 个词向量 (403289, 300)
