import os
import sys
import time
from datetime import timedelta
from gensim.models import Word2Vec
from pycw2vec.io.data_transformer import DataTransformer
from pycw2vec.utils.utils import pkl_read
import numpy as np


def get_train_w2c(
    word_to_id, pretrain_dir, emb_dim=200,
    filename_trimmed_dir=None
):
    """Extract pretrained word2vec vectors for the words in *word_to_id*.

    Builds an embedding matrix of shape ``(len(word_to_id), emb_dim)``:
    rows for words found in the pretrained model are copied from it,
    while out-of-vocabulary words keep a random initialisation. The
    matrix is saved with ``np.savez_compressed`` under the key
    ``embeddings``.

    Args:
        word_to_id: mapping of word -> row index in the embedding matrix.
        pretrain_dir: path to a saved gensim ``Word2Vec`` model file.
        emb_dim: embedding dimensionality; assumed to match the
            pretrained model's vector size (not verified here).
        filename_trimmed_dir: output path for the compressed ``.npz``
            file (required).

    Raises:
        ValueError: if *filename_trimmed_dir* is not provided.
    """
    if filename_trimmed_dir is None:
        # Fail early with a clear message instead of crashing inside
        # np.savez_compressed after all the work is done.
        raise ValueError("filename_trimmed_dir must be provided")
    # Random init so OOV words end up with a random (non-zero) vector.
    embeddings = np.random.rand(len(word_to_id), emb_dim)
    w2c_model = Word2Vec.load(pretrain_dir)
    # NOTE(review): `wv.vocab` is the gensim 3.x API; gensim 4+ replaced
    # it with `key_to_index` — confirm the installed gensim version.
    vocab = w2c_model.wv.vocab
    oov = []  # words missing from the pretrained model
    cnt = 0
    for k, v in word_to_id.items():
        if k in vocab:
            embeddings[v] = w2c_model.wv[k]  # copy the pretrained vector
            cnt += 1
        else:
            oov.append(k)
    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
    print("读取wiki w2c词向量完成，共获取 {} 个有效词向量，词向量矩阵形状为：{}".format(cnt,embeddings.shape))
def get_train_cw2vec(word_to_id, pretrain_dir, emb_dim=200,
    filename_trimmed_dir=None):
    """Extract pretrained cw2vec vectors for the words in *word_to_id*.

    Mirrors :func:`get_train_w2c`, but reads vectors from a
    ``DataTransformer``'s ``embeddings_index`` (built from the cw2vec
    artifacts under *pretrain_dir*) instead of a gensim model. Rows for
    words missing from the index keep a random initialisation.

    Args:
        word_to_id: mapping of word -> row index in the embedding matrix.
        pretrain_dir: base directory holding the cw2vec artifacts
            (``processed/idx2word.pkl`` and
            ``embedding/gensim_word_vector.bin``).
        emb_dim: embedding dimensionality; assumed to match the
            pretrained vectors (not verified here).
        filename_trimmed_dir: output path for the compressed ``.npz``
            file (required).

    Raises:
        ValueError: if *filename_trimmed_dir* is not provided.
    """
    if filename_trimmed_dir is None:
        # Fail early instead of crashing in np.savez_compressed at the end.
        raise ValueError("filename_trimmed_dir must be provided")
    data_tran = DataTransformer(
        stroke2word_path=os.path.join(pretrain_dir, "processed/idx2word.pkl"),
        embedding_path=os.path.join(pretrain_dir, "embedding/gensim_word_vector.bin")
        )
    # Random init so OOV words end up with a random (non-zero) vector.
    embeddings = np.random.rand(len(word_to_id), emb_dim)
    oov = []  # words missing from the cw2vec embedding index
    cnt = 0
    for k, v in word_to_id.items():
        if k in data_tran.embeddings_index:
            embeddings[v] = data_tran.embeddings_index[k]  # copy pretrained vector
            cnt += 1
        else:
            oov.append(k)

    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
    print("读取cw2c词向量完成，共获取 {} 个有效词向量，词向量矩阵形状为：{}".format(cnt,embeddings.shape))
if __name__ == '__main__':
    # Extract pretrained word vectors (w2v and cw2vec) for the corpus
    # vocabulary and save them as compressed .npz matrices.

    # Load the word -> index vocabulary built for this dataset.
    data_path = "/home/stu/Documents/dataset/sougo/cs_result/"
    vocab = data_path + "word2index.pkl"
    word_to_id = pkl_read(vocab)

    # Locations of the pretrained models and the output directory.
    sougoCA_BASE = '/home/stu/Documents/dataset/sougo/sougoCA_full/'
    sougoCA_cw = sougoCA_BASE + "sougoCA_cw200d/"
    sougoCA_w = sougoCA_BASE + "sougoCA_200d/sougoCA.model"
    save_path = "/home/stu/Documents/dataset/sougo/sougoCA_full/extart_w2c_cw2c/"

    print("字符数量：{}".format(len(word_to_id)))
    # Extract standard word2vec embeddings.
    get_train_w2c(word_to_id=word_to_id, pretrain_dir=sougoCA_w, emb_dim=200,
                  filename_trimmed_dir=save_path + "cs_sougoCA_200d")
    # Extract cw2vec (stroke-based) embeddings.
    get_train_cw2vec(word_to_id=word_to_id, pretrain_dir=sougoCA_cw, emb_dim=200,
                     filename_trimmed_dir=save_path + "cs_sougoCA_cw200d")