import pickle
import numpy as np

# Parallel containers: vocab[i] is the word whose vector is embeddings[i].
vocab = []
embeddings = []

# Source: pretrained GloVe 100-d vectors, one "word v1 ... v100" line per entry.
embedpath = r'D:\00_code\007_papars\NER-joint\joint\Recurrent_Interaction_Network_EMNLP2020-master\Recurrent_Interaction_Network_EMNLP2020-master\glove.6B\glove.6B.100d.txt'
# Destination directory for the processed vocab/embedding files.
outpath = r'D:\00_code\007_papars\NER-joint\joint\Recurrent_Interaction_Network_EMNLP2020-master\Recurrent_Interaction_Network_EMNLP2020-master\data'


# Read the whole GloVe file and split it into one line per entry.
with open(embedpath, 'rt', encoding='utf8') as fi:
    full_content = fi.read().strip().split('\n')

# Each line is "word v1 v2 ... vN": the first space-separated token is the
# word, the rest are the embedding components. Iterate the lines directly
# and split each line exactly once (the original indexed by range(len(...))
# and split every line twice).
for line in full_content:
    parts = line.split(' ')
    vocab.append(parts[0])
    embeddings.append([float(val) for val in parts[1:]])

vocab_npa = np.array(vocab)
embs_npa = np.array(embeddings)

# Prepend the special '<PAD>' and '<UNK>' tokens in one pass.
# (The original used two np.insert calls, each of which copies the
# entire vocab array; a single concatenate does the same work once.)
vocab_npa = np.concatenate((np.array(['<PAD>', '<UNK>']), vocab_npa))
print(vocab_npa[:10])

pad_emb_npa = np.zeros((1, embs_npa.shape[1]))           # '<PAD>': all-zero vector
unk_emb_npa = np.mean(embs_npa, axis=0, keepdims=True)   # '<UNK>': mean of all vectors

# Stack so that embs_npa rows 0 and 1 line up with vocab_npa[0]='<PAD>'
# and vocab_npa[1]='<UNK>'.
embs_npa = np.vstack((pad_emb_npa, unk_emb_npa, embs_npa))
print(embs_npa.shape)

# Persist the processed artifacts for the model to load later:
# the vocab as a pickle, the embedding matrix as a .npy file.
# (Removed the commented-out alternative vocab_npa.npy save path.)
with open(outpath+'/vocab.pkl', 'wb') as f:
    pickle.dump(vocab_npa, f)

with open(outpath+'/embedding.npy', 'wb') as f:
    np.save(f, embs_npa)