import numpy as np
import pandas as pd
from gensim.models import Word2Vec, KeyedVectors
from tqdm import tqdm

seed = 1024
np.random.seed(seed)

path = '../data/'

# Load the three cleaned splits produced by the upstream preprocessing step.
train = pd.read_pickle(path + "train_clean.pkl")
valid = pd.read_pickle(path + "valid_clean.pkl")
dev = pd.read_pickle(path + 'dev_clean.pkl')

# Stack the splits so embeddings are computed once over all rows; the split
# boundaries are recovered later from train/valid row counts.
data_all = pd.concat([train, valid, dev])
# Use real booleans: pandas requires/deprecates non-bool for inplace/drop.
data_all.reset_index(inplace=True, drop=True)

# Total row count of the stacked frame (train + valid + dev).
# NOTE(review): name is misleading (not just train) — kept for compatibility.
len_train = data_all.shape[0]

# Pretrained GoogleNews 300-d embeddings, word2vec C binary format.
model = KeyedVectors.load_word2vec_format(path + 'GoogleNews-vectors-negative300.bin', binary=True)

def calc_w2v_sim(row, embedder=None):
    """Return the mean 300-d word vector of the tokens in *row*.

    Despite the legacy name, this computes an average-of-word-vectors
    sentence embedding, not a similarity score.

    Parameters
    ----------
    row : str
        Raw text; lower-cased and whitespace-tokenised.
    embedder : mapping of str -> np.ndarray, optional
        Word-vector lookup (e.g. gensim KeyedVectors or a plain dict).
        Defaults to the module-level ``model``, so legacy one-argument
        calls (as in the loop below) now work instead of raising TypeError.

    Returns
    -------
    np.ndarray of shape (300,)
        Mean vector over in-vocabulary tokens; the zero vector when no
        token is found (avoids the original KeyError on OOV words and the
        divide-by-zero on empty input).
    """
    if embedder is None:
        embedder = model  # fall back to the pretrained GoogleNews vectors
    tokens = row.strip().lower().split()
    vec = np.zeros(300)
    n_hit = 0
    for w in tokens:
        # Skip out-of-vocabulary words instead of raising KeyError.
        if w in embedder:
            vec += embedder[w]
            n_hit += 1
    if n_hit:
        vec /= n_hit
    return vec


print('Generate pretrained w2v')

# One 300-d mean-embedding per row of the stacked data. Pass the embedder
# explicitly — calc_w2v_sim's original signature requires it as the 2nd
# argument, so the old one-argument call raised TypeError.
contexts = data_all['context'].values
embedd_fea = [calc_w2v_sim(text, model) for text in tqdm(contexts)]

# Slice the stacked feature list back into the original splits by row count.
n_train = train.shape[0]
n_valid = valid.shape[0]
train_fea = embedd_fea[:n_train]
valid_fea = embedd_fea[n_train:n_train + n_valid]
dev_fea = embedd_fea[n_train + n_valid:]

pd.to_pickle(train_fea, path + 'train_w2v.pkl')
pd.to_pickle(valid_fea, path + 'valid_w2v.pkl')
pd.to_pickle(dev_fea, path + 'dev_w2v.pkl')
