# Build Word Mover's Distance (WMD) features for the training-set sentence
# pairs and write them to a CSV for downstream model training.
import json
import sys

import gensim
import numpy as np
import pandas as pd
# Word-level token pairs, char-level token pairs, and the output feature CSV.
# NOTE(review): "word vs char level" inferred from the output column names
# below — confirm against the tokenization script that produced these files.
INPUT = "data/tokens_train.json"
INPUT2 = "data/tokens2_train.json"
OUTPUT = "feature_data/all_wmd_feature.csv"
# Make sibling project modules (distance_mayi, kg) importable whether the
# script is run from this directory or its parent.
sys.path.append(".")
sys.path.append("..")
from distance_mayi import *
# Two embedding variants: raw word2vec vectors and a pre-normalized copy
# (the in-process init_sims normalization is disabled below).
embedding_model = gensim.models.KeyedVectors.load_word2vec_format("data/word2vec.vec")
embedding_model_norm = gensim.models.KeyedVectors.load_word2vec_format("data/word2vec_norm.vec")
# embedding_model_norm.init_sims(replace=True)

print("load embedding success")
# Each file is assumed to hold a list of (tokens_a, tokens_b) pairs — TODO
# confirm; only pair[0]/pair[1] indexing is used below.
tokens = json.load(open(INPUT))
tokens2 = json.load(open(INPUT2))
from kg import jobs
# Load the Chinese stop-word list (one word per line) into a set for O(1)
# membership tests. NOTE(review): currently only referenced by the
# commented-out *_stopwords distance variants below.
with open("../data/stop_word_ch.txt", encoding="utf-8") as f:
    # Strip only the trailing newline so any other characters in a stop
    # word are preserved (matches the original `replace("\n", "")`).
    stop_word_set = {line.rstrip("\n") for line in f}
# def wmd_stopwords(pair):
#     s1 = pair[0]
#     s2 = pair[1]
#     s1_ = [w for w in s1 if w not in stop_word_set]
#     s2_ = [w for w in s2 if w not in stop_word_set]
#     return embedding_model.wmdistance(s1_, s2_)
#
# def wmd_norm_stopwords(pair):
#     s1 = pair[0]
#     s2 = pair[1]
#     s1_ = [w for w in s1 if w not in stop_word_set]
#     s2_ = [w for w in s2 if w not in stop_word_set]
#     return embedding_model_norm.wmdistance(s1_, s2_)

def wmd(pair):
    """Word Mover's Distance between the two token sequences of *pair*,
    computed on the raw (un-normalized) embedding model."""
    left, right = pair[0], pair[1]
    return embedding_model.wmdistance(left, right)

def wmd_norm(pair):
    """Word Mover's Distance between the two token sequences of *pair*,
    computed on the L2-normalized embedding model."""
    left, right = pair[0], pair[1]
    return embedding_model_norm.wmdistance(left, right)
import time


def _run_wmd(pairs, mapper):
    """Map *mapper* over *pairs* in parallel batches and return the list of
    WMD values, printing the average wall time per pair in milliseconds.

    Fixes two defects of the original copy-pasted code: the per-pair time
    for the char-level (tokens2) runs was divided by len(tokens) instead of
    the actual input length, and the printed label claimed "per 1000 line"
    while the value was per pair.
    """
    begin = time.time()
    result = jobs.map_batch_parallel(
        pairs,
        item_mapper=mapper,
        batch_size=1000,
        # n_jobs=1
    )
    elapsed_ms = (time.time() - begin) * 1000
    print("avg wmd time per pair: %f ms" % (elapsed_ms / len(pairs)))
    return result


# Four feature columns: {word-level, char-level} x {raw, normalized} model.
features = _run_wmd(tokens, wmd)
features2 = _run_wmd(tokens2, wmd)
features3 = _run_wmd(tokens, wmd_norm)
features4 = _run_wmd(tokens2, wmd_norm)

# Stack the four distance lists into an (n_pairs, 4) matrix — equivalent to
# the original reshape([-1, 1]) + concatenate(axis=1).
features = np.column_stack([features, features2, features3, features4])
# from sklearn import preprocessing
# features = preprocessing.scale(np.array(np.nan_to_num(features)))
features_df = pd.DataFrame(features, columns=["wmd_word_level", "wmd_char_level",
                                              "wmd_word_level_norm", "wmd_char_level_norm"])
features_df.to_csv(OUTPUT, encoding="utf-8", index=False)
