import json
import sys
import warnings

# Must run before the third-party imports below so their import-time
# warnings are suppressed too.
warnings.filterwarnings('ignore')

from collections import Counter

import gensim
import numpy as np
import pandas as pd
# Word-level (INPUT) and character-level (INPUT2) token-pair files for the
# training set; all word2vec distance features are written to OUTPUT as CSV.
INPUT = "data/tokens_train.json"
INPUT2 = "data/tokens2_train.json"
OUTPUT = "feature_data/all_word2vec_feature.csv"
# Make sibling modules (distance_mayi, kg) importable when run as a script.
sys.path.append(".")
sys.path.append("..")
# Presumably supplies get_two_vector_distance and temp_columns used below
# — TODO confirm against distance_mayi.
from distance_mayi import *
# Raw and pre-normalized word2vec models in word2vec text format.
embedding_model = gensim.models.KeyedVectors.load_word2vec_format("data/word2vec.vec")
embedding_model_norm = gensim.models.KeyedVectors.load_word2vec_format("data/word2vec_norm.vec")
# embedding_model_norm.init_sims(replace=True)
stop_word_set = set()
# with open("../data/stop_word_ch.txt", encoding="utf-8") as f:
#     lines = f.readlines()
#     for line in lines:
#         line = line.replace("\n", "")
#         stop_word_set.add(line)


print("load embedding success")
tokens = json.load(open(INPUT))    # list of [tokens_q1, tokens_q2] pairs
tokens2 = json.load(open(INPUT2))  # same structure, character-level

# Count word frequencies over both sides of every training pair; words that
# appear more than 5000 times are treated as stop words.
# Counter is a dict subclass, so downstream dict-style access still works.
word_count_map = Counter()
for pair in tokens:
    word_count_map.update(pair[0])
    word_count_map.update(pair[1])

for word, count in word_count_map.items():
    if count > 5000:
        stop_word_set.add(word)

# Same frequency-based stop-word collection, but over the character-level
# token pairs.  Counter replaces the manual "if key in dict" counting loop.
char_count_map = Counter()
for pair in tokens2:
    char_count_map.update(pair[0])
    char_count_map.update(pair[1])

for word, count in char_count_map.items():
    if count > 5000:
        stop_word_set.add(word)
# NOTE(review): this reset discards every stop word collected above, so the
# frequency-based filtering is effectively disabled — confirm this is intended.
stop_word_set=set()
from kg import jobs


def sent2vec_norm(word_list):
    """Sum the normalized-model embeddings of ``word_list`` (minus stop
    words) and return the L2-normalized sum.

    Returns the int ``0`` (not a vector) when no word is in the vocabulary;
    callers detect this sentinel via an int type check.
    """
    kept = [w for w in word_list if w not in stop_word_set]
    vectors = []
    for w in kept:
        try:
            vectors.append(embedding_model_norm[w])
        except KeyError:  # out-of-vocabulary word; bare except hid real bugs
            continue
    if not vectors:
        return 0
    v = np.array(vectors).sum(axis=0)
    return v / np.sqrt((v ** 2).sum())

def sent2vec(word_list):
    """Sum the raw-model embeddings of ``word_list`` (minus stop words) and
    return the L2-normalized sum.

    Returns the int ``0`` (not a vector) when no word is in the vocabulary;
    callers detect this sentinel via an int type check.
    """
    kept = [w for w in word_list if w not in stop_word_set]
    vectors = []
    for w in kept:
        try:
            vectors.append(embedding_model[w])
        except KeyError:  # out-of-vocabulary word; bare except hid real bugs
            continue
    if not vectors:
        return 0
    v = np.array(vectors).sum(axis=0)
    return v / np.sqrt((v ** 2).sum())

def get_phrase_embedding_distances(pair):
    """Distance features between the two token lists of ``pair`` under the
    raw word2vec model.

    Returns a flat sequence of ``3 * len(temp_columns)`` values: distances
    between (1) the mean vectors, (2) the L2-normalized sum vectors, and
    (3) the stop-word-filtered sentence vectors from :func:`sent2vec`.
    All zeros when either side has no in-vocabulary token.
    """
    q1_vectors = [embedding_model[t] for t in pair[0] if t in embedding_model]
    q2_vectors = [embedding_model[t] for t in pair[1] if t in embedding_model]
    q1_final = sent2vec(pair[0])
    q2_final = sent2vec(pair[1])

    # sent2vec returns the int 0 when nothing was embeddable;
    # isinstance replaces the type(x) == int anti-pattern.
    if (not q1_vectors or not q2_vectors
            or isinstance(q1_final, int) or isinstance(q2_final, int)):
        return [0] * (len(temp_columns) * 3)

    q1_mean = np.mean(q1_vectors, axis=0)
    q2_mean = np.mean(q2_vectors, axis=0)

    q1_sum = np.sum(q1_vectors, axis=0)
    q2_sum = np.sum(q2_vectors, axis=0)

    q1_norm = q1_sum / np.sqrt((q1_sum ** 2).sum())
    q2_norm = q2_sum / np.sqrt((q2_sum ** 2).sum())

    return np.concatenate([
        get_two_vector_distance(np.nan_to_num(q1_mean), np.nan_to_num(q2_mean)),
        get_two_vector_distance(np.nan_to_num(q1_norm), np.nan_to_num(q2_norm)),
        get_two_vector_distance(np.nan_to_num(q1_final), np.nan_to_num(q2_final)),
    ])

def get_phrase_embedding_distances_norm(pair):
    """Distance features between the two token lists of ``pair`` under the
    pre-normalized word2vec model.

    Mirrors :func:`get_phrase_embedding_distances` but uses
    ``embedding_model_norm`` / :func:`sent2vec_norm`.  Returns a flat
    sequence of ``3 * len(temp_columns)`` values, or all zeros when either
    side has no in-vocabulary token.
    """
    q1_vectors = [embedding_model_norm[t] for t in pair[0] if t in embedding_model_norm]
    q2_vectors = [embedding_model_norm[t] for t in pair[1] if t in embedding_model_norm]
    q1_final = sent2vec_norm(pair[0])
    q2_final = sent2vec_norm(pair[1])

    # sent2vec_norm returns the int 0 when nothing was embeddable;
    # isinstance replaces the type(x) == int anti-pattern.
    if (not q1_vectors or not q2_vectors
            or isinstance(q1_final, int) or isinstance(q2_final, int)):
        return [0] * (len(temp_columns) * 3)

    q1_mean = np.mean(q1_vectors, axis=0)
    q2_mean = np.mean(q2_vectors, axis=0)

    q1_sum = np.sum(q1_vectors, axis=0)
    q2_sum = np.sum(q2_vectors, axis=0)

    q1_norm = q1_sum / np.sqrt((q1_sum ** 2).sum())
    q2_norm = q2_sum / np.sqrt((q2_sum ** 2).sum())

    return np.concatenate([
        get_two_vector_distance(np.nan_to_num(q1_mean), np.nan_to_num(q2_mean)),
        get_two_vector_distance(np.nan_to_num(q1_norm), np.nan_to_num(q2_norm)),
        get_two_vector_distance(np.nan_to_num(q1_final), np.nan_to_num(q2_final)),
    ])

import time


def _timed_map(pairs, mapper):
    """Map ``mapper`` over ``pairs`` in parallel batches, printing the
    elapsed milliseconds per 1000 items (the original code divided by
    len(tokens) even for the tokens2 runs, mis-reporting their timing)."""
    start_ms = int(time.time() * 1000)
    result = jobs.map_batch_parallel(
        pairs,
        item_mapper=mapper,
        batch_size=1000,
    )
    elapsed_ms = int(time.time() * 1000) - start_ms
    print("word2vec time per 1000 line:")
    print(elapsed_ms / len(pairs) * 1000)  # FIX: divide by this run's input size
    print("ms")
    return result


# Four feature sets: word tokens and char tokens, each through the raw and
# the normalized embedding mappers.
features = _timed_map(tokens, get_phrase_embedding_distances)
features2 = _timed_map(tokens2, get_phrase_embedding_distances)
features3 = _timed_map(tokens, get_phrase_embedding_distances_norm)
features4 = _timed_map(tokens2, get_phrase_embedding_distances_norm)

features = np.concatenate([features, features2, features3, features4], axis=1)

# Column names: twelve prefixed copies of temp_columns, in the same order as
# the concatenated feature groups above.
_prefixes = [
    "word2vec_mean_", "word2vec_norm_", "word2vec_stopword_",
    "char2vec_mean_", "char2vec_norm_", "char2vec_stopword_",
    "word2vec_mean_norm_", "word2vec_norm_norm_", "word2vec_stopword_norm_",
    "char2vec_mean_norm_", "char2vec_norm_norm_", "char2vec_stopword_norm_",
]
columns = [prefix + c for prefix in _prefixes for c in temp_columns]

# from sklearn import preprocessing
# features = preprocessing.scale(np.array(features))
features_df = pd.DataFrame(features, columns=columns)
features_df.to_csv(OUTPUT, encoding="utf-8", index=False)