# coding=UTF-8
import pandas as pd
import numpy as np
import gensim
from fuzzywuzzy import fuzz
from tqdm import tqdm
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis

# I/O paths.  INPUT holds word-segmented ("fenci") question pairs,
# INPUT2 character-segmented ("fenzi") pairs; OUTPUT receives the features.
INPUT = 'data/rokid_fenci_train.csv'
INPUT2 = 'data/rokid_fenzi_train.csv'
OUTPUT = 'feature_data/quora_features.csv'

# Chinese stop-word list, one word per line.
# NOTE(review): this path is '../data/...' while the CSVs above use 'data/...'
# -- confirm which working directory the script is meant to run from.
with open("../data/stop_word_ch.txt", encoding="utf-8") as f:
    # Iterate the file lazily instead of readlines(); each line carries at
    # most one trailing newline, so rstrip("\n") matches replace("\n", "").
    stop_word_set = {line.rstrip("\n") for line in f}

def wmd(s1, s2):
    """Word Mover's Distance between two token lists (word-level model),
    computed after dropping stop words from both sides."""
    tokens_a = [tok for tok in s1 if tok not in stop_word_set]
    tokens_b = [tok for tok in s2 if tok not in stop_word_set]
    return model.wmdistance(tokens_a, tokens_b)


def norm_wmd(s1, s2):
    """Word Mover's Distance under the normalised word-level model,
    with stop words removed from both token lists first."""
    tokens_a = [tok for tok in s1 if tok not in stop_word_set]
    tokens_b = [tok for tok in s2 if tok not in stop_word_set]
    return norm_model.wmdistance(tokens_a, tokens_b)


def sent2vec(words):
    """Return the L2-normalised sum of word vectors for `words`
    (word-level model `model`); stop words and OOV tokens are skipped.

    When no usable token remains, or the summed vector has zero norm,
    a zero result is returned instead of the NaN the naive 0/0 would
    produce -- downstream code ran every vector through np.nan_to_num
    anyway, so the final feature values are unchanged.
    """
    words = [w for w in words if w not in stop_word_set]
    M = []
    for w in words:
        try:
            M.append(model[w])
        except KeyError:  # OOV token -- was a bare `except` hiding real errors
            continue
    if not M:
        # All tokens were stop words or out of vocabulary.
        return np.zeros(model.vector_size)
    v = np.array(M).sum(axis=0)
    norm = np.sqrt((v ** 2).sum())
    return v / norm if norm > 0 else v



def wmd2(s1, s2):
    """Word Mover's Distance between two token lists (character-level
    model `model2`), computed after stop-word removal."""
    tokens_a = [tok for tok in s1 if tok not in stop_word_set]
    tokens_b = [tok for tok in s2 if tok not in stop_word_set]
    return model2.wmdistance(tokens_a, tokens_b)


def norm_wmd2(s1, s2):
    """Word Mover's Distance under the normalised character-level model,
    with stop words removed from both token lists first."""
    tokens_a = [tok for tok in s1 if tok not in stop_word_set]
    tokens_b = [tok for tok in s2 if tok not in stop_word_set]
    return norm_model2.wmdistance(tokens_a, tokens_b)


def sent2vec2(words):
    """Return the L2-normalised sum of word vectors for `words`
    (character-level model `model2`); stop words and OOV tokens are skipped.

    Mirrors sent2vec: when no usable token remains, or the summed vector
    has zero norm, a zero result is returned instead of NaN (downstream
    np.nan_to_num made that equivalent anyway).
    """
    words = [w for w in words if w not in stop_word_set]
    M = []
    for w in words:
        try:
            M.append(model2[w])
        except KeyError:  # OOV token -- was a bare `except` hiding real errors
            continue
    if not M:
        # All tokens were stop words or out of vocabulary.
        return np.zeros(model2.vector_size)
    v = np.array(M).sum(axis=0)
    norm = np.sqrt((v ** 2).sum())
    return v / norm if norm > 0 else v

# Both tokenised inputs share the same tab-separated column layout.
_COLUMNS = ["id", "question1", "question2", "score"]
tokens_fenci = pd.read_csv(INPUT, names=_COLUMNS, sep='\t', encoding="utf-8")
tokens_fenzi = pd.read_csv(INPUT2, names=_COLUMNS, sep='\t', encoding="utf-8")
tokens_fenci = tokens_fenci.drop("id", axis=1)
tokens_fenzi = tokens_fenzi.drop("id", axis=1)
features = pd.DataFrame()

import time
begin_time = int(time.time() * 1000)  # timing leftover; never read afterwards

# --- Length features: word-level from tokens_fenci, char-level from tokens_fenzi ---
features['len_word_q1'] = tokens_fenci.question1.apply(len)
features['len_word_q2'] = tokens_fenci.question2.apply(len)
features['diff_len_word'] = features.len_word_q1 - features.len_word_q2
features['len_char_q1'] = tokens_fenzi.question1.apply(len)
features['len_char_q2'] = tokens_fenzi.question2.apply(len)
features['diff_len_char'] = features.len_char_q1 - features.len_char_q2

# --- Overlap counts between the two questions of each pair ---
features['common_words'] = tokens_fenci.apply(
    lambda row: len(set(row['question1']) & set(row['question2'])), axis=1)
features['common_chars'] = tokens_fenzi.apply(
    lambda row: len(set(row['question1']) & set(row['question2'])), axis=1)

# --- Fuzzy string-similarity scores (column insertion order matters for the CSV) ---
_q_w_scorers = (('fuzz_qratio', fuzz.QRatio), ('fuzz_WRatio', fuzz.WRatio))
for prefix, scorer in _q_w_scorers:
    # `s=scorer` binds the current function (avoids late-binding closures).
    features[prefix + '_word'] = tokens_fenci.apply(
        lambda row, s=scorer: s(row['question1'], row['question2']), axis=1)
for prefix, scorer in _q_w_scorers:
    features[prefix + '_char'] = tokens_fenzi.apply(
        lambda row, s=scorer: s(row['question1'], row['question2']), axis=1)

_ratio_scorers = (
    ('fuzz_partial_ratio', fuzz.partial_ratio),
    ('fuzz_partial_token_set_ratio', fuzz.partial_token_set_ratio),
    ('fuzz_partial_token_sort_ratio', fuzz.partial_token_sort_ratio),
    ('fuzz_token_set_ratio', fuzz.token_set_ratio),
    ('fuzz_token_sort_ratio', fuzz.token_sort_ratio),
)
for col, scorer in _ratio_scorers:
    features[col] = tokens_fenci.apply(
        lambda row, s=scorer: s(row['question1'], row['question2']), axis=1)
for col, scorer in _ratio_scorers:
    features[col + '2'] = tokens_fenzi.apply(
        lambda row, s=scorer: s(row['question1'], row['question2']), axis=1)


# --- Word-level (fenci) embedding features ---
model = gensim.models.KeyedVectors.load_word2vec_format('data/word2vec.vec')
features['wmd'] = tokens_fenci.apply(
    lambda row: wmd(row['question1'], row['question2']), axis=1)

norm_model = gensim.models.KeyedVectors.load_word2vec_format('data/word2vec_norm.vec')
features['norm_wmd'] = tokens_fenci.apply(
    lambda row: norm_wmd(row['question1'], row['question2']), axis=1)

# Sentence vectors: one 100-dim row per question, filled in place so a
# degenerate (NaN/scalar) sent2vec result still broadcasts into its row.
row_count = tokens_fenci.shape[0]
question1_vectors = np.zeros((row_count, 100))
error_count = 0  # unused; retained from the original script

for row_idx, toks in tqdm(enumerate(tokens_fenci.question1.values)):
    question1_vectors[row_idx, :] = sent2vec(toks)

question2_vectors = np.zeros((row_count, 100))
for row_idx, toks in tqdm(enumerate(tokens_fenci.question2.values)):
    question2_vectors[row_idx, :] = sent2vec(toks)
begin_time = int(time.time() * 1000)  # timing leftover; never read afterwards

# Clean NaNs once, then derive each pairwise-distance column in insertion order.
q1_clean = np.nan_to_num(question1_vectors)
q2_clean = np.nan_to_num(question2_vectors)

for col, metric in (
        ('cosine_distance', cosine),
        ('cityblock_distance', cityblock),
        ('jaccard_distance', jaccard),
        ('canberra_distance', canberra),
        ('euclidean_distance', euclidean),
        ('minkowski_distance', lambda a, b: minkowski(a, b, 3)),
        ('braycurtis_distance', braycurtis),
):
    features[col] = [metric(a, b) for a, b in zip(q1_clean, q2_clean)]

# Per-row distribution statistics of the sentence vectors.
features['skew_q1vec'] = [skew(v) for v in q1_clean]
features['skew_q2vec'] = [skew(v) for v in q2_clean]
features['kur_q1vec'] = [kurtosis(v) for v in q1_clean]
features['kur_q2vec'] = [kurtosis(v) for v in q2_clean]


# --- Character-level (fenzi) pass: same pipeline with the *2 models ---
# NOTE(review): this loads the same 'data/word2vec.vec' file as the
# word-level model above -- confirm a character-level vector file was
# not intended here.
model2 = gensim.models.KeyedVectors.load_word2vec_format('data/word2vec.vec')
features['wmd2'] = tokens_fenzi.apply(
    lambda row: wmd2(row['question1'], row['question2']), axis=1)

norm_model2 = gensim.models.KeyedVectors.load_word2vec_format('data/word2vec_norm.vec')
features['norm_wmd2'] = tokens_fenzi.apply(
    lambda row: norm_wmd2(row['question1'], row['question2']), axis=1)

row_count2 = tokens_fenzi.shape[0]
question1_vectors2 = np.zeros((row_count2, 100))
error_count = 0  # unused; retained from the original script

for row_idx, toks in tqdm(enumerate(tokens_fenzi.question1.values)):
    question1_vectors2[row_idx, :] = sent2vec2(toks)

question2_vectors2 = np.zeros((row_count2, 100))
for row_idx, toks in tqdm(enumerate(tokens_fenzi.question2.values)):
    question2_vectors2[row_idx, :] = sent2vec2(toks)

# Clean NaNs once, then derive each pairwise-distance column in insertion order.
q1_clean2 = np.nan_to_num(question1_vectors2)
q2_clean2 = np.nan_to_num(question2_vectors2)

for col, metric in (
        ('cosine_distance2', cosine),
        ('cityblock_distance2', cityblock),
        ('jaccard_distance2', jaccard),
        ('canberra_distance2', canberra),
        ('euclidean_distance2', euclidean),
        ('minkowski_distance2', lambda a, b: minkowski(a, b, 3)),
        ('braycurtis_distance2', braycurtis),
):
    features[col] = [metric(a, b) for a, b in zip(q1_clean2, q2_clean2)]

# Per-row distribution statistics of the sentence vectors.
features['skew_q1vec2'] = [skew(v) for v in q1_clean2]
features['skew_q2vec2'] = [skew(v) for v in q2_clean2]
features['kur_q1vec2'] = [kurtosis(v) for v in q1_clean2]
features['kur_q2vec2'] = [kurtosis(v) for v in q2_clean2]

features.to_csv(OUTPUT, index=False)
