import json
import sys

import numpy as np
import pandas as pd

sys.path.append(".")
sys.path.append("..")
from distance_mayi import *

# Input token files — presumably word-level and char-level tokenisations of
# the same question pairs (see the "word_level"/"char_level" column prefixes
# below); verify against the producer of these files.
INPUT1="data/tokens_train.json"
INPUT2="data/tokens2_train.json"
# Destination CSV: one row of simple similarity features per question pair.
OUTPUT="feature_data/simple_feature.csv"

def word_difference_ratio(q1_tokens, q2_tokens):
    """Return the symmetric-difference ratio of two token sequences.

    Computed as |set(q1) ^ set(q2)| / (|set(q1)| + |set(q2)|), i.e. 0.0 for
    identical token sets and 1.0 for fully disjoint ones.

    Fix: the original divided unconditionally and raised ZeroDivisionError
    when both token lists were empty; two empty questions are now treated
    as identical (ratio 0.0). Each set is also built only once.
    """
    set1 = set(q1_tokens)
    set2 = set(q2_tokens)
    denominator = len(set1) + len(set2)
    if denominator == 0:
        # Both questions empty: no differing tokens.
        return 0.0
    return len(set1 ^ set2) / denominator


def extract_original_question_features(pair):
    """Length-based similarity features for one question pair.

    Parameters
    ----------
    pair : sequence of two token lists (q1_tokens, q2_tokens).

    Returns
    -------
    list of 5 floats: log of the shorter length, log of the longer length,
    log of the length difference (all +1-smoothed), the shorter/longer
    length ratio, and the word-difference ratio.
    """
    q1 = pair[0]
    q2 = pair[1]

    shorter_length = min(len(q1), len(q2))
    longer_length = max(len(q1), len(q2))
    # Fix: guard against two empty questions, which previously raised
    # ZeroDivisionError; they are treated as identical (ratio 1.0).
    length_ratio = shorter_length / longer_length if longer_length else 1.0
    return [
        np.log(shorter_length + 1),
        np.log(longer_length + 1),
        np.log(abs(longer_length - shorter_length) + 1),
        length_ratio,
        word_difference_ratio(q1, q2),
    ]

# N-gram sizes used by the Jaccard features: 2-grams and 3-grams.
NGRAM_RANGE = range(2, 4)


def get_char_ngrams(doc, n):
    """Return every contiguous length-``n`` substring of ``doc``, in order.

    A ``doc`` shorter than ``n`` yields an empty list.
    """
    ngrams = []
    for start in range(len(doc) - n + 1):
        ngrams.append(doc[start:start + n])
    return ngrams


def get_jaccard_set_similarities(a, b):
    """Jaccard similarities of two sets.

    Returns a 3-tuple: the Jaccard index |a∩b| / |a∪b|, and the overlap
    normalised separately by |a| and by |b|. Both sets must be non-empty
    (callers guard against this).
    """
    overlap = len(a & b)
    return (
        overlap / len(a | b),
        overlap / len(a),
        overlap / len(b),
    )


def get_jaccard_similarities(q1, q2, n):
    """Jaccard similarities between the character n-gram sets of two strings.

    Degenerate cases (a string too short to contain a single n-gram): if
    both strings are too short they count as a perfect match (1, 1, 1);
    if only one is, as a complete mismatch (0, 0, 0). These guards also
    keep get_jaccard_set_similarities from dividing by an empty set.

    Fix: the guard previously compared lengths against max(NGRAM_RANGE)
    for every n, so e.g. a length-2 string was treated as "too short" even
    for 2-grams; it now compares against n itself.
    """
    if len(q1) < n and len(q2) < n:
        return 1, 1, 1
    if len(q1) < n or len(q2) < n:
        return 0, 0, 0

    q1_ngrams = set(get_char_ngrams(q1, n))
    q2_ngrams = set(get_char_ngrams(q2, n))
    return get_jaccard_set_similarities(q1_ngrams, q2_ngrams)

def get_question_pair_features(pair):
    """Character n-gram Jaccard features for one question pair.

    Each token list in ``pair`` is joined with single spaces, then the three
    Jaccard similarities are computed for every n in NGRAM_RANGE and
    concatenated into one flat feature list.
    """
    first = ' '.join(pair[0])
    second = ' '.join(pair[1])

    feature_row = []
    for size in NGRAM_RANGE:
        feature_row += get_jaccard_similarities(first, second, size)
    return feature_row


# ---------------------------------------------------------------------------
# Script body: load both tokenisation variants, compute the feature matrices
# in parallel batches, and write everything to a single CSV.
# ---------------------------------------------------------------------------
from kg import jobs  # project-local parallel batch mapper

# Fix: use context managers so the input files are closed deterministically
# (the original `json.load(open(...))` leaked the file handles).
with open(INPUT1, encoding="utf-8") as f:
    tokens = json.load(f)
with open(INPUT2, encoding="utf-8") as f:
    tokens2 = json.load(f)

# Length-based features, one row per pair, for each tokenisation.
features = jobs.map_batch_parallel(
    tokens,
    item_mapper=extract_original_question_features,
    batch_size=1000,
)
features2 = jobs.map_batch_parallel(
    tokens2,
    item_mapper=extract_original_question_features,
    batch_size=1000,
)
# Character n-gram Jaccard features for each tokenisation.
features3 = jobs.map_batch_parallel(
    tokens,
    item_mapper=get_question_pair_features,
    batch_size=1000,
)
features4 = jobs.map_batch_parallel(
    tokens2,
    item_mapper=get_question_pair_features,
    batch_size=1000,
)

# Column names: 5 length features per tokenisation, then 3 Jaccard columns
# per n-gram size per tokenisation (order must match the concatenation below).
feature_names = [
    'shorter_len_log',
    'longer_len_log',
    'len_diff_log',
    'len_ratio',
    'diff_ratio',
]

columns = (["simple_word_level_" + c for c in feature_names]
           + ["simple_char_level_" + c for c in feature_names])

for prefix in ('word', 'char'):
    for n in NGRAM_RANGE:
        columns.append(prefix + '_jaccard_ix_' + str(n) + 'gram')
        columns.append(prefix + '_jaccard_ix_norm_q1_' + str(n) + 'gram')
        columns.append(prefix + '_jaccard_ix_norm_q2_' + str(n) + 'gram')

# Stack the four feature matrices side by side: (n_pairs, 5+5+3*2+3*2).
features = np.concatenate([features, features2, features3, features4], axis=1)

features_df = pd.DataFrame(features, columns=columns)
features_df.to_csv(OUTPUT, encoding="utf-8", index=False)