import json
import sys
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
# Input token files: each is expected to hold a list of [q1_tokens, q2_tokens]
# pairs (lists of token strings) — TODO confirm against the producer script.
INPUT = "data/tokens_train.json"
INPUT2 = "data/tokens2_train.json"
OUTPUT = "feature_data/tfidf_feature.csv"

# Make project-local modules importable whether the script is run from the
# repo root or from a subdirectory.
sys.path.append(".")
sys.path.append("..")
from distance_mayi import *

# BUG FIX: the original used json.load(open(...)), which never closes the
# file handles; use context managers so the files are closed deterministically.
with open(INPUT, encoding="utf-8") as f:
    tokens = json.load(f)
with open(INPUT2, encoding="utf-8") as f:
    tokens2 = json.load(f)
# Deduplicated corpus used to fit the TF-IDF vocabulary and IDF weights:
# every question (from either side of every pair) rendered as a single
# space-separated string.
# NOTE(review): this relies on np.array(tokens) producing an object array of
# token lists (pairs of variable-length lists); confirm under newer numpy,
# which is stricter about ragged array construction.
documents = list({' '.join(toks) for toks in np.array(tokens).ravel()})
# Word-level TF-IDF model fitted on the deduplicated question corpus.
# Unigrams only; L2-normalised, smoothed-IDF, sublinear (1 + log tf) weights.
_tfidf_config = dict(
    encoding='utf-8',
    analyzer='word',
    strip_accents='unicode',
    ngram_range=(1, 1),
    lowercase=True,
    norm='l2',
    use_idf=True,
    smooth_idf=True,
    sublinear_tf=True,
)
vectorizer = TfidfVectorizer(**_tfidf_config)
vectorizer.fit(documents)

def compute_pair_distances(pair):
    """Return TF-IDF distance features for one question pair.

    Each side of ``pair`` is a token list; it is joined into a single
    space-separated string, both strings are transformed through the
    fitted module-level ``vectorizer``, and the two dense document
    vectors are passed to ``get_two_vector_distance``.
    """
    as_strings = [' '.join(side) for side in pair]
    dense = vectorizer.transform(as_strings).toarray()  # shape [2, vocab]
    return get_two_vector_distance(dense[0], dense[1])
# Warm-up call: exercises the whole pipeline once on the first pair so any
# configuration error surfaces before the long batch run starts.
compute_pair_distances(tokens[0])

from kg import jobs
import time


def _timed_feature_map(pairs):
    """Map compute_pair_distances over *pairs* in batches and print the
    elapsed milliseconds per 1000 pairs.

    BUG FIX: the original second run divided the elapsed time by
    len(tokens) instead of the length of its own input (tokens2),
    misreporting the per-1000-line timing.
    """
    start_ms = int(time.time() * 1000)
    result = jobs.map_batch_parallel(
        pairs,
        item_mapper=compute_pair_distances,
        batch_size=1000,
        n_jobs=1,
    )
    elapsed_ms = int(time.time() * 1000) - start_ms
    print("tfidf feature time per 1000 line:")
    print(elapsed_ms / len(pairs) * 1000)
    print("ms")
    return result


features = _timed_feature_map(tokens)
features2 = _timed_feature_map(tokens2)
# Concatenate the word-level and second (presumably char-level, per the
# column prefixes) feature blocks column-wise and persist as CSV.
# NOTE(review): temp_columns is not defined in this file — it appears to come
# from the star import of distance_mayi; confirm.
features = np.concatenate([features, features2], axis=1)
column_names = (
    [f"tfidf_word_{c}" for c in temp_columns]
    + [f"tfidf_char_{c}" for c in temp_columns]
)
pd.DataFrame(features, columns=column_names).to_csv(
    OUTPUT, encoding="utf-8", index=False
)
