"""Build fuzzy-distance features for token pairs and write them to CSV.

Reads token pairs from ``INPUT`` (JSON list), runs the two distance mappers
from ``distance_mayi`` over them in parallel batches, prints per-1000-line
timing for each pass, concatenates the two feature matrices column-wise,
and writes the result to ``OUTPUT`` with ``fuzz_``-prefixed column names.
"""
import json
import sys
import time

import numpy as np
import pandas as pd

# Project-local modules live in the current / parent directory; these
# appends must run before the local imports below.
sys.path.append(".")
sys.path.append("..")

# NOTE(review): get_one_pair_token_distance{,2} and temp_columns2/temp_columns3
# (used below) are assumed to come from this star import — confirm they are
# defined in distance_mayi.
from distance_mayi import *
from kg import jobs

INPUT = "data/tokens_train.json"
OUTPUT = "feature_data/fuzz_feature.csv"


def _timed_features(tokens, mapper):
    """Map ``mapper`` over ``tokens`` in parallel batches.

    Prints the elapsed wall time normalized to milliseconds per 1000 input
    lines, then returns the feature matrix produced by the mapper.
    """
    begin_time = int(time.time() * 1000)
    features = jobs.map_batch_parallel(
        tokens,
        item_mapper=mapper,
        batch_size=1000,
        n_jobs=1,
    )
    print("fuzz feature time per 1000 line:")
    print((int(time.time() * 1000) - begin_time) / len(tokens) * 1000)
    print("ms")
    return features


def main():
    # with-block closes the input file deterministically (the original
    # `json.load(open(INPUT))` leaked the handle).
    with open(INPUT, encoding="utf-8") as fh:
        tokens = json.load(fh)

    features = _timed_features(tokens, get_one_pair_token_distance2)
    features2 = _timed_features(tokens, get_one_pair_token_distance)

    # Column-wise concat: one row per token pair, both feature groups side by side.
    features = np.concatenate([features, features2], axis=1)
    print(np.shape(features))

    # Column names for the two feature groups (from distance_mayi's star import).
    temp_columns = np.concatenate([temp_columns2, temp_columns3])
    features_df = pd.DataFrame(features, columns=["fuzz_" + c for c in temp_columns])
    features_df.to_csv(OUTPUT, encoding="utf-8", index=False)


if __name__ == "__main__":
    main()