import json
import sys

import numpy as np
import pandas as pd
from gensim.corpora import Dictionary
from gensim.models import LdaMulticore
INPUT = "data/tokens_train.json"
OUTPUT = "feature_data/lda_feature.csv"

sys.path.append(".")
sys.path.append("..")
from distance_mayi import *
from itertools import chain

# Load the tokenized question pairs: each element is a [q1_tokens, q2_tokens]
# pair of token lists. (Fix: use a context manager so the file is closed.)
with open(INPUT, encoding="utf-8") as f:
    tokens = json.load(f)

# Flatten the pairs into a single list of documents: [q1, q2, q1, q2, ...].
# Fix: the previous np.array(tokens).ravel() silently builds a 3-D array and
# ravels down to individual *words* whenever every token list happens to have
# the same length; chain.from_iterable flattens exactly one level regardless.
documents = list(chain.from_iterable(tokens))

# Map each token to an integer id, then convert every document to bag-of-words.
dictionary = Dictionary(documents)
corpus = [dictionary.doc2bow(document) for document in documents]

NUM_TOPICS = 50   # number of latent topics to extract
RANDOM_SEED = 42  # fixed seed so the trained topics are reproducible

# Train the LDA topic model over all individual questions.
LDAmodel = LdaMulticore(
    corpus,
    num_topics=NUM_TOPICS,
    id2word=dictionary,
    random_state=RANDOM_SEED,
)

def compute_topic_distances(pair):
    """Return distance features between the LDA topic vectors of a question pair.

    ``pair`` is a two-element sequence of token lists (question 1, question 2).
    Relies on the module-level ``dictionary`` and ``LDAmodel``; the distances
    themselves come from ``get_two_vector_distance`` (``distance_mayi``).
    """
    topic_vectors = []
    for question_tokens in (pair[0], pair[1]):
        bow = dictionary.doc2bow(question_tokens)
        # minimum_probability=0 asks for a (topic_id, prob) entry for every
        # topic, so column 1 of the resulting array is the full dense
        # probability vector over all topics.
        topic_mixture = LDAmodel.get_document_topics(bow, minimum_probability=0)
        topic_vectors.append(np.array(topic_mixture)[:, 1])
    return get_two_vector_distance(topic_vectors[0], topic_vectors[1])


from kg import jobs

# Compute the topic-distance features for every question pair in parallel.
features = jobs.map_batch_parallel(
    tokens,
    item_mapper=compute_topic_distances,
    batch_size=1000,
)

# NOTE(review): ``temp_columns`` is not defined in this file — it is
# presumably exported by ``distance_mayi`` via the star import at the top and
# must contain one name per distance returned by ``get_two_vector_distance``;
# confirm against that module.
features_df = pd.DataFrame(features, columns=["lda_" + c for c in temp_columns])
features_df.to_csv(OUTPUT, encoding="utf-8", index=False)

