# -*- coding: UTF-8 -*-

import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

from loadjson import load_json
import preprocess
import lda_model
from gensim.models import word2vec
from gensim.corpora import Dictionary
import os.path
import utils
import gensim
import preprocess
import similar_word_model
import lda_model


sim_word_model = similar_word_model.Similar_Word_Model()


# test_list = ['house', 'map', 'estate', 'rent', 'apartment']

# for tw in test_list:
#     preprocess.sentence_preprocess(tw)
#     print(tw, '-----', utils.get_top_n_similar_word(tw, w2v_model)) 




mashup_json = utils.load_json('mashup.json')
#print(len(mashup_json))

# Build two parallel token corpora from mashup descriptions:
#   sentences  - tokens expanded with top-N similar words (word2vec-augmented)
#   sentences2 - plain preprocessed tokens (baseline, no expansion)
descs = []       # raw description string per processed mashup
apis = []        # related_apis entry per processed mashup
sentences = []
sentences2 = []

i = 0
for mashup in mashup_json:
    if mashup['desc']:
        word_arr = preprocess.sentence_preprocess_1(mashup['desc'])
        # Augment the token list with similar words for each token, then run
        # the second preprocessing pass over the expanded list.
        sim_word_arr = []
        for word in word_arr:
            sim_word_arr.extend(sim_word_model.get_top_n_similar_word(word))
        word_arr.extend(sim_word_arr)
        processed_word_arr = preprocess.sentence_preprocess_2(word_arr)
        sentences.append(processed_word_arr)
        sentences2.append(preprocess.sentence_preprocess(mashup['desc']))
        descs.append(mashup['desc'])
        apis.append(mashup['related_apis'])
        i += 1
        # BUGFIX: this progress check used to sit outside the
        # `if mashup['desc']` branch, so it fired once per *skipped* mashup
        # (including a spurious "processed count: 0" before any work was done).
        # It now reports only every 100 actually-processed descriptions.
        if i % 100 == 0:
            print('processed count:', i)

# print(sentences)

def _build_bow_corpus(token_docs):
    """Return (Dictionary, bag-of-words corpus) for a list of token lists."""
    vocab = Dictionary(token_docs)
    return vocab, [vocab.doc2bow(doc) for doc in token_docs]


# One vocabulary/corpus pair per preprocessing variant.
bow_dict, corpus_bow = _build_bow_corpus(sentences)
bow2, corpus2 = _build_bow_corpus(sentences2)

# Train (or load) an LDA topic model per corpus so results can be compared
# with and without word2vec-based token expansion.
topic_model = lda_model.Topic_Model('lda_with_w2v.model', docs=descs, corpus=corpus_bow, id2word=bow_dict, num_topics=150)
topic_model2 = lda_model.Topic_Model('lda_not_with_w2v.model', docs=descs, corpus=corpus2, id2word=bow2, num_topics=150)

# print(corpus_bow)
# print(bow_dict)

# corpus_matrix = utils.tuplecorpus_to_onehotmatrix(corpus_bow, len(bow_dict))
# from sklearn.feature_extraction.text import TfidfTransformer   
# transformer = TfidfTransformer()
# tfidf = transformer.fit_transform(corpus_matrix)
# tfidf_result = tfidf.toarray()
# corpus_tfidf = utils.onehotmatrix_to_tuplecorpus(tfidf_result)
# corpus_tfidf = corpus_bow
# from gensim.models import TfidfModel
# tfidf_model = TfidfModel(corpus_bow)
# corpus_tfidf = tfidf_model[corpus_bow]


# Run the same sample query against both topic models for comparison.
query = 'buy and rent an apartment, show the location.'
for model in (topic_model, topic_model2):
    model.get_topn_similar_doc(query)
# q_doc = preprocess.sentence_preprocess(query)
# q_bow = bow_dict.doc2bow(q_doc)
# # print(q_bow)


# topic_model = lda_model.get_lda_model(corpus_bow, bow_dict)
# q_lda = topic_model[q_bow]
# print(q_lda)


# from gensim import similarities
# import numpy as np
# from sklearn.cluster import k_means
# index = similarities.MatrixSimilarity(topic_model[corpus_bow], num_features=len(bow_dict))

# X = np.array(index)
# cluster_center, result, inertia = k_means(X.astype(np.float), n_clusters=20, init="k-means++")
# print(cluster_center)
# print(result)
# print(inertia)


# X_Y_dic = defaultdict(set)
# for i, pred_y in enumerate(result):
#     X_Y_dic[pred_y].add(''.join(ts.docs[i]))


# sims = index[q_lda]
# sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])

# top10sims = sort_sims[:10]

# for i in range(len(top10sims)):
#     print(top10sims[i],i, descs[top10sims[i][0]])

sim_word_model.save_sims()