# -*- coding: UTF-8 -*-

import web_apis
import lda_model
import utils
import similar_word_model
import preprocess
from gensim.corpora import Dictionary

class Mashups:
    """In-memory collection of mashup records loaded from 'mashup.json'.

    On construction, reads every mashup entry, keeps only records where the
    name, categories, related APIs, followers and description all pass
    ``utils.is_valid_data``, and preprocesses each description into a token
    list for later topic modeling.

    Attributes populated by ``__init__``:
        mashup_dict    -- name -> {'category', 'user', 'invoke_api'} (plus
                          'latent_topic' after ``train_topic_model``)
        mashup_category / mashup_user -- sets of all seen categories / users
        name2id / id2name -- bidirectional mapping between mashup name and a
                          dense integer id (assignment order of valid records)
        raw_descs      -- raw description strings, indexed by id
        sentences      -- preprocessed token lists, indexed by id
        total_count    -- number of records read (including invalid ones)
        id             -- next id to assign == number of valid records
    """

    def __init__(self, use_w2v=False, sim_word_model: similar_word_model.Similar_Word_Model = None):
        """Load and filter mashup records.

        use_w2v        -- when True (and ``sim_word_model`` is given), expand
                          each description with top-N similar words before the
                          second preprocessing pass.
        sim_word_model -- optional similarity model; only consulted when
                          ``use_w2v`` is True.
        """
        self.mashup_dict = {}
        self.mashup_category = set()
        self.mashup_user = set()

        # Filled in by train_topic_model().
        self.latent_topic = set()
        self.topic_model = None
        self.bow_dict = None
        self.corpus_bow = None

        self.id2name = {}
        self.name2id = {}
        self.raw_descs = []
        self.sentences = []
        self.total_count = 0
        self.id = 0

        mashup_json = utils.load_json('mashup.json')
        print('start read mashup')
        for mashup in mashup_json:
            self.total_count += 1
            if self.total_count % 3000 == 0:
                print('read mashup count:', self.total_count)
            name = utils.get_clean_data_from_url(mashup['url'])
            category = mashup['categories']
            # 'related_apis' / 'followers' may be absent; treat as empty.
            api = [utils.get_clean_data_from_url(raw) for raw in mashup.get('related_apis', [])]
            user = [utils.get_clean_data_from_url(raw) for raw in mashup.get('followers', [])]
            raw_desc = mashup['desc']
            # Keep only fully-populated records (all fields valid).
            if utils.is_valid_data(name) and utils.is_valid_data(category) and utils.is_valid_data(api) and utils.is_valid_data(raw_desc) and utils.is_valid_data(user):
                self.mashup_category |= set(category)
                self.mashup_user |= set(user)
                self.mashup_dict[name] = {
                    'category': category,
                    'user': user,
                    'invoke_api': api
                }
                self.name2id[name] = self.id
                self.id2name[self.id] = name
                self.id += 1

                self.raw_descs.append(raw_desc)
                if use_w2v and sim_word_model:
                    # Augment the token list with similar words, then run the
                    # second-stage preprocessing over the expanded list.
                    word_arr = preprocess.sentence_preprocess_1(raw_desc)
                    sim_word_arr = []
                    for word in word_arr:
                        sim_word_arr.extend(sim_word_model.get_top_n_similar_word(word))
                    word_arr.extend(sim_word_arr)
                    processed_word_arr = preprocess.sentence_preprocess_2(word_arr)
                    self.sentences.append(processed_word_arr)
                else:
                    self.sentences.append(preprocess.sentence_preprocess(raw_desc))
        print('end read mashup')

    def train_topic_model(self):
        """Train an LDA model on the preprocessed sentences and tag each
        mashup with its top (up to 3) latent topics.

        Side effects: sets ``bow_dict``, ``corpus_bow``, ``topic_model`` and
        ``latent_topic``; adds a 'latent_topic' list of 'lt-<topic id>'
        strings to every entry of ``mashup_dict``.
        """
        self.bow_dict = Dictionary(self.sentences)
        self.corpus_bow = [self.bow_dict.doc2bow(sentence) for sentence in self.sentences]
        self.topic_model = lda_model.Topic_Model('lda_not_with_w2v.model', docs=self.raw_descs, corpus=self.corpus_bow, id2word=self.bow_dict, num_topics=50)
        print('get topic model done')

        for mashup_id, name in self.id2name.items():  # renamed from 'id' to avoid shadowing the builtin
            desc_words = self.sentences[mashup_id]
            bow = self.bow_dict.doc2bow(desc_words)
            vec = self.topic_model.model[bow]
            # Sort topics by descending probability and keep the top 3.
            vec.sort(key=lambda k: -k[1])
            vec = vec[:3]  # slicing is a no-op when fewer than 3 topics
            l_topic = ['lt-' + str(tup[0]) for tup in vec]
            self.latent_topic |= set(l_topic)
            self.mashup_dict[name]['latent_topic'] = l_topic


if __name__ == '__main__':
    # Smoke-test: load the mashup data, train the topic model, and report sizes.
    # (The redundant local 'import web_apis' was removed — already imported at
    # module level and unused here.)
    ms = Mashups()
    print(len(ms.mashup_dict), ms.id)
    ms.train_topic_model()
    print(len(ms.latent_topic))