import pyLDAvis
import pyLDAvis.sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation


def print_top_words(model, feature_names, topic_top_word_cnt):
    """Print the highest-weighted words of each topic in a fitted LDA model.

    Parameters
    ----------
    model : fitted decomposition model exposing ``components_``
        e.g. sklearn's LatentDirichletAllocation; each row of
        ``components_`` is one topic's word-weight vector.
    feature_names : sequence of str
        Vocabulary terms, indexed like the columns of ``model.components_``.
    topic_top_word_cnt : int
        How many top words to display per topic.
    """
    for topic_num, weights in enumerate(model.components_, start=1):
        # Column indices of the largest weights, descending.
        top_indices = weights.argsort()[::-1][:topic_top_word_cnt]
        print("Topic #%d:" % topic_num)
        print(" ".join(feature_names[i] for i in top_indices))


def do_lda(data, feature_word_cnt, topic_cnt, topic_analysis_res_path,
           max_df=0.5, min_df=10, max_iter=50):
    """Fit an LDA topic model on pre-tokenized text and save an interactive report.

    Parameters
    ----------
    data : mapping or DataFrame with a ``"tokenized"`` column
        Each entry is one document as a whitespace-joined token string,
        suitable for CountVectorizer's default analyzer.
    feature_word_cnt : int
        Vocabulary size cap (CountVectorizer ``max_features``).
    topic_cnt : int
        Number of LDA topics (``n_components``).
    topic_analysis_res_path : str
        File path where the pyLDAvis HTML visualization is written.
    max_df : float, optional
        Drop terms occurring in more than this fraction of documents
        (default 0.5, matching the previous hard-coded value).
    min_df : int, optional
        Drop terms occurring in fewer than this many documents (default 10).
    max_iter : int, optional
        Maximum EM iterations for LDA fitting (default 50).

    Returns
    -------
    tuple
        ``(lda, feature_names, text_vectors)`` — the fitted model, the
        vocabulary array, and per-document topic distributions of shape
        (n_docs, topic_cnt).
    """
    tf_vectorizer = CountVectorizer(strip_accents='unicode',
                                    max_features=feature_word_cnt,
                                    stop_words='english',
                                    max_df=max_df,
                                    min_df=min_df)
    tf = tf_vectorizer.fit_transform(data["tokenized"])

    # Fixed random_state so repeated runs produce the same topics.
    lda = LatentDirichletAllocation(n_components=topic_cnt, max_iter=max_iter,
                                    learning_method='batch',
                                    learning_offset=50,
                                    random_state=0)
    lda.fit(tf)
    text_vectors = lda.transform(tf)
    feature_names = tf_vectorizer.get_feature_names_out()

    # NOTE(review): pyLDAvis.sklearn was removed in pyLDAvis >= 3.4 and
    # replaced by pyLDAvis.lda_model — confirm the pinned pyLDAvis version.
    pic = pyLDAvis.sklearn.prepare(lda, tf, tf_vectorizer)
    pyLDAvis.save_html(pic, topic_analysis_res_path)

    return lda, feature_names, text_vectors

