# -*- coding:utf8  -*-
import sys
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from textblob import TextBlob

# Read the whole document from stdin, tokenize it with TextBlob, and drop
# English stopwords before vectorizing.
text = sys.stdin.read()
textblob = TextBlob(text)
# Build the stopword set ONCE: the original re-called stopwords.words('english')
# for every token, doing a linear list scan per word.
stop_words = set(stopwords.words('english'))
arr = [word for word in textblob.words if str(word) not in stop_words]
text = " ".join(arr)
data = [text]

# Bag-of-words term counts, capped at the 1000 most frequent terms.
n_features = 1000
tf_vectorizer = CountVectorizer(strip_accents='unicode',
                                max_features=n_features,
                                stop_words='english',
                                # integer df thresholds are absolute document
                                # counts; with a single document both bounds
                                # keep every term.
                                max_df=1,
                                min_df=1)
tf = tf_vectorizer.fit_transform(data)
from sklearn.decomposition import LatentDirichletAllocation

# Fit an online variational-Bayes LDA topic model on the term counts.
# (A stray no-op LatentDirichletAllocation(...) expression — a pasted REPL
# repr — was removed here; it constructed an object and discarded it.)
n_topics = 5
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=50,
                                learning_method='online',
                                learning_offset=50.,
                                random_state=0)
lda.fit(tf)

def print_top_words(model, feature_names, n_top_words):
    """Print the highest-weight terms of each topic in *model*.

    model         -- fitted LDA model exposing ``components_`` (topics x terms)
    feature_names -- term strings aligned with the columns of ``components_``
    n_top_words   -- how many top terms to print per topic

    Uses parenthesized print calls so the function runs under both Python 2
    and Python 3 (the original mixed py2 print statements with a bare
    ``print()``, which prints "()" under py2).
    """
    for topic_idx, topic in enumerate(model.components_):
        print("Topic #%d:" % topic_idx)
        # argsort is ascending; the reversed tail slice picks the
        # n_top_words strongest terms, strongest first.
        print(" ".join([feature_names[i]
                        for i in topic.argsort()[:-n_top_words - 1:-1]]))
    print("")


# Report the 20 strongest words of each fitted topic, then open an
# interactive pyLDAvis view of the model in the browser.
n_top_words = 20
print_top_words(lda, tf_vectorizer.get_feature_names(), n_top_words)
import pyLDAvis.sklearn

panel = pyLDAvis.sklearn.prepare(lda, tf, tf_vectorizer)
pyLDAvis.show(panel)
