from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import random
from sklearn.feature_extraction import text
from gensim import matutils, models
import scipy.sparse
#from remove import add_stop_words


# Load hotel-review comments, vectorize a random sample of up to 1,000 of
# them, and fit a 3-topic LDA model, printing the discovered topics.

# Raw string: the non-raw "C:\Users\..." form makes "\U" an (invalid)
# unicode escape and is a SyntaxError on Python 3.
data = pd.read_csv(r"C:\Users\Gobinda Roy\Downloads\luxury_fil.csv")
# Drop missing entries up front: CountVectorizer raises on NaN input.
comments = data["Comments"].dropna()
# Sample from a plain list (random.sample on a Series indexes by label,
# not position) and guard against files with fewer than 1,000 comments.
sample_size = min(1000, len(comments))
shuf_comments = random.sample(list(comments), sample_size)

# English stop words plus domain-specific terms that dominate hotel
# reviews and would otherwise swamp every topic.
stop_words = list(text.ENGLISH_STOP_WORDS)
extra = ["room", "rooms", "darjeeling", "hotel", "staff", "good"]
stop_words.extend(extra)

vectorizer = CountVectorizer(stop_words=stop_words)
# Sparse document-term count matrix (docs x terms). Kept sparse end to
# end: the original densified via a DataFrame (O(docs * vocab) memory)
# only to convert straight back to CSR.
vector = vectorizer.fit_transform(shuf_comments)

# gensim's Sparse2Corpus expects terms x docs (documents as columns),
# so transpose the sparse matrix directly.
sparse_counts = vector.T.tocsr()
corpus = matutils.Sparse2Corpus(sparse_counts)
# Invert the vocabulary so topic output shows words, not column indices.
id2word = dict((v, k) for k, v in vectorizer.vocabulary_.items())

lda = models.LdaModel(corpus=corpus, id2word=id2word, num_topics=3, passes=70)
print(lda.print_topics())
