import sys, time, re, json
import math, collections, itertools
import sqlite3, spade

import nltk, nltk.classify.util, nltk.metrics
#from nltk.classify import NaiveBayesClassifier
#from nltk.metrics import BigramAssocMeasures
#from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.corpus import wordnet, stopwords
from collections import Counter

if __name__ == "__main__":
	# Parse the [from_review, to_review) slice of the dataset to process;
	# fall back to the full known range when args are missing or non-numeric.
	# (Bare `except:` would also swallow KeyboardInterrupt/SystemExit.)
	try:
		from_review = int(sys.argv[1])
		to_review = int(sys.argv[2])
	except (IndexError, ValueError):
		from_review = 0
		to_review = 65180
	# Single pre-formatted string so the print behaves identically on py2/py3.
	print("Started vocabulary extraction for reviews: %d %d" % (from_review, to_review))

	database_file = "Reviews_from_Crete.db"
	database_path = "../datasets/"

	porter = nltk.PorterStemmer()
	wnl = nltk.WordNetLemmatizer()
	# NOTE: renamed from `stopwords` — the original shadowed the imported
	# nltk.corpus.stopwords module.
	stopword_tokens = nltk.corpus.stopwords.words('english') + ['',',','.','(',')',';','!']
	# Stem+lemmatize the stopwords so they match tokens after the same pipeline.
	lemmatized = set([wnl.lemmatize(porter.stem(w)) for w in stopword_tokens])

	# Load all reviews up front, then release the DB connection promptly.
	conn = sqlite3.connect(database_path + database_file)
	try:
		reviews = conn.execute("SELECT * FROM reviews").fetchall()
	finally:
		conn.close()

	vocabulary = Counter()
	count = to_review - from_review

	for iterator, review in enumerate(reviews[from_review:to_review], start=1):
		print("processing review: %d of: %d" % (iterator, count))
		text = review[3]	# assumes column 3 holds the review text — TODO confirm schema
		# Pad periods with spaces so sentence tokenization splits reliably.
		sentences = nltk.sent_tokenize(text.replace("."," . "))
		for sentence in sentences:
			tokens = nltk.word_tokenize(sentence.lower())
			# NOTE: the original ran nltk.pos_tag() here but never used the
			# tags (nor the nouns/verbs/adjectives/adverbs Counters it set
			# up); the dead tagging pass is dropped — output is unchanged.
			# TODO: reinstate pos_tag when POS-split counters are wanted.
			for word in tokens:
				token = wnl.lemmatize(porter.stem(word.strip(".-!;'")))
				if token not in lemmatized:
					vocabulary[token] += 1

	# Full word-count dictionary for this slice.
	with open("word-count_dict_" + str(from_review) + "-" + str(to_review) + ".txt",'w') as f:
		f.write(json.dumps(vocabulary))

	# Top-1000 most frequent tokens, as (token, count) pairs.
	with open("common-words_list_" + str(from_review) + "-" + str(to_review) + ".txt",'w') as cwf:
		cwf.write(json.dumps(vocabulary.most_common(1000)))

	sys.exit(0)

