import sys, time, re, json
import math, collections, itertools
import sqlite3, spade, datetime

import nltk, nltk.classify.util, nltk.metrics
from nltk.corpus import wordnet, stopwords
from collections import Counter
from collections import defaultdict
# from nltk.classify import NaiveBayesClassifier
# from nltk.metrics import BigramAssocMeasures
# from nltk.probability import FreqDist, ConditionalFreqDist


if __name__ == "__main__":

	# Extract candidate "topics" (noun clusters) from hotel reviews stored in
	# a SQLite database, grouping nouns by WordNet Wu-Palmer similarity to a
	# set of seed topics, and dump the result as JSON.
	#
	# Usage: script.py [from_review] [to_review]
	#   from_review/to_review select a slice of the review rows (defaults:
	#   0 and the total number of rows).

	# Wall-clock reference for total/average timing reports.
	total_time = datetime.datetime.now()

	database_file = "Reviews_from_Crete.db"
	database_path = "../datasets/"

	# WordNet lemmatizer normalizes tokens before topic matching.
	wnl = nltk.WordNetLemmatizer()

	# Load every review row up front; column index 3 holds the review text.
	# FIX: close the connection once the rows are materialized (it was
	# previously left open for the lifetime of the process).
	conn = sqlite3.connect(database_path + database_file)
	try:
		rows = list(conn.cursor().execute("SELECT * FROM reviews"))
	finally:
		conn.close()

	# Optional CLI args select a slice of reviews.
	# FIX: catch only the expected failures (missing arg / non-integer arg)
	# instead of a bare `except:` that also swallowed KeyboardInterrupt.
	try:
		from_review = int(sys.argv[1])
	except (IndexError, ValueError):
		from_review = 0
	try:
		to_review = int(sys.argv[2])
	except (IndexError, ValueError):
		to_review = len(rows)

	total = to_review - from_review
	reviews = rows[from_review:to_review]

	all_words = set()
	# topic name -> set of aspect words belonging to that topic; seeded with
	# hotel-domain topics, each initially containing itself as its only aspect.
	topics = defaultdict(set)
	for seed in ['room', 'hotel', 'beach', 'day', 'pool', 'staff']:
		topics[seed].add(seed)

	print("Started topic extraction from reviews:  " + str(from_review) + " " + str(to_review))

	curr = 0
	while reviews:
		curr += 1
		print("processing review: " + str(curr) + " of: " + str(total))
		start_time = datetime.datetime.now()
		# NOTE(review): pop() consumes the slice from the END, so reviews are
		# processed in reverse database order (kept to preserve behavior).
		text = reviews.pop()[3]
		# Pad periods with spaces so the sentence tokenizer splits reliably
		# on unspaced punctuation common in user-written reviews.
		sentences = nltk.sent_tokenize(text.replace(".", " . "))
		for sentence in sentences:
			tokens = nltk.word_tokenize(sentence.lower())
			for (w, pos) in nltk.pos_tag(tokens):
				word = wnl.lemmatize(w)
				# Only consider nouns (NN, NNS, NNP, NNPS) not seen before.
				if pos[:2] == "NN" and word not in all_words:
					best_topic = word
					best_sim = 0
					wsyn = wordnet.synsets(word)
					if wsyn:
						# Compare the word's first synset against every known
						# aspect of every known topic, tracking the best match.
						for topic in topics:
							for aspect in topics[topic]:
								tsyn = wordnet.synsets(aspect)
								if tsyn:
									similarity = wsyn[0].wup_similarity(tsyn[0])
									# FIX: wup_similarity returns None when the
									# synsets share no common ancestor; treat
									# that as "no similarity" instead of
									# comparing None against a number.
									if similarity is None:
										similarity = -1
								else:
									similarity = -1
								if similarity > best_sim:
									best_sim = similarity
									best_topic = topic
						# FIX: the threshold originally tested `similarity`
						# (whatever the LAST aspect compared happened to score)
						# rather than `best_sim`, so the decision ignored the
						# best match found above.
						if best_sim > 0.4:
							topics[best_topic].add(word)
						else:
							# No topic is close enough: the word seeds a new
							# topic of its own.
							topics[word].add(word)
					else:
						# No WordNet coverage at all: park the word under "UNK".
						topics["UNK"].add(word)
					all_words.add(word)
		print("time to process:  " + str(datetime.datetime.now() - start_time))
		print("average time:  " + str((datetime.datetime.now() - total_time) / curr))

	print("Total processing time:  " + str(datetime.datetime.now() - total_time))

	# Sets are not JSON-serializable; convert each aspect set to a list.
	results = {topic: list(aspects) for topic, aspects in topics.items()}

	out_name = "topics_extracted_from_" + str(from_review) + "-" + str(to_review) + ".txt"
	with open(out_name, 'w') as f:
		f.write(json.dumps(results))

	sys.exit(0)