import sys, time, re, json
import math, collections, itertools
import sqlite3, spade, datetime

import nltk, nltk.classify.util, nltk.metrics
#from nltk.classify import NaiveBayesClassifier
#from nltk.metrics import BigramAssocMeasures
#from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.corpus import wordnet, stopwords
from collections import Counter
from collections import defaultdict

if __name__ == "__main__":

	# Extract a per-POS-tag vocabulary (word -> frequency) from the reviews
	# stored in an SQLite database. Tokens following a negation word are
	# prefixed with "NEG_" until the next punctuation mark; tokens are
	# stemmed (Porter) then lemmatized (WordNet) before counting.
	# Results are dumped as JSON, partitioned by the argv[1]..argv[2] slice
	# so several processes can split the work.
	#
	# Fixes vs. previous revision: Python 3 print(), narrow exception
	# handling for argv parsing, no shadowing of nltk.corpus.stopwords,
	# sqlite connection closed, set-based membership tests in the hot loop,
	# unused `zero_time` removed.

	total_time = datetime.datetime.now()

	database_file = "Reviews_from_Crete.db"
	database_path = "../datasets/"

	porter = nltk.PorterStemmer()
	wnl = nltk.WordNetLemmatizer()

	punctuation = ["",",","(",")",".",":",";","!","?"]
	negations = ["never","no","nothing","nowhere","noone","none","not","havent","hasnt","hadnt","cant","couldnt","shouldnt","wont","wouldnt","dont","doesnt","didnt","isnt","arent","aint","n't"]
	# Sets for O(1) membership tests inside the per-token loop.
	punctuation_set = set(punctuation)
	negation_set = set(negations)

	# NOTE: only punctuation is filtered for now; English stopword filtering
	# was deliberately disabled (see the commented-out expression).
	eng_stopwords = punctuation #+ nltk.corpus.stopwords.words('english') + ["NEG_" + w for w in nltk.corpus.stopwords.words('english')]
	# Named `stopword_set` (not `stopwords`) to avoid shadowing the imported
	# nltk.corpus.stopwords module.
	stopword_set = set(wnl.lemmatize(porter.stem(w)) for w in eng_stopwords)

	# Read every review row up front, then release the connection.
	conn = sqlite3.connect(database_path + database_file)
	try:
		source = conn.cursor()
		reviews = source.execute("SELECT * FROM reviews").fetchall()
	finally:
		conn.close()

	vocabulary = defaultdict(Counter)
	curr = 0
	# argv[1]/argv[2] select the slice of reviews to process; missing or
	# non-integer arguments fall back to the full range.
	try:
		from_review = int(sys.argv[1])
	except (IndexError, ValueError):
		from_review = 0
	try:
		to_review = int(sys.argv[2])
	except (IndexError, ValueError):
		to_review = len(reviews)

	total = to_review - from_review
	print("Started vocabulary extraction for reviews: ", from_review, to_review)

	for review in reviews[from_review:to_review]:
		curr += 1
		print("processing review: " + str(curr) + " of: " + str(total))
		start_time = datetime.datetime.now()
		text = review[3]  # review text is the 4th column of the reviews table
		# Pad periods with spaces so the tokenizer keeps them as separate
		# tokens (they reset the negation scope below).
		sentences = nltk.sent_tokenize(text.replace("."," . "))
		for sentence in sentences:
			tokens = nltk.word_tokenize(sentence.lower())
			# Negation scope: toggled by a negation word, cleared by
			# punctuation; while active, tokens get a "NEG_" prefix.
			neg_scope = False
			ntokens = []
			for token in tokens:
				if token in negation_set:
					neg_scope = not neg_scope
				elif token in punctuation_set:
					neg_scope = False
				elif neg_scope:
					token = "NEG_" + token
				ntokens.append(token)
			words = nltk.pos_tag(ntokens)
			for (word, pos) in words:
				# Stem + lemmatize the surface form; count it under the
				# coarse (2-letter) POS tag, e.g. "NN", "VB".
				token = wnl.lemmatize(porter.stem(word.strip(".-!;'")))
				if token not in stopword_set:
					vocabulary[pos[:2]][token] += 1
		print("time to process: ", str(datetime.datetime.now() - start_time))

	# Dump the full vocabulary and the 1000 most common nouns for this slice.
	with open("dictionary" + str(from_review) + "-" + str(to_review) + ".txt",'w') as f:
		f.write(json.dumps(vocabulary))

	with open("common_nouns" + str(from_review) + "-" + str(to_review) + ".txt",'w') as f:
		f.write(json.dumps(vocabulary['NN'].most_common(1000)))


	print("Total processing time: ", str(datetime.datetime.now() - total_time))

	# Exit explicitly so the (broken) leftover merge code below never runs
	# when this file is executed as a script.
	sys.exit(0)


# NOTE(review): leftover merge step, apparently meant to combine two partial
# vocabulary dumps ("dic" and "tion"?) into one. It is unreachable when this
# file is run as a script (sys.exit(0) above), but it DOES execute on import
# and crashes: `dic`, `tion` and `dictionary` are never defined (NameError).
# `temp` is loaded from dictionary.txt but never used — presumably it was
# meant to become one of the two inputs; verify against the original intent.
with open("dictionary.txt") as f1:
 temp = json.loads(f1.readline())

# Merge the per-POS Counters of the two partial dictionaries key by key.
for i in set([w for w in dic] + [q for q in tion]):
 dictionary[i] = Counter(dic[i]) + Counter(tion[i])

# NOTE(review): two bugs here. (1) str.strip("NEG_") removes the characters
# N, E, G, _ from BOTH ends of the word, not the "NEG_" prefix — e.g. it
# would also mangle a word ending in those letters. (2) Counter is built over
# (word, count) TUPLES, so it counts tuple occurrences (each once) instead of
# summing the counts of NEG_/plain variants of the same word.
ndict = Counter([(word.strip("NEG_"),count) for (word,count) in dictionary['NN'].most_common(150) if word.startswith("NEG_")]+\
[(word,count) for (word,count) in dictionary['NN'].most_common(150) if not word.startswith("NEG_")])

# Same list as above, kept as a plain list of (word, count) pairs; the
# strip("NEG_") prefix-removal bug applies here too.
unique = [(word.strip("NEG_"),count) for (word,count) in dictionary['NN'].most_common(150) if word.startswith("NEG_")]+\
[(word,count) for (word,count) in dictionary['NN'].most_common(150) if not word.startswith("NEG_")]



