import sys, time, re, json
import math, collections, itertools
import sqlite3, spade

import nltk, nltk.classify.util, nltk.metrics
#from nltk.classify import NaiveBayesClassifier
#from nltk.metrics import BigramAssocMeasures
#from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.corpus import wordnet, stopwords
from collections import Counter

# SQLite database with the scraped hotel reviews.
database_file = "Reviews_from_Crete.db"
database_path = "../datasets/"

# Token normalizers: Porter stemming followed by WordNet lemmatization.
porter = nltk.PorterStemmer()
wnl = nltk.WordNetLemmatizer()
# English stopwords plus punctuation tokens that should never enter the vocabulary.
stopwords = nltk.corpus.stopwords.words('english') + ['', ',', '.', '(', ')', ';', '!']
# Run the stopwords through the same stem+lemmatize pipeline as the review
# tokens, so later membership tests compare like with like.
lemmatized = {wnl.lemmatize(porter.stem(w)) for w in stopwords}

conn = sqlite3.connect(database_path + database_file)
source = conn.cursor()

# Pull every review row into memory in one pass (was a manual append loop).
reviews = list(source.execute("SELECT * FROM reviews"))
hotels = []  # placeholder, populated elsewhere / in a later stage

topics = dict()  # placeholder for topic assignments

# token -> occurrence count across the whole corpus
vocabulary = Counter()

# Progress-reporting state for the processing loop below.
iterator = 0
count = len(reviews)

# Tokenize every review and accumulate a stem+lemma vocabulary with counts.
for review in reviews:
	iterator += 1
	# Parenthesized single-argument print: identical output on Python 2,
	# and keeps the line valid under Python 3 as well.
	print("processing review: " + str(iterator) + " of: " + str(count))
	text = review[3]  # review body column — TODO confirm against table schema
	# Pad periods with spaces so the sentence tokenizer splits reliably.
	sentences = nltk.sent_tokenize(text.replace(".", " . "))
	for sentence in sentences:
		tokens = nltk.word_tokenize(sentence.lower())
		# POS tags are not used yet; kept so nouns/adjectives/verbs can be
		# separated later without re-tagging.
		words = nltk.pos_tag(tokens)
		for (word, pos) in words:
			token = wnl.lemmatize(porter.stem(word.strip(".-!;'")))
			if token not in lemmatized:
				vocabulary[token] += 1

# Persist the complete word -> count table as JSON.
with open("word-count_dict.txt", 'w') as counts_out:
	json.dump(vocabulary, counts_out)

# Persist the 1000 most frequent words as (word, count) pairs.
with open("common-words_list.txt", 'w') as common_out:
	json.dump(vocabulary.most_common(1000), common_out)


# NOTE(review): everything below is dead exploratory code kept inside
# triple-quoted string literals (frequency inspection, a WordNet
# lowest-common-hypernym experiment, and an older stopword-pruning pass).
# None of it executes. Consider deleting it or moving it to a notebook;
# it also references names (Text, FreqDist) that are never imported here.
'''
with open("common-words_list.txt",'w') as cwf:
	cwf.write(json.dumps([w[0] for w in sorted([(i, vocabulary[i]) for i in vocabulary],key=lambda x: x[1],reverse=True)]))

'''
'''
vocabulary = dict()
f = open("vocabulary.txt",'r')
vocabulary = json.loads(f.readline())
f.close()

print len(vocabulary)








text = []
review = reviews[5][3]
sentences = nltk.sent_tokenize(review.replace("."," . "))
for sentence in sentences:
	tokens = nltk.word_tokenize(sentence.lower())
	for token in tokens:
		text.append(token)

moby = Text(text)
fdist1 = FreqDist(moby)
fdist1.most_common(50)

def content_fraction(text):
	stopwords = nltk.corpus.stopwords.words('english')
	content = [w for w in text if w.lower() not in stopwords]
	return 100 * len(content) / len(text)

def compare(word1, word2):
	best = wordnet.synset('entity.n.01')
	best_so_far = 0
	nsyns = wordnet.synsets(word1)
	vsyns = wordnet.synsets(word2)
	for n in nsyns:
		for v in vsyns:
			if len(n.lowest_common_hypernyms(v)) > 0:
				if n.lowest_common_hypernyms(v)[0].min_depth() > best_so_far:
					best_so_far = n.lowest_common_hypernyms(v)[0].min_depth()
					best = n.lowest_common_hypernyms(v)[0]
	print best
	print best.part_meronyms()



ntext = []
for word in vocabulary:
	ntext.append(word)


moby = nltk.Text(ntext)
fdist = FreqDist(moby)
fdist.most_common(50)

items = []
for token in vocabulary:
	if ((token in stopwords) or (token in ['',',','.','(',')',';','!'])):
		items.append(token)

for item in items:
	del vocabulary[item]
'''
pass



