import sys, gzip, json, math, nltk, MySQLdb, os, enchant
from collections import Counter, defaultdict
from MySQLdb import cursors


def add_document_to_corpus(COUNT, DOCUMENT):
	"""Fold one document's tokens into a running IDF table.

	COUNT maps word -> current IDF value (log10(N/df)) and stores the
	corpus size N under the sentinel key "#NUM_OF_DOCUMENTS".  DOCUMENT
	is the token list of the new document.  The table is updated in
	place and also returned.
	"""
	NUM = float(COUNT["#NUM_OF_DOCUMENTS"])
	FREQ = Counter()
	# Reconstruct each word's document frequency from its stored IDF:
	# idf = log10(N/df)  =>  df = N / 10**idf.
	for word in [w for w in COUNT if w != "#NUM_OF_DOCUMENTS"]:
		FREQ[word] = (NUM/math.pow(10,COUNT[word]))
	# BUG FIX: a document contributes at most 1 to each word's document
	# frequency.  The original added 1 per token occurrence, so a word
	# repeated within a single document pushed df past N and produced
	# negative IDF values.
	for word in set(DOCUMENT):
		FREQ[word] += 1
	NUM = float(NUM+1)
	for word in FREQ:
		COUNT[word] = math.log10(NUM / float(FREQ[word]))
	COUNT["#NUM_OF_DOCUMENTS"] = NUM
	return COUNT

def mass_add_documents(CORP_A, CORP_B):
	"""Merge two IDF tables into one over the combined corpus.

	Both arguments map word -> IDF (log10(N/df)) with the document count
	stored under "#NUM_OF_DOCUMENTS".  Document frequencies are
	reconstructed from the stored IDFs (df = N / 10**idf), summed, then
	converted back to IDFs over the combined document count.  CORP_A is
	cleared and reused to hold the result, which is returned.
	"""
	NUM_A = float(CORP_A["#NUM_OF_DOCUMENTS"])
	NUM_B = float(CORP_B["#NUM_OF_DOCUMENTS"])
	FREQ = Counter()
	for word in [w for w in CORP_A if w != "#NUM_OF_DOCUMENTS"]:
		FREQ[word] += (NUM_A/math.pow(10,CORP_A[word]))
	CORP_A.clear()
	for word in [w for w in CORP_B if w != "#NUM_OF_DOCUMENTS"]:
		FREQ[word] += (NUM_B/math.pow(10,CORP_B[word]))
	# (The original rebound the local CORP_B to [] here -- dead code,
	# removed: CORP_B is not used again and the caller's dict is
	# unaffected by a local rebinding.)
	NUM = NUM_A + NUM_B
	for word in FREQ:
		CORP_A[word] = math.log10(NUM / float(FREQ[word]))
	CORP_A["#NUM_OF_DOCUMENTS"] = NUM
	return CORP_A


def get_counts(COUNT):
	"""Invert an IDF table back into raw document-frequency counts.

	Each stored value is idf = log10(N/df), so df = N / 10**idf.  The
	"#NUM_OF_DOCUMENTS" bookkeeping key is skipped, not counted.
	"""
	total_docs = float(COUNT["#NUM_OF_DOCUMENTS"])
	freqs = Counter()
	for term in COUNT:
		if term == "#NUM_OF_DOCUMENTS":
			continue
		freqs[term] = total_docs / math.pow(10, COUNT[term])
	return freqs

def read_reviews_from_file(filename):
	"""Yield one dict per review from a gzipped "key: value" file.

	Records are blocks of "key: value" lines separated by blank lines.
	Each record is yielded as a defaultdict(str), so missing fields read
	as "".  A value containing ':' is re-joined after the split, but
	whitespace around each ':' segment is stripped.
	"""
	with gzip.open(filename, 'r') as orfile:
		obj = defaultdict(str)
		for line in orfile:
			# gzip yields bytes on Python 3; normalise to text so the
			# str-based parsing below works on both 2 and 3.
			if not isinstance(line, str):
				line = line.decode("utf-8")
			buf = [w.strip() for w in line.split(":")]
			if buf[0] != "":
				obj[buf[0]] = ":".join(buf[1:])
			else:
				yield obj
				obj = defaultdict(str)
		# BUG FIX: the original silently dropped the final record when
		# the file did not end with a blank separator line.
		if obj:
			yield obj

def tokenize(text):
	"""Return lower-cased word tokens of *text* with punctuation stripped.

	Periods are padded with spaces first so they tokenize as separate
	tokens; each token is lower-cased and stripped of the listed
	punctuation characters at both ends.
	"""
	padded = text.replace(".", " . ")
	tokens = []
	for sent in nltk.sent_tokenize(padded):
		tokens.extend(
			word.lower().strip(".-!@#$%^&*{`}<>/;'[]()")
			for word in nltk.word_tokenize(sent))
	return tokens

def rem_stopwords(tokens):
	"""Return *tokens* with English stopwords and punctuation removed.

	PERF FIX: the original rebuilt the stopword list (and scanned it
	linearly) once per token; build a set once per call instead, giving
	O(1) membership tests.
	"""
	stopset = set(nltk.corpus.stopwords.words('english'))
	stopset.update([',', '.', ':', '!', '', '``', ' '])
	return [word for word in tokens if word not in stopset]

def get_lemmas(tokens):
	"""Map each token to its WordNet lemma."""
	lemmatizer = nltk.WordNetLemmatizer()
	lemmas = []
	for token in tokens:
		lemmas.append(lemmatizer.lemmatize(token))
	return lemmas

def get_stems(tokens):
	"""Map each token to its Porter stem."""
	stemmer = nltk.PorterStemmer()
	return list(map(stemmer.stem, tokens))

def get_spelled(tokens):
	"""Keep only tokens the en_US enchant dictionary recognises."""
	checker = enchant.Dict("en_US")
	kept = []
	for token in tokens:
		if checker.check(token):
			kept.append(token)
	return kept

def calculate_TF(tokens):
	"""Term frequency of each token: occurrence count / total tokens.

	Returns a Counter mapping term -> float frequency; an empty token
	list yields an empty Counter.
	"""
	counts = Counter(tokens)
	total = float(sum(counts.values()))
	return Counter({term: counts[term] / total for term in counts})

def calculate_TFIDF(TFs,IDFs):
	"""Elementwise product TF * IDF over the terms present in TFs.

	Terms missing from IDFs contribute a score of 0 (Counter default).
	"""
	scores = Counter()
	for term, tf in TFs.items():
		scores[term] = tf * IDFs[term]
	return scores

if __name__ == "__main__":
	# Running IDF table: word -> log10(N/df), with the document count N
	# kept under the sentinel key "#NUM_OF_DOCUMENTS".
	IDF = Counter()
	if len(sys.argv) == 2:
		filename = sys.argv[1]
	else:
		# Fallback name; opening it will fail unless the file exists.
		filename = "empty.file"
	# --- Phase 1: load the set of book product IDs -----------------------
	BookIDs = []
	line = "start"
	with gzip.open("books.txt.gz", 'r') as orfile:
		while line != '':
			line = orfile.readline()
			BookIDs.append(line.strip())
			print len(BookIDs), " out of 929,264 read"
	# The final readline() returned '' at EOF and was appended; drop it.
	BookIDs.pop()
	BookIDs = set(BookIDs)
	print "Loaded BookId's"
	# --- Phase 2: per-review TF vectors + partial IDF matrices -----------
	it = 0
	for review in read_reviews_from_file(filename):
		product = review["product/productId"]
		if product in BookIDs:
			docfile = gzip.open("TFMATRIX/"+str(product)+".json.gz",'w')
			# NOTE(review): this tokenizes the product description, not
			# the review text -- confirm that field is the intended input.
			description = review["product/description"]
			tokens = tokenize(description)
			tokens = rem_stopwords(tokens)
			tokens = get_spelled(tokens)
			tokens = get_lemmas(tokens)
			tokens = get_stems(tokens)
			IDF = add_document_to_corpus(IDF, tokens)
			# Persist the TF vector and the raw description, one JSON
			# value per line.
			docfile.write(json.dumps(calculate_TF(tokens))+"\n")
			docfile.write(json.dumps(description)+"\n")
			docfile.close()
			print "Documents added: ", IDF["#NUM_OF_DOCUMENTS"]
		# Every 500 documents, flush the partial IDF matrix to disk and
		# start a fresh one; the partials are merged in the next phase.
		# (This test runs on every review, matching or not, but only
		# fires when the running count is a positive multiple of 500.)
		if ((IDF["#NUM_OF_DOCUMENTS"]%500 == 0) and (IDF["#NUM_OF_DOCUMENTS"] > 0)):
			it += 1
			print "Added Matrix: ", it
			outfile = gzip.open("PART_IDF/PART_IDF"+str(it)+"matrix.json.gz",'w')
			outfile.write(json.dumps(IDF)+"\n")
			outfile.close()
			IDF = Counter()
	print "Completed TF calculation, Merging IDF matrices"

	# --- Phase 3: merge all partial IDF matrices -------------------------
	# NOTE(review): any documents counted since the last 500-flush are
	# still in the in-memory IDF and get merged together with the files.
#	IDF = Counter()
	i = 0
	for INFILE in os.listdir("PART_IDF"):
		i += 1
		print "Merged ", i, " matrices"
		infile = gzip.open("PART_IDF/"+INFILE,'r')
		IDF = mass_add_documents(IDF,Counter(json.loads(infile.readline())))
		infile.close()
	print "All matrices merged. Writing complete matrix to file"
	outfile = gzip.open("IDFmatrix.json.gz",'w')
	outfile.write(json.dumps(IDF)+"\n")
	outfile.close()
	print "IDF matrix saved. Calculating TFIDFs"

	# --- Phase 4: TF*IDF per stored document -----------------------------
	# (Commented-out block below reloads the saved IDF matrix instead of
	# reusing the in-memory one -- kept for reruns that skip phases 1-3.)
#	outfile = gzip.open("IDFmatrix.json.gz",'r')
#	IDF = Counter(json.loads(outfile.readline()))
#	outfile.close()
#	print "Vocabulary size: ", len(IDF), " from ", IDF["#NUM_OF_DOCUMENTS"], " documents"
	iterator = 0
	for TFfile in os.listdir("TFMATRIX"):
		iterator += 1
		# Filenames are "<productId>.json.gz"; recover the product ID.
		product = TFfile.split(".")[0]
		tfidfs = gzip.open("TFIDFs/"+str(product)+".json.gz",'w')
		tffile = gzip.open("TFMATRIX/"+TFfile,'r')
		# First line of each TFMATRIX file is the JSON TF vector.
		TF = Counter(json.loads(tffile.readline()))
		tfidfs.write(json.dumps(calculate_TFIDF(TF,IDF))+"\n")
		print "Calculated ", iterator, " documents"
		tfidfs.close()
		tffile.close()
	print "Vocabulary size: ", len(IDF), " from ", IDF["#NUM_OF_DOCUMENTS"], " documents"
	sys.exit()


