'''
Created on Mar 20, 2013

@author: daoxuandung, letuananh
'''
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SearchProject.settings")

from os import listdir
from os.path import join
from django.db import connection
from nltk.tokenize import word_tokenize
from tfidf.models import Document, TermFrequency, DocFrequency
from nltk.probability import FreqDist
from django.db.models import Count
from config import GooseConfig
import math
from nltk.stem.wordnet import WordNetLemmatizer
from trectool import list_content, read_content, ParserData
from xml2txt import read_document
from zipfile import ZipFile

# Shared WordNet lemmatizer used to normalize tokens into terms
wordnet = WordNetLemmatizer()

# Root folder holding the raw zipped document packages (from Goose config)
ROOT_DIR = GooseConfig.RAW_FOLDER
parser = ParserData(data_root = ROOT_DIR)

def remove_stopword(tokens):
	"""Filter punctuation-like tokens out of *tokens*.

	Despite the name this removes punctuation marks (plus '...', '..'
	and '--'), not real stopwords.  The previous implementation returned
	``set(tokens) - punctuation``, which collapsed duplicate tokens and
	made every downstream term frequency equal to 1; it also had an
	unreachable second ``return``.  This version keeps duplicates so
	FreqDist counts are meaningful.
	"""
	ignored = set('~`!@#$%^&*()_+-=[]{};:"\',<.>/?|\\') | set(['...', '..', '--'])
	return [token for token in tokens if token not in ignored]

def get_term_freq_dict(data):
	"""Tokenize *data* and return {lemmatized term: frequency}.

	Pipeline: lower-case -> NLTK word_tokenize -> strip punctuation
	tokens -> count with FreqDist -> WordNet-lemmatize each token,
	merging counts of tokens that share a lemma.
	"""
	tokens = remove_stopword(word_tokenize(data.lower()))
	freq_dist = FreqDist(tokens)

	word_freq = {}
	for term in freq_dist.keys():
		lemma = wordnet.lemmatize(term)
		count = freq_dist.get(term)
		# Distinct surface forms may share a lemma: accumulate counts.
		word_freq[lemma] = word_freq.get(lemma, 0) + count
	return word_freq

def tokenize_docs():
	"""Load every document from the raw .zip packages into the DB.

	Runs only once: if TermFrequency rows already exist the corpus is
	assumed to have been imported previously.  Each zip entry is parsed
	by read_document(); unparseable entries are skipped with a message.
	"""
	# Run once
	if TermFrequency.objects.all().exists():
		return

	# Only the zipped packages inside the raw folder contain documents
	package_files = [pfile for pfile in parser.list_files() if pfile.endswith('.zip')]

	for package_filename in package_files:
		entries = list_content(parser, package_filename, verbose=False)
		docs = []
		i = 0
		with ZipFile(parser.get_path(package_filename), 'r') as package_file:
			for entry in entries:
				content = package_file.read(entry)
				try:
					# package_filename[:-4] strips the '.zip' suffix
					document = read_document(content, package_filename[:-4])
				except Exception:
					# Best-effort: a malformed entry is skipped (reported below),
					# but narrow the previous bare except so ^C still works.
					document = None
				if document:
					content = document['content'].strip()
					title = document['title']
					doc = Document(content=content.strip(), title=title)
					docs.append(doc)
					i += 1
					print("Progress: %s / %s" % (i, len(entries)))
				else:
					print("Document error: %s / %s" % (package_filename, entry))
			# One bulk insert per package keeps DB round-trips low
			Document.objects.bulk_create(docs)

def generate_termfreq():
	"""Generate and bulk-save TermFrequency rows for every Document.

	Rows are accumulated in memory and flushed to the DB every LIMIT
	documents.  Bug fixed: the previous version skipped
	process_termfreq() for the document at each LIMIT boundary (the
	`else` branch flushed but never processed that doc), silently
	dropping one document per batch.
	"""
	LIMIT = 5000  # documents accumulated per bulk_create
	terms = []
	i = 0
	for doc in Document.objects.all():
		print("Saving term for: %s" % doc.id)
		# Always process the document, THEN decide whether to flush.
		terms += process_termfreq(doc)
		i += 1
		if i >= LIMIT:
			print("Flushing %d tf set" % LIMIT)
			TermFrequency.objects.bulk_create(terms)
			terms = []
			i = 0
	# Flush the final partial batch
	if len(terms) > 0:
		TermFrequency.objects.bulk_create(terms)
	
def process_termfreq(doc):
	"""Build (unsaved) TermFrequency objects for every term in *doc*.

	Returns a list of TermFrequency instances with score initialised to
	0; the caller is responsible for bulk-saving them.
	"""
	counts = get_term_freq_dict(doc.content)
	return [
		TermFrequency(term=token, frequency=freq, document=doc, score=0)
		for token, freq in counts.items()
	]
	
def process_data(data, title, count = ''):
	"""Persist one raw document plus its term frequencies.

	data  -- full document text
	title -- document title
	count -- optional total shown in the progress message

	Kept for the legacy plain-text ingestion path.  Fixed the Python 2
	print statement (the rest of the file uses print()) and reuses
	process_termfreq() so the TF-building logic lives in one place.
	"""
	# Save document first so the TermFrequency rows can reference doc.id
	doc = Document(content=data.strip(), title=title)
	doc.save()
	print("processing doc %d %s (of %s)" % (doc.id, data[:40].replace('\r','').replace('\n',''), count))

	# Save term frequencies in one bulk insert
	TermFrequency.objects.bulk_create(process_termfreq(doc))

# Calculate number of appearance of each term in whole
# document space		
def calculate_docs_frequency():
	print("Calculating doc frequency ...")
	# Run once
	if DocFrequency.objects.all().exists():
		return
	
	# Else, insert data
	
	# Group by 'term' and Count it
	q = TermFrequency.objects.values('term').annotate(num_docs=Count('term'))
	
	# Create Django object
	doc_freqs = [DocFrequency(**item) for item in q]
	
	# Save it
	DocFrequency.objects.bulk_create(doc_freqs)

def flush(update_script):
	"""Run the accumulated UPDATE statements in a single transaction.

	NOTE(review): executescript() is a sqlite3-specific cursor method,
	so this only works with the SQLite backend — confirm if the project
	ever moves to another DB.
	"""
	update_script = 'BEGIN TRANSACTION; %s COMMIT;' % update_script
	print("Saving tfidf ...")
	cursor = connection.cursor()
	try:
		cursor.executescript(update_script)
	finally:
		# The previous version leaked the cursor; always close it.
		cursor.close()
	
# Calculate tfidf for each term
def calculate_tfidf():
	"""Compute a tf-idf score for every TermFrequency row and persist it.

	Scores are written back through batched raw UPDATE statements (see
	flush()) rather than the ORM, because updating hundreds of
	thousands of rows one-by-one is far too slow.

	Bug fixed: ``math.log10(n/num_docs)`` used integer division under
	Python 2, truncating the ratio and producing wildly wrong scores;
	the numerator is now coerced to float.
	"""
	print("=> Retrieving tf and df ...")

	docs = Document.objects.values('id').all()
	print("All docs: %s" % len(docs))

	# term -> number of documents containing that term
	print("Preparing doc freq map")
	# NOTE(review): n counts DocFrequency rows (distinct terms), not
	# documents — classic idf uses the document count; confirm intent.
	n = DocFrequency.objects.count()
	doc_freq_map = {}
	for doc_freq in DocFrequency.objects.all():
		doc_freq_map[doc_freq.term] = doc_freq.num_docs

	i = 0
	update_script = ''
	DOC_QUEUE_LENGTH = 100         # documents fetched per TermFrequency query
	TF_ITEM_QUEUE_LENGTH = 300000  # pending UPDATE statements before a flush
	doc_queue = []
	for j in range(len(docs)):
		doc = docs[j]
		print("processing doc ... %d" % doc['id'])

		doc_queue.append(doc['id'])
		# Batch documents so we hit the DB once per DOC_QUEUE_LENGTH docs
		# (the last, possibly partial, batch is forced through).
		if len(doc_queue) < DOC_QUEUE_LENGTH and j < len(docs) - 1:
			continue
		print("Current term pending: %d" % i)
		q = TermFrequency.objects.filter(document_id__in=doc_queue).values('id', 'term', 'frequency').all()
		doc_queue = []
		for item in q:
			num_docs = doc_freq_map[item['term']]
			# tf-idf = tf * log10(N / df); float() guards against
			# Python 2 integer division.
			item_score = item['frequency'] * math.log10(float(n) / num_docs)
			update_script += "UPDATE tfidf_termfrequency SET score = %f WHERE id = %d; " % (item_score, item['id'])
			i += 1
			if i > TF_ITEM_QUEUE_LENGTH:
				print("Saving %d of %d [id=%d]" % (i, len(q), item['id']))
				flush(update_script)
				update_script = ''
				i = 0

	# Flush whatever is still pending
	if len(update_script) > 0:
		flush(update_script)
	print("All saved!")

def main():
	"""Run the full TF-IDF pipeline: load documents, count term
	frequencies, compute document frequencies, then score every term.

	Fixed the Python 2 print statement so the file is consistent with
	the print() calls used everywhere else.
	"""
	tokenize_docs()
	generate_termfreq()
	calculate_docs_frequency()
	calculate_tfidf()
	print("Done!")

# Script entry point: execute the whole pipeline end-to-end
if __name__ == '__main__':
	main()
