"""

This script does preliminary translation to vectorize user data in preparation for LDA.

Input: list of tab-delimited triples of (username, timestamp, namespace) entries, each representing an edit.

Output: list of 23-tuples of (username, offset year, actual year, main edits, talk edits, user edits, user talk edits, project edits, project talk edits, file edits, file talk edits, mediawiki edits, mediawiki talk edits, template edits, template talk edits, help edits, help talk edits, category edits, category talk edits, portal edits, portal talk edits, book edits, book talk edits), each representing the edits a user made during a particular offset year across all of those namespaces, where the actual year is the calendar year during which that offset year begins. Keep in mind that "offset year 0, actual year 2011" does not mean that the user's offset year 0 covers all of 2011 — only that the user made their first edit at some point in 2011. Their offset year 0 is the 365-day year starting at their first edit.

This script ignores years with no edits, as they are not helpful for LDA.

"""

import os
import sys
import multiprocessing
import datetime
import traceback

import wp_namespace as ns
import wp_datetime as dt

def main():
	"""Fan out translation of every file in an input directory to a worker pool.

	Command-line arguments:
		sys.argv[1] -- directory of tab-delimited (username, timestamp, namespace)
		               edit files, one edit per line.
		sys.argv[2] -- directory where the per-user, per-offset-year vector files
		               are written (same file names as the inputs).
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]

	filePathArgs = []

	for inputFileName in os.listdir(inputDir):

		# os.path.join handles separators correctly even when the directory
		# arguments already end in '/'.
		inputFilePath = os.path.join(inputDir, inputFileName)
		outputFilePath = os.path.join(outputDir, inputFileName)

		# Skip subdirectories and anything else that is not a regular file.
		if os.path.isfile(inputFilePath):
			filePathArgs.append( (inputFilePath, outputFilePath) )

	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()
			
def processFile(filePathArgs):
	
	try:
		inputFilePath = filePathArgs[0]
		outputFilePath = filePathArgs[1]

		print timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath

		inputFileHandle = open(inputFilePath,"r")
		outputFileHandle = open(outputFilePath, 'a+')

		translate(inputFileHandle, outputFileHandle)
	
		inputFileHandle.close()
		outputFileHandle.close()

		print timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath
	
	except:
		# There is a problem where if a child process encounters an error or exception, the traceback gets written
		# to stderr but not flushed, so you never see it. This code fixes that problem by pushing it to stdout, where it is 
		# piped back to the main process through the pool.
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()

def translate(inputFileHandle, outputFileHandle):
	"""Aggregate per-namespace edit counts by user and offset year, then emit them.

	inputFileHandle  -- open handle over tab-delimited (username, timestamp,
	                    namespace) lines; assumes all of a user's edits are
	                    contiguous and chronologically ordered -- TODO confirm
	                    against the upstream extraction step.
	outputFileHandle -- open handle the vector rows are written to.

	Offset year 0 starts at the user's first edit (see module docstring).
	"""

	last = None
	starttime = None

	# Maps offset year -> {namespace name -> edit count} for the current user.
	count = {}

	for line in inputFileHandle:

		fields = parseLine(line)

		if last is None:
			# First row of the file: this user's clock starts at their first edit.
			starttime = fields['timestamp']
		elif fields['username'] != last['username']:
			# User boundary: flush the previous user's counts and reset state.
			printUserData(last['username'], starttime, count, outputFileHandle)
			starttime = fields['timestamp']
			count = {}

		offset = dt.offsetyear_fast(starttime, fields['timestamp'])

		if offset not in count:
			count[offset] = initEditCount()

		count[offset][fields['namespace']] += 1

		last = fields

	# Flush the final user. The guard also handles a completely empty input
	# file, which previously raised a TypeError on last['username'].
	if last is not None:
		printUserData(last['username'], starttime, count, outputFileHandle)


def printUserData(username, starttime, editCount, outputFileHandle):
	"""Write one tab-delimited row per offset year for a single user.

	username         -- the user the rows belong to.
	starttime        -- timestamp of the user's first edit; its calendar year
	                    becomes the base for the 'actual year' column.
	editCount        -- {offset year -> {namespace name -> edit count}}.
	outputFileHandle -- open handle the rows are written to.

	Row layout: username, offset year, actual year, then the edit counts in
	sorted-namespace order.
	"""
	startyear = int(dt.year_fast(starttime))
	for offset in sorted(editCount):
		row = [username, str(offset), str(startyear + offset)]
		row.extend(str(editCount[offset][namespace.name])
		           for namespace in sorted(ns.used_namespaces()))
		outputFileHandle.write('\t'.join(row) + '\n')
			


def initEditCount():
	"""Return a fresh edit counter: one zero-valued entry per used namespace name."""
	# (An obsolete commented-out variant that pre-built all offset years was
	# kept here as a dead string literal; it has been removed.)
	return {namespace.name: 0 for namespace in ns.used_namespaces()}

def parseLine(line):
	"""Parse one tab-delimited edit record into a field dict.

	line -- a '\\t'-separated string whose first three fields are username,
	timestamp, and namespace; extra fields are ignored.

	Returns {'username': ..., 'timestamp': ..., 'namespace': ...} with each
	value stripped of surrounding whitespace (including the trailing newline).
	"""
	tokens = line.split('\t')
	return {
		'username': tokens[0].strip(),
		'timestamp': tokens[1].strip(),
		'namespace': tokens[2].strip(),
	}

def timestamp():
	"""Return the current local time as 'YYYY-MM-DDTHH:MM:SS' (for log lines)."""
	# With microseconds zeroed, isoformat() produces exactly the
	# '%Y-%m-%dT%H:%M:%S' layout the rest of the logging expects.
	now = datetime.datetime.now()
	return now.replace(microsecond=0).isoformat()

# Guard the entry point so that worker processes spawned by multiprocessing
# do not recursively re-run main() when this module is imported in a child.
# Did not seem to be a problem in practice, but better safe than sorry.
if __name__ == '__main__':
	main()
