# Turns preprocessed editbytes data into rich user-year vectors by edit count, hours, and bytes changed.

import os
import sys
import multiprocessing
import datetime
import traceback

import wp_namespace as ns
import wp_datetime as dt

def main():
	"""Entry point. Usage: script.py <inputDir> <outputDir>.

	Creates one output subdirectory per metric ('edits', 'hours', 'bytes'),
	then fans every regular file in inputDir out to a multiprocessing pool;
	each worker translates one input file into three per-metric output files.
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]

	metrics = ['edits', 'hours', 'bytes']

	# Make one output subdirectory per metric. If a directory already exists,
	# mkdir raises OSError; report it and keep going (the run can reuse it).
	for metric in metrics:
		try:
			os.mkdir(outputDir + '/' + metric + '/')
		except OSError as e:
			print(e.strerror)

	# Build (inputFilePath, {metric: outputPath}) work items, one per regular
	# file in the input directory (subdirectories and other entries are skipped).
	filePathArgs = []
	for inputFileName in os.listdir(inputDir):
		inputFilePath = inputDir + '/' + inputFileName
		if os.path.isfile(inputFilePath):
			outputPaths = {}
			for metric in metrics:
				outputPaths[metric] = outputDir + '/' + metric + '/' + inputFileName
			filePathArgs.append((inputFilePath, outputPaths))

	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()
			
def processFile(filePathArgs):
	"""Pool worker: translate one input file into its per-metric output files.

	filePathArgs is a (inputFilePath, outputPaths) tuple, where outputPaths
	maps metric name -> output file path. All file handles are closed even if
	translate() raises (the original leaked them on error).
	"""
	try:
		inputFilePath, outputPaths = filePathArgs

		print(timestamp() + " Starting processing for " + inputFilePath + " to " + str(outputPaths))

		outputs = {}
		inputFileHandle = open(inputFilePath, 'r')
		try:
			for metric in outputPaths:
				outputs[metric] = open(outputPaths[metric], 'w')

			translate(inputFileHandle, outputs)
		finally:
			# Close everything we managed to open, error or not.
			inputFileHandle.close()
			for handle in outputs.values():
				handle.close()

		print(timestamp() + " Finished processing for " + inputFilePath + " in " + str(outputPaths))

	except:
		# There is a problem where if a child process encounters an error or exception, the traceback gets written
		# to stderr but not flushed, so you never see it. This code fixes that problem by pushing it to stdout, where it is
		# piped back to the main process through the pool. The broad except is deliberate: this is the
		# top-level boundary of the worker process.
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()

def translate(inputFileHandle, outputs):
	"""Aggregate per-line edit records into per-user, per-offset totals.

	Input lines must be grouped by username (a user's lines are contiguous).
	For each user, accumulates edits/hours/bytes per (offset, namespace) and
	flushes the accumulated rows via printUserData() whenever the username
	changes, and once more at end of file.

	Fixes: the original crashed with a TypeError on an empty input file
	(printUserData(None, ...)); it also shadowed the builtin `next` and
	carried a `starttime` variable that was assigned but never read.
	"""
	prev = None
	count = {}

	for line in inputFileHandle:

		fields = parseLine(line)

		# New user encountered: flush the previous user's totals and reset.
		if prev is not None and fields['username'] != prev['username']:
			printUserData(prev, count, outputs)
			count = {}

		offset = fields['offset']

		if offset not in count:
			count[offset] = initEditCount()

		nsCount = count[offset][fields['namespace']]
		nsCount['edits'] += 1
		nsCount['hours'] += fields['duration'] / float(3600)  # seconds -> hours
		nsCount['bytes'] += abs(fields['bytes changed'])

		prev = fields

	# Flush the data for the last user in the file (skip if the file was empty).
	if prev is not None:
		printUserData(prev, count, outputs)


def printUserData(userdata, count, outputs):
	"""Write one tab-separated row per offset, per metric, for a single user.

	Each row is: username, cohort, lifespan, offset, cohort+offset, followed
	by one column per used namespace (sorted) with that metric's total.
	The identity prefix is built once instead of three times verbatim.
	"""
	metrics = ('edits', 'hours', 'bytes')

	for offset in sorted(count):
		prefix = [userdata['username'], str(userdata['cohort']), str(userdata['lifespan']), str(offset), str(userdata['cohort'] + offset)]

		rows = {}
		for metric in metrics:
			rows[metric] = list(prefix)

		# Namespace columns appear in sorted order, identically in all three files.
		for namespace in sorted(ns.used_namespaces()):
			for metric in metrics:
				rows[metric].append(str(count[offset][namespace.name][metric]))

		for metric in metrics:
			outputs[metric].write('\t'.join(rows[metric]) + '\n')


def initEditCount():
	"""Return a fresh counter: {namespace name: {metric: 0.0}} for every used namespace."""
	metrics = ('edits', 'bytes', 'hours')
	return dict(
		(namespace.name, dict((metric, 0.0) for metric in metrics))
		for namespace in ns.used_namespaces()
	)


def parseLine(line):
	"""Parse one tab-separated edit record into a field dict.

	Column order and types are fixed by the schema table below; a line with
	fewer than 11 columns raises IndexError, exactly as before.
	"""
	schema = (
		('username', str),
		('timestamp', str),
		('offset', int),
		('lifespan', int),
		('cohort', int),
		('namespace', str),
		('revert status', str),
		('duration', float),
		('bytes added', int),
		('bytes removed', int),
		('bytes changed', int),
	)

	tokens = line.strip().split('\t')

	fields = {}
	for position, (name, convert) in enumerate(schema):
		fields[name] = convert(tokens[position])

	return fields

def timestamp():
	"""Current local time as an ISO-8601-style string with seconds precision."""
	return '{0:%Y-%m-%dT%H:%M:%S}'.format(datetime.datetime.now())

# Standard entry-point guard. multiprocessing re-imports this module inside each
# child process, so main() must only run when the script is executed directly —
# otherwise every worker would try to spawn its own pool.
if __name__ == '__main__':
	main()
