"""

This is a template for creating a translation or other processing task that can effectively utilize massively parallel
resources, e.g. on platinum or titanium.

There are some restrictions to writing jobs to use this template.

The input must be in separate files, which will each be processed in parallel by a worker process. If your input is only in one file, 
consider splitting it up between multiple files if it still makes sense in isolation.

The output will be put into separate files, one for each input file and worker process. So in other words, if you have n input files, each 
will be processed by one process, and you will end up with n output files. The output files must be completely separate. Writing to 
multiple output files in parallel will have undefined behavior unless you use some sort of concurrency control, which is not 
implemented here.

Specify a directory where the input files are, and an output directory where the output files will be written to from the command line arguments.

Change the NUM_WORKERS variable to determine how many worker processes the script will use for processing. Input tasks will be fed into 
these worker processes as they finish pending tasks.

Change the TASK_PARTITION_SIZE variable to determine how many tasks are given to a worker process at a time.

"""

import wp_datetime as dtime
import os
import sys
import multiprocessing
import datetime
import traceback
import collections

def main():
	start = timestamp()
	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24
	
	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1
	filesRemoved = False
	
	inputDir = sys.argv[1]
	outputDir = sys.argv[2]
	
	global NS
	NS = str(sys.argv[3])	
	global Y
	Y = str(sys.argv[4])
	
	inputFileList = os.listdir(inputDir)

	for ffile in os.listdir(outputDir):
		os.remove(outputDir+ffile)
		filesRemoved =True
	if filesRemoved:
		print "Previous files have been deleted."

	filePathArgs = []
	
	for inputFileName in inputFileList:
		
		inputFilePath = inputDir+'/'+inputFileName
		outputFilePath = outputDir+'/'+inputFileName
		
		if os.path.isfile(inputFilePath):
			filePathArgs.append( (inputFilePath, outputFilePath) )

	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()

	totalDict = collections.defaultdict(int)
	total = 0
	totalCheck = 0
	totals = collections.defaultdict(int)

	for ffile in os.listdir(outputDir):
		f = open(outputDir+ffile,"r")
		for line in f:
			tokens = line.split('\t')
			totalDict[tokens[0]] += int(tokens[1])
			totals[tokens[0].split(' ')[0]] += int(tokens[1])
	end = timestamp()
	#print total
	for key in sorted(totalDict.keys()):
		print key + ": " + str(float(totalDict[key])/totals[key.split(' ')[0]]) + " " + str(totalDict[key])
	print str(total)
	print start, end
			
def processFile(filePathArgs):
	
	try:
		inputFilePath = filePathArgs[0]
		outputFilePath = filePathArgs[1]

		print timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath

		inputFileHandle = open(inputFilePath,"r")
		outputFileHandle = open(outputFilePath, 'a+')

		translate(inputFileHandle, outputFileHandle)
	
		inputFileHandle.close()
		outputFileHandle.close()

		#print timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath
	
	except:
		# There is a problem where if a child process encounters an error or exception, the traceback gets written
		# to stderr but not flushed, so you never see it. This code fixes that problem by pushing it to stdout, where it is 
		# piped back to the main process through the pool.
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()

def translate(inputFileHandle, outputFileHandle):
	"""Aggregate one tab-separated input file into "key\\tcount" output lines.

	For each input line whose column 3 equals the global NS and whose
	column 4 starts with year Y, increments a counter keyed by
	"<offset> <tenure>", where both values come from dtime.offsetyear_fast.
	Writes the sorted counts to outputFileHandle, one "key\\tcount" per line.

	NS and Y are module globals set by main() before the pool forks.
	"""
	aDict = collections.defaultdict(int)
	# Per-user cache of the tenure string so offsetyear_fast runs once per user.
	# NOTE(review): tenure is computed from the FIRST qualifying line's
	# tokens[3]/tokens[4] and then reused for that user -- confirm intended.
	userDict = collections.defaultdict(str)

	for line in inputFileHandle:
		tokens = line.split('\t')
		user = tokens[0].strip()
		tenure = userDict[user]
		namespace = tokens[2]

		if namespace == NS:
			startyear = tokens[3].split('-')[0]

			if startyear == Y:
				offset = str(dtime.offsetyear_fast(tokens[3], tokens[1]))

				if tenure == "":
					userDict[user] = str(dtime.offsetyear_fast(tokens[3], tokens[4]))
					tenure = userDict[user]
				aDict[offset + " " + tenure] += 1

	# Removed: unused 'total' accumulator and stale commented-out writes
	# that referenced a nonexistent 'userList'.
	for key in sorted(aDict.keys()):
		outputFileHandle.write(str(key) + "\t" + str(aDict[key]) + "\n")

def timestamp():
	"""Return the current UTC time as an ISO-8601 'Z'-suffixed string."""
	# Bugfix: datetime.now() returns LOCAL time, but the format appends a
	# literal 'Z' (the UTC designator), producing misleading timestamps on
	# any machine not set to UTC. utcnow() makes the value match the suffix.
	return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')

# Guard the entry point so that worker processes spawned by multiprocessing
# do not recursively run main() when this module is re-imported in a child.
# Did not seem to be a problem in practice, but better safe than sorry.
if __name__ == '__main__':
	main()
