"""
newRawEdits.py

Calculates raw edit counts per namespace, cohort, and offset year in parallel.

"""

import os
import sys
import multiprocessing
import datetime
import traceback
import math
import collections
import wp_datetime as dtime

def main():

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24
	
	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1
	
	inputDir = sys.argv[1]
	outputDir = sys.argv[2]
	removed = False

	inputFileList = os.listdir(inputDir)

	for ffile in os.listdir(outputDir):
		os.remove(outputDir+ffile)
		removed = True
	if removed:
		print "Files removed."

	filePathArgs = []
	
	for inputFileName in inputFileList:
		
		inputFilePath = inputDir+'/'+inputFileName
		outputFilePath = outputDir+'/'+inputFileName
		
		if os.path.isfile(inputFilePath):
			filePathArgs.append( (inputFilePath, outputFilePath) )

	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)	
	workerPool.close()
	workerPool.join()

	outputFileList = os.listdir(outputDir)

	final = open(outputDir+"FINALrawedits.txt", "a")

	totalDict = collections.defaultdict(int)
	total = 0

	for ofn in outputFileList:
		f = open(outputDir+"/"+ofn, 'r')
		for line in f:
			tokens = line.split('\t')
			name = tokens[0]
			value = tokens[1]
			totalDict[name] += int(value)
			
	for key in sorted(totalDict.keys()):
		final.write(key + "\t" + str(totalDict[key]) + "\n")
			
def processFile(filePathArgs):
	
	try:
		inputFilePath = filePathArgs[0]
		outputFilePath = filePathArgs[1]

		print timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath

		inputFileHandle = open(inputFilePath,"r")
		outputFileHandle = open(outputFilePath, 'a+')

		translate(inputFileHandle, outputFileHandle)
	
		inputFileHandle.close()
		outputFileHandle.close()

		print timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath
	
	except:
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()


def translate(inputFileHandle, outputFileHandle):
	"""Tally one count per input line, keyed by namespace + cohort + offset year,
	and write the sorted "<key>\t<count>" totals to outputFileHandle.

	Input lines are tab-separated; tokens[1] is an event timestamp, tokens[2]
	the namespace, tokens[3] presumably the user's registration timestamp
	(cohort = its leading "YYYY") -- TODO confirm against the producer of
	these files. Offset-year computation is delegated to
	wp_datetime.offsetyear_fast.
	"""
	# NOTE(review): the original declared SESSION_THRESHOLD, IGNORE_REVERTS,
	# IGNORE_REVERTED, lastFields and nextFields but never used them; removed
	# as dead code (session/revert handling was evidently never implemented here).

	totalDict = collections.defaultdict(int)

	for line in inputFileHandle:
		tokens = line.split('\t')
		cohort = tokens[3].split('-')[0]
		offset = dtime.offsetyear_fast(tokens[3], tokens[1])
		ns = tokens[2]
		totalDict[ns + cohort + str(offset)] += 1

	for key in sorted(totalDict.keys()):
		outputFileHandle.write(key + "\t" + str(totalDict[key]) + "\n")
		

def timestamp():
	"""Return the current local time as 'YYYY-MM-DDTHH:MM:SSZ' for log lines."""
	now = datetime.datetime.now()
	return '{0:%Y-%m-%dT%H:%M:%SZ}'.format(now)

def timestampToDate(dateString):
	"""Parse a 'YYYY-MM-DDTHH:MM:SSZ' string (surrounding whitespace allowed)
	into a naive datetime object."""
	cleaned = dateString.strip()
	return datetime.datetime.strptime(cleaned, '%Y-%m-%dT%H:%M:%SZ')

def computeYear(dateString):
	"""Return the four-digit year of a timestamp string, as a string."""
	parsed = timestampToDate(dateString)
	return str(parsed.year)

def computeOffsetYear(startYear, dateString):
	"""Return (year of dateString) - startYear as a string, e.g. '0' for the
	same calendar year."""
	yearDelta = int(computeYear(dateString)) - int(startYear)
	return str(yearDelta)


# Script entry point: expects <inputDir> <outputDir> on the command line.
if __name__ == '__main__':
	main()


	
