"""

Reads in a list of entries, each representing an edit, as tab-separated 7-tuples of
	Username, namespace, inserted bytes, deleted bytes, overall changed bytes (not absolute value), timestamp, revert status ('Revert', 'Reverted', or no reversion indicated by no token)
Writes out a list of entries, each representing an inter-edit session, as tab-separated 10-tuples of
	Username, inter-edit time (seconds), namespace, inserted bytes, deleted bytes, overall bytes changed, end timestamp of inter-edit time, user entry year, offset year, reversion status ('Revert', 'Reverted', 'No reversion')

"""

import os
import sys
import multiprocessing
import datetime
import traceback

def main():
	"""Translate every edit file under sys.argv[1] into a same-named
	inter-edit-session file under sys.argv[2], in parallel.

	Usage: script.py <inputDir> <outputDir>
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]

	# Build (input, output) path pairs for every regular file in the input
	# directory; os.path.join replaces manual '/' concatenation for portability.
	# Subdirectories are skipped by the isfile check, as before.
	filePathArgs = []
	for inputFileName in os.listdir(inputDir):
		inputFilePath = os.path.join(inputDir, inputFileName)
		outputFilePath = os.path.join(outputDir, inputFileName)
		if os.path.isfile(inputFilePath):
			filePathArgs.append((inputFilePath, outputFilePath))

	# map_async + close + join: queue all work, then wait for completion.
	# processFile traps its own exceptions, so no result inspection is needed here.
	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()
			
def processFile(filePathArgs):
	"""Translate one input file into its output file.

	filePathArgs is an (inputFilePath, outputFilePath) tuple; the single
	tuple argument is required because Pool.map_async passes one argument
	per task.

	Exceptions are caught and printed (not re-raised) so one bad file
	cannot kill the worker pool.
	"""
	try:
		inputFilePath, outputFilePath = filePathArgs

		print(timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath)

		# 'with' guarantees both handles close even if translate() raises
		# (the original leaked handles on error). Output stays in append
		# mode, so re-runs add to existing output rather than truncating it.
		with open(inputFilePath, "r") as inputFileHandle:
			with open(outputFilePath, 'a+') as outputFileHandle:
				translate(inputFileHandle, outputFileHandle)

		print(timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath)

	except Exception:
		# except Exception (not bare except) so KeyboardInterrupt/SystemExit
		# still propagate; report the failure on stdout and keep going.
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()


def translate(inputFileHandle, outputFileHandle):
	"""Collapse consecutive edits into inter-edit-session records.

	Reads tab-separated edit lines from inputFileHandle (see parseLine for
	the field layout; input is assumed to be grouped by user and ordered by
	timestamp within each user — TODO confirm with the producer of these
	files) and writes one tab-separated 10-tuple per qualifying pair of
	consecutive same-user edits to outputFileHandle.
	"""

	# Number of seconds we will consider to be the interval between two distinct editing sessions
	SESSION_THRESHOLD = 3600

	# When set, pairs whose more recent edit is a revert / was reverted are dropped.
	IGNORE_REVERTS = True
	IGNORE_REVERTED = True

	lastFields = None
	nextFields = None

	for line in inputFileHandle:
		nextFields = parseLine(line)

		# Very first line: treat it as its own predecessor so the pair test
		# below sees a zero duration and skips it, and record the user's
		# entry year baseline.
		if lastFields is None:
			lastFields = nextFields
			startYear = computeYear(lastFields['timestamp'])

		# A different user starts a new entry-year baseline.
		if nextFields['username'] != lastFields['username']:
			startYear = computeYear(nextFields['timestamp'])

		duration = timestampToDate(nextFields['timestamp']) - timestampToDate(lastFields['timestamp'])

		# If the next and last line are edits from the same user
		# And they are not the same edit (i.e. we ignore the first edit)
		# And the edits are not part of two different sessions
		# And the more recent edit is not a revert or reverted, if set appropriately
		if nextFields['username'] == lastFields['username'] and SESSION_THRESHOLD > duration.total_seconds() > 0 and not ((IGNORE_REVERTS and nextFields['revert status'] == 'Revert') or (IGNORE_REVERTED and nextFields['revert status'] == 'Reverted')):
			offset = computeOffsetYear(startYear, nextFields['timestamp'])
			# Strip any embedded newlines (unstripped input tokens carry one)
			# before terminating the record ourselves.
			output = '\t'.join([nextFields['username'], str(duration.total_seconds()), nextFields['namespace'], nextFields['bytes added'], nextFields['bytes removed'], nextFields['bytes changed'], nextFields['timestamp'], startYear, offset, nextFields['revert status']]).replace('\n', '') + '\n'
			outputFileHandle.write(output)

		lastFields = nextFields

def timestamp():
	"""Return the current local time as a 'YYYY-MM-DDTHH:MM:SSZ' string."""
	# NOTE(review): the trailing 'Z' implies UTC but now() is local time —
	# confirm whether UTC was intended.
	now = datetime.datetime.now()
	return now.strftime('%Y-%m-%dT%H:%M:%SZ')

def timestampToDate(dateString):
	"""Parse a 'YYYY-MM-DDTHH:MM:SSZ' string into a naive datetime.

	Surrounding whitespace (e.g. a trailing newline from file input) is
	ignored.
	"""
	cleaned = dateString.strip()
	return datetime.datetime.strptime(cleaned, '%Y-%m-%dT%H:%M:%SZ')

def computeYear(dateString):
	"""Return the year of a 'YYYY-MM-DDTHH:MM:SSZ' timestamp as a string."""
	# Parse inline (same format and stripping as timestampToDate) and take
	# the year component.
	parsed = datetime.datetime.strptime(dateString.strip(), '%Y-%m-%dT%H:%M:%SZ')
	return str(parsed.year)

def computeOffsetYear(startYear, dateString):
	"""Return (year of dateString) minus startYear, as a string.

	startYear is a year in string form; dateString is a
	'YYYY-MM-DDTHH:MM:SSZ' timestamp.
	"""
	# Parse inline (same format and stripping as computeYear) and subtract.
	eventYear = datetime.datetime.strptime(dateString.strip(), '%Y-%m-%dT%H:%M:%SZ').year
	return str(eventYear - int(startYear))

def parseLine(line):
	"""Parse one tab-separated edit line into a field dictionary.

	Expected token layout:
		username, namespace, bytes added, bytes removed, bytes changed,
		timestamp[, revert token]

	The optional seventh token maps to fields['revert status']: 'Revert'
	and 'Reverted' pass through unchanged, an empty or absent token becomes
	'No reversion', and any other value becomes 'Bad reversion token' (and
	is reported on stdout).

	Note: only the revert token is stripped here; the last positional token
	may retain a trailing newline, which downstream code removes before
	writing output.
	"""
	tokens = line.split('\t')

	fields = {}
	fields['username'] = tokens[0]
	fields['namespace'] = tokens[1]
	fields['bytes added'] = tokens[2]
	fields['bytes removed'] = tokens[3]
	fields['bytes changed'] = tokens[4]
	fields['timestamp'] = tokens[5]

	if len(tokens) < 7:
		fields['revert status'] = 'No reversion'
	else:
		revert = tokens[6].strip(' \t\n\r')
		if revert in ('Revert', 'Reverted'):
			# Collapsed the two identical pass-through branches into one test.
			fields['revert status'] = revert
		elif revert == '':
			fields['revert status'] = 'No reversion'
		else:
			fields['revert status'] = 'Bad reversion token'
			# print() form runs on both Python 2 and 3; the original
			# statement form was a syntax error under Python 3.
			print('Poorly formed reversion data, non-expected token found: ' + revert)

	return fields

# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
	main()
