"""

Reads in a list of entries, each representing an edit, of tab-separated 7-tuples of 
	Username, namespace, inserted bytes, deleted bytes, overall changed bytes (not absolute value), timestamp, revert status (revert, reverted, or no reversion indicated by no token)
Gives an output of entries, representing an inter-edit session of tab-separated 10-tuples of
	Username, inter-edit time (seconds), namespace, inserted bytes, deleted bytes, overall bytes changed, end timestamp of inter-edit time, user entry year, offset year, reversion status ('Revert', 'Reverted', 'No reversion')

"""

import os
import sys
import multiprocessing
import datetime
import traceback
import math
import collections

def main():
	"""Driver: fan the files in inputDir out to a worker pool, then aggregate.

	Command line: <script> inputDir outputDir
	Every regular file in inputDir is processed by processFile() into a
	same-named file in outputDir; the per-file totals are then summed and
	ratio summaries are appended to FINAL.txt inside outputDir.
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]

	filePathArgs = []
	for inputFileName in os.listdir(inputDir):
		inputFilePath = os.path.join(inputDir, inputFileName)
		outputFilePath = os.path.join(outputDir, inputFileName)
		if os.path.isfile(inputFilePath):
			filePathArgs.append((inputFilePath, outputFilePath))

	workerPool = multiprocessing.Pool(NUM_WORKERS)
	# Workers swallow and report their own exceptions (see processFile),
	# so fire-and-forget map_async + close/join is sufficient here.
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()

	# Aggregate the per-file totals written by the workers.
	# NOTE: listing happens before FINAL.txt is created, so FINAL.txt is not
	# read back in on this run; re-running against a populated outputDir will
	# append to (and re-read) earlier results.
	outputFileList = os.listdir(outputDir)

	totalDict = collections.defaultdict(float)
	total = 0

	for ofn in outputFileList:
		# with-statement fixes the original's leaked file handles.
		with open(os.path.join(outputDir, ofn), 'r') as f:
			for line in f:
				tokens = line.split('\t')
				name = tokens[0]
				value = float(tokens[1])
				# Accumulate under the full key plus its two halves:
				# the trailing 5 characters and the leading remainder.
				# NOTE(review): assumes every key carries a 5-char
				# cohort/offset suffix -- confirm against translate()'s keys.
				totalDict[name] += value
				totalDict[name[-5:]] += value
				totalDict[name[:-5]] += value
				total += value

	# os.path.join fixes the original "outputDir+'FINAL.txt'" path, which
	# dropped the directory separator and wrote next to outputDir instead.
	with open(os.path.join(outputDir, "FINAL.txt"), "a") as final:
		for key in sorted(totalDict.keys()):
			if len(key) > 5 and "2" in key:
				# Full key: report its share of the matching 5-char suffix bucket.
				final.write(key + "\t" + str(totalDict[key] / totalDict[key[-5:]]) + "\n")
			if "2" not in key:
				# Suffix-free key: report its share of the grand total.
				final.write(key + "\t" + str(totalDict[key] / total) + "\n")
				print(total)
			
def processFile(filePathArgs):
	"""Worker entry point: translate one input file into one output file.

	filePathArgs: an (inputFilePath, outputFilePath) tuple (a single tuple,
	because Pool.map delivers one task argument at a time).

	Any exception is printed and swallowed so that one bad file does not
	kill the worker pool.
	"""
	try:
		inputFilePath, outputFilePath = filePathArgs

		print(timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath)

		# with-statements guarantee the handles close even if translate() raises.
		with open(inputFilePath, "r") as inputFileHandle:
			with open(outputFilePath, 'a+') as outputFileHandle:
				translate(inputFileHandle, outputFileHandle)

		print(timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath)

	except Exception:
		# Deliberately broad (but no longer a bare except, which would also
		# trap KeyboardInterrupt/SystemExit): report and keep the pool alive.
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()

def translate(inputFileHandle, outputFileHandle):
	"""Sum session durations per (namespace + cohort + offset) key.

	Reads tab-separated records (see parseLine) from inputFileHandle and
	writes one "key<TAB>total" line per key, sorted by key, to
	outputFileHandle.

	Removed unused leftovers from the original: SESSION_THRESHOLD,
	IGNORE_REVERTS/IGNORE_REVERTED and the never-read lastFields/nextFields
	pairing -- none of them influenced the output.
	"""

	totalDict = collections.defaultdict(float)

	for line in inputFileHandle:
		fields = parseLine(line)
		key = fields['namespace'] + fields['cohort'] + fields['offset']
		totalDict[key] += float(fields['duration'])

		# Sanity check: a well-formed record carries at least 10 tab-separated
		# tokens (the 10th being the reversion status, which parseLine skips).
		# The original "warning" was a bare string expression -- a no-op --
		# so malformed lines were silently accepted; actually report them.
		if len(line.split('\t')) < 10:
			print("DATA MALFUNCTION. REPEAT. DATA MALFUNCTION.")

	for key in sorted(totalDict.keys()):
		outputFileHandle.write(key + "\t" + str(totalDict[key]) + "\n")

def timestamp():
	"""Return the current local time as a 'YYYY-MM-DDTHH:MM:SSZ' string."""
	now = datetime.datetime.now()
	return now.strftime('%Y-%m-%dT%H:%M:%SZ')

def timestampToDate(dateString):
	"""Parse a 'YYYY-MM-DDTHH:MM:SSZ' string (surrounding whitespace ignored) into a datetime."""
	cleaned = dateString.strip()
	return datetime.datetime.strptime(cleaned, '%Y-%m-%dT%H:%M:%SZ')

def computeYear(dateString):
	"""Extract the year from a 'YYYY-MM-DDTHH:MM:SSZ' timestamp, returned as a string."""
	parsed = datetime.datetime.strptime(dateString.strip(), '%Y-%m-%dT%H:%M:%SZ')
	return str(parsed.year)

def computeOffsetYear(startYear, dateString):
	"""Return, as a string, how many years dateString's year is past startYear."""
	endYear = datetime.datetime.strptime(dateString.strip(), '%Y-%m-%dT%H:%M:%SZ').year
	return str(endYear - int(startYear))

def parseLine(line):
	"""Split one tab-separated edit record into a dict of named fields.

	Tokens 0-8 are indexed directly, so a line with fewer than nine
	tab-separated tokens raises IndexError; any extra tokens (such as a
	trailing reversion-status column) are ignored.
	"""
	tokens = line.split('\t')
	return {
		'username': tokens[0],
		'duration': tokens[1],
		'namespace': tokens[2],
		'bytes added': tokens[3],
		'bytes removed': tokens[4],
		'bytes changed': tokens[5],
		'timestamp': tokens[6],
		'cohort': tokens[7],
		'offset': tokens[8],
	}

def getNumber(n):
	"""Map a namespace name to its numeric weight.

	Returns None for any name not in the table (matching the original
	if/elif chain, which fell off the end without a return).
	"""
	weights = {
		"main": 0,
		"talk": -36.757,
		"book": -27.0394,
		"book talk": -117.5718,
		"category": -95.4098,
		"category talk": -163.2207,
		"file": -45.6593,
		"file talk": -125.0978,
		"help": 77.6596,
		"help talk": 82.3328,
		"mediawiki": 116.2143,
		"mediawiki talk": 17.6563,
		"portal": -39.1726,
		"portal talk": -102.1312,
		"project": -0.1303,
		"project talk": 47.1360,
		"template": -24.9224,
		"template talk": -15.2166,
		"user": 8.5755,
		"user talk": -103.8151,
	}
	return weights.get(n)

# Entry-point guard: run the pipeline only when executed as a script,
# not when this module is imported (required for multiprocessing workers
# to import the module without re-running main()).
if __name__ == '__main__':
	main()


	
