"""

This script is for the post-reu paper work.

It goes through the list of edits with byte data in the editbytes extraction, and for each edit, 
adds offset year of edit, lifespan of user, cohort of user, and the inter-edit time calculated 
exactly, or with our model in the case of the first edit in a session. So for each line in the 
editbytes extraction, it prints an 11-tuple of 
(username, timestamp, offset, lifespan, cohort, namespace, revert status, inter-edit time, bytes added, bytes removed, bytes changed)

"""

import os
import sys
import multiprocessing
import datetime
import traceback

import wp_datetime as dt
import wp_namespace as ns
import wp_model as md

def main():
	"""Entry point: fan per-file translation tasks out to a worker pool.

	Usage: script.py <inputDir> <outputDir>

	Every regular file in inputDir is handed to processFile() in a child
	process. The per-cohort stats dicts the workers return are merged and
	written to <outputDir>/corrected.stat.
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	inputDir = sys.argv[1]
	global outputDir
	outputDir = sys.argv[2]

	filePathArgs = []
	for inputFileName in os.listdir(inputDir):
		inputFilePath = inputDir + '/' + inputFileName
		outputFilePath = outputDir + '/' + inputFileName
		if os.path.isfile(inputFilePath):
			filePathArgs.append((inputFilePath, outputFilePath))

	results = []
	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE, callback = results.extend)
	workerPool.close()
	workerPool.join()

	# Merge the per-file stats. BUG FIX: processFile() returns None when a
	# worker hits an exception; previously a None entry here raised a
	# TypeError while iterating it, so skip failed files explicitly.
	stats = {}
	for result in results:
		if result is None:
			continue
		for cohort in result:
			if cohort not in stats:
				stats[cohort] = {'censored': 0, 'zombies': 0, 'edits': 0}
			for metric in result[cohort]:
				stats[cohort][metric] += result[cohort][metric]

	censoredout = ['Censored Users By Cohort\n']
	zombieout = ['Zombie Users by Cohort\n']
	editsout = ['Removed Edits by Cohort\n']

	for cohort in sorted(stats):
		censoredout.append(str(cohort) + '\t' + str(stats[cohort]['censored']) + '\n')
		zombieout.append(str(cohort) + '\t' + str(stats[cohort]['zombies']) + '\n')
		editsout.append(str(cohort) + '\t' + str(stats[cohort]['edits']) + '\n')

	# BUG FIX: the stats file handle was never closed before.
	statsFile = open(outputDir + '/' + 'corrected.stat', 'w')
	try:
		statsFile.write('\n'.join(["".join(censoredout), "".join(zombieout), "".join(editsout)]) + '\n')
	finally:
		statsFile.close()
			
def processFile(filePathArgs):
	
	try:
		inputFilePath = filePathArgs[0]
		outputFilePath = filePathArgs[1]

		print timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath

		inputFileHandle = open(inputFilePath,"r")
		outputFileHandle = open(outputFilePath, 'a+')

		stats = translate(inputFileHandle, outputFileHandle)
	
		inputFileHandle.close()
		outputFileHandle.close()

		print timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath
	
		return stats

	except:
		# There is a problem where if a child process encounters an error or exception, the traceback gets written
		# to stderr but not flushed, so you never see it. This code fixes that problem by pushing it to stdout, where it is 
		# piped back to the main process through the pool.
		traceback.print_exc(limit=5, file=sys.stdout)
		sys.stdout.flush()
		return None

def translate(inputFileHandle, outputFileHandle):
	"""Stream edit lines grouped by consecutive username.

	Each complete run of lines belonging to one user is handed to
	processUser() (input is assumed sorted so a user's edits are adjacent).
	Returns the stats dict accumulated across all users.
	"""

	CORRECT_ACTIVITY = True

	stats = {}
	pending = []

	for rawLine in inputFileHandle:
		record = parseLine(rawLine)

		# A change of username marks the end of the previous user's run.
		if pending and record['username'] != pending[0]['username']:
			processUser(pending, outputFileHandle, stats, CORRECT_ACTIVITY)
			pending = []

		pending.append(record)

	# Flush the final user, which never sees a username change in the loop.
	processUser(pending, outputFileHandle, stats, CORRECT_ACTIVITY)

	return stats

def processUser(userData, outputFileHandle, stats=None, correct = False):
	"""Emit the output rows for one user's edit history.

	When correct is true the history first passes through correctActivity();
	a user whose whole history was censored produces no output. Returns
	stats (mutated in place by correctActivity) for convenience.
	"""

	# Gap (seconds) that splits two edits into separate sessions.
	SESSION_THRESHOLD = 3600

	# Minimum edits in the trailing month for the user to count as active.
	ACTIVE_MONTHLY_EDITS = 5

	if not correct:
		addWork(userData, outputFileHandle, SESSION_THRESHOLD)
	else:
		kept = correctActivity(userData, ACTIVE_MONTHLY_EDITS, stats)
		if kept:
			addWork(kept, outputFileHandle, SESSION_THRESHOLD)

	return stats

def correctActivity(userData, monthly_edits, stats):
	"""Censor the inactive portions of one user's edit history.

	A user is "born" (becomes active) the first time they make at least
	monthly_edits edits within a trailing month, and "dies" (expires) when
	a full year passes with no month reaching that level of activity. Edits
	made before birth or after death are censored; the surviving edits are
	returned, and per-cohort counts of censored users/edits and "zombies"
	(users who kept editing after expiring) are accumulated into stats.

	A note about date comparison in this method. This whole script runs really
	slowly, so datetime has been taken out of this section. So the comparisons
	for expired users, and for what constitutes a month (the same date in the
	previous month), are not always exactly the same. The comparisons used in
	wp_datetime are agnostic to whether or not dates actually exist; if a
	number representing a date would be written lexicographically after
	another, then it comes chronologically after it. So for instance, dates
	within leap years may exist at will in this section.

	There is some serious date math going on in here, watch out.
	"""
	correct = []      # edits kept after correction
	censored = []     # edits removed by correction
	last_month = []   # sliding window: edits within the trailing ~month
	last_alive = None # most recent edit made while the user counted as active
	alive = None      # timestamp when the user became active, or None
	died = None       # timestamp when the user expired, or None
	zombie = False    # user reached active levels again after expiring
	last = None       # previous edit, for the sortedness sanity check

	for latest in userData:

		# Sanity check: the user's edits must arrive in chronological order.
		if last:
			assert latest['timestamp'] >= last['timestamp'], "Unsorted user data in correction: latest "+latest['timestamp']+' is earlier than last '+last['timestamp']

		# Build the string for "the same date one month ago" (timestamps are
		# 'YYYY-MM-...', so slice [7:] keeps everything from the day onward).
		year = int(latest['timestamp'][0:4])
		month = int(latest['timestamp'][5:7])
		if month == 1:
			month = '12'
			year -= 1
			year = str(year)
		else:
			month -= 1
			if month < 10:
				month = '0' + str(month)
			else:
				month = str(month)
			year = str(year)

		monthago = year + '-' + month + latest['timestamp'][7:]

		# Expire window entries older than a month. The window is sorted, so
		# we can stop at the first entry that is still recent.
		old = []
		lastpast = None
		for past in last_month:

			if lastpast:
				assert past['timestamp'] >= lastpast['timestamp'], "Unsorted last month in correction: past "+past['timestamp']+"is earlier than lastpast "+lastpast['timestamp']
			lastpast = past

			if past['timestamp'] <= monthago:
				old.append(past)
			else:
				break

		for item in old:
			last_month.remove(item)

		last_month.append(latest)

		# Provisionally classify this edit: kept while the user is alive,
		# censored otherwise (possibly revised below).
		if alive and not died:
			correct.append(latest)
		else:
			censored.append(latest)

		if len(last_month) >= monthly_edits:
			# The user is at active levels this month.

			if alive is None and died is None:
				# Birth: retroactively keep the whole window, which was
				# provisionally censored edit by edit above.
				alive = last_month[0]['timestamp']
				correct.extend(last_month)
				for item in last_month:
					censored.remove(item)

			if alive and not died:
				last_alive = latest

			if died:
				# Active again after expiring: mark the user a zombie.
				zombie = True

		else:
			# Below active levels this month.

			if alive and not died:

				# If the last active edit is at least a year before this one,
				# the user expired one year after that last active edit.
				year = str(int(latest['timestamp'][0:4]) - 1)
				exp_date = year + latest['timestamp'][4:]

				if last_alive['timestamp'] <= exp_date:
					year = str(int(last_alive['timestamp'][0:4]) + 1)
					died = year + last_alive['timestamp'][4:]

					# This edit was provisionally kept above; move it over.
					correct.remove(latest)
					censored.append(latest)

		last = latest

	cohort = dt.year_fast(userData[0]['timestamp'])

	if len(censored) > 0:
		if cohort not in stats:
			stats[cohort] = {'zombies': 0, 'censored': 0, 'edits': 0}

		stats[cohort]['censored'] += 1
		if zombie:
			stats[cohort]['zombies'] += 1
		stats[cohort]['edits'] += len(censored)

	return correct



def addWork(userData, outputFileHandle, threshold):
	"""Write one tab-separated output row per edit for a single user.

	For edits that open a new session (the user's first edit, or a gap of at
	least threshold seconds since the previous edit) the inter-edit time is
	estimated with the model; otherwise the exact gap is used. Rows are:
	username, timestamp, offset, lifespan, cohort, namespace, revert status,
	inter-edit time, bytes added, bytes removed, bytes changed.
	"""

	firstedit = userData[0]['timestamp']

	cohort = dt.year_fast(firstedit)
	lifespan = dt.offsetyear_fast(firstedit, userData[-1]['timestamp'])

	previous = None

	for entry in userData:

		entry['offset'] = dt.offsetyear_fast(firstedit, entry['timestamp'])

		gap = None
		if previous is not None:
			gap = dt.duration(previous['timestamp'], entry['timestamp']).total_seconds()

		# First edit of a session: no usable gap, fall back to the model.
		if gap is None or gap >= threshold:
			entry['duration'] = md.calculate(entry['namespace'], entry['bytes added'], entry['bytes removed'], cohort, entry['offset'])
		else:
			entry['duration'] = gap

		row = '\t'.join([entry['username'], entry['timestamp'], str(entry['offset']), str(lifespan), str(cohort), entry['namespace'], entry['revert status'], str(entry['duration']), entry['bytes added'], entry['bytes removed'], entry['bytes changed']])
		outputFileHandle.write(row + '\n')

		previous = entry

def parseLine(line):
	"""Parse one tab-separated edit record into a field dict.

	Expected columns: username, namespace, bytes added, bytes removed,
	bytes changed, timestamp, and an optional revert status (lowercased;
	defaults to 'no reversion' when the column is absent).
	"""

	tokens = line.strip().split('\t')

	fields = {
		'username': tokens[0],
		'namespace': tokens[1],
		'bytes added': tokens[2],
		'bytes removed': tokens[3],
		'bytes changed': tokens[4],
		'timestamp': tokens[5],
	}

	if len(tokens) >= 7:
		fields['revert status'] = tokens[6].lower()
	else:
		fields['revert status'] = 'no reversion'

	return fields

def timestamp():
	"""Return the current local time as 'YYYY-MM-DDTHH:MM:SS' for log lines."""

	now = datetime.datetime.now()
	return now.strftime('%Y-%m-%dT%H:%M:%S')

# Protects against the script being loaded and run recursively in the child process
# Did not seem to be a problem, but better safe than sorry
if __name__ == '__main__':
	main()
