"""

This script takes a list of user edits with byte data, one line per edit,
formatted like the output of week7/editPrep.py.

It outputs statistics about:

amount of work, broken down by Kittur's methodology, by date -- one table each
for edit count, time, and bytes changed
proportion of work, broken down by Kittur's methodology, by date -- one table
each for edit count, time, and bytes changed

overall statistics about:
	number of users by cohort and lifespan
	

"""

import os
import sys
import datetime

import wp_datetime as dt
import wp_namespace as ns

def main():
    """Aggregate user-edit records into count and proportion statistic files.

    Command-line arguments:
        argv[1] -- directory of input files (one tab-separated line per edit,
                   as produced by week7/editPrep.py)
        argv[2] -- directory where the .stat output tables are written

    Writes user_count / user_proportion tables (by cohort and lifespan) and
    count + proportion tables for edits, hours, and bytes (by year and work
    category).
    """
    inputDir = sys.argv[1]
    outputDir = sys.argv[2]

    START_COHORT = 2003
    END_COHORT = 2012
    END_YEAR = 2012

    # users[cohort][lifespan] -> user count; totalusers[cohort] -> user count.
    users = {}
    totalusers = {}
    for year in range(START_COHORT, END_COHORT + 1):
        users[year] = dict((offset, 0) for offset in range(END_YEAR - year + 1))
        totalusers[year] = 0

    # work[yearString][category][metric] accumulators plus grand totals.
    work = {}
    totalwork = {'edits': 0, 'hours': 0.0, 'bytes': 0}

    for inputFileName in os.listdir(inputDir):
        inputFilePath = inputDir + '/' + inputFileName
        if os.path.isfile(inputFilePath):
            processFile(inputFilePath, users, totalusers, work, totalwork)

    allusers = sum(totalusers.values())

    user_prop = _userProportions(users, allusers)
    work_prop = _workProportions(work)

    _writeUserTables(outputDir, users, user_prop)
    _writeWorkTables(outputDir, work, work_prop)


def _userProportions(users, allusers):
    """Return each (cohort, lifespan) cell's share of all users.

    Cells with a zero count stay integer 0, mirroring the count table.
    """
    user_prop = {}
    for cohort in sorted(users):
        user_prop[cohort] = {}
        for lifespan in sorted(users[cohort]):
            count = users[cohort][lifespan]
            if count == 0:
                user_prop[cohort][lifespan] = 0
            else:
                user_prop[cohort][lifespan] = float(count) / float(allusers)
    return user_prop


def _workProportions(work):
    """Return per-date, per-category share of each metric's date total."""
    work_prop = {}
    for date in sorted(work):
        work_prop[date] = {}

        # Total each metric across all categories for this date.
        datetotal = {'edits': 0, 'bytes': 0, 'hours': 0.0}
        for item in sorted(work[date]):
            for metric in sorted(work[date][item]):
                datetotal[metric] += work[date][item][metric]

        for item in sorted(work[date]):
            work_prop[date][item] = {}
            for metric in sorted(work[date][item]):
                total = datetotal[metric]
                if total == 0:
                    work_prop[date][item][metric] = 0
                else:
                    work_prop[date][item][metric] = \
                        float(work[date][item][metric]) / float(total)
    return work_prop


def _writeUserTables(outputDir, users, user_prop):
    """Write the user_count.stat and user_proportion.stat tables."""
    with open(outputDir + '/user_count.stat', 'w') as count_file, \
         open(outputDir + '/user_proportion.stat', 'w') as prop_file:
        count_file.write('\t'.join(['Cohort', 'Lifespan', 'User Count']) + '\n')
        prop_file.write('\t'.join(['Cohort', 'Lifespan', 'User Proportion']) + '\n')
        for cohort in sorted(users):
            for lifespan in sorted(users[cohort]):
                key = [str(cohort), str(lifespan)]
                count_file.write(
                    '\t'.join(key + [str(users[cohort][lifespan])]) + '\n')
                prop_file.write(
                    '\t'.join(key + [str(user_prop[cohort][lifespan])]) + '\n')


def _writeWorkTables(outputDir, work, work_prop):
    """Write count and proportion tables for edits, hours, and bytes."""
    types = ['article', 'article talk', 'user', 'user talk', 'other', 'maintenance']
    header = ['Date', 'Article', 'Article Talk', 'User', 'User Talk',
              'Other', 'Maintenance']

    # (file name, source table, metric key) for each of the six outputs.
    outputs = [
        ('edit_count.stat', work, 'edits'),
        ('edit_prop.stat', work_prop, 'edits'),
        ('hour_count.stat', work, 'hours'),
        ('hour_prop.stat', work_prop, 'hours'),
        ('byte_count.stat', work, 'bytes'),
        ('byte_prop.stat', work_prop, 'bytes'),
    ]
    for fileName, table, metric in outputs:
        with open(outputDir + '/' + fileName, 'w') as outFile:
            outFile.write('\t'.join(header) + '\n')
            for date in sorted(table):
                row = [date] + [str(table[date][item][metric]) for item in types]
                outFile.write('\t'.join(row) + '\n')



def processFile(inputFilePath, users, totalusers, work, totalwork):
	"""Tally one input file's edits into the shared accumulators.

	inputFilePath -- path to a tab-separated edit file (see parseLine)
	users, totalusers -- per-cohort user counters, updated in place
	work, totalwork -- per-year/category work counters, updated in place

	Assumes lines are grouped by username, so a username change marks a
	new user. Cohort/lifespan values outside the ranges initialized by the
	caller will raise KeyError.
	"""
	print(timestamp() + " Processing " + inputFilePath)

	# Use a context manager so the handle is closed even on a parse error
	# (the original left the file open).
	with open(inputFilePath, 'r') as inputFileHandle:

		lastline = None
		for line in inputFileHandle:

			fields = parseLine(line)

			# Update the users by cohort, lifespan count: a username
			# different from the previous line starts a new user.
			if not lastline or fields['username'] != lastline['username']:
				users[int(fields['cohort'])][int(fields['lifespan'])] += 1
				totalusers[int(fields['cohort'])] += 1

			# Bucket work by calendar year (cohort year + offset),
			# rather than by full edit date.
			date = str(int(fields['cohort']) + int(fields['offset']))
			if date not in work:
				initWorkTable(work, date)

			classification = classifyWork(fields)

			work[date][classification]['edits'] += 1
			totalwork['edits'] += 1

			# Duration arrives in seconds; store hours.
			hours = float(fields['duration']) / float(3600)
			work[date][classification]['hours'] += hours
			totalwork['hours'] += hours

			# Magnitude of the change, regardless of direction.
			delta = abs(int(fields['bytes changed']))
			work[date][classification]['bytes'] += delta
			totalwork['bytes'] += delta

			lastline = fields

	print(timestamp() + " Finished processing " + inputFilePath)

def classifyWork(fields):
	"""Return the work category for one edit record.

	Reverts always count as 'maintenance'; otherwise the category follows
	the edit's namespace, defaulting to 'other'.
	"""
	if fields['revert status'] == 'revert':
		return 'maintenance'

	namespace_category = {
		'main': 'article',
		'talk': 'article talk',
		'user': 'user',
		'user talk': 'user talk',
	}
	return namespace_category.get(fields['namespace'], 'other')

def parseLine(line):
	"""Split one tab-separated edit record into a dict of named fields.

	Raises IndexError when the line has fewer than eleven columns.
	"""
	field_names = ('username', 'timestamp', 'offset', 'lifespan', 'cohort',
	               'namespace', 'revert status', 'duration', 'bytes added',
	               'bytes removed', 'bytes changed')

	tokens = line.strip().split('\t')
	return {name: tokens[position]
	        for position, name in enumerate(field_names)}

def initWorkTable(work, date):
	"""Install zeroed per-category metric counters for *date* in *work*.

	*date* must be the simple date returned from simpleDate(), not a full
	timestamp. Each category gets its own fresh metrics dict.
	"""
	categories = ('article', 'article talk', 'user', 'user talk',
	              'other', 'maintenance')

	work[date] = {category: {'edits': 0, 'hours': 0.0, 'bytes': 0}
	              for category in categories}

def simpleDate(timestamp):
	"""Return the date portion (everything before 'T') of an ISO timestamp."""

	date_part, _, _ = timestamp.partition('T')
	return date_part

def timestamp():
	"""Return the current local time as 'YYYY-MM-DDTHH:MM:SS' for logging."""

	return '{:%Y-%m-%dT%H:%M:%S}'.format(datetime.datetime.now())


# Run the pipeline only when executed as a script, so the helper
# functions can be imported without side effects.
if __name__ == '__main__':
        main()
