"""

This script takes in files that represent classifications of user-years into different clusters, and aggregates their data into:

	1. Count of users in each cluster by cohort, e.g.
		cohort	c1	c2	c3
		2003	100	10	10
		2004	20	30	30
		...
		2012	30	50	40

	2. Proportion of users in each cluster by cohort, e.g.
		cohort	p1	p2	p3
		2003	0.9	0.05	0.05
		2004	0.8	0.1	0.1
		...
		2012	0.5	0.25	0.25

Each block of statistics gets its own output file.

Specify an input directory to read input files from.
Specify an output directory to write output files to.
Specify the number of clusters in the model. This probably isn't necessary but is a sanity check in case no users are classified to a cluster.

There are lots of parameters for which elements to look for in this script that are not passed as arguments. The defaults here should be good for 
our project.

This script is well commented enough that reading through the code should answer any questions about how these statistics are compiled.

"""

import sys
import os
import datetime

def main():
	"""Aggregate per-user-year cluster classifications into cohort statistics.

	Command-line arguments:
		1. Input directory of classification files (tab-separated: user, offset, year, cluster).
		2. Output directory for the statistics files.
		3. Number of clusters in the model (so clusters nobody was classified
		   into still get a zero column).

	Writes two files to the output directory:
		cohort.stat            -- count of users in each cluster, by cohort
		cohort_proportion.stat -- proportion of users in each cluster, by cohort
	"""

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]
	numclusters = int(sys.argv[3])


	# Make a list of input file paths, skipping anything that isn't a regular file.

	inputFilePaths = []

	for inputFileName in os.listdir(inputDir):

		inputFilePath = inputDir + '/' + inputFileName

		if os.path.isfile(inputFilePath):
			inputFilePaths.append(inputFilePath)


	# Initialize count and proportion by cohort

	cohort = {}			# Address cohort[cohort][cluster]
	proportion_cohort = {}		# Address proportion_cohort[cohort][cluster]

	start_cohort = 2003		# The first cohort we look at in the data, inclusive.
	end_cohort = 2012		# The last cohort we look at in the data, inclusive.

	for cohort_index in range(start_cohort, end_cohort + 1):

		cohort[cohort_index] = {}
		proportion_cohort[cohort_index] = {}

		for cluster_index in range(1, numclusters + 1):

			cohort[cohort_index][cluster_index] = 0
			proportion_cohort[cohort_index][cluster_index] = None


	# Compile counts

	print(timestamp() + " Processing input files in " + inputDir)

	total_entries = 0		# The total number of entries we look at

	for inputFilePath in inputFilePaths:

		print(timestamp() + " Processing input file " + inputFilePath)

		# userdata buffers every entry for the current user so the cohort can be
		# taken from the user's first entry. It is flushed into the counts when
		# the user changes, and once more after the loop for the final user.
		userdata = []

		# with-statement closes the handle even on error (the original kept
		# every handle open for the life of the script).
		with open(inputFilePath, 'r') as inputFileHandle:

			for line in inputFileHandle:

				fields = parseLine(line)

				# On a user change, flush the previous user's entries into the
				# cohort counts and start buffering the new user; otherwise
				# keep accumulating the current user.
				if userdata and fields['user'] != userdata[0]['user']:
					updateCohort(cohort, userdata)
					userdata = [fields]
				else:
					userdata.append(fields)

				# Increment the total entry counter

				total_entries += 1

		# Flush the last user in the file. Guarded so an empty input file
		# doesn't crash updateCohort on userdata[0].
		if userdata:
			updateCohort(cohort, userdata)

		print(timestamp() + " Finished processing input file " + inputFilePath)


	print(timestamp() + " Finished processing input files in " + inputDir)


	# Compute proportions

	print(timestamp() + " Compiling proportions")


	# Compute cohort proportion

	cohort_total = 0	# Sanity check to make sure we counted all entries

	for cohort_index in cohort:

		single_cohort_total = 0		# The total for one cohort that we compute the proportion with

		# Figure out the total users in this cohort

		for cluster_index in cohort[cohort_index]:

			cohort_total += cohort[cohort_index][cluster_index]
			single_cohort_total += cohort[cohort_index][cluster_index]

		# Compute the proportion based on total users in this cohort

		for cluster_index in cohort[cohort_index]:

			if single_cohort_total != 0:
				proportion_cohort[cohort_index][cluster_index] = float(cohort[cohort_index][cluster_index]) / float(single_cohort_total)
			else:
				proportion_cohort[cohort_index][cluster_index] = 0

	assert(cohort_total == total_entries), "Cohort count is missing " + str(total_entries - cohort_total) + " entries"


	# Write output files

	print(timestamp() + " Writing output files")


	# Write cohort count statistics file

	cohort_filename = 'cohort.stat'						# Filename to write cohort count statistics to
	cohort_filepath = outputDir + '/' + cohort_filename

	print(timestamp() + " Writing cohort count statistics file to " + cohort_filepath)

	# One tab-separated row per cohort: cohort year, then one count per cluster.
	with open(cohort_filepath, 'w') as cohort_filehandle:

		for cohort_index in sorted(cohort):

			output = [str(cohort_index)]

			for cluster_index in sorted(cohort[cohort_index]):

				output.append(str(cohort[cohort_index][cluster_index]))

			cohort_filehandle.write('\t'.join(output) + '\n')

	print(timestamp() + " Finished writing cohort count statistics file to " + cohort_filepath)


	# Write  proportion statistics file

	proportion_cohort_filename = 'cohort_proportion.stat'			# Filename to write cohort proportion statistics to
	proportion_cohort_filepath = outputDir + '/' + proportion_cohort_filename

	print(timestamp() + " Writing cohort proportion statistics file to " + proportion_cohort_filepath)

	# Same row layout as the count file, with proportions instead of counts.
	with open(proportion_cohort_filepath, 'w') as proportion_cohort_filehandle:

		for cohort_index in sorted(proportion_cohort):

			output = [str(cohort_index)]

			for cluster_index in sorted(proportion_cohort[cohort_index]):

				output.append(str(proportion_cohort[cohort_index][cluster_index]))

			proportion_cohort_filehandle.write('\t'.join(output) + '\n')

	print(timestamp() + " Finished writing cohort proportion statistics file to " + proportion_cohort_filepath)


def updateCohort(cohort, userdata):
	"""Fold one user's buffered entries into the cohort count table.

	The user's cohort is the 'year' of their first entry; every entry then
	increments cohort[usercohort][entry's cluster]. An empty userdata list is
	a no-op (previously this crashed with IndexError on userdata[0], which
	main() could trigger via an empty input file).

	Mutates cohort in place; returns None.
	"""
	if not userdata:
		return

	usercohort = userdata[0]['year']

	for entry in userdata:
		cohort[usercohort][entry['cluster']] += 1

def parseLine(line):
	"""Parse one tab-separated input line into a field dictionary.

	Expected columns, in order: user (string), offset (int), year (int),
	cluster (int). Extra trailing columns are ignored; trailing whitespace
	(including the newline) is stripped first.
	"""
	tokens = line.strip().split('\t')

	return {
		'user': tokens[0],
		'offset': int(tokens[1]),
		'year': int(tokens[2]),
		'cluster': int(tokens[3]),
	}

def timestamp():
	"""Return the current local time as 'YYYY-MM-DDTHH:MM:SS' for log prefixes."""
	# isoformat() yields 'YYYY-MM-DDTHH:MM:SS[.ffffff]'; the first 19
	# characters are exactly the seconds-precision format we want.
	return datetime.datetime.now().isoformat()[:19]

# Only run when executed as a script, not when imported as a module.
if __name__ == '__main__':
	main()
