"""

This script takes in files that represent classifications of user-years into different clusters, and aggregates their data into:

	1. Count of users in each cluster by tenure, e.g.
		tenure	c1	c2	c3
		0	100	50	50
		1	90	20	30
		...
		9	160	20	30

	2. Proportion of users in each cluster by tenure, e.g.
		tenure	p1	p2	p3
		0	0.9	0.05	0.05
		1	0.8	0.1	0.1
		...
		9	0.7	0.2	0.1

Each block of statistics gets its own output file.

Specify an input directory to read input files from.
Specify an output directory to write output files to.
Specify the number of clusters in the model. This probably isn't necessary but is a sanity check in case no users are classified to a cluster.

Several parameters controlling which elements this script looks for are hard-coded rather than passed as arguments. The defaults here
should be good for our project.

This script is well commented enough that reading through the code should answer any questions about how these statistics are compiled.

"""

import sys
import os
import datetime

def main():
	"""Aggregate cluster-classification files into tenure statistics files.

	Command-line arguments:
		1. input directory to read classification files from
		2. output directory to write statistics files to
		3. number of clusters in the model (ensures every cluster gets a
		   column even if no user was ever classified into it)

	Writes two tab-separated files to the output directory:
		tenure.stat            - count of entries per cluster, by tenure
		tenure_proportion.stat - proportion of entries per cluster, by tenure
	"""

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]
	numclusters = int(sys.argv[3])


	# Collect the paths of the input files, skipping subdirectories.
	# Files are opened one at a time below (inside `with`) so handles are
	# always closed instead of being held open for the whole run.

	inputFilePaths = []

	for inputFileName in os.listdir(inputDir):

		inputFilePath = inputDir + '/' + inputFileName

		if os.path.isfile(inputFilePath):
			inputFilePaths.append(inputFilePath)


	# Initialize count and proportion by tenure.
	# tenure[t][c] is the number of user-year entries in cluster c whose user
	# has tenure t; proportion_tenure[t][c] is that count divided by the
	# total number of entries at tenure t.

	tenure = {}
	proportion_tenure = {}

	start_tenure = 0		# The first tenure we look at in the data, inclusive.
	end_tenure = 9			# The last tenure we look at in the data, inclusive. The largest tenure in our data in 2012 is 9

	for tenure_index in range(start_tenure, end_tenure + 1):

		tenure[tenure_index] = {}
		proportion_tenure[tenure_index] = {}

		for cluster_index in range(1, numclusters + 1):

			tenure[tenure_index][cluster_index] = 0
			proportion_tenure[tenure_index][cluster_index] = None


	# Compile counts

	print(timestamp() + " Processing input files in " + inputDir)

	total_entries = 0		# The total number of entries we look at

	for inputFilePath in inputFilePaths:

		print(timestamp() + " Processing input file " + inputFilePath)

		# Buffer of entries for the current user so tenure can be computed
		# once all of that user's lines have been seen. The per-user update
		# therefore lags one line behind the raw entry counter, and the
		# final user needs an explicit flush after the loop, since the loop
		# only flushes on a change of username.
		userdata = []

		with open(inputFilePath, 'r') as inputFileHandle:

			for line in inputFileHandle:

				fields = parseLine(line)

				# A new username means the previous user's entries are
				# complete: fold them into the tenure counts and start a
				# fresh buffer. Otherwise keep accumulating this user.

				if userdata and fields['user'] != userdata[0]['user']:
					updateTenure(tenure, userdata)
					userdata = [fields]
				else:
					userdata.append(fields)

				# Increment the total entry counter

				total_entries += 1


		# Flush the final user of the file. The guard makes an empty input
		# file a no-op instead of an index error inside updateTenure.

		if userdata:
			updateTenure(tenure, userdata)


		print(timestamp() + " Finished processing input file " + inputFilePath)


	print(timestamp() + " Finished processing input files in " + inputDir)


	# Compute tenure proportion

	tenure_total = 0	# Sanity check to make sure we counted all entries

	for tenure_index in tenure:

		# Figure out the total users in this tenure group

		single_tenure_total = sum(tenure[tenure_index].values())
		tenure_total += single_tenure_total

		# Compute the proportion based on total users in this tenure group
		# (0 for an empty tenure group, to avoid dividing by zero).

		for cluster_index in tenure[tenure_index]:

			if single_tenure_total != 0:
				proportion_tenure[tenure_index][cluster_index] = float(tenure[tenure_index][cluster_index]) / float(single_tenure_total)
			else:
				proportion_tenure[tenure_index][cluster_index] = 0


	print("total " + str(total_entries))
	print("tenure total " + str(tenure_total))

	# Every entry must have landed in exactly one tenure bucket.
	assert(tenure_total == total_entries), "Tenure count is missing " + str(total_entries - tenure_total) + " entries"



	print(timestamp() + " Finished compiling proportions")


	# Write output files

	print(timestamp() + " Writing output files")

	_writeStatFile(tenure, outputDir + '/' + 'tenure.stat', "tenure count")
	_writeStatFile(proportion_tenure, outputDir + '/' + 'tenure_proportion.stat', "tenure proportion")


def _writeStatFile(stats, filepath, label):
	"""Write one tab-separated row per tenure to filepath.

	Each row is the tenure index followed by one column per cluster, with
	tenures and clusters both in ascending order. label names the block of
	statistics in the log messages (e.g. "tenure count").
	"""

	print(timestamp() + " Writing " + label + " statistics file to " + filepath)

	with open(filepath, 'w') as filehandle:

		for tenure_index in sorted(stats):

			output = [str(tenure_index)]

			for cluster_index in sorted(stats[tenure_index]):

				output.append(str(stats[tenure_index][cluster_index]))

			filehandle.write('\t'.join(output) + '\n')

	print(timestamp() + " Finished writing " + label + " statistics file to " + filepath)


def updateTenure(tenure, userdata):
	"""Fold one user's entries into the per-tenure cluster counts.

	userdata is the list of parsed entries (dicts from parseLine) for a
	single user, in year order. The user's tenure is the span from their
	first to their last observed year, and every one of their entries is
	counted in that tenure bucket under the entry's cluster.

	An empty userdata list is a no-op, so callers can flush unconditionally
	(e.g. after reading an empty input file) without an index error.

	Raises KeyError if the computed tenure or an entry's cluster falls
	outside the ranges the caller initialized in tenure.
	"""
	if not userdata:
		return

	# Tenure = last observed year minus first observed year.
	usertenure = userdata[-1]['year'] - userdata[0]['year']

	for entry in userdata:
		tenure[usertenure][entry['cluster']] += 1

def parseLine(line):
	"""Parse one tab-separated input line into an entry dict.

	The line's first four fields are username, offset, year, and cluster;
	any trailing fields are ignored. Returns a dict with key 'user' (string)
	and keys 'offset', 'year', and 'cluster' (ints).
	"""
	tokens = line.strip().split('\t')

	return {
		'user': tokens[0],
		'offset': int(tokens[1]),
		'year': int(tokens[2]),
		'cluster': int(tokens[3]),
	}

def timestamp():
	"""Return the current local time as 'YYYY-MM-DDTHH:MM:SS' for log lines."""

	# Zeroing the microseconds makes isoformat() emit exactly the
	# seconds-resolution form, identical to strftime('%Y-%m-%dT%H:%M:%S').
	return datetime.datetime.now().replace(microsecond=0).isoformat()

main()
