"""

This script takes in files that represent classifications of user-years into different clusters, and aggregates their data into:

	1. Count of users in each cluster by offset year, e.g.
		offset	c1	c2	c3
		0	131	21	30
		1	100	10	3
		2	80	7	2
		...
		9	20	20	20
	
	2. Proportion of users in each cluster to all users by offset year, e.g.
		offset	p1	p2	p3
		0	0.9	0.07	0.03
		1	0.8	0.1	0.1
		2	0.7	0.2	0.1
		...
		9	0.6	0.2	0.2

Each block of statistics gets its own output file.

Specify an input directory to read input files from.
Specify an output directory to write output files to.
Specify the number of clusters in the model. This probably isn't necessary but is a sanity check in case no users are classified to a cluster.

Several parameters controlling which elements this script looks for are hard-coded rather than passed as arguments. The defaults here
should be good for our project.

This script is well commented enough that reading through the code should answer any questions about how these statistics are compiled.

"""

import sys
import os
import datetime

def main():
	"""Aggregate per-user-year cluster classifications into statistics files.

	Command-line arguments:
		1. inputDir    -- directory of tab-delimited classification files
		2. outputDir   -- directory to write the two statistics files into
		3. numclusters -- number of clusters in the model; pre-seeding the
		   tables with this count guarantees a column for every cluster even
		   if no user was classified to it

	Writes 'offset.stat' (counts per cluster by offset year) and
	'offset_proportion.stat' (per-offset-year proportions) into outputDir.
	"""

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]
	numclusters = int(sys.argv[3])


	# Make a list of input file paths, skipping subdirectories.
	# (Handles are opened one at a time below, inside 'with', so each file
	# is closed as soon as it has been processed -- the original opened
	# every file up front and never closed any of them.)

	inputFilePaths = []

	for inputFileName in os.listdir(inputDir):

		inputFilePath = inputDir + '/' + inputFileName

		if os.path.isfile(inputFilePath):
			inputFilePaths.append(inputFilePath)


	# Initialize count and proportion by offset

	offset = {}			# Address offset[offset][cluster]
	proportion_offset = {}		# Address proportion_offset[offset][cluster]

	start_offset = 0		# The first offset year we look at, inclusive.
	end_offset = 9			# The last offset year we look at, inclusive. The largest offset in our data in 2012 is 9.

	for offset_index in range(start_offset, end_offset + 1):

		offset[offset_index] = {}
		proportion_offset[offset_index] = {}

		for cluster_index in range(1, numclusters + 1):

			offset[offset_index][cluster_index] = 0
			proportion_offset[offset_index][cluster_index] = None


	# Compile counts
	# (Parenthesized print is valid in both Python 2 and Python 3.)

	print(timestamp() + " Processing input files in " + inputDir)

	total_entries = 0		# The total number of entries we look at

	for inputFilePath in inputFilePaths:

		print(timestamp() + " Processing input file " + inputFilePath)

		with open(inputFilePath, 'r') as inputFileHandle:

			for line in inputFileHandle:

				fields = parseLine(line)

				# Update count by offset

				offset[fields['offset']][fields['cluster']] += 1

				# Increment the total entry counter

				total_entries += 1

		print(timestamp() + " Finished processing input file " + inputFilePath)


	print(timestamp() + " Finished processing input files in " + inputDir)


	# Compute offset proportion

	print(timestamp() + " Compiling offset proportion")

	offset_total = 0		# Sanity check to make sure we counted all entries

	for offset_index in offset:

		single_offset_total = 0		# The total for one offset year that we compute the proportion with

		# Figure out the total users in this offset year

		for cluster_index in offset[offset_index]:

			offset_total += offset[offset_index][cluster_index]
			single_offset_total += offset[offset_index][cluster_index]

		# Compute the proportion based on the total users in this offset year.
		# An offset year with no users gets proportion 0 rather than dividing by zero.

		for cluster_index in offset[offset_index]:

			if single_offset_total != 0:
				proportion_offset[offset_index][cluster_index] = float(offset[offset_index][cluster_index]) / float(single_offset_total)
			else:
				proportion_offset[offset_index][cluster_index] = 0

	assert(offset_total == total_entries), "Offset count is missing " + str(total_entries - offset_total) + " entries"


	# Write offset count statistics file

	offset_filename = 'offset.stat'						# Filename to write offset count statistics to
	offset_filepath = outputDir + '/' + offset_filename

	print(timestamp() + " Writing offset count statistics file to " + offset_filepath)

	with open(offset_filepath, 'w') as offset_filehandle:

		for offset_index in sorted(offset):

			output = [str(offset_index)]

			for cluster_index in sorted(offset[offset_index]):

				output.append(str(offset[offset_index][cluster_index]))

			offset_filehandle.write('\t'.join(output) + '\n')

	print(timestamp() + " Finished writing offset count statistics file to " + offset_filepath)


	# Write offset proportion statistics file

	proportion_offset_filename = 'offset_proportion.stat'			# Filename to write offset proportion statistics to
	proportion_offset_filepath = outputDir + '/' + proportion_offset_filename

	print(timestamp() + " Writing offset proportion statistics file to " + proportion_offset_filepath)

	with open(proportion_offset_filepath, 'w') as proportion_offset_filehandle:

		for offset_index in sorted(proportion_offset):

			output = [str(offset_index)]

			for cluster_index in sorted(proportion_offset[offset_index]):

				output.append(str(proportion_offset[offset_index][cluster_index]))

			proportion_offset_filehandle.write('\t'.join(output) + '\n')

	print(timestamp() + " Finished writing offset proportion statistics file to " + proportion_offset_filepath)


def parseLine(line):
	"""Parse one tab-delimited classification record into a field dictionary.

	Expected columns (in order): user id, offset year, calendar year,
	cluster number. The three numeric columns are converted to int;
	extra trailing columns are ignored.
	"""

	tokens = line.strip().split('\t')

	return {
		'user':    tokens[0],
		'offset':  int(tokens[1]),
		'year':    int(tokens[2]),
		'cluster': int(tokens[3]),
	}


def timestamp():
	"""Return the current local time as a second-resolution ISO-8601 string,
	e.g. '2012-06-01T13:45:30', for log-line prefixes."""

	# isoformat() on a microsecond-free datetime yields exactly
	# the '%Y-%m-%dT%H:%M:%S' layout.
	return datetime.datetime.now().replace(microsecond=0).isoformat()

# Guard the entry point so importing this module does not run the script.
if __name__ == '__main__':
	main()
