"""

This script takes in classified user vectors as the output of classifiedVectors.py

And output statistics about

amount of work, broken down by cluster, by date,
proportion of work, broken down by cluster, by date
	

"""

import os
import sys
import datetime

import wp_datetime as dt
import wp_namespace as ns

def main():
	"""Aggregate classified user vectors into per-date cluster statistics.

	Usage: script <inputDir> <outputDir> <clusters>

	Reads every regular file in inputDir, tallies work per (date, cluster),
	and writes two tab-separated tables to outputDir:
	count.stat (raw counts) and proportion.stat (per-date proportions).
	"""
	inputDir = sys.argv[1]
	outputDir = sys.argv[2]
	clusters = int(sys.argv[3])

	work = {}
	totalwork = {num: 0 for num in range(1, clusters + 1)}

	for inputFileName in os.listdir(inputDir):
		inputFilePath = inputDir + '/' + inputFileName
		if os.path.isfile(inputFilePath):
			processFile(inputFilePath, work, totalwork, clusters)

	# Per-date proportion of work in each cluster; 0 when a date has no work
	# (guards against division by zero).
	work_prop = {}
	for date in sorted(work):
		datetotal = sum(work[date].values())
		work_prop[date] = {}
		for cluster in work[date]:
			if datetotal == 0:
				work_prop[date][cluster] = 0
			else:
				work_prop[date][cluster] = float(work[date][cluster]) / float(datetotal)

	header = ['Date'] + [str(number) for number in range(1, clusters + 1)]

	# 'with' guarantees both output files are closed even if a write fails.
	with open(outputDir + '/' + 'count.stat', 'w') as count_file, \
	     open(outputDir + '/' + 'proportion.stat', 'w') as prop_file:
		count_file.write('\t'.join(header) + '\n')
		prop_file.write('\t'.join(header) + '\n')

		for date in sorted(work):
			countout = [str(date)]
			propout = [str(date)]

			# initWorkTable fills clusters 1..clusters, so sorted keys
			# line up with the header columns.
			for cluster in sorted(work[date]):
				countout.append(str(work[date][cluster]))
				propout.append(str(work_prop[date][cluster]))

			count_file.write('\t'.join(countout) + '\n')
			prop_file.write('\t'.join(propout) + '\n')


def processFile(inputFilePath, work, totalwork, clusters):
	"""Tally one classified-vector file into the shared count tables.

	work maps date (the 'year' field) -> {cluster: count}; totalwork maps
	cluster -> count. Both are mutated in place. clusters sizes the row
	created for any date seen for the first time.
	"""
	print(timestamp() + " Processing " + inputFilePath)

	# 'with' closes the handle even if a malformed line raises mid-file
	# (the original leaked the handle and never closed it at all).
	with open(inputFilePath, 'r') as inputFileHandle:
		for line in inputFileHandle:
			fields = parseLine(line)

			date = fields['year']
			if date not in work:
				initWorkTable(work, date, clusters)

			work[date][fields['cluster']] += 1
			totalwork[fields['cluster']] += 1

	print(timestamp() + " Finished processing " + inputFilePath)

def parseLine(line):
	"""Split one tab-separated record into a dict of typed fields.

	Column order: username, cohort, lifespan, offset, year, cluster;
	every column after username is converted to int.
	"""
	tokens = line.strip().split('\t')

	fields = {'username': tokens[0]}
	for position, name in enumerate(
			('cohort', 'lifespan', 'offset', 'year', 'cluster'), start=1):
		fields[name] = int(tokens[position])

	return fields

def initWorkTable(work, date, clusters):
	"""Create a zeroed per-cluster count row for *date* in *work*.

	Counts start at 0.0 (floats), matching the original output format.
	NOTE(review): the caller (processFile) passes the integer 'year'
	field as *date*, not the simple date string the old docstring
	described — confirm which contract is intended.
	"""
	# Single assignment; the original built an empty dict and
	# immediately discarded it with a second assignment.
	work[date] = {num: 0.0 for num in range(1, clusters + 1)}

def simpleDate(timestamp):
	"""Return the date portion (text before 'T') of an ISO-8601 timestamp."""
	datePart, _, _ = timestamp.partition('T')
	return datePart

def timestamp():
	"""Return the current local time as a YYYY-MM-DDTHH:MM:SS string."""
	now = datetime.datetime.now()
	return '{:%Y-%m-%dT%H:%M:%S}'.format(now)


# Entry point: run only when executed as a script, not on import.
if __name__ == '__main__':
        main()
