"""

This script takes in an input file of user-year entries with distributions of identification across cluster groups (topics) from an LDA model
I.e. lines of (username, year, (k distributions)) for a k-cluster model

Creates an output file in which each user is classified into exactly one cluster (turning soft clusters into hard clusters), based on which
cluster's Dirichlet-multinomial posterior for that user-year's word vector is highest.
I.e. lines of (username, year, cluster index), where the clusters are indexed from 1.

If the posteriors for two clusters are equal, one is chosen at random.

"""

import os
import sys
import multiprocessing
import datetime
import traceback

import random

def main():
	"""Fan per-file translation work out over a pool of worker processes.

	Usage: script.py <inputDir> <outputDir>

	Every regular file in inputDir is processed by processFile() into a
	file of the same name in outputDir.
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	# Fail fast with a readable usage message instead of a raw IndexError.
	if len(sys.argv) < 3:
		sys.stderr.write("Usage: %s <inputDir> <outputDir>\n" % sys.argv[0])
		sys.exit(1)

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]

	filePathArgs = []

	for inputFileName in os.listdir(inputDir):

		# os.path.join is portable, unlike manual '/' concatenation.
		inputFilePath = os.path.join(inputDir, inputFileName)
		outputFilePath = os.path.join(outputDir, inputFileName)

		# Skip subdirectories and other non-regular directory entries.
		if os.path.isfile(inputFilePath):
			filePathArgs.append( (inputFilePath, outputFilePath) )

	workerPool = multiprocessing.Pool(NUM_WORKERS)
	# Blocking map (rather than map_async with the result discarded) waits for
	# all tasks and re-raises any exception a worker failed to handle itself;
	# the original join()ed immediately anyway, so this changes no scheduling.
	workerPool.map(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()
			
def processFile(filePathArgs):
	"""Translate one input file into its output file inside a worker process.

	filePathArgs is an (inputFilePath, outputFilePath) tuple; the input is
	read line by line and hard-cluster assignments are appended to the
	output file via translate().
	"""

	try:
		inputFilePath, outputFilePath = filePathArgs

		print(timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath)

		# 'with' guarantees both handles are closed even if translate() raises;
		# the previous version leaked both descriptors on the error path.
		# Output stays in append mode ('a+') so reruns add to, not clobber, results.
		with open(inputFilePath, "r") as inputFileHandle:
			with open(outputFilePath, 'a+') as outputFileHandle:
				translate(inputFileHandle, outputFileHandle)

		print(timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath)

	except Exception:
		# There is a problem where if a child process encounters an error or exception, the traceback gets written
		# to stderr but not flushed, so you never see it. This code fixes that problem by pushing it to stdout, where it is
		# piped back to the main process through the pool.
		# Narrowed from a bare except so SystemExit/KeyboardInterrupt still propagate.
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()

def translate(inputFileHandle, outputFileHandle):
	"""Convert soft cluster distributions to hard cluster assignments.

	Each input line is tab-separated: three identifier fields (username,
	year, ...) followed by one posterior value per cluster. Writes one
	tab-separated output line per input line: the identifiers plus the
	1-indexed cluster with the highest posterior. Ties are broken
	uniformly at random.
	"""

	for line in inputFileHandle:
		fields = line.strip().split('\t')

		# Guard against blank/short lines: max() over an empty distribution
		# would raise, aborting the whole file.
		if len(fields) < 4:
			continue

		# First three columns identify the user-year entry; the rest are posteriors.
		identifiers = fields[0:3]
		distribution = [float(field) for field in fields[3:]]

		maxPosterior = max(distribution)
		# + 1 because we index the clusters from 1, but arrays in python from 0
		maxClusters = [index + 1 for index, posterior in enumerate(distribution)
		               if posterior == maxPosterior]

		# random.choice of a one-element list is that element, so the unique-max
		# case needs no separate branch.
		assignment = random.choice(maxClusters)

		outputFileHandle.write('\t'.join(identifiers + [str(assignment)]) + '\n')
					
def timestamp():
	"""Return the current local time as an ISO-8601-style string (YYYY-MM-DDTHH:MM:SS)."""

	return '{:%Y-%m-%dT%H:%M:%S}'.format(datetime.datetime.now())

# Protects against the script being loaded and run recursively in the child process
# Did not seem to be a problem, but better safe than sorry
if __name__ == '__main__':
	main()
