# Given a k-means set of centroids, cluster rich user-year vectors into the model clusters by closest centroid

import os
import sys
import multiprocessing
import datetime
import traceback

import random

def main():
	"""Cluster every file in an input directory against a k-means model.

	Usage: script.py <inputDir> <outputDir> <modelFilePath>

	Builds one (input, output, model) task per regular file in inputDir and
	fans the tasks out across a pool of worker processes; each task writes
	its clustered rows to the same file name under outputDir.
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	# Fail fast with a usage message instead of an opaque IndexError.
	if len(sys.argv) < 4:
		sys.stderr.write("Usage: %s <inputDir> <outputDir> <modelFilePath>\n" % sys.argv[0])
		sys.exit(1)

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]
	modelFilePath = sys.argv[3]

	# One argument tuple per regular file (subdirectories are skipped).
	filePathArgs = []
	for inputFileName in os.listdir(inputDir):
		inputFilePath = os.path.join(inputDir, inputFileName)
		outputFilePath = os.path.join(outputDir, inputFileName)
		if os.path.isfile(inputFilePath):
			filePathArgs.append((inputFilePath, outputFilePath, modelFilePath))

	# close() then join() blocks until every queued task has finished.
	# Per-task errors are reported by processFile itself, so the async
	# result object is intentionally not collected.
	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()
			
def processFile(filePathArgs):
	"""Worker-pool task: cluster one input file against the model.

	filePathArgs is an (inputFilePath, outputFilePath, modelFilePath) tuple;
	the three paths are packed together because Pool.map_async passes each
	task a single argument.

	Exceptions are printed to stdout (not re-raised) so failures in child
	processes remain visible from the parent.
	"""
	try:
		inputFilePath, outputFilePath, modelFilePath = filePathArgs

		print(timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath + " with model " + modelFilePath)

		# 'with' guarantees all three handles are closed even if translate()
		# raises; the previous version leaked them on error.
		# NOTE(review): output is opened in append mode, so re-running a job
		# appends duplicate rows to existing output files — confirm intended.
		with open(inputFilePath, "r") as inputFileHandle, \
				open(outputFilePath, "a+") as outputFileHandle, \
				open(modelFilePath, "r") as modelFileHandle:
			translate(inputFileHandle, outputFileHandle, modelFileHandle)

		print(timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath + " with model " + modelFilePath)

	except Exception:
		# There is a problem where if a child process encounters an error or exception,
		# the traceback gets written to stderr but not flushed, so you never see it.
		# This code fixes that problem by pushing it to stdout, where it is
		# piped back to the main process through the pool.
		# (Narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate.)
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()

def translate(inputFileHandle, outputFileHandle, modelFileHandle):
	"""Assign each input row to its nearest model centroid.

	The model file holds one tab-separated centroid per line; all centroids
	must share the same dimensionality. Each input line is tab-separated
	with 5 leading metadata fields followed by the vector. The 1-based
	index of the nearest centroid (by squared Euclidean distance) is
	inserted as a new 6th field and the row is written to outputFileHandle.
	Ties are broken uniformly at random.
	"""
	# Parse the centroids; every model line must have the same field count.
	centers = []
	dimensions = None
	for line in modelFileHandle:
		fields = line.strip().split('\t')
		if dimensions is None:
			dimensions = len(fields)
		assert len(fields) == dimensions, "Model cluster centers do not have same dimensions"
		centers.append([float(field) for field in fields])

	vector_offset = 5	# number of leading metadata fields before the vector
	for line in inputFileHandle:
		fields = line.strip().split('\t')

		vector = [float(dim) for dim in fields[vector_offset:]]
		assert len(vector) == dimensions, "Input vector does not match model dimensions"

		# Squared Euclidean distance to each centroid; sqrt is monotonic,
		# so squared distances suffice for finding the nearest one.
		distances = [sum((c - v) ** 2 for c, v in zip(center, vector))
					for center in centers]

		# Collect every centroid tied for the minimum; clusters are indexed
		# from 1, hence the + 1.
		min_distance = min(distances)
		min_clusters = [idx + 1 for idx, d in enumerate(distances) if d == min_distance]
		if len(min_clusters) == 1:
			assignment = min_clusters[0]
		else:
			assignment = random.choice(min_clusters)

		fields.insert(vector_offset, str(assignment))
		outputFileHandle.write('\t'.join(fields) + '\n')

def distance_squared(p1, p2):
	"""Return the squared Euclidean distance between points p1 and p2.

	Both points must have the same number of dimensions; raises
	AssertionError otherwise.
	"""
	assert len(p1) == len(p2), "Points have different dimension"

	total = 0
	for a, b in zip(p1, p2):
		diff = a - b
		total += diff * diff
	return total

def timestamp():
	"""Return the current local time formatted as YYYY-MM-DDTHH:MM:SS."""
	now = datetime.datetime.now()
	return '{:%Y-%m-%dT%H:%M:%S}'.format(now)

# Protects against the script being loaded and run recursively in the child process
# Did not seem to be a problem, but better safe than sorry
# (multiprocessing may re-import this module in worker processes, so the guard
# keeps main() from running again there)
if __name__ == '__main__':
	main()
