"""

This script, given a beta lda model, will give the distributions over usage patterns for a user year that are identified in the model.

The input model should be a matrix with K columns (tab-separated, use the ones with the _formatted.beta suffix) and V rows, 
where there are K use patterns (this varies depending on the model you use) and V unique words in the vocabulary (20 namespaces in our case).

The input data files should be 22-vectors of (username, year, (20 namespace edit counts))

The output file will be (k+2)-vectors of (username, year, (k use pattern distributions))
Where each output line corresponds to exactly one input line, and there is exactly one input file corresponding to each output file.

Usage:

	python classifyUsers.py [inputdir] [outputdir] [modelfile]

"""

import os
import sys
import multiprocessing
import datetime
import traceback

import math
import scipy
from scipy import special

def main():
	"""
	Entry point: build one (input, output, model) task per regular file in the
	input directory and fan the tasks out to a multiprocessing worker pool.

	Command-line arguments (see module docstring):
		sys.argv[1] - input directory of user-year count files
		sys.argv[2] - output directory (one output file per input file)
		sys.argv[3] - path to the formatted beta model file
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]
	modelFilePath = sys.argv[3]

	filePathArgs = []

	for inputFileName in os.listdir(inputDir):

		# os.path.join instead of string concatenation so the paths are built
		# portably and trailing separators in the arguments are handled.
		inputFilePath = os.path.join(inputDir, inputFileName)
		outputFilePath = os.path.join(outputDir, inputFileName)

		# Skip subdirectories and other non-regular entries.
		if os.path.isfile(inputFilePath):
			filePathArgs.append( (inputFilePath, outputFilePath, modelFilePath) )

	# Dispatch all tasks, then wait for every worker to finish.
	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()
			
def processFile(filePathArgs):
	"""
	Worker entry point: translate one input file to one output file using the model.

	filePathArgs is a single (inputFilePath, outputFilePath, modelFilePath)
	tuple, packed as one argument so it can be dispatched through Pool.map_async.

	Never raises: any exception is printed to stdout (see the except block).
	"""
	try:
		inputFilePath, outputFilePath, modelFilePath = filePathArgs

		print(timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath + " with model " + modelFilePath)

		# 'with' guarantees the handles are closed even if translate() raises;
		# the original leaked all three handles on any exception.
		# Output is opened in append mode, so rerunning adds to an existing file
		# rather than overwriting it.
		with open(inputFilePath, "r") as inputFileHandle, \
				open(outputFilePath, 'a+') as outputFileHandle, \
				open(modelFilePath, 'r') as modelFileHandle:
			translate(inputFileHandle, outputFileHandle, modelFileHandle)

		print(timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath + " with model " + modelFilePath)

	except Exception:
		# There is a problem where if a child process encounters an error or exception, the traceback gets written
		# to stderr but not flushed, so you never see it. This code fixes that problem by pushing it to stdout, where it is
		# piped back to the main process through the pool.
		# 'except Exception' instead of a bare except so KeyboardInterrupt/SystemExit
		# still propagate and can terminate the worker.
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()

def translate(inputFileHandle, outputFileHandle, modelFileHandle):
	"""
	Read the beta model from modelFileHandle, then for every user-year line in
	inputFileHandle write a line to outputFileHandle containing the identifier
	fields followed by one posterior value per cluster.

	The model file is tab-separated with one column per cluster and one row per
	vocabulary word; every row must have the same number of columns.
	"""

	# Indexed as beta[cluster][word]: per-cluster word distributions.
	beta = []

	# Populate it from file
	numclusters = None
	index = 0
	for line in modelFileHandle:
		fields = line.strip().split('\t')

		# The first model line determines the number of clusters (columns).
		if numclusters is None:
			numclusters = len(fields)
			for col in range(numclusters):
				beta.append([])

		# Check to make sure lines have uniform cluster size.
		# Bug fix: the original concatenated ints onto strings here, which
		# raised TypeError instead of printing this diagnostic.
		if len(fields) != numclusters:
			print('Found ' + str(len(fields)) + ' columns in line ' + str(index) + ' instead of expected numclusters = ' + str(numclusters) + '.')
			sys.exit(1)

		# Model row i holds word i's weight in every cluster; append it to each
		# cluster's distribution.
		for cluster in range(numclusters):
			beta[cluster].append(float(fields[cluster]))

		index += 1

	numwords = index

	for line in inputFileHandle:
		fields = line.strip().split('\t')

		# Break the line into things that identify a user-year and edit counts.
		# NOTE(review): three identifier fields are taken here, but the module
		# docstring describes (username, year, 20 counts) = 2 identifiers;
		# confirm the actual input format before changing either.
		identifiers = fields[0:3]
		words = fields[3:]

		# Fill a vector with integer values of the word counts
		wordvector = [int(word) for word in words]

		# Calculate the posterior probability for each cluster's topic distribution
		posteriors = []
		for cluster in range(numclusters):
			posteriors.append(calculatePosteriorBeta(wordvector, beta[cluster], numwords))

		# One output line per input line: identifiers, then the posteriors.
		output = identifiers
		for posterior in posteriors:
			output.append(str(posterior))
		outputFileHandle.write('\t'.join(output) + '\n')

def calculatePosteriorBeta(nvector, alphavector, k):
	"""
	Does the same thing as calculatePosteriorGamma, except using the Beta function, hopefully to prevent overflow.
	Note that the name Beta in this method has NOTHING to do with the beta models from LDA, but refers to the statistical function used
	to compute the dirichlet-multinomial posterior.

	This does in fact prevent overflow, so this one should be used instead of the gamma one, which will be left in for completeness.

	See http://en.wikipedia.org/wiki/Dirichlet-multinomial_distribution#For_a_multinomial_distribution_over_category_counts
	for an explanation of how this works.

	The notation here is confusing! It follows that in the linked Wikipedia page. The way we use this is as follows:
		nvector is a word count vector, i.e. an edit count vector. it has dimension 20.
		alphavector is the dimension 20 vector of dirichlet priors from the BETA lda model. each call to this method uses one 20-dimension vector,
			and there are as many of these 20-vectors as there are clusters in the beta model
		k is the size of alphavector and nvector, which should be the same. this is NOT the k in LDA that denotes the
			number of clusters.

	Raises ValueError if either vector does not have length k (the original used
	assert, which is silently stripped under 'python -O').
	"""

	# Explicit validation instead of assert so the check survives optimized mode.
	if len(alphavector) != k:
		raise ValueError('alphavector has length %d, expected %d' % (len(alphavector), k))
	if len(nvector) != k:
		raise ValueError('nvector has length %d, expected %d' % (len(nvector), k))

	alpha_magnitude = 0
	n_magnitude = 0
	denominator = 1

	for index in range(k):
		alpha_magnitude += float(alphavector[index])
		n_magnitude += nvector[index]
		# Zero counts contribute a factor of 1, and n*Beta(alpha, n) is not
		# defined at n = 0, so they are skipped.
		if nvector[index] > 0:
			denominator *= float(nvector[index]*scipy.special.beta(alphavector[index], nvector[index]))

	# NOTE(review): an all-zero count vector gives n_magnitude = 0 and a NaN
	# result (0 * Beta(alpha, 0)); this matches the original behavior.
	numerator = float(n_magnitude * scipy.special.beta(alpha_magnitude, n_magnitude))

	return float(numerator / denominator)
	

def calculatePosteriorGamma(nvector, alphavector, k):
	"""
	Calculates the posterior probability of a document vector conditioned on the alpha topic distribution.

	See http://en.wikipedia.org/wiki/Dirichlet-multinomial_distribution#For_a_multinomial_distribution_over_category_counts
	for an explanation of how this works.

	The notation here is confusing! It follows that in the linked Wikipedia page. The way we use this is as follows:
		nvector is a word count vector, i.e. an edit count vector. it has dimension 20.
		alphavector is the dimension 20 vector of dirichlet priors from the BETA lda model. each call to this method uses one 20-dimension vector,
			and there are as many of these 20-vectors as there are clusters in the beta model
		k is the size of alphavector and nvector, which should be the same. this is NOT the k in LDA that denotes the
			number of clusters.

	As of right now this overflows because the gamma function gets too large. Don't use it unless you think you're really cool

	"""

	# Accumulate, in one pass over the vectors: the magnitudes of alpha and n,
	# the product of the count factorials, and the per-component gamma ratios.
	alpha_magnitude = 0.0
	n_magnitude = 0
	nfactorial_product = 1
	gamma_product = 1.0
	for i in range(k):
		count = nvector[i]
		prior = alphavector[i]
		alpha_magnitude += float(prior)
		n_magnitude += count
		nfactorial_product *= math.factorial(count)
		gamma_product *= float(math.gamma(count + prior)) / float(math.gamma(prior))

	# The three product terms as shown on the Wikipedia page.
	multinomial_coefficient = float(math.factorial(n_magnitude)) / float(nfactorial_product)
	normalizer = float(math.gamma(alpha_magnitude)) / float(math.gamma(alpha_magnitude + float(n_magnitude)))

	return float(multinomial_coefficient) * float(normalizer) * float(gamma_product)
	
def timestamp():
	"""Return the current local time as an ISO-8601 string with seconds precision."""
	now = datetime.datetime.now()
	return now.strftime('%Y-%m-%dT%H:%M:%S')

# Guard the entry point so that when multiprocessing child processes re-import
# this module, they do not recursively run main().
# Did not seem to be a problem in practice, but better safe than sorry.
if __name__ == '__main__':
	main()
