"""

This script samples pre-processed lda user edit vectors (NOT in the format expected by the C package).

It picks out some particular number of vectors that are good for LDA data, meaning they are distributed appropriately according to the actual distribution of offset year among the good vectors. The sampling parameters must be set directly in this file.

This script uses more memory than most others (roughly O(n) in the number of input lines, though it stores only line numbers, not whole lines), so cut the input files down to a reasonable size.
Around 1 MB each should be okay.
Keep in mind, however, that you will probably need many more lines in the input file than your sample size to ensure you get
a good distribution of year data.

"""

import os
import sys
import multiprocessing
import datetime
import traceback
import random

def main():
	"""Fan the per-file sampling work out across a multiprocessing pool.

	Usage: <script> <inputDir> <outputDir>

	Every regular file directly inside inputDir is sampled by processFile(),
	which writes the result to a file of the same name in outputDir.
	"""

	# The number of worker processes the worker pool will use to process the work queue.
	# Recommend setting this to half the number of available cores to avoid hogging resources.
	# That would be 24 cores on platinum.
	# Once you get above a certain level of CPU usage, I/O throughput will bottleneck anyway.
	NUM_WORKERS = 24

	# The number of translation tasks given to each process at a time.
	# Recommend this setting to be 1 for maximum throughput.
	# If I/O throughput ever becomes an issue, increasing this value will make non-CPU bound tasks run more efficiently.
	TASK_PARTITION_SIZE = 1

	# Fail with a clear usage message instead of an IndexError when arguments are missing.
	if len(sys.argv) < 3:
		sys.stderr.write("Usage: " + sys.argv[0] + " <inputDir> <outputDir>\n")
		sys.exit(1)

	inputDir = sys.argv[1]
	outputDir = sys.argv[2]

	# Build (input, output) path pairs, skipping subdirectories and anything
	# else that is not a regular file.
	filePathArgs = []
	for inputFileName in os.listdir(inputDir):
		inputFilePath = os.path.join(inputDir, inputFileName)
		outputFilePath = os.path.join(outputDir, inputFileName)
		if os.path.isfile(inputFilePath):
			filePathArgs.append((inputFilePath, outputFilePath))

	workerPool = multiprocessing.Pool(NUM_WORKERS)
	workerPool.map_async(processFile, filePathArgs, TASK_PARTITION_SIZE)
	workerPool.close()
	workerPool.join()
			
def processFile(filePathArgs):
	"""Sample one input file into its corresponding output file.

	filePathArgs is a (inputFilePath, outputFilePath) tuple; the two paths are
	packed together because Pool.map_async passes each task a single argument.
	The output file is opened in append mode, so re-runs accumulate.
	"""
	try:
		inputFilePath, outputFilePath = filePathArgs

		print(timestamp() + " Starting processing for " + inputFilePath + " to " + outputFilePath)

		# 'with' guarantees both handles are closed even if translate() raises;
		# the old explicit open()/close() sequence leaked both handles on error.
		with open(inputFilePath, "r") as inputFileHandle:
			with open(outputFilePath, 'a+') as outputFileHandle:
				translate(inputFileHandle, outputFileHandle)

		print(timestamp() + " Finished processing for " + inputFilePath + " in " + outputFilePath)

	except:
		# There is a problem where if a child process encounters an error or exception, the traceback gets written
		# to stderr but not flushed, so you never see it. This code fixes that problem by pushing it to stdout, where it is
		# piped back to the main process through the pool.
		traceback.print_exc(limit=3, file=sys.stdout)
		sys.stdout.flush()

def translate(inputFileHandle, outputFileHandle):
	"""Write a year-stratified random sample of input lines to the output.

	Makes two passes over the input file: the first buckets 1-based line
	numbers by offset year (the tab-separated field at index 2 of each line),
	the second re-reads the file and copies out the chosen lines. Only line
	numbers are held in memory, never whole lines.
	"""

	# Change this value according to how many sample vectors you want from this input file.
	# It will probably not be exact to account for sampling from the year distribution.
	samplesize = 1370

	# Change these values according to the year distribution in your good data.
	distribution = {
		2003: 0.00337771602292,
		2004: 0.0148051671245,
		2005: 0.0521870141788,
		2006: 0.154333637317,
		2007: 0.174946491023,
		2008: 0.157718674979,
		2009: 0.148892032674,
		2010: 0.136982166928,
		2011: 0.127495763618,
		2012: 0.029261336134,
	}

	# About how many samples to include per year, rounding down.
	yearsamplesize = {}
	for year in distribution:
		yearsamplesize[year] = int(samplesize * distribution[year])

	# Make sure every year gets a (possibly empty) list of line numbers.
	yearlines = {}
	for key in yearsamplesize:
		yearlines[key] = []

	# First pass: count lines and bucket 1-based line numbers by year.
	# NOTE(review): a year not present in `distribution` raises KeyError here,
	# which the caller's except block reports; extend `distribution` to admit
	# new years.
	numlines = 0
	for line in inputFileHandle:
		numlines += 1
		year = int(line.strip().split('\t')[2])
		yearlines[year].append(numlines)

	# Clamp each per-year sample size to the number of lines actually available;
	# if a year is short, use every line it has.
	actualyearsamplesize = {}
	for key in yearsamplesize:
		if yearsamplesize[key] <= len(yearlines[key]):
			actualyearsamplesize[key] = yearsamplesize[key]		# We have enough lines to sample this year
			print("In "+inputFileHandle.name+", found "+str(len(yearlines[key]))+" lines for "+str(key)+", using desired "+str(yearsamplesize[key])+".")
		else:
			actualyearsamplesize[key] = len(yearlines[key])		# We don't have enough for this year, use all of them.
			print("In "+inputFileHandle.name+", found "+str(len(yearlines[key]))+" lines for "+str(key)+" instead of desired "+str(yearsamplesize[key])+", using all.")

	# Pick which line numbers to emit. A set makes the per-line membership test
	# in the second pass O(1); the original list made the copy loop O(n*m).
	# Line numbers are unique across years, so the count is unaffected.
	samplelines = set()
	for key in actualyearsamplesize:
		for pick in random.sample(range(len(yearlines[key])), actualyearsamplesize[key]):
			samplelines.add(yearlines[key][pick])

	# Second pass: copy the selected lines to the output, in file order.
	print("In "+inputFileHandle.name+" sampling "+str(len(samplelines))+" total lines.")
	inputFileHandle.seek(0, 0)
	index = 0
	for line in inputFileHandle:
		index += 1
		if index in samplelines:
			outputFileHandle.write(line)
	


def timestamp():
	"""Return the current local time as an ISO-8601 string, 'YYYY-MM-DDTHH:MM:SS'."""
	now = datetime.datetime.now()
	# Zeroing the microseconds makes isoformat() stop at whole seconds,
	# matching the '%Y-%m-%dT%H:%M:%S' layout.
	return now.replace(microsecond=0).isoformat()

# Protects against the script being loaded and run recursively in the child process
# Did not seem to be a problem, but better safe than sorry
# (multiprocessing may re-import this module in each worker, so any top-level
# work must sit behind this guard).
if __name__ == '__main__':
	main()
