import math
import sys
import io
import re
import shlex
import Parser
import Post
import StatusTotals
import WordProcessor
import WordBag
import FileIO
import OversamplingWeights
import Probability
import Evaluator
import ProbabilityUpdater

#Oversampling weight currently applied to each post status when computing
#per-word status probabilities (OPEN posts are left unweighted).
class CurrentOversamplingWeights:
	OPEN = 1
	OFF_TOPIC = 4.00091272105
	NOT_CONSTRUCTIVE = 4.47895778785
	NOT_A_REAL_QUESTION = 2.277956413
	TOO_LOCALIZED = 11.3894121468

#Configurable control variables for the whole pipeline.
class Controls:
	#True when the test file carries open-status labels; False for leaderboard files
	TEST_FILE_CONTAINS_LABELS = False
	#Every this-many training posts, purge the least-occurring words
	DROP_WORDS_PERIOD = 5000
	#Every this-many posts, score the model
	SCORE_MODEL_PERIOD = 5000
	#Words occurring this many times or fewer are dropped during the purge
	DROP_WORDS_THRESHOLD = 1
	#Number of highest-probability words used by predictPostUsingTopAndBottomWords
	NUM_TOP_WORDS_IN_PREDICTION = 1
	#Number of lowest-probability words used by predictPostUsingTopAndBottomWords
	NUM_BOTTOM_WORDS_IN_PREDICTION = 2
	#Number of most relevant words used by predictPostUsingRelevantWords
	NUM_RELEVANT_WORDS_IN_PREDICTION = 5
	#Floor/ceiling offset for word probabilities (keeps log terms finite)
	EPSILON = 0.001
	#Minimum training occurrences for a word to enter the probability calculation
	WORD_OCCURENCE_THRESHOLD = 6
	#When True, the computed probabilities are written out to the results file
	WRITE_RESULTS_TO_FILE = True

#Clamps a probability into [EPSILON, 1-EPSILON] so it is never exactly 0 or 1
#(the predictors take log(p) and log(1-p), which must stay finite).
def capProbability(probability):
	upperBound = 1 - Controls.EPSILON
	if probability > upperBound:
		return upperBound
	if probability < Controls.EPSILON:
		return Controls.EPSILON
	return probability
	
#Given a post and a wordbag: gives a numeric prediction that the post belongs to a given status.
#Uses the top and bottom words method which only considers the "top" and "bottom" words of a post in the prediction
def predictPostUsingTopAndBottomWords(post, wordBag, statusToPredict):
	#Accumulates log((1-p)/p) per selected word; combined probability is 1/(1+e^logTotal)
	logTotal = float(0)
	
	#Since there are five statuses, each status has a 20% overall probability
	probabilityOfStatusOverall = float(.2)
	probabilityOfNotStatusOverall = float(.8)
	
	weights = OversamplingWeights.Weights(CurrentOversamplingWeights.OPEN, CurrentOversamplingWeights.OFF_TOPIC, CurrentOversamplingWeights.NOT_CONSTRUCTIVE, CurrentOversamplingWeights.NOT_A_REAL_QUESTION, CurrentOversamplingWeights.TOO_LOCALIZED)
	words = WordProcessor.processText(post.bodyMarkdown)
	
	#Bayes-adjusted probability for each known word that clears the occurrence threshold
	wordProbabilities = {}
	for word in words:
		if(wordBag.contains(word) and wordBag.get(word).totalCount()>Controls.WORD_OCCURENCE_THRESHOLD):
			probabilityOfStatusByWord = wordBag.get(word).getProbabilityOfStatusWithWeights(statusToPredict, weights)
			probabilityOfNotStatusByWord = float(1)  - probabilityOfStatusByWord
			#Bayes rule with the 20%/80% overall priors
			probability = (probabilityOfStatusByWord * probabilityOfStatusOverall)/((probabilityOfStatusByWord * probabilityOfStatusOverall) + (probabilityOfNotStatusByWord * probabilityOfNotStatusOverall))
			wordProbabilities[word] = capProbability(probability)
	
	#Take the top (highest-probability) and bottom (lowest-probability) words.
	#NOTE(review): a word can appear in both slices when the post has fewer
	#distinct usable words than top+bottom -- it is then counted twice, which
	#matches the original behavior.
	topWords = sorted(wordProbabilities, key=wordProbabilities.get, reverse=True)[:Controls.NUM_TOP_WORDS_IN_PREDICTION]
	bottomWords = sorted(wordProbabilities, key=wordProbabilities.get, reverse=False)[:Controls.NUM_BOTTOM_WORDS_IN_PREDICTION]
	
	#Add probabilities of the selected words
	for word in topWords + bottomWords:
		probability = wordProbabilities[word]
		logTotal += math.log(1-probability) - math.log(probability)
	
	try:
		return float(1)/(float(1)+math.exp(logTotal))
	except OverflowError:
		#Bug fix: math.exp can overflow for a large positive logTotal; the sibling
		#predictPostUsingAllWords already guarded this, this function did not.
		#Use the max float instead.
		return float(1)/(sys.float_info.max)

	
#Given a post and a wordbag: gives a numeric prediction that the post belongs to a given status.
#Uses all words from the post in the prediction
def predictPostUsingAllWords(post, wordBag, statusToPredict):
	#Accumulates log((1-p)/p) per word; combined probability is 1/(1+e^logTotal)
	logTotal = float(0)
	
	#Since there are five statuses, each status has a 20% overall probability
	probabilityOfStatusOverall = float(.2)
	probabilityOfNotStatusOverall = float(.8)
	
	weights = OversamplingWeights.Weights(CurrentOversamplingWeights.OPEN, CurrentOversamplingWeights.OFF_TOPIC, CurrentOversamplingWeights.NOT_CONSTRUCTIVE, CurrentOversamplingWeights.NOT_A_REAL_QUESTION, CurrentOversamplingWeights.TOO_LOCALIZED)
	words = WordProcessor.processText(post.bodyMarkdown)
	for word in words:
		#Only consider words seen often enough in training to have a usable estimate
		if(wordBag.contains(word) and wordBag.get(word).totalCount()>Controls.WORD_OCCURENCE_THRESHOLD):
			probabilityOfStatusByWord = wordBag.get(word).getProbabilityOfStatusWithWeights(statusToPredict, weights)
			probabilityOfNotStatusByWord = float(1)  - probabilityOfStatusByWord
			#Bayes rule with the 20%/80% overall priors
			probability = (probabilityOfStatusByWord * probabilityOfStatusOverall)/((probabilityOfStatusByWord * probabilityOfStatusOverall) + (probabilityOfNotStatusByWord * probabilityOfNotStatusOverall))
			probability = capProbability(probability)
			logTotal += math.log(1-probability) - math.log(probability)
	try:
		eToTheLog = math.exp(logTotal)
		return float(1)/(float(1)+eToTheLog)
	except OverflowError:
		#Bug fix: was a bare "except:", which would also silently swallow
		#unrelated errors (KeyboardInterrupt included).
		#Overflow occured for eToTheLog (use the max float instead)
		return float(1)/(sys.float_info.max)
	
#Given a post and a wordbag: gives a numeric prediction that the post belongs to a given status.
#Uses only the top most "relevant" words when predicting a post
def predictPostUsingRelevantWords(post, wordBag, statusToPredict):
	#Accumulates log((1-p)/p) per selected word; combined probability is 1/(1+e^logTotal)
	logTotal = float(0)
	
	#Since there are five statuses, each status has a 20% overall probability
	probabilityOfStatusOverall = float(.2)
	probabilityOfNotStatusOverall = float(.8)
	
	weights = OversamplingWeights.Weights(CurrentOversamplingWeights.OPEN, CurrentOversamplingWeights.OFF_TOPIC, CurrentOversamplingWeights.NOT_CONSTRUCTIVE, CurrentOversamplingWeights.NOT_A_REAL_QUESTION, CurrentOversamplingWeights.TOO_LOCALIZED)
	words = WordProcessor.processText(post.bodyMarkdown)
	
	wordProbabilities = {}
	wordRelevances = {}
	
	#Assign relevance: distance of the word's probability from the uninformative 0.5.
	#NOTE(review): unlike the other predictors this applies no
	#Controls.WORD_OCCURENCE_THRESHOLD filter -- confirm whether that is intentional.
	for word in words:
		if(wordBag.contains(word)):
			probabilityOfStatusByWord = wordBag.get(word).getProbabilityOfStatusWithWeights(statusToPredict, weights)
			probabilityOfNotStatusByWord = float(1)  - probabilityOfStatusByWord
			#Bayes rule with the 20%/80% overall priors
			probability = (probabilityOfStatusByWord * probabilityOfStatusOverall)/((probabilityOfStatusByWord * probabilityOfStatusOverall) + (probabilityOfNotStatusByWord * probabilityOfNotStatusOverall))
			probability = capProbability(probability)
			wordProbabilities[word] = probability
			wordRelevances[word] = math.fabs(probability -.5)
	
	#Combine the probabilities of the top most relevant words.
	#Bug fix: removed a leftover debug print of each word's relevance/probability.
	for word in sorted(wordRelevances, key=wordRelevances.get, reverse=True)[:Controls.NUM_RELEVANT_WORDS_IN_PREDICTION]:
		probability = wordProbabilities[word]
		logTotal += math.log(1-probability) - math.log(probability)
	
	try:
		return float(1)/(float(1)+math.exp(logTotal))
	except OverflowError:
		#math.exp overflowed; use the max float instead (same guard as predictPostUsingAllWords)
		return float(1)/(sys.float_info.max)

#Reads every post from the test file and returns a list of Probability records,
#one per post, predicted against the given word bag.
def calculateProbabilities(wordBag):
	testingFileStream = FileIO.openTestingFile(FileIO.READ_MODE)
	try:
		Parser.simpleReadHeader(testingFileStream)
		
		probabilities = []
		while (not Parser.isEndOfFile(testingFileStream)):
			#Labeled test files carry the open status; leaderboard files do not
			if(Controls.TEST_FILE_CONTAINS_LABELS):
				testPost = Parser.readPost(testingFileStream)
			else:
				testPost = Parser.readLeaderboardPost(testingFileStream)
			probabilities.append(calculateProbability(wordBag, testPost))
	finally:
		#Bug fix: the testing stream was previously never closed
		testingFileStream.close()
	return probabilities
	
#Builds a Probability record for one post: predicts each of the five statuses
#with the top-and-bottom-words method, then normalizes the five predictions so
#they sum to one.
def calculateProbability(wordBag, post):
	statuses = (Post.PostStatusEnum.OPEN,
		Post.PostStatusEnum.OFF_TOPIC,
		Post.PostStatusEnum.NOT_CONSTRUCTIVE,
		Post.PostStatusEnum.NOT_A_REAL_QUESTION,
		Post.PostStatusEnum.TOO_LOCALIZED)
	predictions = [predictPostUsingTopAndBottomWords(post, wordBag, status) for status in statuses]
	totalPrediction = predictions[0] + predictions[1] + predictions[2] + predictions[3] + predictions[4]
	
	normalized = [prediction / totalPrediction for prediction in predictions]
	return Probability.Probability(post.postID, post.openStatus,
		normalized[0], normalized[1], normalized[2], normalized[3], normalized[4])

#Runs the full pipeline with a caller-supplied word occurrence threshold.
def runCustomWordOccurenceThreshold(wordOccurenceThreshold):
	Controls.WORD_OCCURENCE_THRESHOLD = wordOccurenceThreshold
	run()
	
#Runs the full pipeline with caller-supplied counts of top and bottom words.
def runCustomTopBottomWords(topWords,bottomWords):
	Controls.NUM_TOP_WORDS_IN_PREDICTION = topWords
	Controls.NUM_BOTTOM_WORDS_IN_PREDICTION = bottomWords
	run()

#Reads every post from the training file, building a WordBag of word counts and
#per-status totals.  Periodically drops rarely occurring words to bound memory.
#Returns the populated WordBag.
def train():
	trainingFileStream = FileIO.openTrainingFile(FileIO.READ_MODE)
	try:
		Parser.readHeader(trainingFileStream)
		wordBag = WordBag.WordBag()
		totals = StatusTotals.StatusTotals()
		
		while (not Parser.isEndOfFile(trainingFileStream)):
			newPost = Parser.readPost(trainingFileStream)
			WordProcessor.fillWordBag(newPost, wordBag)
			totals.increment(newPost.openStatus)
			
			#Drop the least occuring words to keep memory usage low
			if(totals.getTotal()%Controls.DROP_WORDS_PERIOD==0):
				wordBag.dropLeastOccuringWords(Controls.DROP_WORDS_THRESHOLD)
	finally:
		#Bug fix: the training stream was previously never closed
		trainingFileStream.close()
	return wordBag

#Trains the model, computes per-post probabilities, reports log loss against the
#raw model, the prior-updated model, and two benchmarks, then optionally writes
#the results to file.
def run():
	#print(...) with a single argument behaves identically under Python 2 and 3
	print("Training...")
	wordBag = train()
	
	#Bug fix: message previously read "probabilties"
	print("Calculating probabilities...")
	probabilities = calculateProbabilities(wordBag)
	
	print("Before updating from priors " + str(Evaluator.calculateLogLoss(probabilities)))
	
	ProbabilityUpdater.updateProbabilities(probabilities,Controls.EPSILON)
	
	print("After updating from priors  " + str(Evaluator.calculateLogLoss(probabilities)))
	
	ProbabilityUpdater.setToPriorBenchmark(probabilities)
	print("Prior Benchmark  " + str(Evaluator.calculateLogLoss(probabilities)))
	
	ProbabilityUpdater.setToUniformBenchmark(probabilities)
	print("Uniform Benchmark  " + str(Evaluator.calculateLogLoss(probabilities)))
	
	if(Controls.WRITE_RESULTS_TO_FILE):
		print("Writing results to file...")
		writeResultsToFile(probabilities)
	
	print("Complete")

#Writes one line per Probability record to the results file.
def writeResultsToFile(probabilities):
	resultsFileStream = FileIO.openResultsFile(FileIO.WRITE_MODE)
	try:
		for probability in probabilities:
			resultsFileStream.write(probability.toLine())
	finally:
		#Bug fix: the stream was not closed if a write raised
		resultsFileStream.close()
	
#Script entry point (tab-indented to match the rest of the file)
if __name__ == "__main__":
	run()