# File contains tools for "Not Constructive" post text quantification

import Post, re, sys
from Resources.config import NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT

# Raised recursion limit: countSentences/countPunctuation below recurse once
# per delimiter found, so long post bodies can exceed the default ~1000 frames.
sys.setrecursionlimit(5000)

# Token sets consumed by the counters below.
# NOTE(review): these are *sets*, so iteration order is arbitrary; the counters
# act on whichever element iteration happens to yield first, which makes their
# results order-dependent (e.g. "." can match inside "...") — confirm whether
# that is intended.
sentence_delimiters = { ".", ";", "...", "!!!", "?!", "!?", "!", "?" }
punctuation = { ".,", ".", ",", ":", ";", "-", "...", "!!!", "?!", "!?", "!", "?" }

def countSentences(text, delimiters=("...", "!!!", "?!", "!?", ".", ";", "!", "?")):
	"""Return the number of sentences in *text*.

	A sentence ends at the earliest occurrence of any terminator in
	*delimiters*; trailing text without a terminator still counts as one
	sentence, and an empty string contains zero sentences.

	*delimiters* is a new optional parameter (default: the module's sentence
	terminators) ordered longest-first, so that "..." is treated as a single
	terminator rather than three "." sentences.

	Fixes vs. the previous revision:
	- the old code iterated a *set* and split on whichever delimiter the set
	  happened to yield first, so the count depended on arbitrary set
	  ordering (e.g. "." could split "..." into several sentences);
	- the recursion that required sys.setrecursionlimit(5000) for long posts
	  is replaced with an iterative left-to-right scan.
	"""
	count = 0
	pos = 0
	length = len(text)
	while pos < length:
		# Earliest match wins; on position ties the longest delimiter wins
		# because *delimiters* is ordered longest-first and ties do not
		# overwrite an equal best_at.
		best_at = -1
		best_len = 0
		for delim in delimiters:
			at = text.find(delim, pos)
			if at != -1 and (best_at == -1 or at < best_at):
				best_at = at
				best_len = len(delim)
		if best_at == -1:
			# Leftover text with no terminator still counts as a sentence.
			return count + 1
		count += 1
		pos = best_at + best_len
	return count

def countPunctuation(text, signs=("...", "!!!", ".,", "?!", "!?", ".", ",", ":", ";", "-", "!", "?")):
	"""Return the number of punctuation tokens in *text*.

	Tokens are counted left to right without overlap; *signs* is a new
	optional parameter (default: the module's punctuation signs) ordered
	longest-first so that "..." counts as one token rather than three ".".

	Fixes vs. the previous revision:
	- iteration over a *set* made the result depend on arbitrary set
	  iteration order;
	- text containing no punctuation at all returned 1 (the recursion's
	  fall-through "return 1") instead of 0;
	- the recursion is replaced with an iterative scan, so no recursion
	  limit is needed for long posts.
	"""
	count = 0
	pos = 0
	length = len(text)
	while pos < length:
		# Earliest match wins; longest sign wins on position ties (see
		# ordering of *signs*).
		best_at = -1
		best_len = 0
		for sign in signs:
			at = text.find(sign, pos)
			if at != -1 and (best_at == -1 or at < best_at):
				best_at = at
				best_len = len(sign)
		if best_at == -1:
			break
		count += 1
		pos = best_at + best_len
	return count

def URL_inText(text):
	"""Return True when *text* contains at least one http(s) URL.

	Uses re.search instead of the previous ``len(re.findall(...)) > 0``:
	search stops at the first match instead of collecting every URL, and
	the pattern is now a raw string (identical bytes) so the escaped
	parentheses no longer rely on Python passing unknown escapes through.
	"""
	url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
	return re.search(url_pattern, text) is not None

def countSmiles(text):
	"""Return how many emoticons (e.g. ":)", ";-)", "=D", ":P") appear in *text*."""
	smile_re = re.compile(r'((?::|;|=)(?:-)?(?:\)|D|P))')
	return len(smile_re.findall(text))

def countCapitals(text):
	"""Return the number of uppercase characters in *text*."""
	return sum(1 for character in text if character.isupper())

def set_split_points(filestream):
	"""Derive NC quantification split points from a training-post stream.

	Reads every post from *filestream*, collects per-post statistics
	(reputation at post creation, body length in characters, and
	punctuation / smiley / capital-letter counts per sentence), then writes
	each statistic's average +/- one and two deviations to
	``Resources/NC_limits.py`` as the ``NC_LIMITS`` dict that ``quantify``
	imports.

	Fixes vs. the previous revision:
	- the body referenced the undefined global ``fileStream`` while the
	  parameter is named ``filestream`` (NameError on every call);
	- ``math.sqrt(math.pow(avg - x, 2))`` is simply ``abs(avg - x)``, so
	  the five deviation loops collapse into a mean-absolute-deviation
	  helper (the old code never computed a standard deviation anyway);
	- an empty post body no longer raises ZeroDivisionError;
	- the output file is closed even if a write fails.
	"""
	import Parser

	Parser.readHeader(filestream)

	count = 0
	reputation = []        # int reputation per post
	sentences = []         # sentence count per post
	chars = []             # body length per post
	punct_rates = []       # punctuation signs per sentence
	smile_rates = []       # smileys per sentence
	capital_rates = []     # capital letters per sentence

	while not Parser.isEndOfFile(filestream):
		newPost = Parser.readPost(filestream)
		bodyMarkdown = newPost.bodyMarkdown

		reputation.append(int(newPost.reputationAtPostCreation))

		# Guard: an empty body yields 0 sentences, which previously
		# divided by zero below.
		sentence_num = countSentences(bodyMarkdown) or 1
		sentences.append(sentence_num)
		chars.append(len(bodyMarkdown))

		# Per-sentence rates, matching what quantify() computes per post.
		smile_rates.append(float(countSmiles(bodyMarkdown)) / sentence_num)
		punct_rates.append(float(countPunctuation(bodyMarkdown)) / sentence_num)
		capital_rates.append(float(countCapitals(bodyMarkdown)) / sentence_num)

		print("Post:" + str(count) + "; Length:" + str(chars[count]))
		count += 1

	def _mean(values):
		# float() so Python 2 integer division cannot bite on int lists.
		return float(sum(values)) / count

	def _mean_abs_dev(values, avg):
		# Equivalent to the old sqrt(pow(avg - x, 2)) accumulation.
		total = 0.0
		for value in values:
			total += abs(avg - value)
		return total / count

	avgReputation = _mean(reputation)
	avgChars = _mean(chars)
	avgSmiles = _mean(smile_rates)
	avgPunctuation = _mean(punct_rates)
	avgCapitals = _mean(capital_rates)

	devReputation = _mean_abs_dev(reputation, avgReputation)
	devChars = _mean_abs_dev(chars, avgChars)
	devSmiles = _mean_abs_dev(smile_rates, avgSmiles)
	devPunctuation = _mean_abs_dev(punct_rates, avgPunctuation)
	devCapitals = _mean_abs_dev(capital_rates, avgCapitals)

	# Emit the limits module; the layout is kept identical to the previous
	# revision so the generated file stays diff-friendly.
	with open("Resources/NC_limits.py", 'w') as f_out:
		f_out.write("NC_LIMITS = {\n\t'LENGTH' : {\n")
		f_out.write("\t\t'VERY_SHORT': " + str(avgChars - 2*devChars) + ",\n")
		f_out.write("\t\t'SHORT': " + str(avgChars - 1*devChars) + ",\n")
		f_out.write("\t\t'MEDIUM': " + str(avgChars) + ",\n")
		f_out.write("\t\t'LONG': " + str(avgChars + 1*devChars) + ",\n")
		f_out.write("\t\t'VERY_LONG': " + str(avgChars + 2*devChars) + "},\n\n")

		f_out.write("\t'REPUTATION': {\n")
		f_out.write("\t\t'VERY_LOW': " + str(avgReputation - 2*devReputation) + ",\n")
		f_out.write("\t\t'LOW': " + str(avgReputation - 1*devReputation) + ",\n")
		f_out.write("\t\t'MEDIUM': " + str(avgReputation) + ",\n")
		f_out.write("\t\t'HIGH': " + str(avgReputation + 1*devReputation) + ",\n")
		f_out.write("\t\t'VERY_HIGH': " + str(avgReputation + 2*devReputation) + "},\n\n")

		f_out.write("\t'CAPITAL_LETTERS': {\n")
		f_out.write("\t\t'TOO_FEW': " + str(avgCapitals - 2*devCapitals) + ",\n")
		f_out.write("\t\t'FEW': " + str(avgCapitals - 1*devCapitals) + ",\n")
		f_out.write("\t\t'NORMAL': " + str(avgCapitals) + ",\n")
		f_out.write("\t\t'MANY': " + str(avgCapitals + 1*devCapitals) + ",\n")
		f_out.write("\t\t'TOO_MANY': " + str(avgCapitals + 2*devCapitals) + "},\n\n")

		f_out.write("\t'PUNCTUATION_USE': {\n")
		f_out.write("\t\t'TOO_FEW': " + str(avgPunctuation - 2*devPunctuation) + ",\n")
		f_out.write("\t\t'FEW': " + str(avgPunctuation - 1*devPunctuation) + ",\n")
		f_out.write("\t\t'NORMAL': " + str(avgPunctuation) + ",\n")
		f_out.write("\t\t'MANY': " + str(avgPunctuation + 1*devPunctuation) + ",\n")
		f_out.write("\t\t'TOO_MANY': " + str(avgPunctuation + 2*devPunctuation) + "},\n\n")

		f_out.write("\t'EMOTIONS_USE': {\n")
		f_out.write("\t\t'FEW': " + str(avgSmiles) + ",\n")
		f_out.write("\t\t'SOME': " + str(avgSmiles + 1*devSmiles) + ",\n")
		f_out.write("\t\t'MODERATE': " + str(avgSmiles + 2*devSmiles) + "}}")

def quantify_band(value, limits, scores, low2, low1, mid, high1, high2):
	"""Return the score for the five-way band *value* falls into.

	``limits`` maps band names to threshold values and ``scores`` maps the
	same band names to quantifier contributions.  Band names are passed from
	the outermost-low band to the outermost-high band, reproducing the
	original if/elif cascade:

	    value < limits[low2]   -> scores[low2]
	    value < limits[low1]   -> scores[low1]
	    value > limits[high2]  -> scores[high2]
	    value > limits[high1]  -> scores[high1]
	    otherwise (middle)     -> scores[mid]
	"""
	if value < limits[low2]:
		return scores[low2]
	if value < limits[low1]:
		return scores[low1]
	if value > limits[high2]:
		return scores[high2]
	if value > limits[high1]:
		return scores[high1]
	return scores[mid]

def quantify(Post):
	"""Quantify how "not constructive" a post looks.

	Scores the post's reputation, length, punctuation use, smiley use and
	capital-letter use against the bands in ``Resources/NC_limits.py``
	(generated by set_split_points), weighting each band with
	NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT, and returns the accumulated score.

	NOTE: the parameter is (unfortunately) named ``Post``, shadowing the
	Post module inside this function; kept for backward compatibility.

	Fixes vs. the previous revision:
	- the reputation cascade compared ``char_len`` against the REPUTATION
	  limits instead of the post's reputation (copy-paste bug);
	- reputation is now converted with int(), consistent with how
	  set_split_points trains the limits;
	- an empty body (0 sentences) no longer raises ZeroDivisionError.
	"""
	from Resources.NC_limits import NC_LIMITS

	text = Post.bodyMarkdown
	result = 0

	# External links contribute a fixed amount.
	if URL_inText(text):
		result += NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT['EXTERNAL_LINK_PRESENT']

	reputation = int(Post.reputationAtPostCreation)
	char_len = len(text)
	# Guard: countSentences("") == 0 would divide by zero below.
	sentence_num = countSentences(text) or 1
	punctuation_per_sentence = float(countPunctuation(text)) / sentence_num
	smiles_per_sentence = float(countSmiles(text)) / sentence_num
	capitals_per_sentence = float(countCapitals(text)) / sentence_num

	# Reputation quantification (BUG FIX: previously tested char_len here).
	result += quantify_band(reputation, NC_LIMITS['REPUTATION'],
		NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT['REPUTATION'],
		'VERY_LOW', 'LOW', 'MEDIUM', 'HIGH', 'VERY_HIGH')

	# Length quantification
	result += quantify_band(char_len, NC_LIMITS['LENGTH'],
		NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT['LENGTH'],
		'VERY_SHORT', 'SHORT', 'MEDIUM', 'LONG', 'VERY_LONG')

	# Punctuation use quantification
	result += quantify_band(punctuation_per_sentence, NC_LIMITS['PUNCTUATION_USE'],
		NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT['PUNCTUATION_USE'],
		'TOO_FEW', 'FEW', 'NORMAL', 'MANY', 'TOO_MANY')

	# Smiles use quantification (only three bands, so no helper).
	if smiles_per_sentence < NC_LIMITS['EMOTIONS_USE']['FEW']:
		result += NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT['EMOTIONS_USE']['FEW']
	elif smiles_per_sentence > NC_LIMITS['EMOTIONS_USE']['MODERATE']:
		result += NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT['EMOTIONS_USE']['MODERATE']
	else:
		result += NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT['EMOTIONS_USE']['SOME']

	# Capital letters use quantification
	result += quantify_band(capitals_per_sentence, NC_LIMITS['CAPITAL_LETTERS'],
		NOT_CONSTRUCTIVE_QUANTIFIER_SPLIT['CAPITAL_LETTERS'],
		'TOO_FEW', 'FEW', 'NORMAL', 'MANY', 'TOO_MANY')

	return result


# Everything below is for testing purposes only
# NOTE(review): this manual smoke test is disabled by being wrapped in a
# module-level string literal; remove the triple quotes to run it against the
# training file.  It is Python 2 code (print statements).
"""
import Parser
import Post
import FileIO

fileStream = FileIO.openTrainingFile(FileIO.READ_MODE)
# set_split_points(fileStream)
Parser.readHeader(fileStream)

overall_quantified_val_acc = 0
counter = 0

open_quantified_val_acc = 0
open_counter = 0

closed_quantified_val_acc = 0
closed_counter = 0

# NC = Non constructive
NC_quantified_val_acc = 0
NC_counter = 0

while (not Parser.isEndOfFile(fileStream)):
	
	quantified_val = 0
	newPost = Parser.readPost(fileStream)
	bodyMarkdown = newPost.bodyMarkdown
	quantified_val = quantify(newPost)

	overall_quantified_val_acc += quantified_val
	counter += 1

	post_status = Post.getFullPostStatus( newPost.openStatus )

	if ( post_status == "open"):
		open_quantified_val_acc += quantified_val
		open_counter += 1
	else:
		closed_quantified_val_acc += quantified_val
		closed_counter += 1

	if ( post_status == "not constructive"):
		NC_quantified_val_acc += quantified_val
		NC_counter += 1 

	print "Post - ", counter, "; quantified_val - ", quantified_val, "; Post status - \'" + post_status + "\'"

import datetime
f = open("Output/NC_quantifier_" + datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".log", 'w')

f.write( "All posts: " + str(counter) )
f.write( "\n\nAverage NC quantifier value over all posts:    " + str(float(overall_quantified_val_acc)/counter) )
f.write( "\nAverage NC quantifier value over open posts:   " + str(float(open_quantified_val_acc)/open_counter) )
f.write( "\nAverage NC quantifier value over closed posts: " + str(float(closed_quantified_val_acc)/closed_counter) )
f.write( "\nAverage NC quantifier value over NC posts:     " + str(float(NC_quantified_val_acc)/NC_counter) )

f.close()
"""