"""
This script performs SVM learning. Contains methods for core SVM learning,
cross-validation, hill climbing for getting optimal parameters of cross-validation
and RBF kernel parameters. Includes methods for calculating accuracy and logloss.
"""
from Resources.config import SVM_config
import numpy, math

def train(learning_data_filename = SVM_config["default_training_filename"], 
	      sample_size = SVM_config["training_sample_size"], 
	      offset = SVM_config["default_offset"], 
	      number_of_quantifiers = SVM_config["number_of_quantifiers"], 
	      kernel = SVM_config["default_kernel"], 
	      penalty_pow = SVM_config["default_penalty_parameter_power"],
	      gamma_pow = SVM_config["default_gamma_power"],
	      positive_label = SVM_config["default_positive_label"]):

	f_in = open(learning_data_filename, "r")
	skip_counter = 0
	samples_counter = 0

	# Array of quantified attributes
	X = []
	# Array of labels
	Y = []

	# Skip offset numbe of lines, add sample size number of labels and attributes to Y and X
	print "Reading data from learning file ..."
	for line in f_in:
		if (skip_counter < offset):
			skip_counter += 1
			continue
		else:
			if ( samples_counter < sample_size ):
				# Strip \n, split line with whitespaces
				line = line[:-1].split(" ")

				if line[0] == positive_label:
					Y.append(1.0)
				else:
					Y.append(0.0)

				X.append(map(lambda s: float(s), line[1:number_of_quantifiers+1]))
				samples_counter += 1
			else:
				break

	# Populate matrix of supporting hyperplanes, for different kernels			
	print "Building matrix of supporting hyperplanes ..."
	H = []
	for i in range(sample_size):
		h_row = []
		for j in range(sample_size):
			if kernel == "linear":
				x_i = numpy.array(X[i])
				x_j = numpy.array(X[j])
				h_row.append( Y[i] * Y[j] * numpy.dot( x_i, x_j ))
			elif kernel == "RBF":
				x_i = numpy.array(X[i])
				x_j = numpy.array(X[j])
				gamma = math.pow(2, gamma_pow)
				h_row.append( math.exp( -gamma * numpy.linalg.norm( x_i - x_j ) ) ) 
			else: 
				raise Exception("Unknown kernel.")
		H.append(h_row)

	""" 
	To get alpha vector which maximizes normal to hyperplane we will use
		quadratic programming techniques provided by cvxopt module.
		Description available at: http://courses.csail.mit.edu/6.867/wiki/images/a/a7/Qp-cvxopt.pdf
	"""
	print "Maximazing vector norm to hyperplane ..."
	from cvxopt import matrix
	# initializing cvxopt required parameters
	P = matrix(H, tc='d')
	q = matrix(numpy.ones((sample_size, 1), dtype=int), tc='d')
	q *= -1
	C = math.pow(2, penalty_pow)
	G = matrix(numpy.identity(sample_size), tc='d')
	h = matrix(numpy.ones((sample_size, 1)), tc='d')
	h = h * C

	from cvxopt import solvers
	sol = solvers.qp(P,q,G,h)

	alphas = sol["x"]

	print "Calculating resulting W vector ..."
	W = numpy.zeros(number_of_quantifiers)
	for i in range(sample_size):
		W += ( alphas[i] * Y[i] ) * numpy.array(X[i])

	print "Retrieving support vectors ..."
	S = {"X": [], "Y": [], "alphas": []}
	for i in range(sample_size):
		if ( alphas[i] > 0 ):
			S["X"].append(X[i])
			S["Y"].append(Y[i])
			S["alphas"].append(alphas[i])
 
 	print "Calculating resulting bias ..."

 	b = 0
 	for s in range(len(S["alphas"])):
 		b_s = S["Y"][s]
 		for m in range(len(S["alphas"])):
 			b_s -= numpy.dot( S["alphas"][m] * S["Y"][m] * numpy.array(S["X"][m]), numpy.array(S["X"][s])) 
 		b += b_s
 	b /= len(S["alphas"])

 	result = {"W": W, "bias": b}
 	return result

# Predict the class (1.0 or 0.0) of one sample from its attributes.
def predict( Xs, W_and_bias ):
	"""
	Classify the attribute vector `Xs` with a trained model.

	`W_and_bias` is the {"W": ..., "bias": ...} dict produced by train() /
	cross_validation(). Returns 1.0 when the decision value W . Xs + bias is
	strictly positive, 0.0 otherwise.
	"""
	decision_value = numpy.dot(numpy.array(Xs), numpy.array(W_and_bias["W"])) + W_and_bias["bias"]
	return 1.0 if decision_value > 0 else 0.0


def cross_validation( learning_data_filename = SVM_config["default_training_filename"], 
	                  number_of_samples = SVM_config["known_number_of_samples"],
	                  sample_size = SVM_config["training_sample_size"],
	                  training_runs = SVM_config["training_runs"],
	                  number_of_quantifiers = SVM_config["number_of_quantifiers"],
	                  kernel = SVM_config["default_kernel"], 
	      			  penalty_pow = SVM_config["default_penalty_parameter_power"],
	      			  gamma_pow = SVM_config["default_gamma_power"],
	      			  positive_label = SVM_config["default_positive_label"] ):
	
	import random
	random.seed()

	possible_runs = range( number_of_samples / sample_size )
	# initialize W and bias
	averaged_result = { "W" : numpy.zeros(number_of_quantifiers), "bias": 0 }

	for i in range(training_runs):
		# Get new offset for training
		offset = possible_runs.pop( random.randint( 0, len(possible_runs) - 1 ) ) * sample_size
		print "Training run #" + str(i + 1), learning_data_filename, "sample size : " + str(sample_size), "offset : " + str(offset)
		training_result = train( learning_data_filename = learning_data_filename, 
								 sample_size = sample_size, 
								 offset = offset,
								 positive_label = positive_label)
		print "Training result : " + str(training_result)
		averaged_result["W"] += training_result["W"]
		averaged_result["bias"] += training_result["bias"]

	averaged_result["W"] /= training_runs
	averaged_result["bias"] /= training_runs

	print "Averaged training result : " + str(averaged_result)

	return averaged_result

def calculate_accuracy( W_and_bias,
						testing_sample_offset = 0,
						learning_data_filename = SVM_config["default_training_filename"], 
	                    number_of_samples = SVM_config["known_number_of_samples"],
	                    sample_size = SVM_config["testing_sample_size"],
	                    number_of_quantifiers = SVM_config["number_of_quantifiers"],
	      			    positive_label = SVM_config["default_positive_label"] ):
	"""
	Fraction of correct predictions of the model `W_and_bias` on up to
	`sample_size` lines of `learning_data_filename`, starting after the first
	`testing_sample_offset` lines.

	Returns a float in [0, 1]; 0.0 when no samples could be read.
	"""
	skip_counter = 0
	samples_counter = 0
	correct_predictions = 0.

	f_in = open(learning_data_filename, "r")
	try:
		for line in f_in:
			if skip_counter < testing_sample_offset:
				skip_counter += 1
				continue
			if samples_counter >= sample_size:
				break
			# Strip \n, split line with whitespaces
			line = line[:-1].split(" ")

			Y = 1.0 if line[0] == positive_label else 0.0
			X = [float(s) for s in line[1:number_of_quantifiers + 1]]

			if Y == predict(X, W_and_bias):
				correct_predictions += 1
			samples_counter += 1
	finally:
		# BUG FIX: the file was previously left open when an exception was
		# raised mid-loop.
		f_in.close()

	# BUG FIX: previously raised ZeroDivisionError when the offset was past
	# the end of the file and no samples were read.
	if samples_counter == 0:
		return 0.0
	return correct_predictions / samples_counter

# Searching for best RBF parameters using hill climbing based on accuracy
def RBF_hill_climbing2D( output_filename = SVM_config["RBF_hill_climbing2D_output_filename"],
						 penalty_pow = SVM_config["default_penalty_parameter_power"],
	      			  	 gamma_pow = SVM_config["default_gamma_power"], 
	      			  	 step = SVM_config["hill_climbing_initial_step"], 
	      			  	 step_limit = SVM_config["hill_climbing_step_limit"],
	      			  	 testing_sample_offset = SVM_config["default_offset"],
						 learning_data_filename = SVM_config["default_training_filename"], 
	                     number_of_samples = SVM_config["known_number_of_samples"],
	                     sample_size = SVM_config["training_sample_size"],
	      			     positive_label = SVM_config["default_positive_label"] ):
	"""
	Hill-climb over the exponents (penalty_pow, gamma_pow) of the RBF SVM,
	maximizing cross-validated accuracy. At each step the four neighbouring
	points (+/- step in each direction) are evaluated; when none improves, the
	step is halved until it falls below `step_limit`.

	Writes "Penalty | Alpha | Accuracy" rows to `output_filename`.
	"""
	def _evaluate(p_pow, g_pow):
		# Accuracy of a cross-validated model at the given parameter powers.
		# BUG FIX: the data file, sample size, testing offset and positive
		# label were previously accepted by this function but never forwarded
		# to cross_validation() / calculate_accuracy().
		model = cross_validation( learning_data_filename = learning_data_filename,
								  number_of_samples = number_of_samples,
								  sample_size = sample_size,
								  penalty_pow = p_pow,
								  gamma_pow = g_pow,
								  positive_label = positive_label )
		return calculate_accuracy( W_and_bias = model,
								   testing_sample_offset = testing_sample_offset,
								   learning_data_filename = learning_data_filename,
								   positive_label = positive_label )

	f_out = open(output_filename, "w")
	f_out.write("Penalty | Alpha | Accuracy\n")

	base_case = _evaluate(penalty_pow, gamma_pow)

	step_update = False
	while True:

		# Log the current best point, except right after a step reduction
		# (the point has not changed since the last row).
		if not step_update:
			f_out.write( "2^" + str(penalty_pow) + " | 2^" + str(gamma_pow) + " | " + str(base_case) + "\n")

		# Alternative attributes measurements
		penalty_inc = _evaluate(penalty_pow + step, gamma_pow)
		penalty_dec = _evaluate(penalty_pow - step, gamma_pow)
		gamma_inc = _evaluate(penalty_pow, gamma_pow + step)
		gamma_dec = _evaluate(penalty_pow, gamma_pow - step)

		step_update = False
		# Searching for better alternative
		if ( penalty_inc > base_case ):
			penalty_pow += step
			base_case = penalty_inc
		elif ( penalty_dec > base_case ):
			penalty_pow -= step
			base_case = penalty_dec
		elif ( gamma_inc > base_case ):
			gamma_pow += step
			base_case = gamma_inc
		elif ( gamma_dec > base_case ):
			gamma_pow -= step
			base_case = gamma_dec
		else:
			# If no better alternatives found, reduce step size by half and
			# try again; stop once the step falls below the limit.
			step /= 2
			step_update = True
			if ( step < step_limit):
				break

	f_out.close()

# learning runs and sample size hill climbing
def LRASS_hill_climbing2D( 
						 output_filename = SVM_config["LRASS_hill_climbing2D_output_filename"],
						 penalty_pow = SVM_config["default_penalty_parameter_power"],
	      			  	 gamma_pow = SVM_config["default_gamma_power"],
	      			  	 training_runs = SVM_config["training_runs"],
						 training_runs_step = SVM_config["training_runs_step"],
						 training_runs_max = SVM_config["training_runs_max"], 
	      			  	 sample_size = SVM_config["training_sample_size"],
	      			  	 sample_size_step = SVM_config["training_sample_size_step"], 
	      			  	 sample_size_max =  SVM_config["training_sample_size_max"],
	      			  	 testing_sample_offset = SVM_config["default_offset"],
						 learning_data_filename = SVM_config["default_training_filename"], 
	                     number_of_samples = SVM_config["known_number_of_samples"],
	      			     positive_label = SVM_config["default_positive_label"] ):
	"""
	Hill-climb over (training_runs, sample_size), maximizing cross-validated
	accuracy. At each step the four neighbouring settings are evaluated; the
	search stops when no in-bounds neighbour improves the accuracy.

	Writes "Learning Runs | Sample Size | Accuracy" rows to `output_filename`.
	"""
	def _evaluate(runs, size):
		# Accuracy of a cross-validated model for the given setting; returns
		# -1.0 for out-of-bounds settings so they can never win.
		# BUG FIX: previously out-of-range alternatives (e.g. training_runs
		# reduced to 0) were evaluated BEFORE the bounds check, crashing
		# inside cross_validation().
		if runs <= 0 or runs > training_runs_max or size <= 0 or size > sample_size_max:
			return -1.0
		# BUG FIX: the data file, testing offset and positive label were
		# previously accepted but never forwarded to calculate_accuracy().
		model = cross_validation( learning_data_filename = learning_data_filename,
								  number_of_samples = number_of_samples,
								  penalty_pow = penalty_pow,
								  gamma_pow = gamma_pow,
								  sample_size = size,
								  training_runs = runs,
								  positive_label = positive_label )
		return calculate_accuracy( W_and_bias = model,
								   testing_sample_offset = testing_sample_offset,
								   learning_data_filename = learning_data_filename,
								   positive_label = positive_label )

	f_out = open(output_filename, "w")
	f_out.write("Learning Runs | Sample Size | Accuracy\n")

	base_case = _evaluate(training_runs, sample_size)

	while True:

		f_out.write( str(training_runs) + " | " + str(sample_size) + " | " + str(base_case) + "\n")

		# Getting alternatives
		runs_inc = _evaluate(training_runs + training_runs_step, sample_size)
		runs_dec = _evaluate(training_runs - training_runs_step, sample_size)
		size_inc = _evaluate(training_runs, sample_size + sample_size_step)
		size_dec = _evaluate(training_runs, sample_size - sample_size_step)

		# Looking for a better alternative
		if ( runs_inc > base_case ):
			training_runs += training_runs_step
			base_case = runs_inc
		elif ( runs_dec > base_case ):
			training_runs -= training_runs_step
			base_case = runs_dec
		elif ( size_inc > base_case ):
			sample_size += sample_size_step
			base_case = size_inc
		elif ( size_dec > base_case ):
			sample_size -= sample_size_step
			base_case = size_dec
		else:
			break

	f_out.close()

def logLoss( testing_sample_offset = 0,
			 learning_data_filename = SVM_config["default_training_filename"], 
	         number_of_samples = SVM_config["known_number_of_samples"],
	         sample_size = SVM_config["testing_sample_size"],
	         number_of_quantifiers = SVM_config["number_of_quantifiers"]):
	"""
	Average logistic loss of five one-vs-rest SVM models over up to
	`sample_size` test lines starting after `testing_sample_offset` lines.

	One model is trained per class label "1".."5" (open, off topic, not
	constructive, not a real question, too localized); each test line
	contributes log(1 + exp(-margin)) of the model for its TRUE class.

	Returns the average loss per sample; 0.0 when no samples were read.
	"""
	# Getting 5 trained SVM models, one per class label "1".."5".
	Ws = [cross_validation( sample_size = 500, training_runs = 5,
							penalty_pow = -8.5, gamma_pow = -8,
							positive_label = str(label) )
		  for label in range(1, 6)]

	logTotal = 0.0

	skip_counter = 0
	samples_counter = 0

	f_in = open(learning_data_filename, "r")
	try:
		for line in f_in:
			if skip_counter < testing_sample_offset:
				skip_counter += 1
				continue
			if samples_counter >= sample_size:
				break
			# Strip \n, split line with whitespaces
			line = line[:-1].split(" ")
			y = int(line[0])
			X = [float(s) for s in line[1:number_of_quantifiers + 1]]

			# BUG FIX: previously looped i over range(number_of_quantifiers)
			# while indexing Ws[i] -- an IndexError whenever a label exceeded
			# the five available models; index the true class's model directly.
			if 1 <= y <= len(Ws):
				model = Ws[y - 1]
				margin = numpy.dot(numpy.array(model["W"]), numpy.array(X)) + model["bias"]
				logTotal += math.log(1 + math.exp(-margin))

			samples_counter += 1
	finally:
		# BUG FIX: the file was previously left open on an exception.
		f_in.close()

	# BUG FIX: guard against ZeroDivisionError when no samples were read.
	if samples_counter == 0:
		return 0.0
	return logTotal / samples_counter

# Below are examples of how to use the code above

#RBF_hill_climbing2D()
#LRASS_hill_climbing2D(penalty_pow = -8.5)

"""
W_and_bias_open = cross_validation( sample_size = 500, training_runs = 5, penalty_pow = -8.5, gamma_pow = -8, positive_label = "1")
W_and_bias_off_topic = cross_validation( sample_size = 500, training_runs = 5, penalty_pow = -8.5, gamma_pow = -8, positive_label = "2")
W_and_bias_not_constructive = cross_validation( sample_size = 500, training_runs = 5, penalty_pow = -8.5, gamma_pow = -8, positive_label = "3")
W_and_bias_not_a_question = cross_validation( sample_size = 500, training_runs = 5, penalty_pow = -8.5, gamma_pow = -8, positive_label = "4")
W_and_bias_too_localized = cross_validation( sample_size = 500, training_runs = 5, penalty_pow = -8.5, gamma_pow = -8, positive_label = "5")

print "Open post classification accuracy : " + str(calculate_accuracy(W_and_bias = W_and_bias_open, testing_sample_offset = 5000, positive_label = "1"))
print "Off topic classification accuracy : " + str(calculate_accuracy(W_and_bias = W_and_bias_off_topic, testing_sample_offset = 5000, positive_label = "2"))
print "Not constructive classification accuracy : " + str(calculate_accuracy(W_and_bias = W_and_bias_not_constructive, testing_sample_offset = 5000, positive_label = "3"))
print "Not a real question classification accuracy : " + str(calculate_accuracy(W_and_bias = W_and_bias_not_a_question, testing_sample_offset = 5000, positive_label = "4"))
print "Too localized classification accuracy : " + str(calculate_accuracy(W_and_bias = W_and_bias_too_localized, testing_sample_offset = 5000, positive_label = "5"))
"""

#logLoss()