'''
Handles the ToE and TE simulation for Facebook's DCN traces with offline training, meaning that no
traffic uncertainty is involved in the evaluations. The training window is exactly the same as the
evaluation window.
'''
import sys, os, copy, random
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *
from traffic_generator import *

## import for plotting uses 
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm

## Imports the multiprocessing modules
from multiprocessing import Pool          

## Number of worker processes to use when evaluating configurations in parallel
NUMBER_OF_PROCESSES = 6


'''
Utility functions
(START)
'''

#### Writing the timeseries to a variety of txt formats understandable by plotting tools
## Given a timeseries, downsamples it by taking every few samples
def downsample(timeseries, nsamples):
	"""Downsample a timeseries to exactly `nsamples` evenly-spaced points.

	The first and last samples of the input are always retained (the index
	stride is (len - 1) / (nsamples - 1)). Requires len(timeseries) >= nsamples.

	:param timeseries: sequence of values to downsample
	:param nsamples: number of output samples (>= 1)
	:return: list of `nsamples` values taken from `timeseries`
	"""
	orig_len = len(timeseries)
	assert(orig_len >= nsamples)
	if nsamples == 1:
		## Degenerate request: avoid the division by zero in the stride below.
		return [timeseries[0]]
	factor = float(orig_len - 1) / float(nsamples - 1)
	## Evenly spaced indices; int() truncation keeps indices in range.
	return [timeseries[int(i * factor)] for i in range(nsamples)]

## given an array of numbers, returns its CDF from 0 to 1
def get_normalized_cdf(array_of_values, nbins):
	"""Return (bin_edges, normalized cumulative histogram) over `nbins` bins.

	Edges span [min, max] of the input. Only the interior samples are binned;
	the final bin is forced to a count of 1 (the maximum sample), so the last
	CDF value may be slightly below 1 — callers clamp it when needed.
	When all samples are equal, returns constant edges and a raw histogram
	with all mass in the last bin.
	"""
	ordered = sorted(array_of_values)
	lo = ordered[0]
	hi = ordered[-1]
	span = hi - lo

	if span == 0:
		## Degenerate distribution: everything falls in the final bin.
		degenerate_hist = [0] * nbins
		degenerate_hist[-1] = 1
		return ([lo] * nbins, degenerate_hist)

	## Evenly spaced edges; endpoints pinned exactly to min/max.
	bin_edges = [float(i) / (nbins - 1) * span + lo for i in range(nbins)]
	bin_edges[0] = lo
	bin_edges[-1] = hi

	counts = [0] * nbins
	for sample in ordered[1:-1]:
		counts[int((sample - lo) / span * (nbins - 1))] += 1
	## The maximum sample is accounted for as a single count in the last bin.
	counts[-1] = 1
	total = float(len(array_of_values))
	cumulative = np.cumsum(counts)
	return (bin_edges, [float(c) / total for c in cumulative])

'''
Exporting common stats like mlu, lu90, lu50, hop count to txt files
Note that competitive ratio is not included at the moment
(START)
'''
## exports the sorted percentile file
def export_percentile_pgffile(export_filename, performance_timeseries, nbins=101):
	"""Export per-metric percentile curves to a pgfplots-style txt file.

	Each entry of `performance_timeseries` is a 5-tuple
	(mlu, lu90, lu50, ave_hop_count, competitive_ratio). Each metric column
	is sorted independently and downsampled to `nbins` points, so every output
	row is a percentile rather than a time-aligned snapshot.

	Fix: the original built a first header string that was immediately
	overwritten (dead code) — removed.
	"""
	## One sorted + downsampled series per metric column.
	mlu_timeseries = downsample(sorted([x[0] for x in performance_timeseries]), nbins)
	lu90_timeseries = downsample(sorted([x[1] for x in performance_timeseries]), nbins)
	lu50_timeseries = downsample(sorted([x[2] for x in performance_timeseries]), nbins)
	ahc_timeseries = downsample(sorted([x[3] for x in performance_timeseries]), nbins)
	competitive_ratio_timeseries = downsample(sorted([x[4] for x in performance_timeseries]), nbins)
	lines = ["x_range mlu lu90 lu50 ahc competitive_ratio \n"]
	for i in range(nbins):
		lines.append("{} {} {} {} {} {} \n".format(i, mlu_timeseries[i], lu90_timeseries[i], lu50_timeseries[i], ahc_timeseries[i], competitive_ratio_timeseries[i]))
	with open(export_filename, "w+") as f:
		f.write("".join(lines))
	return

## decay rate for mlu only
def export_decay_rate_pgffile(export_filename, performance_timeseries, nbins=101):
	## export non log for now
	mlu_timeseries = [x[0] for x in performance_timeseries]
	#lu90_timeseries = [x[1] for x in performance_timeseries]
	#lu50_timeseries = [x[2] for x in performance_timeseries]
	#ahc_timeseries = [x[3] for x in performance_timeseries]
	#competitive_ratio_timeseries = [x[4] for x in performance_timeseries]
	## step 1 : find out the CDF of performances
	mlu_edges, mlu_cdf  = get_normalized_cdf(mlu_timeseries, nbins)
	mlu_decay_rate = [1. - x for x in mlu_cdf]
	#lu90_edges, lu90_cdf = get_normalized_cdf(lu90_timeseries, nbins)
	#lu90_decay_rate = [1. - x for x in lu90_cdf]
	#lu50_edges, lu50_cdf = get_normalized_cdf(lu50_timeseries, nbins)
	#lu50_decay_rate = [1. - x for x in lu50_cdf]
	#ahc_edges, ahc_cdf = get_normalized_cdf(ahc_timeseries, nbins)
	#ahc_decay_rate = [1. - x for x in ahc_cdf]
	#competitive_ratio_edges, competitive_ratio_cdf = get_normalized_cdf(competitive_ratio_timeseries, nbins)
	#competitive_ratio_decay_rate = [1. - x for x in competitive_ratio_cdf]
	#str_builder = "mlu_x mlu_y lu90_x lu90_y lu50_x lu50_y ahc_x ahc_y competitive_ratio_x competitive_ratio_y \n"
	str_builder = "mlu_x mlu_y \n"
	for row in range(len(mlu_edges)): ## remove the final entry, which could be zero and that forces the log value of a 0 to be weird
		#str_builder += "{} {} {} {} {} {} {} {} {} {} \n".format(mlu_edges[row], mlu_decay_rate[row], lu90_edges[row], lu90_decay_rate[row], lu50_edges[row], lu50_decay_rate[row], ahc_edges[row], ahc_decay_rate[row], competitive_ratio_edges[row], competitive_ratio_decay_rate[row])
		str_builder += "{} {} \n".format(mlu_edges[row], mlu_decay_rate[row])
	with open(export_filename, "w+") as f:
		f.write(str_builder)
	return


def export_performance_cdf_pgffile(export_filename, performance_timeseries, nbins=101):
	"""Export the CDF of all five performance metrics to a pgfplots txt file.

	Each entry of `performance_timeseries` is a 5-tuple
	(mlu, lu90, lu50, ave_hop_count, competitive_ratio). Each metric gets a
	pair of columns (bin edges, CDF values); the last CDF value is clamped
	to exactly 1.
	"""
	## One (edges, cdf) pair per metric column, in header order.
	per_metric_cdfs = []
	for column in range(5):
		series = [entry[column] for entry in performance_timeseries]
		edges, cdf = get_normalized_cdf(series, nbins)
		cdf[-1] = 1.  ## force the CDF to terminate at exactly 1
		per_metric_cdfs.append((edges, cdf))

	lines = ["mlu_x mlu_y lu90_x lu90_y lu50_x lu50_y ahc_x ahc_y competitive_ratio_x competitive_ratio_y \n"]
	num_rows = len(per_metric_cdfs[0][0])
	for row in range(num_rows):
		fields = ["{} {}".format(edges[row], cdf[row]) for edges, cdf in per_metric_cdfs]
		lines.append(" ".join(fields) + " \n")
	with open(export_filename, "w+") as f:
		f.write("".join(lines))
	return

'''
(END)
Exporting common stats like mlu, lu90, lu50, hop count to txt files
'''


def scale_matrix(nblocks, traffic_matrix, multiplying_factor):
	"""Scale every off-diagonal entry of a square matrix in place.

	Iterates over the upper-triangular pairs and scales both (i, j) and
	(j, i), so the diagonal is never touched. Returns the same (mutated)
	matrix object for convenience.

	Fix: removed the redundant `if i != j` check — `j` always starts at
	`i + 1`, so the condition could never be false.
	"""
	for i in range(nblocks - 1):
		for j in range(i + 1, nblocks, 1):
			traffic_matrix[i][j] *= multiplying_factor
			traffic_matrix[j][i] *= multiplying_factor
	return traffic_matrix

def flatten_traffic_matrix(tm, num_nodes):
	"""Collapse the off-diagonal entries of `tm` (row-major order) into a 1-D numpy array."""
	return np.array([tm[src][dst]
					 for src in range(num_nodes)
					 for dst in range(num_nodes)
					 if src != dst])

def unflatten_traffic_vector(vector, nblocks):
	"""Inverse of flatten_traffic_matrix: rebuild an nblocks x nblocks matrix.

	Off-diagonal cells are filled in row-major order from `vector`;
	the diagonal stays zero.
	"""
	tm = np.zeros((nblocks, nblocks))
	values = iter(vector)
	for row in range(nblocks):
		for col in range(nblocks):
			if row != col:
				tm[row][col] = next(values)
	return tm

def dump_timeseries_to_txtfile(export_filename, performance_timeseries_list):
	"""Serialize a protobuf-style timeseries object to a binary file.

	Bug fix: the original body referenced an undefined name
	`performance_timeseries` (NameError on every call); it now writes the
	actual parameter.

	NOTE(review): despite the `_list` suffix, the argument must be a single
	object exposing SerializeToString() — confirm against callers (none are
	visible in this file).
	"""
	with open(export_filename, "wb") as f:
		f.write(performance_timeseries_list.SerializeToString())
	return

## Functionally the same as ideal_performance_evaluator() above, just that here we only allow direct hops
def ideal_direct_path_only_topology_engineering_evaluator(aurora, traffic_matrices):
	## check if the files are already available
	direct_path_toe = direct_path_topology_engineer.DirectPathTopologyEngineer( aurora, 1, 1)
	performance_timeseries = [0] * len(traffic_matrices)
	nblocks = aurora.get_num_blocks()
	for tm, index in zip(traffic_matrices, range(len(traffic_matrices))):
		logical_topology, _ = direct_path_toe.topology_engineer_given_representative_TMs([tm,], None)
		(mlu, lu90, lu50, ahc, lu_distribution_sorted) = evaluate_singlehop_performance(aurora, nblocks, logical_topology, tm)
		performance_timeseries[index] = (mlu, lu90, lu50, ahc, lu_distribution_sorted)
	return performance_timeseries

def evaluate_fattree_ecmp(export_directory, aurora_network, all_traffic_matrices, tapering_factors, ideal_timeseries):
	"""Evaluate fat-tree ECMP at several tapering factors and export result files.

	The competitive ratio of each snapshot is measured against the matching
	entry of `ideal_timeseries` (its MLU is at index 0).
	"""
	for tapering_factor in tapering_factors:
		print("Evaluating fattree ecmp tapering : {}".format(tapering_factor))
		## e.g. 0.5 -> "0p500" so the factor can be embedded in a filename.
		taper_tag = "{:.3f}".format(tapering_factor).replace('.', 'p')
		base_name = "fattree_ecmp_taper{}".format(taper_tag)
		perf_timeseries = []
		for perf_index, tm in enumerate(all_traffic_matrices):
			mlu, lu90, lu50, ahc, _ = evaluate_fattree_ecmp_performance(aurora_network, tm, tapering=tapering_factor)
			ideal_mlu = ideal_timeseries[perf_index][0]
			perf_timeseries.append((mlu, lu90, lu50, ahc, mlu/ideal_mlu))
		export_percentile_pgffile(export_directory + '/' + base_name + '_percentile.txt', perf_timeseries)
		export_decay_rate_pgffile(export_directory + '/' + base_name + '_decayrate.txt', perf_timeseries)
	return

def evaluate_directpathonly(aurora_network, export_directory, num_ks, training_traffic_matrices, eval_traffic_matrices, uniform_adj_matrix, ideal_perf_timeseries):
	"""Evaluate direct-path-only routing on a static mesh and on k-cluster ToE topologies.

	Exports percentile and decay-rate files for the static uniform mesh and
	for each k in `num_ks` (CritMat-clustered representative TMs).

	Bug fix: in the per-k loop the freshly engineered topology
	`logical_topology_adj_matrix` was computed but never used — evaluation
	ran against `uniform_adj_matrix`, so every k exported static-mesh
	results. The k-loop now evaluates the engineered topology.
	"""
	## Do it for static uniform first
	num_eval_tms = len(eval_traffic_matrices)
	nblocks = aurora_network.get_num_blocks()
	## NOTE(review): `reconfiguration_period` is a module-level global set in
	## __main__ — confirm this function is only called after it is defined.
	static_toe = topology_engineer.TopologyEngineer(aurora_network, reconfiguration_period)
	## NOTE(review): this overwrites the caller-supplied `uniform_adj_matrix`
	## argument with a freshly computed mesh — verify that is intended.
	uniform_adj_matrix, _ = static_toe.topology_engineer_given_representative_TMs(None, None)
	perf_timeseries = [None] * num_eval_tms
	print("eval... direct path only : static mesh")
	for tm, perf_index in zip(eval_traffic_matrices, range(num_eval_tms)):
		mlu, lu90, lu50, ahc, _ = evaluate_singlehop_performance(aurora_network, nblocks, uniform_adj_matrix, tm)
		ideal_mlu = ideal_perf_timeseries[perf_index][0]
		perf_timeseries[perf_index] = (mlu, lu90, lu50, ahc, mlu/ideal_mlu)
	export_filename = "static_directpathonly"
	export_percentile_pgffile(export_directory + '/' + export_filename + '_percentile.txt', perf_timeseries)
	export_decay_rate_pgffile(export_directory + '/' + export_filename + '_decayrate.txt', perf_timeseries)
	for k in num_ks:
		print("eval... direct path only : k = {}".format(k))
		## Cluster the training matrices into k representative (critical) TMs.
		training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
		crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")

		representative_traffic_vectors = crit_mat.train(number_of_clusters=k)
		representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]
		direct_path_toe = direct_path_topology_engineer.DirectPathTopologyEngineer(aurora_network, len(eval_traffic_matrices), len(training_traffic_matrices), numK=k)
		logical_topology_adj_matrix, _ = direct_path_toe.topology_engineer_given_representative_TMs(representative_traffic_matrices, None)
		perf_timeseries = [None] * num_eval_tms

		for tm, perf_index in zip(eval_traffic_matrices, range(num_eval_tms)):
			## Fixed: evaluate the k-specific engineered topology, not the static mesh.
			mlu, lu90, lu50, ahc, _ = evaluate_singlehop_performance(aurora_network, nblocks, logical_topology_adj_matrix, tm)
			ideal_mlu = ideal_perf_timeseries[perf_index][0]
			perf_timeseries[perf_index] = (mlu, lu90, lu50, ahc, mlu/ideal_mlu)
		export_filename = "k{}_directpathonly".format(k)
		export_percentile_pgffile(export_directory + '/' + export_filename + '_percentile.txt', perf_timeseries)
		export_decay_rate_pgffile(export_directory + '/' + export_filename + '_decayrate.txt', perf_timeseries)
	return

## Evaluates and returns the results, but if override is not enabled and results file already exists, then just load from file
def evaluate_performance_worker(aurora_network, 
								all_paths, 
								logical_topology_adj_matrix,
								routing_weights,
								eval_traffic_matrices):
	## Initially find the representative traffic matrices
	nblocks = aurora_network.get_num_blocks()
	performance_timeseries = [0] * len(eval_traffic_matrices)
	## Start evaluation - check the ToE case or no ToE case, if no ToE 
	for tm, time_index in zip(eval_traffic_matrices, range(len(eval_traffic_matrices))):
		mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, tm, logical_topology_adj_matrix, routing_weights)
		performance_timeseries[time_index] = (mlu, lu90, lu50, ave_hop_count, lu_distribution)	
	return performance_timeseries

def unpack_arguments(args):
	"""Adapter for multiprocessing.Pool.map: expand one packed argument tuple.

	`args` is (aurora_network, all_paths, logical_topology_adj_matrix,
	routing_weights, eval_traffic_matrices); extra trailing items are ignored.
	"""
	(aurora_network, all_paths, logical_topology_adj_matrix,
	 routing_weights, eval_traffic_matrices) = args[:5]
	return evaluate_performance_worker(aurora_network, all_paths, logical_topology_adj_matrix, routing_weights, eval_traffic_matrices)


if __name__ == "__main__":
	print("Evaluation suite for synthetically-generated traffic traces")

	## ---- Topology parameters ----
	number_of_pods = 8
	## Topology link number 
	per_node_pair_num_links = 15
	link_capacity = 5

	## Per-block-type physical parameters used to construct the network model.
	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("artifical_cluster_{}".format(number_of_pods), block_params, block_names_list)
	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()

	
	print("Generating the synthetic traffic matrices")
	traffic_matrices = []
	nsnapshots = 1000
	## NOTE(review): ncliques is only used by the commented-out clique generator below.
	ncliques = 3
	traffic_matrix_generator = bipartite_traffic_generator.BipartiteTrafficGenerator(number_of_pods)
	#traffic_matrix_generator = clique_traffic_generator.CliqueTrafficGenerator(number_of_pods, ncliques)
	
	## Scale the generated probability matrices up to the network's aggregate capacity.
	traffic_matrices += traffic_matrix_generator.generate_probability_traffic_matrices(nsnapshots)
	tm_scaleup_factor = 1. * float(number_of_pods * per_node_pair_num_links * link_capacity)
	traffic_matrices = [scale_matrix(number_of_pods, x, tm_scaleup_factor) for x in traffic_matrices]
	## Deterministic shuffle (fixed seed) keeps runs reproducible; first 10% is the training set.
	random.Random(10).shuffle(traffic_matrices)
	training_traffic_matrices = traffic_matrices[:int(nsnapshots/10)]
	## Offline training: one reconfiguration window spanning the entire trace.
	reconfiguration_period = len(traffic_matrices)

	## now start forming the reconfiguration windows
	num_traffic_snapshots = len(traffic_matrices)
	## form the direct routing weights first
	direct_routing_weights = {}
	for i in range(number_of_pods):
		for j in range(number_of_pods):
			if i != j:
				direct_routing_weights[(i,j)] = 1

	
	
	export_filenames = ['ideal', 'oblivious']
	print("\nEvaluating the ideal direct ToE performance first...")
	ideal_performance_timeseries = ideal_direct_path_only_topology_engineering_evaluator(aurora_network, traffic_matrices)
	print("Done with evaluating the ideal direct ToE performance\n\n")

	# Declare the static ToE uniform mesh
	static_toe = topology_engineer.TopologyEngineer(aurora_network, reconfiguration_period)
	vlb_te = vlb_traffic_engineer.VLBTrafficEngineer(aurora_network, all_paths)

	uniform_adj_matrix, _ = static_toe.topology_engineer_given_representative_TMs(None, all_paths)
	uniform_topology_vlb_routing_weights = vlb_te.compute_path_weights(uniform_adj_matrix, None)
	## Argument tuples are consumed by unpack_arguments() via Pool.map below.
	oblivious_arguments = (aurora_network, all_paths, uniform_adj_matrix, uniform_topology_vlb_routing_weights, copy.deepcopy(traffic_matrices))
	## Define the number of representative traffic matrices (i.e. number of points to describe the hull)
	num_ks = [1, 3, 5, 7, 9]
	
	## Revised WCMP based topology engineering and traffic engineering
	'''
	bounded_wcmp_toe = bounded_wcmp_topology_engineer_strong.BoundedWCMPTopologyEngineerStrong(aurora_network, 
																								reconfiguration_period,
																								reconfiguration_period, 
																								all_paths, 
																								traffic_matrices)
	bounded_wcmp_te = bounded_wcmp_traffic_engineer_strong.BoundedWCMPTrafficEngineerStrong(aurora_network, 
																							all_paths, 
																							reconfiguration_period, 
																							reconfiguration_period)
	bwcmp_logical_topology, _ = bounded_wcmp_toe.topology_engineer_given_representative_TMs(traffic_matrices, all_paths)
	bwcmp_routing_weights = bounded_wcmp_te.compute_path_weights(bwcmp_logical_topology, traffic_matrices)
	bwcmp_arguments = (aurora_network, all_paths, bwcmp_logical_topology, bwcmp_routing_weights, traffic_matrices)
	'''
	evaluation_parameters_and_arguments_lists = [oblivious_arguments, ]
	legends = ['ideal', 'oblivious', ]
	for k in num_ks:
		## Cluster the training TMs into k representative (critical) matrices via CritMat.
		training_traffic_vectors = [flatten_traffic_matrix(x, number_of_pods) for x in training_traffic_matrices]
		crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
		representative_traffic_vectors = crit_mat.train(number_of_clusters=k)
		representative_traffic_matrices = [unflatten_traffic_vector(vector, number_of_pods) for vector in representative_traffic_vectors]
		## Define the Topology Engineer algorithm
		## NOTE(review): the first `toe` assignment below is immediately overwritten by the
		## second one — the sensitivity-based engineer is effectively dead code. Confirm
		## which algorithm is intended before deleting either.
		toe = robust_multi_traffic_topology_engineer_sensitivity.RobustMultiTrafficTopologyEngineerSensitivity(aurora_network, 
																												reconfiguration_period, 
																												reconfiguration_period,
																												all_paths, 
																												traffic_matrices, 
																												k, 
																												minimize_multihop=True, 
																												lower_bound_sensitivity=0.009, 
																												upper_bound_sensitivity_relaxation=1)
		toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																										reconfiguration_period, 
																										reconfiguration_period, 
																										all_paths, 
																										traffic_matrices, 
																										k,
																										mlu_relaxation=1.08)
		## Define the Traffic Engineer algorithm for the topology engineering class
		## NOTE(review): as with `toe`, the first `te` assignment is dead — overwritten below.
		te = robust_multi_cluster_traffic_engineer_sensitivity.RobustMultiClusterTrafficEngineerSensitivity(aurora_network, 
																											all_paths, 
																											reconfiguration_period, 
																											reconfiguration_period, 
																											k, 
																											reduce_multihop=True, 
																											sensitivity_relaxation=1, 
																											minimum_sensitivity=0.009)
		te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								reconfiguration_period, 
																								reconfiguration_period, 
																								k, 
																								reduce_multihop=True,
																								sensitivity_relaxation=1, 
																								mlu_relaxation=1.08)
		toe_logical_topology, _ = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
		toe_routing_weights = te.compute_path_weights(toe_logical_topology, copy.deepcopy(representative_traffic_matrices))
		## Define the Traffic Engineer algorithm for the static uniform mesh class
		## NOTE(review): `te2` is likewise assigned twice; only the second assignment is live.
		te2 = robust_multi_cluster_traffic_engineer_sensitivity.RobustMultiClusterTrafficEngineerSensitivity(aurora_network, 
																											all_paths, 
																											reconfiguration_period, 
																											reconfiguration_period, 
																											k, 
																											reduce_multihop=False, 
																											sensitivity_relaxation=1.2, 
																											minimum_sensitivity=0.009)
		te2 = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																									all_paths, 
																									reconfiguration_period, 
																									reconfiguration_period, 
																									k, 
																									reduce_multihop=True,
																									sensitivity_relaxation=1, 
																									mlu_relaxation=1.)
		static_te_routing_weights = te2.compute_path_weights(uniform_adj_matrix, copy.deepcopy(representative_traffic_matrices))
		#te2 = robust_multi_cluster_traffic_engineer_sensitivity_automatic.RobustMultiClusterTrafficEngineerSensitivityAutomatic(aurora_network, all_paths, 10, 10, k, reduce_multihop=False, sensitivity_relaxation=1.2, minimum_sensitivity=0.0105)
		toe_te_arguments = (aurora_network, all_paths, toe_logical_topology, toe_routing_weights, copy.deepcopy(traffic_matrices))
		static_te_arguments = (aurora_network, all_paths, uniform_adj_matrix, static_te_routing_weights, copy.deepcopy(traffic_matrices))
		evaluation_parameters_and_arguments_lists += [toe_te_arguments, static_te_arguments, ]
		legends += ['k - {} ToE + TE'.format(k), 'Static k - {} TE'.format(k)]
		export_filenames += ['toe_te_k{}'.format(k), 'static_te_k{}'.format(k), ]
	## Begin evaluation
	output_results = None
	if NUMBER_OF_PROCESSES > 1:
		## Fan the per-configuration evaluations out across worker processes.
		pool = Pool(processes=NUMBER_OF_PROCESSES)
		output_results = pool.map(unpack_arguments, evaluation_parameters_and_arguments_lists)
		pool.close()
		pool.join()
	else:
		# go through the evaluations one by one
		output_results = [unpack_arguments(x) for x in evaluation_parameters_and_arguments_lists]
	output_results = [ideal_performance_timeseries,] + output_results ## add in the ideal timeseries into the output results
	assert(len(export_filenames) == len(output_results))
	## transform everything into performance tuple with competitive ratio
	## NOTE(review): the loop below manually adjusts average hop count (oblivious pinned
	## to 1.93; every even-indexed ToE run reduced by 0.17) — these look like
	## presentation-only corrections; confirm they are intended before publishing results.
	max_oblivious_comp_ratio = 0
	for perf_timeseries, perf_index in zip(output_results, range(len(output_results))):
		transformed_perf_timeseries = [None] * len(perf_timeseries)
		comp_ratio = [0] * len(perf_timeseries)
		for index in range(len(perf_timeseries)):
			mlu, lu90, lu50, ahc, _ = perf_timeseries[index]
			## oblivious routing
			if perf_index == 1:
				ahc = 1.93
			elif perf_index >= 2 and perf_index % 2 == 0:
				ahc -= 0.17
			ideal_mlu, _, _, _, _ = ideal_performance_timeseries[index]
			transformed_perf_timeseries[index] = (mlu, lu90, lu50, ahc, mlu/ideal_mlu)
			comp_ratio[index] = mlu/ideal_mlu
		### Ensure that the oblivious's max oblivious ratio does not exceed 2
		## NOTE(review): rescales every oblivious competitive ratio so the maximum is
		## exactly 2; also rebinds `comp_ratio` from list to scalar inside the loop.
		if perf_index == 1:
			max_oblivious_comp_ratio = max(comp_ratio)
			for index in range(len(perf_timeseries)):
				mlu, lu90, lu50, ahc, comp_ratio = transformed_perf_timeseries[index]
				transformed_perf_timeseries[index] = (mlu, lu90, lu50, ahc, comp_ratio/max_oblivious_comp_ratio *2.)
		output_results[perf_index] = transformed_perf_timeseries

	## Export files to results subdir
	## NOTE(review): assumes ./results already exists — os.mkdir raises otherwise;
	## os.makedirs(..., exist_ok=True) would be more robust.
	if not os.path.isdir('./results/synthetic_traffic'):
		os.mkdir('./results/synthetic_traffic')
	if not os.path.isdir('./results/synthetic_traffic/{}'.format(traffic_matrix_generator.get_name())):
		os.mkdir('./results/synthetic_traffic/{}'.format(traffic_matrix_generator.get_name()))
	export_directory = './results/synthetic_traffic/{}'.format(traffic_matrix_generator.get_name())
	for perf_timeseries, export_filename in zip(output_results, export_filenames):
		export_percentile_pgffile(export_directory + '/' + export_filename + '_percentile.txt', perf_timeseries)
		export_decay_rate_pgffile(export_directory + '/' + export_filename + '_decayrate.txt', perf_timeseries)

	## Evaluate Fat Tree ECMP
	evaluate_fattree_ecmp(export_directory, aurora_network, traffic_matrices, [1./x for x in [1, 2, 4, 5, 10]], ideal_performance_timeseries)
	## Evaluate direct path only
	evaluate_directpathonly(aurora_network, export_directory, num_ks, training_traffic_matrices, traffic_matrices, uniform_adj_matrix, ideal_performance_timeseries)	
	
	### Finally export results
	## At this point, all of the evaluations have succesfully run
	fig = plt.figure()
	print("length of output_results : {}".format(len(output_results)))
	for perf_timeseries in output_results:
		mlu_timeseries = sorted([x[0]for x in perf_timeseries])
		plt.plot(sorted(mlu_timeseries))
		#print(sorted(mlu_timeseries))
	plt.ylabel('MLU')
	plt.legend(legends)

	fig = plt.figure()
	print("length of output_results : {}".format(len(output_results)))
	for perf_timeseries in output_results:
		ahc_timeseries = sorted([x[3]for x in perf_timeseries])
		plt.plot(sorted(ahc_timeseries))
	## NOTE(review): this second figure plots average hop count but is labelled 'MLU' —
	## likely a copy-paste slip; should presumably read 'Average hop count'.
	plt.ylabel('MLU')
	plt.legend(legends)

	plt.show()
	print("Completed Evaluation... Exiting Safely")