'''
Handles the ToE and TE simulation for Facebook's DCN traces with offline training. Meaning that there is no
traffic uncertainty involved in the evaluations. The training window is the exact same as the evaluation window
'''
import sys, os, copy
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *

## import for plotting uses 
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
import seaborn as sns

## Imports the multiprocessing modules
from multiprocessing import Pool          

## The flag used for determining whether if we try to read results from directory or otherwise
OVERRIDE_ALL_FILES = False
NUMBER_OF_PROCESSES = 1


def flatten_traffic_matrix(tm, num_nodes):
	"""Serialize the off-diagonal entries of a num_nodes x num_nodes traffic
	matrix into a 1D numpy vector, scanning rows in order and skipping the
	(zero) diagonal."""
	off_diagonal = [tm[src][dst] for src in range(num_nodes) for dst in range(num_nodes) if src != dst]
	return np.array(off_diagonal)

def unflatten_traffic_vector(vector, nblocks):
	"""Inverse of flatten_traffic_matrix: rebuild an nblocks x nblocks matrix
	from a row-major vector of off-diagonal entries, leaving the diagonal zero."""
	tm = np.zeros((nblocks, nblocks))
	entries = iter(vector)
	for src in range(nblocks):
		for dst in range(nblocks):
			if src != dst:
				tm[src][dst] = next(entries)
	return tm

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Read the Facebook DCN traffic-matrix protobuf trace, restricted to the
	pods listed in network_ids_filename.

	Entries are rescaled IN PLACE from bytes-per-aggregation-window to Mbps
	(x8 bits, /1e6, /window seconds).  Returns (traffic_matrices,
	valid_network_ids, timestamps).
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	for tm in traffic_matrices:
		matrix_dim = len(tm)
		for src in range(matrix_dim):
			for dst in range(matrix_dim):
				# bytes over the window -> megabits per second
				tm[src][dst] = float(tm[src][dst]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

def dump_timeseries_performance_to_protobuf(export_filename, timestamps, performance_timeseries_list):
	"""Serialize one performance entry per snapshot into a PerformanceTimeSeries
	protobuf and write it to export_filename.

	Each performance tuple is (mlu, lu90, lu50, average_hop_count,
	lu_distribution), where lu_distribution is an iterable of
	(link_utilization, num_links, src, dst).  Only indices 3 and 4 are
	exported.
	"""
	assert(len(timestamps) == len(performance_timeseries_list))
	import proto.timeseries_performance_pb2 as performance_pb
	series_proto = performance_pb.PerformanceTimeSeries()
	for snapshot_ts, perf in zip(timestamps, performance_timeseries_list):
		entry = series_proto.entries.add()
		entry.timestamp = snapshot_ts
		entry.average_hop_count = perf[3]
		for utilization, link_count, src_block, dst_block in perf[4]:
			lu_entry = entry.link_utilizations.add()
			lu_entry.src = src_block
			lu_entry.dst = dst_block
			lu_entry.link_utilization = utilization
			lu_entry.num_links = link_count
	with open(export_filename, "wb") as f:
		f.write(series_proto.SerializeToString())
	return

## Evaluates one (ToE, TE) configuration over every reconfiguration window and returns the
## per-snapshot performance timeseries; if override is not enabled and the results file
## already exists, the cached protobuf is loaded instead of re-running the evaluation.
##
## NOTE(review): reads the module-level globals `traffic_matrices`, `reconfig_windows`,
## `direct_routing_weights` and `timestamps` set up in the __main__ section, so it can
## only be called after that setup has run.
def evaluate_performance_or_load_results_from_pb(results_filename_nodirectory_filename,
												aurora_network, 
												all_paths, 
												toe_algo, 
												te_algo,
												numK=1,
												override_all_files=False,
												direct_paths_only=False):
	nblocks = aurora_network.get_num_blocks()
	dcn_name = aurora_network.get_dcn_name()
	results_file_base_directory = "/Users/minyee/src/robust_topology_engineering/results/{}/offline".format(dcn_name)
	results_filename = results_file_base_directory + "/" + results_filename_nodirectory_filename
	if not override_all_files and os.path.isfile(results_filename):
		performance_timeseries, _ = timeseries_performance_reader.import_timeseries_protobuf(results_filename, aurora_network)
		return performance_timeseries
	## Average-hop-count calibration offsets for cluster "B" topologies
	## (empirically determined constants, kept verbatim from the original).
	ahc_offset = 0
	if "B" in aurora_network.get_dcn_name():
		if toe_algo.is_static():
			ahc_offset = -0.19
		elif "bwcmptoe" in toe_algo.get_filename_param():
			ahc_offset = -0.17
		else:
			ahc_offset = -0.38
	performance_timeseries = [0] * len(traffic_matrices)
	time_index = 0
	## A static ToE yields one fixed (uniform mesh) topology shared by all windows;
	## a dynamic ToE recomputes the topology per window from the representative TMs.
	static_topology = None
	if toe_algo.is_static():
		static_topology, _ = toe_algo.topology_engineer_given_representative_TMs(None, None)
	for starting_index, ending_index in reconfig_windows:
		## Offline setting: the training window IS the evaluation window.
		training_traffic_matrices = traffic_matrices[starting_index : ending_index + 1]
		training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
		## Cluster the window's snapshots into numK representative (critical) matrices.
		crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
		representative_traffic_vectors = crit_mat.train(number_of_clusters=numK)
		representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]
		if static_topology is not None:
			logical_topology = static_topology
		else:
			logical_topology, _ = toe_algo.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
		routing_weights = direct_routing_weights
		if not direct_paths_only:
			## BUG FIX: a leftover debug print here referenced `mlu` before it was
			## ever computed, raising NameError for windows starting in [1199, 1400].
			routing_weights = te_algo.compute_path_weights(logical_topology, copy.deepcopy(representative_traffic_matrices))
		for tm in training_traffic_matrices:
			mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, tm, logical_topology, routing_weights)
			performance_timeseries[time_index] = (mlu, lu90, lu50, ave_hop_count + ahc_offset, lu_distribution)
			time_index += 1
	dump_timeseries_performance_to_protobuf(results_filename, timestamps, performance_timeseries)
	return performance_timeseries


def unpack_arguments(args):
	"""Adapter for Pool.map: unpack one evaluation's argument tuple, derive the
	results filename from the algorithms' filename parameters, and run (or load)
	the evaluation.  Relies on the module-level `aggregation_window`."""
	(aurora_network, all_paths, toe_algorithm, te_algorithm,
		numK_arg, override_all_files_arg, direct_paths_only_arg) = args
	name_parts = ["agg{}".format(aggregation_window)]
	tmp_crit_mat = critical_matrix_module.CritMat([0,], None, critical_or_average="critical")
	name_parts.append(tmp_crit_mat.get_filename_param())
	if toe_algorithm is not None:
		name_parts.append(toe_algorithm.get_filename_param())
	if te_algorithm is not None:
		if direct_paths_only_arg:
			name_parts.append("directpathonly")
		else:
			name_parts.append(te_algorithm.get_filename_param())
	results_filename_nodirectory_filename = "_".join(name_parts)
	print(results_filename_nodirectory_filename)
	return evaluate_performance_or_load_results_from_pb(results_filename_nodirectory_filename, aurora_network, all_paths, toe_algorithm, te_algorithm, numK=numK_arg, override_all_files=override_all_files_arg, direct_paths_only=direct_paths_only_arg)


if __name__ == "__main__":
	print("Evaluation suite for FB's DCN interpod traces")
	## A "-f"-prefixed command line argument forces re-running every evaluation
	## instead of loading previously exported result files.
	if len(sys.argv) > 1:
		for argument in sys.argv[1:]:
			if argument[:2] == "-f":
				OVERRIDE_ALL_FILES = True

	## decide on the cluster name here (A=database, B=web, C=hadoop)
	cluster_name = "A"
	aggregation_window = 1
	cluster_name_to_alias = {"A": "database", "B": "web", "C": "hadoop", "combined": "combined"}
	if cluster_name not in cluster_name_to_alias:
		## BUG FIX: the message used to be the truncated "Unrecognized " and the
		## script exited with status 0 despite the error.
		print("Unrecognized cluster name: {}".format(cluster_name))
		sys.exit(1)
	cluster_alias = cluster_name_to_alias[cluster_name]
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)

	reconfiguration_period = 300 # reconfiguration periodicity in seconds
	assert(reconfiguration_period >= 1)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	## Drop the first and last snapshots (presumably partial windows — TODO confirm).
	traffic_matrices = traffic_matrices[1:-1]
	timestamps = timestamps[1:-1]
	print("Reading facebook cluster... COMPLETE")
	number_of_pods = len(valid_network_ids)

	## Topology link count per pod pair and per-link capacity (in gbps)
	per_node_pair_num_links = 15
	link_capacity = 5

	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()

	# first check if we need to override all files, if not then check if this file already existed
	dcn_name = aurora_network.get_dcn_name()
	results_file_base_directory = "/Users/minyee/src/robust_topology_engineering/results/{}/offline".format(dcn_name)
	## BUG FIX: os.makedirs (rather than os.mkdir) so a missing parent directory
	## in the results path does not crash the run.
	if not os.path.isdir(results_file_base_directory):
		os.makedirs(results_file_base_directory)


	print("Loading direct hop only ideal ToE - Begin")
	perfect_toe_direct_hop_pb_filename = "/Users/minyee/src/robust_topology_engineering/results/{}/ideal_performance_direct_hop_agg{}.pb".format(dcn_name, aggregation_window)
	perfect_toe_direct_hop_performance, _ = timeseries_performance_reader.import_timeseries_protobuf(perfect_toe_direct_hop_pb_filename, aurora_network)
	print("Done...")
	print("Loading direct hop static uniform mesh - Begin")
	## Static uniform mesh direct path only
	static_direct_hop_pb_filename = "/Users/minyee/src/robust_topology_engineering/results/{}/agg{}_static_directpathonly".format(dcn_name, aggregation_window)
	static_directpathonly_performance, _  = timeseries_performance_reader.import_timeseries_protobuf(static_direct_hop_pb_filename, aurora_network)
	print("Done...")
	print("Loading Uniform Mesh + Oblivious (VLB) Routing - Begin")
	## Oblivious routing + uniform static mesh
	oblivious_performance_timeseries_pb_filename = "/Users/minyee/src/robust_topology_engineering/results/{}/agg{}_crit_static_vlb".format(dcn_name, aggregation_window)
	oblivious_performance, _  = timeseries_performance_reader.import_timeseries_protobuf(oblivious_performance_timeseries_pb_filename, aurora_network)
	print("Done...")

	## now start forming the reconfiguration windows
	num_traffic_snapshots = len(traffic_matrices)
	reconfig_windows = []
	time_index = 0
	while time_index < num_traffic_snapshots:
		starting_timestamp = time_index
		ending_timestamp = min(time_index + reconfiguration_period - 1, num_traffic_snapshots - 1)
		reconfig_windows.append((starting_timestamp, ending_timestamp, ))
		time_index += reconfiguration_period
	for starting_window_index, ending_window_index in reconfig_windows:
		print("starting : {} ending {}".format(starting_window_index, ending_window_index))
	## form the direct routing weights first
	direct_routing_weights = {}
	for i in range(number_of_pods):
		for j in range(number_of_pods):
			if i != j:
				direct_routing_weights[(i,j)] = 1

	# Declare the static ToE uniform mesh
	static_toe = topology_engineer.TopologyEngineer(aurora_network, reconfiguration_period)
	
	## Define the number of representative traffic matrices (i.e. number of points to describe the hull)
	num_ks = range(1, 10, 2)
	#num_ks = [9,]
	
	## Revised WCMP based topology engineering and traffic engineering
	bounded_wcmp_toe = bounded_wcmp_topology_engineer_strong.BoundedWCMPTopologyEngineerStrong(aurora_network, 
																								reconfiguration_period,
																								reconfiguration_period, 
																								all_paths, 
																								traffic_matrices)
	bounded_wcmp_te = bounded_wcmp_traffic_engineer_strong.BoundedWCMPTrafficEngineerStrong(aurora_network, 
																							all_paths, 
																							reconfiguration_period, 
																							reconfiguration_period)
	## Bounded WCMP uses k=1 and multihop paths (direct_paths_only=False).
	bwcmp_arguments = (aurora_network, all_paths, bounded_wcmp_toe, bounded_wcmp_te, 1, OVERRIDE_ALL_FILES, False)

	## Build the per-k evaluation argument tuples; each k contributes three
	## evaluations: (ToE + TE), (static mesh + TE), (ToE, direct paths only).
	evaluation_parameters_and_arguments_lists = [bwcmp_arguments,]
	for k in num_ks:
		direct_toe = direct_path_topology_engineer.DirectPathTopologyEngineer( aurora_network, reconfiguration_period, reconfiguration_period, numK=k)
		## Define the Topology Engineer algorithm
		## NOTE(review): the Sensitivity engineer constructed below is immediately
		## shadowed by the CriticalDemands assignment that follows — it is built
		## (paying its construction cost) but never used.  Presumably leftover
		## A/B toggling between the two variants; confirm before deleting.
		toe = robust_multi_traffic_topology_engineer_sensitivity.RobustMultiTrafficTopologyEngineerSensitivity(aurora_network, 
																												reconfiguration_period, 
																												reconfiguration_period,
																												all_paths, 
																												traffic_matrices, 
																												k, 
																												minimize_multihop=True, 
																												lower_bound_sensitivity=0.009, 
																												upper_bound_sensitivity_relaxation=1)
		toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																										reconfiguration_period, 
																										reconfiguration_period, 
																										all_paths, 
																										traffic_matrices, 
																										k)

		## Define the Traffic Engineer algorithm for the topology engineering class
		## NOTE(review): same shadowing pattern as `toe` above — first assignment unused.
		te = robust_multi_cluster_traffic_engineer_sensitivity.RobustMultiClusterTrafficEngineerSensitivity(aurora_network, 
																											all_paths, 
																											reconfiguration_period, 
																											reconfiguration_period, 
																											k, 
																											reduce_multihop=True, 
																											sensitivity_relaxation=1, 
																											minimum_sensitivity=0.009)
		te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								reconfiguration_period, 
																								reconfiguration_period, 
																								k, 
																								reduce_multihop=True)

		## Define the Traffic Engineer algorithm for the static uniform mesh class
		## NOTE(review): same shadowing pattern — first `te2` assignment unused.
		te2 = robust_multi_cluster_traffic_engineer_sensitivity.RobustMultiClusterTrafficEngineerSensitivity(aurora_network, 
																											all_paths, 
																											reconfiguration_period, 
																											reconfiguration_period, 
																											k, 
																											reduce_multihop=False, 
																											sensitivity_relaxation=1.2, 
																											minimum_sensitivity=0.009)
		te2 = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																									all_paths, 
																									reconfiguration_period, 
																									reconfiguration_period, 
																									k, 
																									reduce_multihop=False,
																									sensitivity_relaxation=1, 
																									mlu_relaxation=1)
		#te2 = robust_multi_cluster_traffic_engineer_sensitivity_automatic.RobustMultiClusterTrafficEngineerSensitivityAutomatic(aurora_network, all_paths, 10, 10, k, reduce_multihop=False, sensitivity_relaxation=1.2, minimum_sensitivity=0.0105)
		toe_te_arguments = (aurora_network, all_paths, toe, te, k, OVERRIDE_ALL_FILES, False)
		static_te_arguments = (aurora_network, all_paths, static_toe, te2, k, OVERRIDE_ALL_FILES, False)
		toe_direct_paths_only_arguments = (aurora_network, all_paths, direct_toe, te, k, OVERRIDE_ALL_FILES, True)
		evaluation_parameters_and_arguments_lists += [toe_te_arguments, static_te_arguments, toe_direct_paths_only_arguments, ]
	# One bounded-WCMP evaluation plus three evaluations per k.
	assert(len(evaluation_parameters_and_arguments_lists) == (len(num_ks) * 3 + 1))
	## Begin evaluation: fan the independent evaluations out over a process
	## pool, or run them sequentially when only one process is configured.
	if NUMBER_OF_PROCESSES > 1:
		pool = Pool(processes=NUMBER_OF_PROCESSES)
		output_results = pool.map(unpack_arguments, evaluation_parameters_and_arguments_lists)
		pool.close()
		pool.join()
	else:
		output_results = []
		for evaluation_arguments in evaluation_parameters_and_arguments_lists:
			output_results.append(unpack_arguments(evaluation_arguments))
	assert(len(output_results) == (len(num_ks) * 3 + 1))

	## All evaluations have successfully run; unpack the results.
	## Index 0 is bounded WCMP, then one (ToE, static, direct-path-only)
	## triple per k in order.
	performance_timeseries_collections = {}
	performance_timeseries_directpathonly_collections = {}
	static_topology_performance_timeseries_collections = {}
	bounded_wcmp_performance = output_results[0]
	for slot, k in enumerate(num_ks):
		base = 1 + 3 * slot
		performance_timeseries_collections[k] = output_results[base]
		static_topology_performance_timeseries_collections[k] = output_results[base + 1]
		performance_timeseries_directpathonly_collections[k] = output_results[base + 2]




	## Figure: sorted MLU curves, per-k ToE variants against the baselines.
	colors = cm.rainbow(np.linspace(0, 1, len(num_ks)))
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		plt.plot(sorted(perf[0] for perf in performance_timeseries_collections[k]), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		plt.plot(sorted(perf[0] for perf in performance_timeseries_directpathonly_collections[k]), linestyle='--', linewidth=0.7, color=c)
		legends.append("k = {} ToE (direct path only)".format(k))
		plt.plot(sorted(perf[0] for perf in static_topology_performance_timeseries_collections[k]), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	plt.plot(sorted(perf[0] for perf in static_directpathonly_performance), linestyle='--', linewidth=0.7, color=[0.3, 0.5, 0])
	legends.append("Static Direct Path Only")
	plt.plot(sorted(perf[0] for perf in oblivious_performance), linestyle=':', color=[0, 0, 0])
	legends.append("Oblivious")
	plt.plot(sorted(perf[0] for perf in bounded_wcmp_performance), linestyle='--', color=[0, 0, 0.8])
	legends.append("Bounded WCMP")
	plt.plot(sorted(perf[0] for perf in perfect_toe_direct_hop_performance), linestyle=':', color=[0.8, 0.5, 0.8])
	legends.append("Optimal - Direct Path Only")
	plt.legend(legends)
	plt.title("Fabric: {} - MLU".format(cluster_name))

	## Second MLU figure: the same schemes as above, but plotted in time order
	## (unsorted) so transient spikes can be lined up across schemes.
	colors = cm.rainbow(np.linspace(0, 1, len(num_ks)))
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		plt.plot(mlu_timeseries, linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		mlu_timeseries = [x[0] for x in performance_timeseries_directpathonly_collections[k]]
		plt.plot(mlu_timeseries, linestyle='--' , linewidth=0.7, color=c)
		legends.append("k = {} ToE (direct path only)".format(k))
		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		plt.plot(mlu_timeseries, linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	static_directmeshonly_mlu = [x[0] for x in static_directpathonly_performance]
	plt.plot(static_directmeshonly_mlu, linestyle='--', linewidth=0.7, color=[0.3,0.5,0])
	legends.append("Static Direct Path Only")
	oblivious_mlu = [x[0] for x in oblivious_performance]
	plt.plot(oblivious_mlu, linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	bounded_wcmp_mlu = [x[0] for x in bounded_wcmp_performance]
	plt.plot(bounded_wcmp_mlu, linestyle='--', color=[0,0,0.8])
	legends.append("Bounded WCMP")
	ideal_direct_hop_mlu = [x[0] for x in perfect_toe_direct_hop_performance]
	plt.plot(ideal_direct_hop_mlu, linestyle=':', color=[0.8,0.5,0.8])
	legends.append("Optimal - Direct Path Only")
	plt.legend(legends)
	plt.title("Fabric: {} - MLU".format(cluster_name))

	## Violin plot contrasting the MLU distributions of the ToE (red) and static
	## (blue) topologies for each k, interleaved.
	fig = plt.figure()
	all_results = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		all_results.append(mlu_timeseries)
		# NOTE(review): `mlu_timeseries` assigned below leaks out of this section;
		# the competitive-ratio section further down reads `len(mlu_timeseries)`
		# before reassigning it — be careful when renaming or removing this local.
		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		all_results.append(mlu_timeseries)
	sns.violinplot(data=all_results, palette=['r', 'b'] * len(num_ks))
	plt.ylabel("MLU")

	## Figure: sorted average-hop-count curves (performance tuple index 3).
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		plt.plot(sorted(perf[3] for perf in performance_timeseries_collections[k]), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		plt.plot(sorted(perf[3] for perf in static_topology_performance_timeseries_collections[k]), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	plt.plot(sorted(perf[3] for perf in oblivious_performance), linestyle=':', color=[0, 0, 0])
	legends.append("Oblivious")
	plt.plot(sorted(perf[3] for perf in bounded_wcmp_performance), linestyle='--', color=[0, 0, 0.8])
	legends.append("Bounded WCMP")
	plt.legend(legends)
	plt.ylim(ymin=1, ymax=2)
	plt.title("Fabric: {} - Average Hop Count".format(cluster_name))

	## Figure: MLU competitive ratio against the ideal direct-hop ToE (log y-axis).
	## The scaling constant is chosen so the worst oblivious ratio maps to 2.
	fig = plt.figure()
	legends = []
	ideal_mlu_timeseries = [x[0] for x in perfect_toe_direct_hop_performance]
	oblivious_mlu_timeseries = [x[0] for x in oblivious_performance]
	max_oblivious_perf_ratio = 0
	## BUG FIX: this loop previously ran over `range(len(mlu_timeseries))`, a stale
	## local leaked from the violin-plot section above — iterate the oblivious
	## series itself instead.
	for i in range(len(oblivious_mlu_timeseries)):
		max_oblivious_perf_ratio = max(oblivious_mlu_timeseries[i] / ideal_mlu_timeseries[i], max_oblivious_perf_ratio)
	perf_scaling = 2./max_oblivious_perf_ratio

	## Scaled per-snapshot ratio of a scheme's MLU to the ideal MLU; optionally
	## clamped below at 1 (the oblivious baseline is plotted unclamped).
	def competitive_ratios(performance_series, clamp_at_one=True):
		ratios = []
		for perf, ideal in zip(performance_series, ideal_mlu_timeseries):
			ratio = perf[0] * perf_scaling / ideal
			ratios.append(max(ratio, 1) if clamp_at_one else ratio)
		return ratios

	for k, c in zip(num_ks, colors):
		plt.semilogy(sorted(competitive_ratios(performance_timeseries_collections[k])), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		plt.semilogy(sorted(competitive_ratios(static_topology_performance_timeseries_collections[k])), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	plt.semilogy(sorted(competitive_ratios(oblivious_performance, clamp_at_one=False)), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	plt.semilogy(sorted(competitive_ratios(bounded_wcmp_performance)), linestyle='--', color=[0,0,0.8])
	legends.append("Bounded WCMP")
	plt.legend(legends)
	plt.title("Fabric: {} - MLU competitive ratio".format(cluster_name))


	## Figure: sorted median link-utilization curves (performance tuple index 2).
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		plt.plot(sorted(perf[2] for perf in performance_timeseries_collections[k]), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		plt.plot(sorted(perf[2] for perf in static_topology_performance_timeseries_collections[k]), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	plt.plot(sorted(perf[2] for perf in oblivious_performance), linestyle=':', color=[0, 0, 0])
	legends.append("Oblivious")
	plt.plot(sorted(perf[2] for perf in bounded_wcmp_performance), linestyle='--', color=[0, 0, 0.8])
	legends.append("Bounded WCMP")
	plt.plot(sorted(perf[2] for perf in perfect_toe_direct_hop_performance), linestyle='-', color=[0.5, 0.5, 0.5])
	legends.append("Optimal Direct Hop")
	plt.legend(legends)
	plt.title("Fabric: {} - Median LU".format(cluster_name))

	## Figure: inverse CDF of MLU, i.e. P(MLU > utilization), with log y-axis.
	fig = plt.figure()
	legends = []
	max_mlu_bin = 0.198
	for k, c in zip(num_ks, colors):
		xs, ys = extract_mlu_decay_rate([perf[0] for perf in performance_timeseries_collections[k]], mlu_min=0, mlu_max=max_mlu_bin)
		plt.semilogy(xs, ys, linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		xs, ys = extract_mlu_decay_rate([perf[0] for perf in static_topology_performance_timeseries_collections[k]], mlu_min=0, mlu_max=max_mlu_bin)
		plt.semilogy(xs, ys, linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	xs, ys = extract_mlu_decay_rate([perf[0] for perf in oblivious_performance], mlu_min=0, mlu_max=max_mlu_bin)
	plt.semilogy(xs, ys, linestyle=':', color=[0, 0, 0])
	legends.append("Oblivious")
	xs, ys = extract_mlu_decay_rate([perf[0] for perf in bounded_wcmp_performance], mlu_min=0, mlu_max=max_mlu_bin)
	plt.semilogy(xs, ys, linestyle='--', color=[0, 0, 0.8])
	legends.append("Bounded WCMP")
	xs, ys = extract_mlu_decay_rate([perf[0] for perf in perfect_toe_direct_hop_performance], mlu_min=0, mlu_max=max_mlu_bin)
	plt.semilogy(xs, ys, linestyle=':', color=[0.8, 0.5, 0.8])
	legends.append("Optimal - Direct Path Only")
	plt.legend(legends)
	plt.title("Fabric: {} - Inverse LU".format(cluster_name))
	plt.ylabel("P(MLU > Utilization)")
	plt.xlabel("Utilization")
	plt.show()
	print("Completed Evaluation... Exiting Safely")