import sys, os, copy
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *

## import for plotting uses 
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
import seaborn as sns

## Flag controlling whether existing result files are re-evaluated and overwritten instead of being loaded from the results directory
OVERRIDE_ALL_FILES = False
NUM_WORKERS = 6

def flatten_traffic_matrix(tm, num_nodes):
	"""Collapses a num_nodes x num_nodes traffic matrix into a 1D numpy
	vector of its off-diagonal entries, in row-major order (the diagonal
	self-demand entries are skipped)."""
	off_diagonal = [tm[src][dst]
					for src in range(num_nodes)
					for dst in range(num_nodes)
					if src != dst]
	return np.array(off_diagonal)

def unflatten_traffic_vector(vector, nblocks):
	"""Inverse of flatten_traffic_matrix: rebuilds the nblocks x nblocks
	matrix from the row-major off-diagonal vector, leaving the diagonal
	entries at zero."""
	tm = np.zeros((nblocks, nblocks))
	next_entry = 0
	for src in range(nblocks):
		for dst in range(nblocks):
			if src == dst:
				continue
			tm[src][dst] = vector[next_entry]
			next_entry += 1
	return tm

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Loads Facebook DCN traffic matrix snapshots from a protobuf trace.

	Only pods whose ids appear in network_ids_filename are kept. Each
	matrix entry is rescaled in place by x8 / 1E6 / aggregation_window --
	presumably bytes-per-window to Mbps; confirm units against the trace
	format, since they are not visible here.

	Returns (traffic_matrices, valid_network_ids, timestamps).
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	## Rescale every demand entry in place (including the diagonal).
	for tm in traffic_matrices:
		for i in range(len(tm)):
			for j in range(len(tm)):
				tm[i][j] = float(tm[i][j]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

def dump_timeseries_performance_to_protobuf(export_filename, timestamps, performance_timeseries_list):
	"""Serializes a performance timeseries into a PerformanceTimeSeries protobuf.

	Each element of performance_timeseries_list is the 5-tuple
	(mlu, lu90, lu50, ave_hop_count, lu_distribution) produced by the
	evaluators, where lu_distribution yields
	(link_utilization, num_links, src, dst) entries. Only the average hop
	count and the per-link utilizations are persisted; mlu/lu90/lu50 are
	not written out.
	"""
	assert(len(timestamps) == len(performance_timeseries_list))
	import proto.timeseries_performance_pb2 as performance_pb
	performance_timeseries = performance_pb.PerformanceTimeSeries()
	for ts, performances in zip(timestamps, performance_timeseries_list):
		entry = performance_timeseries.entries.add()
		lu_distribution = performances[4]
		entry.timestamp = ts
		entry.average_hop_count = performances[3]
		for lu, num_links, i, j in lu_distribution:
			lu_proto = entry.link_utilizations.add()
			lu_proto.src = i
			lu_proto.dst = j
			lu_proto.link_utilization = lu
			lu_proto.num_links = num_links
	## Binary protobuf wire format, hence "wb".
	with open(export_filename, "wb") as f:
		f.write(performance_timeseries.SerializeToString())
	return

## filenaming convention
# dcn name
# aggregation_period 
# traffic_prediction_mechanism (avg or crit), number of k's
# ToE (name of ToE, number of topologies over the eval period)
# TE (name of TE, number of routing weights over the eval period)

'''
This part is for distributing the workload evenly amongst the workers
(START)
'''
def parallel_performance_evaluation_worker(aurora_network, eval_window, logical_topology_adj_matrix, routing_weights, all_evaluation_traffic_matrices, use_direct_routing):
	"""Evaluates the inclusive [start, end] slice of the traffic snapshot
	timeseries against a fixed topology (plus routing weights, unless
	direct single-hop routing is requested) and returns one
	(mlu, lu90, lu50, ave_hop_count, lu_distribution) tuple per snapshot."""
	start_index, end_index = eval_window
	nblocks = aurora_network.get_num_blocks()
	partitioned_performance_timeseries = []
	for tm in all_evaluation_traffic_matrices[start_index : end_index + 1]:
		if use_direct_routing:
			snapshot_result = evaluate_singlehop_performance(aurora_network, nblocks, logical_topology_adj_matrix, tm)
		else:
			snapshot_result = evaluate_traffic_matrix_performance(aurora_network, tm, logical_topology_adj_matrix, routing_weights)
		partitioned_performance_timeseries.append(snapshot_result)
	return partitioned_performance_timeseries

def unpack_arguments_for_worker(arg):
	## Pool.map hands each worker a single argument tuple; splat it into
	## the real evaluation worker.
	return parallel_performance_evaluation_worker(*arg)

def distribute_workload_among_workloads(total_number_of_traffic_snapshots, number_of_workers):
	"""Partitions snapshot indices as evenly as possible across workers.

	Returns a list of (start, end) INCLUSIVE index windows, one per worker,
	covering [0, total_number_of_traffic_snapshots). The first
	(total % workers) workers receive one extra snapshot each.
	(Name kept for compatibility; "workloads" should read "workers".)
	"""
	## BUGFIX: use floor division -- under Python 3, "/" yields floats,
	## which would make the window bounds floats and break the list
	## slicing done by the workers downstream.
	base_share = total_number_of_traffic_snapshots // number_of_workers
	leftover_snapshots = total_number_of_traffic_snapshots % number_of_workers
	eval_windows = []
	current_starting_offset = 0
	for _ in range(number_of_workers):
		share = base_share
		## Spread the remainder one snapshot at a time over the first workers.
		if leftover_snapshots > 0:
			share += 1
			leftover_snapshots -= 1
		eval_windows.append((current_starting_offset, current_starting_offset + share - 1))
		current_starting_offset += share
	return eval_windows
'''
(END)
This part is for distributing the workload evenly amongst the workers
'''

## Evaluates the timeseries of traffic matrix snapshots using the perfectly ideal topology and 
## routing weights for every single snapshot. 
## use_topology is an optional argument that is None by default. But if not, then the evaluation will
## only use the topology defined by use_topology. 
## If use_topology is defaulted to None, then this function will evaluate every snapshot using the IDEAL 
## topology AND IDEAL routing weights.
def ideal_performance_evaluator(aurora, timestamps, traffic_matrices, aggregation_window, use_topology=None):
	"""Evaluates every snapshot with the ideal topology and routing weights
	(or a caller-pinned topology), caching results to protobuf.

	Returns (performance_timeseries_list, timestamps).
	"""
	assert(len(timestamps) == len(traffic_matrices))
	export_directory = "/Users/minyee/src/robust_topology_engineering/results/{}".format(aurora.get_dcn_name())
	## Cache hit: load previously computed results instead of re-solving.
	## NOTE(review): this cache path ignores use_topology -- cached IDEAL
	## results are returned even when a fixed topology was requested;
	## confirm this is intended.
	if os.path.isfile(export_directory + "/" + "ideal_performance_agg{}.pb".format(aggregation_window)):
		pb_performance_filename = export_directory + "/" + "ideal_performance_agg{}.pb".format(aggregation_window)
		ideal_performance_timeseries, ideal_performance_timestamps = timeseries_performance_reader.import_timeseries_protobuf(pb_performance_filename, aurora)
		print("ideal perf has : {} entries".format(len(ideal_performance_timeseries)))
		return ideal_performance_timeseries, ideal_performance_timestamps
	nblocks = aurora.get_num_blocks()
	num_snapshots = len(traffic_matrices)
	performance_timeseries_list = [0] * num_snapshots
	## Path selector (multihop paths enabled for the ideal ToE/TE solvers)
	path_selector = PathSelector(aurora, use_multihop=True)
	all_paths = path_selector.get_all_paths()
	ideal_topology_engineer = ideal_toe.IdealTopologyEngineer(aurora, all_paths, [])
	ideal_traffic_engineer = ideal_te.IdealTrafficEngineer(aurora, all_paths)
	topology_adj_matrix = copy.deepcopy(use_topology)
	## Progress is printed roughly every 5% of snapshots processed.
	target_increment = 0.05
	next_target = target_increment
	for tm, snapshot_index in zip(traffic_matrices, range(1, num_snapshots + 1, 1)):
		## Only re-solve the topology per snapshot when none was pinned.
		if use_topology is None:
			topology_adj_matrix = ideal_topology_engineer.topology_engineer_given_TM(tm, all_paths)
		aurora.round_fractional_topology_giant_switch(topology_adj_matrix, [], rounding_simple=False)
		routing_weights = ideal_traffic_engineer.compute_path_weights(topology_adj_matrix, tm)
		mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora, tm, topology_adj_matrix, routing_weights)
		performance_timeseries_list[snapshot_index - 1] = (mlu, lu90, lu50, ave_hop_count, lu_distribution)
		progress = float(snapshot_index) / num_snapshots
		if progress >= next_target:
			print("Progress : ... {}%".format(progress * 100))
			next_target = max(next_target + target_increment, float(snapshot_index) / num_snapshots + target_increment)
	
	if not os.path.exists(export_directory):
		os.mkdir(export_directory)
	dump_timeseries_performance_to_protobuf(export_directory + "/" + "ideal_performance_agg{}.pb".format(aggregation_window), timestamps, performance_timeseries_list)
	return performance_timeseries_list, timestamps

## Functionally the same as ideal_performance_evaluator() above, just that here we only allow direct hops
def ideal_direct_path_only_topology_engineering_evaluator(aurora, aggregation_window, timestamps, traffic_matrices):
	"""Evaluates each snapshot with a per-snapshot direct-path-only topology.

	Results are cached to / loaded from a protobuf file. Returns only the
	performance timeseries (no timestamps, unlike ideal_performance_evaluator).
	"""
	## check if the files are already available
	assert(len(timestamps) == len(traffic_matrices))
	export_directory = "/Users/minyee/src/robust_topology_engineering/results/{}".format(aurora.get_dcn_name())
	results_filename = export_directory + "/" + "ideal_performance_direct_hop_agg{}.pb".format(aggregation_window)
	if os.path.isfile(results_filename):
		performance_timeseries, _ = timeseries_performance_reader.import_timeseries_protobuf(results_filename, aurora)
		return performance_timeseries
	direct_path_toe = direct_path_topology_engineer.DirectPathTopologyEngineer( aurora, 1, 1)
	performance_timeseries = [0] * len(traffic_matrices)
	nblocks = aurora.get_num_blocks()
	## Re-engineer a direct-path topology for every snapshot, then score it
	## under single-hop routing only.
	for tm, index in zip(traffic_matrices, range(len(traffic_matrices))):
		logical_topology, _ = direct_path_toe.topology_engineer_given_representative_TMs([tm,], None)
		(mlu, lu90, lu50, ahc, lu_distribution_sorted) = evaluate_singlehop_performance(aurora, nblocks, logical_topology, tm)
		performance_timeseries[index] = (mlu, lu90, lu50, ahc, lu_distribution_sorted)
	dump_timeseries_performance_to_protobuf(results_filename, timestamps, performance_timeseries)
	return performance_timeseries

def direct_path_only_evaluate_performance_or_load_results_from_pb(aurora_network, 
																	static_toe,
																	timestamps,
																	training_traffic_matrices_index_window, 
																	evaluation_traffic_matrices, 
																	all_traffic_matrices, 
																	aggregation_window,
																	numK=1):
	"""Evaluates (or loads cached) performance of a direct-path-only topology.

	When numK > 0, a DirectPathTopologyEngineer is trained on numK
	representative (critical) traffic matrices clustered from the training
	index window; when numK <= 0, the supplied static_toe provides the
	topology instead. Returns
	(performance_timeseries, pairwise_sensitivity_risk).
	"""
	assert(len(timestamps) == len(evaluation_traffic_matrices))
	## Initially find the representative traffic matrices
	nblocks = aurora_network.get_num_blocks()
	training_traffic_matrices = all_traffic_matrices[training_traffic_matrices_index_window[0] : training_traffic_matrices_index_window[1]]
	numK = min(numK, len(training_traffic_matrices))
	training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
	crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
	## Even in the static (numK <= 0) case, one cluster is trained so the
	## sensitivity analysis below has representative matrices to work with.
	representative_traffic_vectors = crit_mat.train(number_of_clusters=max(numK, 1))
	representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]
	# first check if we need to override all files, if not then check if this file already existed
	results_file_base_directory = "/Users/minyee/src/robust_topology_engineering/results"
	dcn_name = aurora_network.get_dcn_name()
	results_filename = results_file_base_directory + "/"
	if numK > 0:
		results_filename += file_naming_tool.name_file(dcn_name, aggregation_window, crit_mat, None, None) + "k{}".format(numK)
	else:
		results_filename += file_naming_tool.name_file(dcn_name, aggregation_window, None, None, None) + "_static"
	results_filename += "_directpathonly"
	## The topology is engineered up front regardless of the cache, because
	## the sensitivity analysis needs it even on the load-from-file path.
	logical_topology_adj_matrix = None
	if numK > 0:
		direct_path_toe = direct_path_topology_engineer.DirectPathTopologyEngineer(aurora_network, len(all_traffic_matrices), len(training_traffic_matrices), numK=numK)
		logical_topology_adj_matrix, _ = direct_path_toe.topology_engineer_given_representative_TMs(representative_traffic_matrices, None)
	else:
		logical_topology_adj_matrix, _ = static_toe.topology_engineer_given_representative_TMs(None, None)

	## if the results are already there, then just import it
	if os.path.isfile(results_filename):
		performance_timeseries, _ = timeseries_performance_reader.import_timeseries_protobuf(results_filename, aurora_network)
		return performance_timeseries, analyze_sensitivity_pairwise(aurora_network, nblocks, logical_topology_adj_matrix, None, representative_traffic_matrices)
	
	print("Evaluating...")

	number_of_evaluation_traffic_snapshots = len(evaluation_traffic_matrices)
	performance_timeseries = None
	if NUM_WORKERS > 1:
		## Parallel evaluation: split the snapshot range across the pool.
		performance_timeseries = []
		worker_evaluation_windows = distribute_workload_among_workloads(number_of_evaluation_traffic_snapshots, NUM_WORKERS)
		#arguments for upacking worker arg: aurora_network, eval_window, logical_topology_adj_matrix, routing_weights, all_evaluation_traffic_matrices, use_direct_routing
		worker_arguments_list = [(aurora_network, (x[0], x[1]), logical_topology_adj_matrix, None, evaluation_traffic_matrices, True) for x in worker_evaluation_windows]
		pool = Pool(processes=NUM_WORKERS)
		worker_performance_results = pool.map(unpack_arguments_for_worker, worker_arguments_list)
		pool.close()
		pool.join()
		## combine the individual worker's results into the entire list
		for worker_id in range(NUM_WORKERS):
			performance_timeseries += worker_performance_results[worker_id]
		assert(len(performance_timeseries) == number_of_evaluation_traffic_snapshots)
	else:
		performance_timeseries = [0] * number_of_evaluation_traffic_snapshots
		for tm, tm_index in zip(evaluation_traffic_matrices, range(number_of_evaluation_traffic_snapshots)):
			mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_singlehop_performance(aurora_network, nblocks, logical_topology_adj_matrix, tm)
			## NOTE(review): hop count is hard-coded to 1 here (direct paths
			## only), while the parallel branch records the evaluator's
			## ave_hop_count -- confirm both branches are meant to agree.
			performance_timeseries[tm_index] = (mlu, lu90, lu50, 1, lu_distribution)
	dump_timeseries_performance_to_protobuf(results_filename, timestamps, performance_timeseries)
	return performance_timeseries, analyze_sensitivity_pairwise(aurora_network, nblocks, logical_topology_adj_matrix, None, representative_traffic_matrices)

## Evaluates and returns the results, but if override is not enabled and results file already exists, then just load from file
def evaluate_performance_or_load_results_from_pb(aurora_network, 
												all_paths, 
												toe, 
												te, 
												timestamps,
												training_traffic_matrices_index_window, 
												evaluation_traffic_matrices, 
												all_traffic_matrices, 
												aggregation_window,
												numK=1,
												override_all_files=False):
	"""Evaluates a (ToE, TE) pair over the evaluation snapshots, or loads
	previously computed results from a protobuf file when available.

	numK representative (critical) traffic matrices are clustered from the
	training index window, the topology and routing weights are engineered
	once from them, and every evaluation snapshot is then scored against
	that fixed topology/routing. Set override_all_files=True to force
	re-evaluation even when a results file already exists. Returns
	(performance_timeseries, pairwise_sensitivity_risk).
	"""
	assert(len(timestamps) == len(evaluation_traffic_matrices))
	## Initially find the representative traffic matrices
	training_traffic_matrices = all_traffic_matrices[training_traffic_matrices_index_window[0] : training_traffic_matrices_index_window[1]]
	numK = min(numK, len(training_traffic_matrices))
	nblocks = aurora_network.get_num_blocks()
	training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
	crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")

	representative_traffic_vectors = crit_mat.train(number_of_clusters=numK)
	representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]

	## Initialize the topology engineer and get topology
	logical_topology, _ = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)

	## Initialize the traffic engineer and get routing weights
	routing_weights = te.compute_path_weights(logical_topology, copy.deepcopy(representative_traffic_matrices))

	# first check if we need to override all files, if not then check if this file already existed
	results_file_base_directory = "/Users/minyee/src/robust_topology_engineering/results"
	dcn_name = aurora_network.get_dcn_name()
	results_filename = results_file_base_directory + "/" + file_naming_tool.name_file(dcn_name, aggregation_window, crit_mat, toe, te)
	if not override_all_files:
		if os.path.isfile(results_filename):
			performance_timeseries, _ = timeseries_performance_reader.import_timeseries_protobuf(results_filename, aurora_network)
			return performance_timeseries, analyze_sensitivity_pairwise(aurora_network, nblocks, logical_topology, routing_weights, representative_traffic_matrices)
	# evaluate performance
	## HACK: empirically chosen average-hop-count offsets, applied only to
	## DCNs whose name contains "B" -- the rationale for these constants is
	## not visible here; confirm before reusing them elsewhere.
	ahc_offset = 0
	if "B" in aurora_network.get_dcn_name():
		if toe.is_static():
			ahc_offset = -0.19
		else:
			if "bwcmptoe" in toe.get_filename_param():
				ahc_offset = -0.17
			else:
				ahc_offset = -0.38
	### here insert a parallel evaluation framework, this way guarantee won't overload Gurobi license by having multiple optimization models running together, but evaluation
	### can still be done in parallel.
	print("Evaluating...")
	number_of_evaluation_traffic_snapshots = len(evaluation_traffic_matrices)
	performance_timeseries = None
	if NUM_WORKERS > 1:
		performance_timeseries = []
		worker_evaluation_windows = distribute_workload_among_workloads(number_of_evaluation_traffic_snapshots, NUM_WORKERS)
		#arguments for upacking worker arg: aurora_network, eval_window, logical_topology_adj_matrix, routing_weights, all_evaluation_traffic_matrices, use_direct_routing
		worker_arguments_list = [(aurora_network, (x[0], x[1]), logical_topology, routing_weights, evaluation_traffic_matrices, False) for x in worker_evaluation_windows]
		pool = Pool(processes=NUM_WORKERS)
		worker_performance_results = pool.map(unpack_arguments_for_worker, worker_arguments_list)
		pool.close()
		pool.join()
		## combine the individual worker's results into the entire list,
		## applying the hop-count offset while repacking each tuple
		for worker_id in range(NUM_WORKERS):
			worker_performance_list = [(x[0], x[1], x[2], x[3] + ahc_offset, x[4]) for x in worker_performance_results[worker_id]]
			performance_timeseries += worker_performance_list
		assert(len(performance_timeseries) == number_of_evaluation_traffic_snapshots)
	else:
		performance_timeseries = [0] * number_of_evaluation_traffic_snapshots
		for tm, tm_index in zip(evaluation_traffic_matrices, range(number_of_evaluation_traffic_snapshots)):
			mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, tm, logical_topology, routing_weights)
			performance_timeseries[tm_index] = (mlu, lu90, lu50, ave_hop_count + ahc_offset, lu_distribution)
	dump_timeseries_performance_to_protobuf(results_filename, timestamps, performance_timeseries)
	return performance_timeseries, analyze_sensitivity_pairwise(aurora_network, nblocks, logical_topology, routing_weights, representative_traffic_matrices)


def ecmp_evaluation_worker(args):
	"""Pool worker for fat-tree ECMP evaluation: scores the inclusive
	[start, end] window of traffic snapshots at the given tapering factor
	and returns the per-snapshot performance tuples."""
	aurora_network, tapering_factor, eval_window, evaluation_traffic_matrices = args
	start_index, end_index = eval_window
	partitioned_performance_timeseries = []
	for tm in evaluation_traffic_matrices[start_index : end_index + 1]:
		partitioned_performance_timeseries.append(evaluate_fattree_ecmp_performance(aurora_network, tm, tapering=tapering_factor))
	return partitioned_performance_timeseries

def ecmp_evaluation(aurora_network, tapering, all_traffic_matrices, timestamps=None):
	"""Evaluates fat-tree ECMP performance over every traffic snapshot and
	persists the resulting timeseries to a protobuf results file.

	tapering is the inverse oversubscription factor, in (0, 1].
	timestamps should parallel all_traffic_matrices; if omitted, falls back
	to the module-global ``timestamps`` for backward compatibility (the
	original code read that global implicitly). Returns nothing; if the
	results file already exists, the evaluation is skipped entirely.
	"""
	if timestamps is None:
		## BUGFIX(legacy): the original body referenced the module-level
		## ``timestamps`` set up by __main__, which fails when this function
		## is used standalone. Keep the global as a fallback so existing
		## three-argument callers behave exactly as before.
		timestamps = globals().get("timestamps")
	print("ECMP Fattree evaluating tapering : {}".format(tapering))
	num_tm_snapshots = len(all_traffic_matrices)
	results_file_base_directory = "/Users/minyee/src/robust_topology_engineering/results"
	dcn_name = aurora_network.get_dcn_name()
	assert(tapering > 0 and tapering <= 1)
	## Encode the tapering factor in the filename, e.g. 0.250 -> "0p250".
	tapering_string = "{:.3f}".format(tapering)
	tapering_string = tapering_string.replace('.', 'p')
	results_filename = results_file_base_directory + "/" + dcn_name + "/" + "fattree_ecmp_taper{}".format(tapering_string)
	if os.path.isfile(results_filename):
		## Already evaluated in a previous run.
		return
	## (Fixed a dead misspelled assignment "performance_timseries" here.)
	performance_timeseries = None
	if NUM_WORKERS > 1:
		performance_timeseries = []
		worker_evaluation_windows = distribute_workload_among_workloads(num_tm_snapshots, NUM_WORKERS)
		## worker args: aurora_network, tapering, eval_window, all traffic matrices
		worker_arguments_list = [(aurora_network, tapering, (x[0], x[1]), all_traffic_matrices) for x in worker_evaluation_windows]
		pool = Pool(processes=NUM_WORKERS)
		worker_performance_results = pool.map(ecmp_evaluation_worker, worker_arguments_list)
		pool.close()
		pool.join()
		## Stitch the per-worker partitions back into one ordered list.
		for worker_result in worker_performance_results:
			performance_timeseries += list(worker_result)
		assert(len(performance_timeseries) == num_tm_snapshots)
	else:
		performance_timeseries = [0] * num_tm_snapshots
		for tm_index, tm in enumerate(all_traffic_matrices):
			mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_fattree_ecmp_performance(aurora_network, tm, tapering=tapering)
			performance_timeseries[tm_index] = (mlu, lu90, lu50, ave_hop_count, lu_distribution)
	dump_timeseries_performance_to_protobuf(results_filename, timestamps, performance_timeseries)
	return 




if __name__ == "__main__":
	print("Evaluation suite for FB's DCN interpod traces")
	if len(sys.argv) > 1:
		for argument in sys.argv[1:]:
			if argument[:2] == "-f":
				OVERRIDE_ALL_FILES = True

	## simply change the clusters here
	cluster_name = "combinedclique"
	cluster_alias = "database"
	aggregation_window = 1
	## simply change the clusters here
	if cluster_name == "A":
		cluster_alias = "database"
	elif cluster_name == "B":
		cluster_alias = "web"
	elif cluster_name == "C":
		cluster_alias = "hadoop"
	elif cluster_name == "combined":
		cluster_alias = "combined"
	elif cluster_name == "combinedclique":
		cluster_alias = "combinedclique"
	else:
		print("Unrecognized ")
		sys.exit()
	
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	print("Reading facebook cluster... COMPLETE")

	## Define network topology parameters and initialize
	## Topology link number 
	per_node_pair_num_links = 15
	link_capacity = 5
	number_of_pods = len(valid_network_ids)

	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()
	path_selector = PathSelector(aurora_network, use_multihop=False)
	all_direct_paths = path_selector.get_all_paths()
	#ideal_performance_evaluator(aurora_network, timestamps, traffic_matrices, aggregation_window)


	## start by loading the ideal performance
	#print("Start reading in ideal performance, or evaluating ideal performance")
	#ideal_performance_timeseries_protobuf_filename = "./results/fb_cluster_{}/ideal_performance_agg{}.pb".format(cluster_name, aggregation_window)
	#ideal_performance_timeseries, ideal_performance_timestamps = ideal_performance_evaluator(aurora_network, timestamps, traffic_matrices, aggregation_window)
	#ideal_performance_timeseries = ideal_performance_timeseries[1:-1]
	#ideal_performance_timestamps = ideal_performance_timestamps[1:-1]
	#print("Completed reading in ideal performance, or evaluating ideal performance")

	

	## remove the first and last snapshots since they may be corrupted by multiple seconds
	traffic_matrices = traffic_matrices[1:-1]
	timestamps = timestamps[1:-1]
	
	print("Fattree ECMP eval...")
	fattree_oversubscriptions = [1, 2, 4, 5, 10]
	for oversubscription in fattree_oversubscriptions:
		tapering = 1./oversubscription
		ecmp_evaluation(aurora_network, tapering, traffic_matrices)
	print("Fattree ECMP eval finished.")


	print("Evaluating direct hop only ideal ToE - Begin")
	perfect_toe_direct_hop_performance = ideal_direct_path_only_topology_engineering_evaluator(aurora_network, aggregation_window, timestamps, traffic_matrices)
	ideal_performance_timeseries = perfect_toe_direct_hop_performance
	ideal_performance_timestamps = timestamps
	print("Evaluating direct hop only ideal ToE - Done")
	
	starting_point = int((float(12 - 7) / 24) * len(traffic_matrices))
	training_length = 2 * 60 * 60
	training_window = (starting_point, starting_point + training_length)

	## Define the number of representative traffic matrices (i.e. number of points to describe the hull)
	num_ks = range(1, 10, 2)
	

	## Oblivious routings
	static_toe = topology_engineer.TopologyEngineer(aurora_network, 10)
	obl_routing = vlb_traffic_engineer.VLBTrafficEngineer(aurora_network, all_paths)
	oblivious_performance, oblivious_risk = evaluate_performance_or_load_results_from_pb(aurora_network, 
																						all_paths, 
																						static_toe, 
																						obl_routing, 
																						timestamps, 
																						training_window, 
																						traffic_matrices, 
																						traffic_matrices, 
																						aggregation_window, 
																						numK=1,
																						override_all_files=False)
	## Static uniform mesh direct path only
	static_directpathonly_performance, static_directpathonly_risk = direct_path_only_evaluate_performance_or_load_results_from_pb(aurora_network, 
																																	static_toe,
																																	timestamps,
																																	training_window, 
																																	traffic_matrices, 
																																	traffic_matrices, 
																																	aggregation_window,
																																	numK=0)
	
	## Revised WCMP based topology engineering and traffic engineering
	bounded_wcmp_toe = bounded_wcmp_topology_engineer_strong.BoundedWCMPTopologyEngineerStrong(aurora_network, 
																								1,
																								training_length, 
																								all_paths, 
																								traffic_matrices)
	bounded_wcmp_te = bounded_wcmp_traffic_engineer_strong.BoundedWCMPTrafficEngineerStrong(aurora_network, all_paths, 1, training_length)
	bounded_wcmp_performance, bounded_wcmp_risk = evaluate_performance_or_load_results_from_pb(aurora_network, 
																							all_paths, 
																							bounded_wcmp_toe, 
																							bounded_wcmp_te, 
																							timestamps, 
																							training_window, 
																							traffic_matrices, 
																							traffic_matrices, 
																							aggregation_window, 
																							numK=1,
																							override_all_files=OVERRIDE_ALL_FILES)


	performance_timeseries_collections = {}
	performance_timeseries_directpathonly_collections = {}
	static_topology_performance_timeseries_collections = {}
	risk_matrices_toe = {}
	risk_matrices_static = {}
	risk_matrices_directpathonly_toe = {}

	for k in num_ks:
		print("Evaluating k = {}...".format(k))
		## Define the Topology Engineer algorithm
		toe = robust_multi_traffic_topology_engineer_v3.RobustMultiTrafficTopologyEngineerImplementationV3(aurora_network, 
																											1, 
																											training_length,
																											all_paths, 
																											traffic_matrices, 
																											k, 
																											minimize_multihop=True)
		toe = robust_multi_traffic_topology_engineer_sensitivity.RobustMultiTrafficTopologyEngineerSensitivity(aurora_network, 
																												1, 
																												training_length,
																												all_paths, 
																												traffic_matrices, 
																												k, 
																												minimize_multihop=True, 
																												lower_bound_sensitivity=0.009, 
																												upper_bound_sensitivity_relaxation=1)
		toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																										1, 
																										training_length, 
																										all_paths, 
																										traffic_matrices, 
																										k, 
																										mlu_relaxation=1.02)

		## Define the Traffic Engineer algorithm for the topology engineering class
		te = robust_multi_cluster_traffic_engineer.RobustMultiClusterTrafficEngineer(aurora_network, 
																					all_paths, 
																					1, 
																					training_length, 
																					k, 
																					reduce_multihop=True)
		te = robust_multi_cluster_traffic_engineer_sensitivity.RobustMultiClusterTrafficEngineerSensitivity(aurora_network, 
																											all_paths, 
																											1, 
																											training_length, 
																											k, 
																											reduce_multihop=True, 
																											sensitivity_relaxation=1, 
																											minimum_sensitivity=0.009)
		te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								1, 
																								training_length, 
																								k, 
																								reduce_multihop=True,
																								mlu_relaxation=1.02)
		#te = robust_multi_cluster_traffic_engineer_sensitivity_automatic.RobustMultiClusterTrafficEngineerSensitivityAutomatic(aurora_network, all_paths, 10, 10, k, reduce_multihop=True, sensitivity_relaxation=1, minimum_sensitivity=0.009)

		## Define the Traffic Engineer algorithm for the static uniform mesh class
		te2 = robust_multi_cluster_traffic_engineer.RobustMultiClusterTrafficEngineer(aurora_network, 
																					all_paths, 
																					1, 
																					training_length, 
																					k, 
																					reduce_multihop=False)
		te2 = robust_multi_cluster_traffic_engineer_sensitivity.RobustMultiClusterTrafficEngineerSensitivity(aurora_network, 
																											all_paths, 
																											1, 
																											training_length, 
																											k, 
																											reduce_multihop=False, 
																											sensitivity_relaxation=1.2, 
																											minimum_sensitivity=0.009)
		te2 = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																									all_paths, 
																									1, 
																									training_length, 
																									k, 
																									reduce_multihop=False,
																									sensitivity_relaxation=1.1, 
																									mlu_relaxation=1.15)
		#te2 = robust_multi_cluster_traffic_engineer_sensitivity_automatic.RobustMultiClusterTrafficEngineerSensitivityAutomatic(aurora_network, all_paths, 10, 10, k, reduce_multihop=False, sensitivity_relaxation=1.2, minimum_sensitivity=0.0105)

		## Start either evaluations of load from protobuf
		## Evaluate the ToE topology with traffic engineer `te` (or load cached
		## protobuf results when available and OVERRIDE_ALL_FILES is False).
		## Returns a per-timestep performance timeseries and a risk matrix.
		## NOTE(review): traffic_matrices is passed twice -- presumably once as
		## the training trace and once as the evaluation trace; confirm against
		## the callee's signature.
		timeseries, risk_matrix_toe = evaluate_performance_or_load_results_from_pb(aurora_network, 
																				all_paths, 
																				toe, 
																				te, 
																				timestamps, 
																				training_window, 
																				traffic_matrices, 
																				traffic_matrices, 
																				aggregation_window, 
																				numK=k,
																				override_all_files=OVERRIDE_ALL_FILES)
		## Same evaluation for the static uniform mesh topology with te2.
		static_timeseries, risk_matrix_static = evaluate_performance_or_load_results_from_pb(aurora_network, 
																							all_paths, 
																							static_toe, 
																							te2, 
																							timestamps, 
																							training_window, 
																							traffic_matrices, 
																							traffic_matrices, 
																							aggregation_window, 
																							numK=k,
																							override_all_files=OVERRIDE_ALL_FILES)
		## Evaluate the direct path only performance
		## NOTE(review): unlike the two calls above, this one does not take an
		## override_all_files flag -- verify its caching behavior matches.
		direct_path_only_performance_timeseries, direct_path_only_performance_risk = direct_path_only_evaluate_performance_or_load_results_from_pb(aurora_network, 
																																					static_toe,
																																					timestamps,
																																					training_window, 
																																					traffic_matrices, 
																																					traffic_matrices, 
																																					aggregation_window,
																																					numK=k)

		## Stash per-k results for the plotting sections below, keyed by k.
		performance_timeseries_collections[k] = timeseries
		static_topology_performance_timeseries_collections[k] = static_timeseries
		performance_timeseries_directpathonly_collections[k] = direct_path_only_performance_timeseries
		risk_matrices_toe[k] = risk_matrix_toe
		risk_matrices_static[k] = risk_matrix_static
		risk_matrices_directpathonly_toe[k] = direct_path_only_performance_risk


	## Figure: sorted MLU curves (ascending, CDF-style) for every scheme.
	## x[0] is the MLU entry of each performance tuple (cf. the "MLU" title).
	## One rainbow color per k; line style distinguishes ToE / direct-path /
	## static within a color.
	colors = cm.rainbow(np.linspace(0, 1, len(num_ks)))
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		plt.plot(sorted(mlu_timeseries), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		mlu_timeseries = [x[0] for x in performance_timeseries_directpathonly_collections[k]]
		plt.plot(sorted(mlu_timeseries), linestyle='--' , linewidth=0.7, color=c)
		legends.append("k = {} ToE (direct path only)".format(k))
		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		plt.plot(sorted(mlu_timeseries), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	## Baselines: static direct-mesh, oblivious, bounded WCMP, optimal, and
	## optimal restricted to direct paths. Legend entries are appended in the
	## same order as the plt.plot calls, so this ordering must be kept in sync.
	static_directmeshonly_mlu = [x[0] for x in static_directpathonly_performance]
	plt.plot(sorted(static_directmeshonly_mlu), linestyle='--', linewidth=0.7, color=[0.3,0.5,0])
	legends.append("Static Direct Path Only")
	oblivious_mlu = [x[0] for x in oblivious_performance]
	plt.plot(sorted(oblivious_mlu), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	bounded_wcmp_mlu = [x[0] for x in bounded_wcmp_performance]
	plt.plot(sorted(bounded_wcmp_mlu), linestyle='--', color=[0,0,0.8])
	legends.append("Bounded WCMP")
	ideal_mlu = [x[0] for x in ideal_performance_timeseries]
	plt.plot(sorted(ideal_mlu), linestyle='-', color=[0.5,0.5,0.5])
	legends.append("Optimal")
	ideal_direct_hop_mlu = [x[0] for x in perfect_toe_direct_hop_performance]
	plt.plot(sorted(ideal_direct_hop_mlu), linestyle=':', color=[0.8,0.5,0.8])
	legends.append("Optimal - Direct Path Only")
	plt.legend(legends)
	plt.title("Fabric: {} - MLU".format(cluster_name))


	## Figure: violin plot of the MLU distributions, interleaving ToE (red)
	## and static (blue) series for each k in num_ks order.
	fig = plt.figure()
	all_results = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		all_results.append(mlu_timeseries)
		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		all_results.append(mlu_timeseries)
	sns.violinplot(data=all_results, palette=['r', 'b'] * len(num_ks))
	plt.ylabel("MLU")

	## Figure: sorted average-hop-count (AHC) curves; x[3] is the AHC entry of
	## each performance tuple (cf. the "Average Hop Count" title below).
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		ahc_timeseries = [x[3] for x in performance_timeseries_collections[k]]
		plt.plot(sorted(ahc_timeseries), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		ahc_timeseries = [x[3] for x in static_topology_performance_timeseries_collections[k]]
		plt.plot(sorted(ahc_timeseries), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	oblivious_ahc = [x[3] for x in oblivious_performance]
	plt.plot(sorted(oblivious_ahc), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	bounded_wcmp_ahc = [x[3] for x in bounded_wcmp_performance]
	plt.plot(sorted(bounded_wcmp_ahc), linestyle='--', color=[0,0,0.8])
	legends.append("Bounded WCMP")
	ideal_ahc = [x[3] for x in ideal_performance_timeseries]
	plt.plot(sorted(ideal_ahc), linestyle='-', color=[0.5,0.5,0.5])
	legends.append("Optimal")
	plt.legend(legends)
	## Fix: plt.ylim(ymin=1, ymax=2) -- the ymin/ymax keywords were deprecated
	## in matplotlib 3.0 and removed in 3.5 (TypeError on current releases).
	## Positional limits are equivalent and work on every matplotlib version.
	plt.ylim(1, 2)
	plt.title("Fabric: {} - Average Hop Count".format(cluster_name))

	## Figure: sensitivity (risk) distributions per scheme. The risk_matrices_*
	## values are plotted directly (not sorted), so presumably they are already
	## ordered distributions -- TODO confirm against the evaluator's output.
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		plt.plot(risk_matrices_toe[k], linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		plt.plot(risk_matrices_directpathonly_toe[k], linestyle='--', linewidth=0.7, color=c)
		legends.append("k = {} ToE Direct Path Only".format(k))
		plt.plot(risk_matrices_static[k], linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	## Baseline risk curves (no "Optimal" series in this figure).
	plt.plot(static_directpathonly_risk, linestyle='--', linewidth=0.7, color=[0.3,0.5,0])
	legends.append("Uniform Mesh Direct Path Only")
	plt.plot(oblivious_risk, linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	plt.plot(bounded_wcmp_risk, linestyle='--', color=[0,0,0.8])
	legends.append("Bounded WCMP")
	plt.legend(legends)
	plt.title("Fabric: {} - Sensitivity distribution".format(cluster_name))

	## Figure: sorted MLU "competitive ratio" against the optimal scheme, on a
	## log y-axis. Ratios are rescaled by perf_scaling so the worst oblivious
	## ratio maps to 2.0, then clamped at 1 from below for the ToE / static /
	## bounded-WCMP series. NOTE(review): the oblivious series is left
	## unclamped -- confirm that asymmetry is intended.
	fig = plt.figure()
	legends = []
	ideal_mlu_timeseries = [x[0] for x in ideal_performance_timeseries]
	oblivious_mlu_timeseries = [x[0] for x in oblivious_performance]
	max_oblivious_perf_ratio = 0
	## Fix: this loop was originally bounded by len(mlu_timeseries), a stale
	## variable left over from an earlier figure's loop, while indexing the
	## oblivious series -- an IndexError (or silent truncation) whenever the
	## lengths differ. Bound it by the series it actually indexes.
	for i in range(len(oblivious_mlu_timeseries)):
		max_oblivious_perf_ratio = max(oblivious_mlu_timeseries[i] / ideal_mlu_timeseries[i], max_oblivious_perf_ratio)
	perf_scaling = 2./max_oblivious_perf_ratio
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		competitive_ratio_timeseries = [0] * len(mlu_timeseries)
		for i in range(len(mlu_timeseries)):
			competitive_ratio_timeseries[i] = max(mlu_timeseries[i] * perf_scaling / ideal_mlu_timeseries[i], 1)
		plt.semilogy(sorted(competitive_ratio_timeseries), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))

		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		competitive_ratio_timeseries = [0] * len(mlu_timeseries)
		for i in range(len(mlu_timeseries)):
			competitive_ratio_timeseries[i] = max(mlu_timeseries[i] * perf_scaling / ideal_mlu_timeseries[i], 1)
		plt.semilogy(sorted(competitive_ratio_timeseries), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	mlu_timeseries = [x[0] for x in oblivious_performance]
	competitive_ratio_timeseries = [0] * len(mlu_timeseries)
	for i in range(len(mlu_timeseries)):
		competitive_ratio_timeseries[i] = mlu_timeseries[i] * perf_scaling / ideal_mlu_timeseries[i]
	plt.semilogy(sorted(competitive_ratio_timeseries), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	mlu_timeseries = [x[0] for x in bounded_wcmp_performance]
	competitive_ratio_timeseries = [0] * len(mlu_timeseries)
	for i in range(len(mlu_timeseries)):
		competitive_ratio_timeseries[i] = max(mlu_timeseries[i] * perf_scaling / ideal_mlu_timeseries[i], 1)
	plt.semilogy(sorted(competitive_ratio_timeseries), linestyle='--', color=[0,0,0.8])
	legends.append("Bounded WCMP")
	plt.legend(legends)
	plt.title("Fabric: {} - MLU competitive ratio".format(cluster_name))


	## Figure: sorted median link utilization curves; x[2] is the median-LU
	## entry of each performance tuple (cf. the "Median LU" title below).
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[2] for x in performance_timeseries_collections[k]]
		plt.plot(sorted(mlu_timeseries), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		mlu_timeseries = [x[2] for x in static_topology_performance_timeseries_collections[k]]
		plt.plot(sorted(mlu_timeseries), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	oblivious_mlu = [x[2] for x in oblivious_performance]
	plt.plot(sorted(oblivious_mlu), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	bounded_wcmp_mlu = [x[2] for x in bounded_wcmp_performance]
	plt.plot(sorted(bounded_wcmp_mlu), linestyle='--', color=[0,0,0.8])
	legends.append("Bounded WCMP")
	ideal_mlu = [x[2] for x in ideal_performance_timeseries]
	plt.plot(sorted(ideal_mlu), linestyle='-', color=[0.5,0.5,0.5])
	legends.append("Optimal")
	plt.legend(legends)
	plt.title("Fabric: {} - Median LU".format(cluster_name))

	## Figure: complementary-CDF-style decay curves P(MLU > utilization) on a
	## log y-axis, computed by extract_mlu_decay_rate over [0, max_mlu_bin].
	## NOTE(review): max_mlu_bin = 0.198 is a hard-coded bin ceiling tuned for
	## this trace -- confirm it covers the observed MLU range for other fabrics.
	fig = plt.figure()
	legends = []
	max_mlu_bin = 0.198
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		x, y = extract_mlu_decay_rate(mlu_timeseries, mlu_min=0, mlu_max=max_mlu_bin)
		plt.semilogy(x, y, linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		x, y = extract_mlu_decay_rate(mlu_timeseries, mlu_min=0, mlu_max=max_mlu_bin)
		plt.semilogy(x, y, linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	oblivious_mlu = [x[0] for x in oblivious_performance]
	x, y = extract_mlu_decay_rate(oblivious_mlu, mlu_min=0, mlu_max=max_mlu_bin)
	plt.semilogy(x, y, linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	bounded_wcmp_mlu = [x[0] for x in bounded_wcmp_performance]
	x, y = extract_mlu_decay_rate(bounded_wcmp_mlu, mlu_min=0, mlu_max=max_mlu_bin)
	plt.semilogy(x, y, linestyle='--', color=[0,0,0.8])
	legends.append("Bounded WCMP")
	ideal_mlu = [x[0] for x in ideal_performance_timeseries]
	x, y = extract_mlu_decay_rate(ideal_mlu, mlu_min=0, mlu_max=max_mlu_bin)
	plt.semilogy(x, y, linestyle='-', color=[0.5,0.5,0.5])
	legends.append("Optimal")
	ideal_direct_hop_mlu = [x[0] for x in perfect_toe_direct_hop_performance]
	x, y = extract_mlu_decay_rate(ideal_direct_hop_mlu, mlu_min=0, mlu_max=max_mlu_bin)
	plt.semilogy(x, y, linestyle=':', color=[0.8,0.5,0.8])
	legends.append("Optimal - Direct Path Only")
	plt.legend(legends)
	plt.title("Fabric: {} - Inverse LU".format(cluster_name))
	plt.ylabel("P(MLU > Utilization)")
	plt.xlabel("Utilization")
	## plt.show() blocks until all figure windows are closed.
	plt.show()
	print("Completed Evaluation... Exiting Safely")