'''
Handles the ToE and TE simulation for Facebook's DCN traces with offline training, meaning that there is
no traffic uncertainty involved in the evaluations: the training window is exactly the same as the
evaluation window.
'''
import sys, os, copy
import numpy as np
sys.path.append("..")
sys.path.append("../..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *

## import for plotting uses 
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
import seaborn as sns
sns.set(style="whitegrid")

## Imports the multiprocessing modules
from multiprocessing import Pool          
from itertools import product, combinations
## Number of worker processes used for the parallel MLU evaluation
NUMBER_OF_PROCESSES = 6

#### Helper routines for downsampling timeseries and reshaping traffic matrices
## Given a timeseries, downsamples it by taking every few samples
def downsample(timeseries, nsamples):
	"""Downsample ``timeseries`` to ``nsamples`` evenly spaced samples.

	Always keeps the first and last elements and picks (near) evenly spaced
	entries in between by nearest-index selection (no interpolation).

	:param timeseries: indexable sequence to downsample.
	:param nsamples: number of output samples; must satisfy
		1 <= nsamples <= len(timeseries).
	:return: list of ``nsamples`` selected samples.
	"""
	orig_len = len(timeseries)
	assert(orig_len >= nsamples)
	assert(nsamples >= 1)
	if nsamples == 1:
		# Single-sample request: the stride formula below would divide by zero.
		return [timeseries[0]]
	# Map output index i to input index floor(i * (orig_len-1)/(nsamples-1)) so
	# that i == 0 picks the first element and i == nsamples-1 picks the last.
	factor = float(orig_len - 1) / float(nsamples - 1)
	return [timeseries[int(i * factor)] for i in range(nsamples)]

def std_deviation_matrix(nblocks, traffic_matrices):
	"""Compute the per-(src, dst) standard deviation of demand over time.

	Entry (i, j) is the population standard deviation (ddof=0, numpy's
	default) of the i -> j demand across all snapshots. The diagonal is
	forced to zero since self-traffic is never evaluated.

	:param nblocks: number of blocks/pods; only the top-left
		nblocks x nblocks region of each snapshot is considered.
	:param traffic_matrices: non-empty sequence of traffic snapshots.
	:return: (nblocks, nblocks) numpy array of standard deviations.
	"""
	assert(len(traffic_matrices) > 0)
	# Stack the snapshots into (num_tms, nblocks, nblocks) and reduce over the
	# time axis in a single vectorized call instead of per-pair Python loops.
	stack = np.asarray([np.asarray(tm)[:nblocks, :nblocks] for tm in traffic_matrices], dtype=float)
	std_deviation_mat = stack.std(axis=0)
	np.fill_diagonal(std_deviation_mat, 0.0)
	return std_deviation_mat


def flatten_traffic_matrix(tm, num_nodes):
	"""Flatten the off-diagonal entries of ``tm`` into a 1-D numpy vector.

	Entries are emitted in row-major order with the diagonal skipped, which
	is exactly the ordering ``unflatten_traffic_vector`` expects back.
	"""
	return np.array([tm[src][dst]
					 for src in range(num_nodes)
					 for dst in range(num_nodes)
					 if src != dst])

def unflatten_traffic_vector(vector, nblocks):
	"""Inverse of ``flatten_traffic_matrix``: rebuild an nblocks x nblocks
	matrix from a row-major off-diagonal vector. The diagonal stays zero.
	"""
	tm = np.zeros((nblocks, nblocks))
	entries = iter(vector)
	for src in range(nblocks):
		for dst in range(nblocks):
			if src == dst:
				continue
			tm[src][dst] = next(entries)
	return tm

def multiply_matrix(matrix, multiple):
	"""Return a scaled copy of ``matrix``.

	Off-diagonal entries are multiplied by ``multiple``; the diagonal of the
	returned matrix is always zero, regardless of the input's diagonal.
	"""
	nblocks = len(matrix)
	scaled = np.zeros((nblocks, nblocks))
	for src in range(nblocks):
		for dst in range(nblocks):
			if src == dst:
				continue
			scaled[src][dst] = matrix[src][dst] * multiple
	return scaled

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Load the Facebook DCN traffic snapshots restricted to the valid pods.

	Each demand entry is converted in-place from bytes accumulated over the
	aggregation window into a rate in Mbps (x8 bits, /1E6, / window seconds).

	:return: (traffic_matrices, valid_network_ids, timestamps)
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	for tm in traffic_matrices:
		size = len(tm)
		for src in range(size):
			for dst in range(size):
				# Bytes-per-window -> Mbps conversion, done in-place.
				tm[src][dst] = float(tm[src][dst]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

## Parses a cached burst-pair combinations file
def __parse_burstpair_file(filename):
	"""Parse a cached burst-pair combination file.

	Expected layout:
	  line 1:  "nblocks = <int>"
	  line 2:  "burst_entries = <int>, <int>, ..."
	  line 3+: comma-separated "src-dst" pairs, one combination per line.

	:return: (combinations, nblocks, burst_entry_sizes) on success, or
		None when ``filename`` does not exist.
	"""
	if not os.path.isfile(filename):
		return None
	nblocks = 0
	all_num_split_pairs_set = set()
	blockpair_bursts_combinations = []
	linecount = 1
	with open(filename, "r") as f:
		for line in f:
			# Lines read from a file keep their trailing newline, so test the
			# stripped line (the previous ``len(line) == 0`` check could never
			# fire, and a blank header line would crash the int() parsing).
			if len(line.strip()) == 0:
				continue
			if linecount == 1:
				nblocks = int(line.split('=')[1])
				linecount += 1
			elif linecount == 2:
				for num_burstpairs_str in line.split('=')[1].split(','):
					num_str = num_burstpairs_str.strip()
					if len(num_str) > 0:
						all_num_split_pairs_set.add(int(num_str))
				linecount += 1
			else:
				combination = []
				for pair_string in line.split(','):
					pair_string = pair_string.strip()
					if len(pair_string) > 0:
						src_dst_pair = pair_string.split('-')
						combination.append((int(src_dst_pair[0]), int(src_dst_pair[1])))
				if len(combination) > 0:
					blockpair_bursts_combinations.append(combination)
	return blockpair_bursts_combinations, nblocks, all_num_split_pairs_set

## Responsible for generating (and caching on disk) the burst-pair combinations
def _generate_burst_pair(dcn_name, nblocks, max_burst_entries):
	"""Return all cached burst-pair combinations of size <= max_burst_entries.

	Loads previously generated combinations from "cluster<dcn_name>_cache.txt"
	if present; any combination sizes from 1..max_burst_entries that are not
	yet covered by the cache are enumerated (all ordered block pairs, choose
	n) and appended to the cache file.

	NOTE(review): freshly enumerated combinations are only written to the
	cache file, not added to the returned collection -- so on a cold cache
	this returns an empty list and the new combinations are only picked up
	on the next run. Confirm whether that is intended.

	:param dcn_name: fabric name, used to derive the cache filename.
	:param nblocks: number of blocks/pods in the fabric.
	:param max_burst_entries: largest combination size to consider; must be
		in [1, nblocks * (nblocks - 1)].
	:return: list of combinations, each a list of (src, dst) block pairs.
	"""
	assert(max_burst_entries > 0 and max_burst_entries <= nblocks * (nblocks - 1))
	filename = "cluster{}_cache.txt".format(dcn_name)
	burstpair_combinations_soln = __parse_burstpair_file(filename) 
	burstpairs_combination_collection = [] 
	burstpairs_number_set_from_file = set()
	if burstpair_combinations_soln is not None:
		## unpack solutions
		burstpairs_combination_collection, nb, burstpairs_number_set_from_file = burstpair_combinations_soln
		# The cache must describe the same fabric size we are simulating.
		assert(nb == nblocks)
	## Based on the loaded file burst num pairs set, we see which num_bursts are missed
	num_burstpairs = set()
	for i in range(1, max_burst_entries + 1, 1):
		if i not in burstpairs_number_set_from_file:
			num_burstpairs.add(i)
	if len(num_burstpairs) > 0:
		# Enumerate every ordered (src, dst) block pair once.
		block_pairs = []
		for i in range(nblocks - 1):
			for j in range(i + 1, nblocks, 1):
				block_pairs.append((i, j))
				block_pairs.append((j, i))
		str_builder = "nblocks = {}\n".format(nblocks)
		blocks_entries_computed = sorted(list(num_burstpairs.union(burstpairs_number_set_from_file)))
		str_builder += "burst_entries = "
		for i in blocks_entries_computed:
			str_builder += "{}, ".format(i)
		str_builder += "\n"
		## First, rewrite what combinations we had back into the file
		for burstpairs_combination in burstpairs_combination_collection:
			line = ""
			for src, dst in burstpairs_combination:
				line += "{}-{},".format(src, dst)
			str_builder += (line + "\n")
		# Then append the newly enumerated combinations for the missing sizes.
		# NOTE(review): combinations() grows combinatorially in nburst_entries;
		# this is only tractable for small max_burst_entries.
		for nburst_entries in num_burstpairs:
			burst_combinations = combinations(block_pairs, nburst_entries)
			for combination in burst_combinations:
				line = ""
				for src, dst in combination:
					line += "{}-{},".format(src, dst)
				str_builder += (line + "\n")
		with open("cluster{}_cache.txt".format(dcn_name), "w+") as f:
			f.write(str_builder)
	# Only return the cached combinations small enough for this run.
	final_burstpairs_combination_collection = []
	for combination in burstpairs_combination_collection:
		if len(combination) <= max_burst_entries:
			final_burstpairs_combination_collection.append(combination)
	return final_burstpairs_combination_collection


def stateless_evaluation(nblocks, base_tm, capacity_matrix, routing_weights, is_direct=False):
	"""Compute the per-link utilization map for ``base_tm``.

	With ``is_direct`` every demand rides its direct link only and
	``routing_weights`` is ignored. Otherwise each demand is split by
	``routing_weights`` over the direct link (key ``(src, dst)``) and all
	two-hop detours (key ``(src, mid, dst)``).

	:return: dict mapping a directed block pair to its link utilization.
	"""
	lu_dict = {}
	if is_direct:
		# Direct-only routing: utilization is simply demand over capacity.
		for src in range(nblocks):
			for dst in range(nblocks):
				if src != dst:
					lu_dict[(src, dst)] = base_tm[src][dst] / capacity_matrix[src][dst]
	else:
		for src in range(nblocks):
			for dst in range(nblocks):
				if src == dst:
					continue
				direct_volume = routing_weights[(src, dst)] * base_tm[src][dst]
				lu_dict[(src, dst)] = lu_dict.get((src, dst), 0) + (direct_volume / capacity_matrix[src][dst])
				for mid in range(nblocks):
					if mid == src or mid == dst:
						continue
					# Each detour loads both of its constituent links.
					detour_volume = base_tm[src][dst] * routing_weights[(src, mid, dst)]
					lu_dict[(src, mid)] = lu_dict.get((src, mid), 0) + (detour_volume / capacity_matrix[src][mid])
					lu_dict[(mid, dst)] = lu_dict.get((mid, dst), 0) + (detour_volume / capacity_matrix[mid][dst])
	return lu_dict

## Only retrieves the MLU
def stateful_lightweight_evaluation(nblocks, base_tm, base_tm_lu_distribution, burst_factor, burst_pairs, capacity_matrix, routing_weights, std_dev_matrix, is_direct=False):
	"""Return the MLU after inflating the demands of ``burst_pairs``.

	Starts from the precomputed ``base_tm_lu_distribution`` (left untouched;
	a deep copy is taken) and injects, for every bursting (src, dst) pair,
	the load produced by the inflated demand
	``base_tm[src][dst] + burst_factor * std_dev``.

	:return: the maximum link utilization over all links.
	"""
	lu_dict = copy.deepcopy(base_tm_lu_distribution)
	if is_direct:
		for src, dst in burst_pairs:
			# Direct routing: the bursty demand replaces the link's base load.
			bursty_demand = burst_factor * std_dev_matrix[src][dst] + base_tm[src][dst]
			lu_dict[(src, dst)] = bursty_demand / capacity_matrix[src][dst]
	else:
		for src, dst in burst_pairs:
			bursty_demand = burst_factor * std_dev_matrix[src][dst] + base_tm[src][dst]
			lu_dict[(src, dst)] = lu_dict.get((src, dst), 0) + (routing_weights[(src, dst)] * bursty_demand / capacity_matrix[src][dst])
			for mid in range(nblocks):
				if mid == src or mid == dst:
					continue
				detour_volume = routing_weights[(src, mid, dst)] * bursty_demand
				lu_dict[(src, mid)] = lu_dict.get((src, mid), 0) + (detour_volume / capacity_matrix[src][mid])
				lu_dict[(mid, dst)] = lu_dict.get((mid, dst), 0) + (detour_volume / capacity_matrix[mid][dst])
	return max(lu_dict.values())

## Worker: evaluates MLUs for a slice of the burst-pair combination indices
def burst_pairs_evaluation_worker(aurora_network, nblocks, evaluation_window, logical_topology_routing_weights_pairs, bursts_scales, max_tm, std_dev_matrix, all_burstpair_combinations):
	"""Evaluate the burst MLU for every (topology, routing) configuration.

	For each configuration, derives the logical capacity matrix, computes the
	base utilization for ``max_tm``, then for every burst scale and every
	burst-pair combination index inside the inclusive ``evaluation_window``
	computes the resulting MLU.

	:return: list indexed as [config][burst_scale][combination] of MLUs.
	"""
	results = []
	print("Number of logical topology routing weight pairs is {}".format(len(logical_topology_routing_weights_pairs)))
	window_start, window_end = evaluation_window
	for logical_topology_adj_matrix, routing_weights in logical_topology_routing_weights_pairs:
		# Translate logical link counts into capacities using the bottleneck
		# of the two endpoint blocks' per-link capacities.
		topology_capacity_matrix = np.zeros((nblocks, nblocks))
		for src in range(nblocks):
			for dst in range(nblocks):
				if src == dst:
					continue
				per_link_capacity = min(aurora_network.get_link_capacity(src), aurora_network.get_link_capacity(dst))
				topology_capacity_matrix[src][dst] = logical_topology_adj_matrix[src][dst] * per_link_capacity
		# A direct-routing weight dict has at most one entry per ordered block
		# pair; anything larger must also carry two-hop path weights.
		is_direct_bool = (len(routing_weights.keys()) <= nblocks * (nblocks - 1))
		## derive the base utilization once; bursts are layered on top of it
		base_tm_lu_distribution = stateless_evaluation(nblocks, max_tm, topology_capacity_matrix, routing_weights, is_direct=is_direct_bool)
		burst_factor_mlus = []
		for burst_scale in bursts_scales:
			mlu_vector_burst_scale = []
			for combination_index in range(window_start, window_end + 1):
				mlu = stateful_lightweight_evaluation(nblocks, max_tm, base_tm_lu_distribution, burst_scale, all_burstpair_combinations[combination_index], topology_capacity_matrix, routing_weights, std_dev_matrix, is_direct=is_direct_bool)
				mlu_vector_burst_scale.append(mlu)
			burst_factor_mlus.append(mlu_vector_burst_scale)
		results.append(burst_factor_mlus)
	return results

def parallel_unpack_helper(args):
	"""Unpack a packed argument tuple and run the evaluation worker.

	Exists because ``Pool.map`` passes a single argument per task; the last
	tuple element is the full burst-pair combination list (the worker's own
	``evaluation_window`` selects the slice it handles).
	"""
	(aurora_network, nblocks, evaluation_window, logical_topology_routing_weights_pairs,
		bursts_scales, max_tm, std_dev_matrix, burstpair_combinations) = args
	return burst_pairs_evaluation_worker(aurora_network, nblocks, evaluation_window,
		logical_topology_routing_weights_pairs, bursts_scales, max_tm,
		std_dev_matrix, burstpair_combinations)

def parallel_evaluation(num_workers, aurora_network, nblocks, logical_topology_routing_weights_pairs, bursts_scales, max_tm, std_dev_matrix, all_burstpair_combinations):
	"""Evaluate every configuration over all burst-pair combinations,
	fanning the combination indices out over a process pool.

	:param num_workers: number of worker processes (>= 1); with 1 the
		evaluation runs inline without a pool.
	:return: nested list indexed as [config][burst_factor][combination]
		holding the MLU of each burst-pair combination, in original order.
	"""
	assert(num_workers >= 1)
	total_number_of_burstpair_combinations = len(all_burstpair_combinations)
	
	## Next, populate the argument lists for each worker, by first going through the evaluation window
	evaluation_parameters_and_arguments_lists = [None] * num_workers
	# NOTE: must be floor division -- under Python 3 ``/`` yields a float,
	# which would later be fed to range() inside the worker and crash.
	per_worker_evaluation_window = total_number_of_burstpair_combinations // num_workers
	leftovers = total_number_of_burstpair_combinations % num_workers
	offset = 0
	for i in range(num_workers):
		workers_number_of_burstpair_combinations_to_handle = per_worker_evaluation_window
		if leftovers > 0:
			# Spread the remainder, one extra combination per worker.
			workers_number_of_burstpair_combinations_to_handle += 1
			leftovers -= 1
		eval_window = (offset, offset + workers_number_of_burstpair_combinations_to_handle - 1)
		evaluation_parameters_and_arguments_lists[i] = (aurora_network, nblocks, eval_window, logical_topology_routing_weights_pairs, bursts_scales, max_tm, std_dev_matrix, all_burstpair_combinations)
		offset += workers_number_of_burstpair_combinations_to_handle
	## Start mapping the function across all of the arguments
	if num_workers == 1:
		# No pool overhead needed: evaluate the full window inline.
		return burst_pairs_evaluation_worker(aurora_network, nblocks, (0, total_number_of_burstpair_combinations - 1), logical_topology_routing_weights_pairs, bursts_scales, max_tm, std_dev_matrix, all_burstpair_combinations)
	pool = Pool(processes=num_workers)
	try:
		mlu_performances_packed = pool.map(parallel_unpack_helper, evaluation_parameters_and_arguments_lists)
	finally:
		# Always tear the pool down, even if a worker raised.
		pool.close()
		pool.join()
	## Stitch the per-worker slices back together, preserving combination order.
	mlu_performances = [None] * len(logical_topology_routing_weights_pairs)
	for routing_topology_config_index in range(len(logical_topology_routing_weights_pairs)):
		mlu_performances[routing_topology_config_index] = [None] * len(bursts_scales)
		for burst_factor_index in range(len(bursts_scales)):
			combined = []
			for worker_index in range(num_workers):
				combined += mlu_performances_packed[worker_index][routing_topology_config_index][burst_factor_index]
			mlu_performances[routing_topology_config_index][burst_factor_index] = combined
	return mlu_performances

if __name__ == "__main__":
	print("Evaluation suite for FB's DCN interpod traces")
	if len(sys.argv) > 1:
		for argument in sys.argv[1:]:
			if argument[:2] == "-f":
				# NOTE(review): OVERRIDE_ALL_FILES is set here but never read
				# below, and has no default when "-f" is absent -- vestigial?
				OVERRIDE_ALL_FILES = True

	## decide on the cluster name here
	cluster_name = "combined"
	#cluster_name = "A"
	cluster_alias = "database"
	aggregation_window = 1
	## simply change the clusters here
	if cluster_name == "A":
		cluster_alias = "database"
	elif cluster_name == "B":
		cluster_alias = "web"
	elif cluster_name == "C":
		cluster_alias = "hadoop"
	elif cluster_name == "combined":
		cluster_alias = "combined"
	else:
		print("Unrecognized ")
		sys.exit()
	# Machine-specific, hard-coded locations for the trace protobuf and the
	# valid pod-id list.
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)

	reconfiguration_period = 300 # reconfiguration periodicity in seconds
	assert(reconfiguration_period >= 1)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	# Drop the first and last snapshots -- presumably partial aggregation
	# windows at the trace boundaries; confirm against the trace format.
	traffic_matrices = traffic_matrices[1:-1]
	timestamps = timestamps[1:-1]
	print("Reading facebook cluster... COMPLETE")
	number_of_pods = len(valid_network_ids)
	nblocks = number_of_pods

	print("Obtaining std dev matrix...")
	std_dev_matrix = std_deviation_matrix(number_of_pods, traffic_matrices)
	print("Obtaining std dev matrix... DONE")
	print(std_dev_matrix)
	## Topology link number 
	per_node_pair_num_links = 15
	link_capacity = 5

	# Per-block parameters: each block carries enough links to reach every
	# other pod with per_node_pair_num_links parallel links.
	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()
	

	
	

	# Offline training: the training window below is also the evaluation window.
	starting_index = 1000
	ending_index = 1999
	#traffic_matrices_scaling_factor = 1.
	#traffic_matrices = [multiply_matrix(x, traffic_matrices_scaling_factor) for x in traffic_matrices]
	training_traffic_matrices = traffic_matrices[starting_index : ending_index + 1]
	# Elementwise max over the training window: the peak demand matrix.
	max_tm = np.zeros((nblocks, nblocks))
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j:
				for tm in training_traffic_matrices:
					max_tm[i][j] = max(max_tm[i][j], tm[i][j])

	# Burst scales used to inflate demands beyond the training convex hull.
	factors_outside_hull = [1, 2, 6]
	## Define the number of representative traffic matrices (i.e. number of points to describe the hull)
	k = 5

	#numK = min(numK, len(training_traffic_matrices))
	training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
	crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
	representative_traffic_vectors = crit_mat.train(number_of_clusters=k)
	representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]

	

	# Declare the static ToE uniform mesh
	static_toe = topology_engineer.TopologyEngineer(aurora_network, reconfiguration_period)
	obl_routing = vlb_traffic_engineer.VLBTrafficEngineer(aurora_network, all_paths)
	uniform_mesh_logical_topology, _ = static_toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	# NOTE(review): ad-hoc per-cluster adjustment of the uniform mesh link
	# counts -- presumably to keep link budgets comparable across clusters;
	# confirm the intent behind the -0.5 / +0.25 constants.
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j:
				if cluster_name in ("combined", "combinedclique"):
					uniform_mesh_logical_topology[i][j] -= 0.5
				else:
					uniform_mesh_logical_topology[i][j] += 0.25
	uniform_mesh_vlb_routing_weights = obl_routing.compute_path_weights(uniform_mesh_logical_topology, copy.deepcopy(representative_traffic_matrices))
	
	## Define the direct routing only topology engineering method, and the corresponding routing weights
	direct_toe = direct_path_topology_engineer.DirectPathTopologyEngineer( aurora_network, reconfiguration_period, reconfiguration_period, numK=k)
	direct_toe_logical_topology, _ = direct_toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	# Direct routing: every demand fully rides its direct path (weight 1).
	direct_routing_weights = {}
	for i in range(number_of_pods):
		for j in range(number_of_pods):
			if i != j:
				direct_routing_weights[(i,j)] = 1

	## Define the Topology Engineer algorithm
	'''
	toe_no_sensitivity = robust_multi_traffic_topology_engineer_v3.RobustMultiTrafficTopologyEngineerImplementationV3(aurora_network, 
																													reconfiguration_period, 
																													reconfiguration_period,
																													all_paths, 
																													traffic_matrices, 
																													k, 
																													minimize_multihop=True)
	te_no_sensitivity = robust_multi_cluster_traffic_engineer.RobustMultiClusterTrafficEngineer(aurora_network, 
																								all_paths, 
																								reconfiguration_period, 
																								reconfiguration_period, 
																								k, 
																								reduce_multihop=False)
	'''
	# "No sensitivity" baseline: same solver family as the main ToE/TE below
	# but with tighter relaxation settings (mlu_relaxation=1).
	toe_no_sensitivity = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																									reconfiguration_period, 
																									reconfiguration_period, 
																									all_paths, 
																									traffic_matrices, 
																									k,
																									mlu_relaxation=1)
	te_no_sensitivity = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																							all_paths, 
																							reconfiguration_period, 
																							reconfiguration_period, 
																							k, 
																							sensitivity_relaxation=2., 
																							mlu_relaxation=1)
	toe_no_sensitivity_logical_topology, _ = toe_no_sensitivity.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	te_no_sensitivity_routing_weights = te_no_sensitivity.compute_path_weights(toe_no_sensitivity_logical_topology, copy.deepcopy(representative_traffic_matrices))
	static_toe_te_no_sensitivity_routing_weights = te_no_sensitivity.compute_path_weights(uniform_mesh_logical_topology, copy.deepcopy(representative_traffic_matrices))

	## Define the Traffic Engineer algorithm for the topology engineering class
	toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																									reconfiguration_period, 
																									reconfiguration_period, 
																									all_paths, 
																									traffic_matrices, 
																									k,
																									mlu_relaxation=1.5)
	te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																							all_paths, 
																							reconfiguration_period, 
																							reconfiguration_period, 
																							k, 
																							reduce_multihop=True,
																							sensitivity_relaxation=1., 
																							mlu_relaxation=1.3)
	toe_logical_topology, _ = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	te_routing_weights = te.compute_path_weights(toe_logical_topology, copy.deepcopy(representative_traffic_matrices))
	static_toe_te_routing_weights = te.compute_path_weights(uniform_mesh_logical_topology, copy.deepcopy(representative_traffic_matrices))

	## Revised WCMP based topology engineering and traffic engineering
	'''
	bounded_wcmp_toe = bounded_wcmp_topology_engineer_strong.BoundedWCMPTopologyEngineerStrong(aurora_network, 
																								reconfiguration_period,
																								reconfiguration_period, 
																								all_paths, 
																								traffic_matrices)
	bounded_wcmp_te = bounded_wcmp_traffic_engineer_strong.BoundedWCMPTrafficEngineerStrong(aurora_network, 
																							all_paths, 
																							reconfiguration_period, 
																							reconfiguration_period)
	bounded_wcmp_logical_topology, _ = bounded_wcmp_toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	bounded_wcmp_routing_weights = bounded_wcmp_te.compute_path_weights(bounded_wcmp_logical_topology, copy.deepcopy(representative_traffic_matrices))
	'''
	maximum_num_of_burst_pairs = 2
	print("Generating burst pairs")
	blockpair_bursts_combinations = _generate_burst_pair(aurora_network.get_dcn_name(), nblocks, maximum_num_of_burst_pairs)
	print("Finished generating burst pairs")
	# Each (logical topology, routing weights) pair is one evaluated config.
	logical_topology_routing_weights_pairs = [(toe_logical_topology, te_routing_weights),
												(uniform_mesh_logical_topology, static_toe_te_routing_weights),
												(toe_no_sensitivity_logical_topology, te_no_sensitivity_routing_weights),
												(uniform_mesh_logical_topology, static_toe_te_no_sensitivity_routing_weights),
												#(direct_toe_logical_topology, direct_routing_weights),
												#(bounded_wcmp_logical_topology, bounded_wcmp_routing_weights),
												(uniform_mesh_logical_topology, uniform_mesh_vlb_routing_weights)]
	# NOTE(review): this alias list is stale -- 6 entries for 5 active configs
	# (the length assert below is commented out). It is only referenced by the
	# commented-out plotting code; the live plot uses
	# logical_topology_routing_weights_configs_name instead.
	logical_topology_routing_weights_aliases = ['ToE + TE (w/ sensitivity)',
												'Uniform Mesh + TE (w/ sensitivity)',
												'ToE + TE (no sensitivity optimization)',
												'ToE + Direct Routing Only',
												'BWCMP ToE + TE',
												'Oblivious']
	#assert(len(logical_topology_routing_weights_aliases) == len(logical_topology_routing_weights_pairs))

	all_mlu_performances = parallel_evaluation(NUMBER_OF_PROCESSES, 
												aurora_network, 
												nblocks, 
												logical_topology_routing_weights_pairs, 
												factors_outside_hull, 
												max_tm, 
												std_dev_matrix,
												blockpair_bursts_combinations)
	'''
	fig, axes = plt.subplots(nrows=len(factors_outside_hull), ncols=1, squeeze=True)	
	fig.suptitle("Fabric {}".format(aurora_network.get_dcn_name()))
	for burst_factor_outside_hull, burst_factor_index in zip(factors_outside_hull, range(len(factors_outside_hull))):
		for topology_routing_index in range(len(logical_topology_routing_weights_pairs)):
			sorted_mlu = sorted(all_mlu_performances[topology_routing_index][burst_factor_index])
			axes[burst_factor_index].plot(sorted_mlu)
		if burst_factor_index == 0:
			axes[burst_factor_index].legend(logical_topology_routing_weights_aliases) 
		axes[burst_factor_index].set_xlim(xmin=0, xmax=len(blockpair_bursts_combinations)-1)
	
	flattened_data_list = []
	x_position = []
	current_offset = 0
	for burst_factor_outside_hull, burst_factor_index in zip(factors_outside_hull, range(len(factors_outside_hull))):
		for topology_routing_index in range(len(logical_topology_routing_weights_pairs)):
			flattened_data_list.append(all_mlu_performances[topology_routing_index][burst_factor_index])
			x_position.append(current_offset)
			current_offset += 1
		current_offset += 3
	ax = sns.boxplot(data=flattened_data_list)
	ax.set_yscale("log")
	'''
	import pandas as pd
	#mpl.use("pgf")
	# LaTeX/PGF settings so the saved figure can be \input into a paper.
	mpl.rcParams.update({"pgf.texsystem": "pdflatex", 'font.family': 'serif', 'text.usetex': True, 'pgf.rcfonts': False, 'text.latex.preamble': r'\newcommand{\mathdefault}[1][]{}'})
	#rcParams['text.latex.preamble'] = r'\newcommand{\mathdefault}[1][]{}'
	list_of_tuples = []
	latex_linewidth_length_inches = 5.472
	fig_width = 0.75 * latex_linewidth_length_inches
	fig_height = 1.8
	fig, ax = plt.subplots(figsize=(fig_width, fig_height))
	#fig, ax = plt.subplots()
	#fig.set_size_inches(w=fig_width, h=fig_height)
	# NOTE(review): MLUs are multiplied by 3 before plotting -- presumably a
	# normalization constant for presentation; confirm against the paper.
	mlu_scaling_factor = 3.
	logical_topology_routing_weights_configs_name = ['COUDER (SO)', 
													 'U. Mesh (SO)', 
													 'COUDER (w/o SO)', 
													 'U. Mesh (w/o SO)', 
													 'Oblivious']
	for burst_factor_outside_hull, burst_factor_index in zip(factors_outside_hull, range(len(factors_outside_hull))):
		for topology_routing_index, config_name in zip(range(len(logical_topology_routing_weights_pairs)), logical_topology_routing_weights_configs_name):
			sorted_mlu_timeseries = sorted(all_mlu_performances[topology_routing_index][burst_factor_index])
			print(len(sorted_mlu_timeseries))
			## may need to downsample here
			# NOTE(review): presentation-only trimming of the sorted MLU data
			# (drops the top sample of config 0; keeps only the top 30% tail
			# for the combined cluster) -- verify this matches the intended
			# published figures.
			if topology_routing_index == 0:
				sorted_mlu_timeseries = sorted_mlu_timeseries[:-1]
			if cluster_name in ('combined', 'combinedclique') and maximum_num_of_burst_pairs >= 2:
				index = int(0.7 * len(sorted_mlu_timeseries))
				sorted_mlu_timeseries = sorted_mlu_timeseries[index : ]

			for mlu in sorted_mlu_timeseries:
				list_of_tuples.append((mlu * mlu_scaling_factor, config_name, burst_factor_outside_hull))
	#ax.set_yscale("log")
	# Set outlier marker properties
	flierprops = dict(marker='d', markersize=1.5)
	plt.xticks(fontsize=6.4)
	plt.yticks(fontsize=6.4)
	df = pd.DataFrame(list_of_tuples, columns=['MLU', 'Configurations', 'Burst Factor'])
	ax = sns.boxplot(x='Burst Factor', y='MLU', hue='Configurations', data=df, linewidth=0.8, width=0.7, flierprops=flierprops, ax=ax)
	ax.set_ylabel("MLU", fontsize=7)
	ax.set_xlabel("Burst Factor", fontsize=7)
	ax.minorticks_on()
	ax.tick_params(axis='y', which="both", left=True, direction="in")
	
	## removes the title in the legends
	l = ax.legend(fontsize=6.2,  ncol=3, loc='lower center', bbox_to_anchor=(0.5, 0.98))
	l.set_title('')
	plt.tight_layout()
	plt.subplots_adjust(left=0.12, bottom=0.22, right=0.99, top=0.80, wspace=0.2, hspace=0.2)
	plt.savefig('fabric_{}_robustness_exercise.pgf'.format(aurora_network.get_dcn_name()))
	plt.show()
	exit()
