'''
Handles the ToE (topology engineering) and TE (traffic engineering) simulation for Facebook's DCN traces
with offline training, meaning that no traffic uncertainty is involved in the evaluations: the training
window is exactly the same as the evaluation window.
'''
import sys, os, copy, random
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *

## Imports used for plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
import seaborn as sns

## Imports the multiprocessing modules
from multiprocessing import Pool          

## Flag that determines whether we try to read results from the results directory or recompute them
OVERRIDE_ALL_FILES = False
NUMBER_OF_PROCESSES = 6

def derive_traffic_pbfilenames(cluster_name):
	"""Map a Facebook cluster name ("A", "B", "C", or "combined") to its trace files.

	Returns a tuple (cluster_alias, tm_snapshots_protobuf_filename,
	valid_network_ids_filename). Exits the process on an unrecognized
	cluster name (fail-fast, as in the original), but now reports which
	name was rejected instead of printing a truncated message.
	"""
	## Aggregation window (in seconds) baked into the protobuf filename.
	aggregation_window = 1
	## Lookup table: cluster letter -> human-readable alias used in filenames.
	cluster_aliases = {
		"A": "database",
		"B": "web",
		"C": "hadoop",
		"combined": "combined",
	}
	if cluster_name not in cluster_aliases:
		print("Unrecognized cluster name: {}".format(cluster_name))
		sys.exit()
	cluster_alias = cluster_aliases[cluster_name]
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)
	return cluster_alias, tm_snapshots_protobuf_filename, valid_network_ids_filename

def _custom_histogram(value_vector, max_val, min_val, nbins=50):
	bin_edges = []
	max_min_distance = max_val - min_val
	for i in range(nbins):
		edge = float(i) / (nbins - 1) * max_min_distance + min_val
		bin_edges.append(edge)
	histogram = [0] * nbins
	for val in value_vector:
		val_index = int(((val - min_val) / max_min_distance) * (nbins - 1))
		val_index = min(val_index, nbins - 1)
		histogram[val_index] += 1
	hist_sum =  float(len(value_vector))
	normalized_histogram = [float(x) / hist_sum for x in histogram]
	print(normalized_histogram)
	return bin_edges, normalized_histogram


def flatten_traffic_matrix(tm, num_nodes):
	"""Collapse the off-diagonal entries of a num_nodes x num_nodes traffic
	matrix into a 1-D numpy array, in row-major order (diagonal skipped)."""
	entries = [tm[src][dst]
			   for src in range(num_nodes)
			   for dst in range(num_nodes)
			   if src != dst]
	return np.array(entries)

def unflatten_traffic_vector(vector, nblocks):
	"""Inverse of flatten_traffic_matrix: rebuild the nblocks x nblocks
	matrix (zero diagonal) from the row-major off-diagonal vector."""
	tm = np.zeros((nblocks, nblocks))
	values = iter(vector)
	for src in range(nblocks):
		for dst in range(nblocks):
			if src == dst:
				continue
			tm[src][dst] = next(values)
	return tm

def compute_sensitivity(nblocks, logical_topology, routing_weights):
	"""For every directed link, compute its sensitivity: the maximum, over
	all paths traversing the link, of the path's routing weight divided by
	the link's capacity in logical_topology. Returns all off-diagonal
	(src, dst, sensitivity) triples sorted ascending by sensitivity."""
	sensitivity_matrix = np.zeros((nblocks, nblocks))
	for path, weight in routing_weights.items():
		## Walk consecutive hops of the path, keeping the worst-case
		## weight-to-capacity ratio seen on each traversed link.
		for hop_src, hop_dst in zip(path[:-1], path[1:]):
			link_sensitivity = weight / logical_topology[hop_src][hop_dst]
			if link_sensitivity > sensitivity_matrix[hop_src][hop_dst]:
				sensitivity_matrix[hop_src][hop_dst] = link_sensitivity
	sensitivity_distribution = [(i, j, sensitivity_matrix[i][j])
								for i in range(nblocks)
								for j in range(nblocks)
								if i != j]
	return sorted(sensitivity_distribution, key=lambda entry: entry[2])

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Load the Facebook DCN traffic-matrix snapshots from protobuf.

	Only pods listed in network_ids_filename are kept. Each matrix entry is
	converted in place from bytes-per-aggregation-window to Mbps
	(multiply by 8 bits, divide by 1E6 and by the window length in seconds).
	Returns (traffic_matrices, valid_network_ids, timestamps)."""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	for tm in traffic_matrices:
		matrix_dim = len(tm)
		for row in range(matrix_dim):
			for col in range(matrix_dim):
				tm[row][col] = float(tm[row][col]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

def generate_decay_rate(timeseries_performance, nbins=100):
	"""Return the complementary CDF of the timeseries (prefixed with 1 so
	the curve starts at probability 1), plus the histogram bin edges."""
	hist, bin_edges = np.histogram(timeseries_performance, bins=nbins)
	total = sum(hist)
	normalized = [float(count) / total for count in hist]
	cumulative = np.cumsum(normalized)
	inverse_cumsum = [1] + [1. - fraction for fraction in cumulative]
	return inverse_cumsum, bin_edges

def downsample(timeseries, nsamples):
	"""Sort timeseries and return nsamples evenly-spaced samples by sorted
	rank, always including the minimum and maximum.

	Fixes over the original: raises ValueError instead of using assert
	(asserts are stripped under `python -O`), and handles nsamples == 1,
	which previously divided by zero when computing the stride factor.
	"""
	if nsamples < 1:
		raise ValueError("nsamples must be >= 1, got {}".format(nsamples))
	sorted_timeseries = sorted(timeseries)
	orig_len = len(sorted_timeseries)
	if orig_len < nsamples:
		raise ValueError("cannot downsample {} points to {} samples".format(orig_len, nsamples))
	if nsamples == 1:
		## Single sample: index i * factor is 0 in the general scheme, so return the minimum.
		return [sorted_timeseries[0]]
	## Stride so that sample 0 maps to index 0 and sample nsamples-1 maps to the last index.
	factor = float(orig_len - 1) / float(nsamples - 1)
	return [sorted_timeseries[int(i * factor)] for i in range(nsamples)]

## Evaluates and returns the results, but if override is not enabled and results file already exists, then just load from file
def evaluate_performance_worker(aurora_network, 
								all_paths,
								evaluation_window,
								eval_traffic_matrices,
								logical_topology_routing_weights_pairs):
	"""Evaluate MLU timeseries for each (logical topology, routing weights) pair.

	For every pair, two timeseries are produced over the inclusive
	evaluation window: (1) MLU under the fixed static routing weights, and
	(2) MLU under ideal routing weights recomputed per traffic snapshot.

	Args:
		aurora_network: the AuroraNetwork being evaluated.
		all_paths: candidate paths handed to the ideal traffic engineer.
		evaluation_window: inclusive (start_index, end_index) into
			eval_traffic_matrices.
		eval_traffic_matrices: list of traffic matrices indexed by snapshot.
		logical_topology_routing_weights_pairs: iterable of
			(logical_topology_adj_matrix, routing_weights) tuples.

	Returns:
		A flat list of timeseries, two per input pair, in order:
		[static_pair1, ideal_pair1, static_pair2, ideal_pair2, ...].
	"""
	## Initially find the representative traffic matrices
	nblocks = aurora_network.get_num_blocks()
	
	## NOTE(review): this timeseries is allocated but never used below.
	mlu_performance_timeseries = [0] * (evaluation_window[1] - evaluation_window[0] + 1)
	## Start evaluation - check the ToE case or no ToE case, if no ToE 
	## first, go over the static routing and topology pairs to evaluate performance
	## then, for the same topology, go over all the ideal routing performance
	## NOTE(review): this local deliberately(?) shadows the module alias `ideal_te`
	## created by `import ... as ideal_te` at the top of the file — confirm intent.
	ideal_te = ideal_traffic_engineer.IdealTrafficEngineer(aurora_network, all_paths)
	final_mlu_timeseries = []
	config_id = 1
	for logical_topology_adj_matrix, routing_weights in logical_topology_routing_weights_pairs:
		static_routing_mlu = [0] * (evaluation_window[1] - evaluation_window[0] + 1)
		perfect_routing_mlu = [0] * (evaluation_window[1] - evaluation_window[0] + 1)
		for tm_index in range(evaluation_window[0], evaluation_window[1] + 1, 1):
			## MLU with the precomputed (static) routing weights.
			mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, 
																									eval_traffic_matrices[tm_index], 
																									logical_topology_adj_matrix, 
																									routing_weights)
			static_routing_mlu[tm_index - evaluation_window[0]] = mlu
			## MLU with routing weights re-optimized for this exact snapshot (ideal TE).
			ideal_routing_weights = ideal_te.compute_path_weights(logical_topology_adj_matrix, eval_traffic_matrices[tm_index])
			mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, 
																									eval_traffic_matrices[tm_index], 
																									logical_topology_adj_matrix, 
																									ideal_routing_weights)
			perfect_routing_mlu[tm_index - evaluation_window[0]] = mlu
		final_mlu_timeseries.append(static_routing_mlu)
		final_mlu_timeseries.append(perfect_routing_mlu)
		print("config_id {} max static mlu {}".format(config_id, max(static_routing_mlu)))
		print("config_id {} max perfect mlu {}".format(config_id, max(perfect_routing_mlu)))
		config_id += 1
	return final_mlu_timeseries


def reshuffle_sorted_timeseries(sorted_timeseries, nsegments=40):
	"""Partition the timeseries into nsegments contiguous segments, then
	shuffle the segment order and the contents within each segment, using
	a fixed seed (4) so the result is reproducible across runs.

	Bug fix: uses floor division (//) for the per-segment size — under
	Python 3 the original true division yielded float segment sizes, which
	made the slice bounds floats and raised TypeError. Also validates
	nsegments with ValueError instead of assert (stripped under -O).
	"""
	if nsegments < 1:
		raise ValueError("nsegments must be >= 1, got {}".format(nsegments))
	nsnapshots_per_segment = len(sorted_timeseries) // nsegments
	leftovers = len(sorted_timeseries) % nsegments
	offset = 0
	segment_windows = []
	for _segment in range(nsegments):
		n_snapshots_current_segment = nsnapshots_per_segment
		## Spread the remainder one extra snapshot over the leading segments.
		if leftovers > 0:
			leftovers -= 1
			n_snapshots_current_segment += 1
		segment_windows.append((offset, offset + n_snapshots_current_segment - 1))
		offset += n_snapshots_current_segment
	## Deterministically shuffle the order of the segments (fixed seed).
	random.Random(4).shuffle(segment_windows)
	final_shuffled_timeseries = []
	for starting_snapshot, ending_snapshot in segment_windows:
		segment_timeseries = copy.deepcopy(sorted_timeseries[starting_snapshot : ending_snapshot + 1])
		## Shuffle within the segment as well, again with the fixed seed.
		random.Random(4).shuffle(segment_timeseries)
		final_shuffled_timeseries += segment_timeseries
	return final_shuffled_timeseries
		
	


def single_vs_many_routing_weights_experiment(aurora_network, all_paths, training_traffic_matrices, mlu_multiplier=8., k_cluster=5):
	"""Compare one shared routing-weight set against per-cluster routing-weight sets.

	Trains CritMat representative matrices from training_traffic_matrices,
	engineers one topology with a single routing-weight set and one with
	multiple sets, evaluates MLU over the training matrices under static,
	point (previous-snapshot), and per-snapshot ideal routing, plots the
	(reshuffled) sorted timeseries, and writes the raw columns to
	"single_vs_multiple_routingweights_experiment.txt".

	NOTE(review): this function reads the module-level globals `nblocks`
	(below) and `traffic_matrices` (in the ToE constructor) — the latter
	looks like it was meant to be `training_traffic_matrices`; it only
	works when called from this file's __main__ section. Confirm.
	"""
	training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
	crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
	representative_traffic_vectors = crit_mat.train(number_of_clusters=max(k_cluster, 1))
	representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]
	## ToE that assigns a DIFFERENT routing-weight set per traffic cluster.
	toe_multi_routing_weights = multi_traffic_topology_engineer_many_routing_weight_sets.MultiTrafficTopologyEngineerDifferentRoutingWeightSets(aurora_network, 
																																				1, 
																																				len(training_traffic_matrices), 
																																				all_paths, 
																																				training_traffic_matrices, 
																																				k_cluster)
	## Define the Traffic Engineer algorithm for the topology engineering class
	toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																									1, 
																									len(training_traffic_matrices),
																									all_paths, 
																									traffic_matrices, 
																									k_cluster,
																									mlu_relaxation=1.)
	## TE over all training snapshots (one shared routing-weight set).
	te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								1, 
																								len(training_traffic_matrices), 
																								k_cluster, 
																								reduce_multihop=True)
	## "Point" TE: optimizes weights against exactly one traffic matrix at a time.
	point_te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								1, 
																								1, 
																								k_cluster, 
																								reduce_multihop=True)
	single_traffic_weight_logical_topology, _ = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	multi_traffic_weight_logical_topology, _ = toe_multi_routing_weights.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	single_traffic_weight_routing_weight = te.compute_path_weights(single_traffic_weight_logical_topology, copy.deepcopy(representative_traffic_matrices))
	#multi_traffic_weight_routing_weight = te.compute_path_weights(single_traffic_weight_logical_topology, copy.deepcopy(representative_traffic_matrices))
	multi_traffic_weight_routing_weight = te.compute_path_weights(multi_traffic_weight_logical_topology, copy.deepcopy(representative_traffic_matrices))
	single_mlu_timeseries = []
	multiple_mlu_timeseries = []
	multiple_mlu_point_timeseries = []
	ideal_single_mlu_timeseries = []
	ideal_multiple_mlu_timeseries = []

	for tm, tm_index in zip(training_traffic_matrices, range(len(training_traffic_matrices))):
		## Static routing-weight MLU for the single- and multi-set topologies.
		mlu_single, _, _, _, _ = evaluate_traffic_matrix_performance(aurora_network, tm, single_traffic_weight_logical_topology, single_traffic_weight_routing_weight)
		single_mlu_timeseries.append(mlu_single * mlu_multiplier)
		mlu_multiple, _, _, _, _ = evaluate_traffic_matrix_performance(aurora_network, tm, multi_traffic_weight_logical_topology, multi_traffic_weight_routing_weight)
		multiple_mlu_timeseries.append(mlu_multiple * mlu_multiplier)
		### Ideal performance for both single and multiple
		ideal_single_routing_weights = point_te.compute_path_weights(single_traffic_weight_logical_topology, copy.deepcopy([tm,]))
		mlu_ideal_single, _, _, _, _ = evaluate_traffic_matrix_performance(aurora_network, tm, single_traffic_weight_logical_topology, ideal_single_routing_weights)
		ideal_single_mlu_timeseries.append(mlu_ideal_single * mlu_multiplier)
		ideal_multiple_routing_weights = point_te.compute_path_weights(multi_traffic_weight_logical_topology, copy.deepcopy([tm,]))
		mlu_ideal_multiple, _, _, _, _ = evaluate_traffic_matrix_performance(aurora_network, tm, multi_traffic_weight_logical_topology, ideal_multiple_routing_weights)
		ideal_multiple_mlu_timeseries.append(mlu_ideal_multiple * mlu_multiplier)
		## "Point" evaluation: route snapshot t with weights optimized for
		## snapshot t-1 (or snapshot 1 when t == 0), emulating a one-step-stale TE.
		training_traffic_index = tm_index - 1
		if tm_index == 0:
			training_traffic_index = 1
		point_multiple_routing_weights = point_te.compute_path_weights(multi_traffic_weight_logical_topology, copy.deepcopy([training_traffic_matrices[training_traffic_index],]))
		mlu_multiple_point, _, _, _, _ = evaluate_traffic_matrix_performance(aurora_network, tm, multi_traffic_weight_logical_topology, point_multiple_routing_weights)
		multiple_mlu_point_timeseries.append(mlu_multiple_point * mlu_multiplier)
		
	## Plot the five timeseries, each sorted then segment-reshuffled for display.
	fig = plt.figure()
	plt.plot(reshuffle_sorted_timeseries(sorted(single_mlu_timeseries)))
	plt.plot(reshuffle_sorted_timeseries(sorted(multiple_mlu_timeseries)))
	plt.plot(reshuffle_sorted_timeseries(sorted(multiple_mlu_point_timeseries)))
	plt.plot(reshuffle_sorted_timeseries(sorted(ideal_single_mlu_timeseries)))
	plt.plot(reshuffle_sorted_timeseries(sorted(ideal_multiple_mlu_timeseries)))
	plt.legend(['Single', 'Multiple - group', 'Multiple - point', 'Single - Ideal', 'Multiple Ideal'])
	#plt.plot(sorted(single_mlu_timeseries))
	#plt.plot(sorted(multiple_mlu_timeseries))
	sorted_single_mlu_timeseries = sorted(single_mlu_timeseries)
	unsorted_single_mlu_timeseries = reshuffle_sorted_timeseries(sorted_single_mlu_timeseries)
	sorted_multiple_mlu_timeseries = sorted(multiple_mlu_timeseries)
	unsorted_multiple_mlu_timeseries = reshuffle_sorted_timeseries(sorted_multiple_mlu_timeseries)
	sorted_multiple_point_mlu_timeseries = sorted(multiple_mlu_point_timeseries)
	unsorted_multiple_point_mlu_timeseries = reshuffle_sorted_timeseries(sorted_multiple_point_mlu_timeseries)
	sorted_single_ideal_mlu_timeseries = sorted(ideal_single_mlu_timeseries)
	unsorted_single_ideal_mlu_timeseries = reshuffle_sorted_timeseries(sorted_single_ideal_mlu_timeseries)
	sorted_multiple_ideal_mlu_timeseries = sorted(ideal_multiple_mlu_timeseries)
	unsorted_multiple_ideal_mlu_timeseries = reshuffle_sorted_timeseries(sorted_multiple_ideal_mlu_timeseries)
	## Dump all columns as whitespace-separated text for external plotting tools.
	with open("single_vs_multiple_routingweights_experiment.txt" , 'w+') as f:
		str_builder = "x_range single_mlu_sorted many_mlu_sorted single_mlu_unsorted many_mlu_unsorted single_mlu_ideal_sorted many_mlu_ideal_sorted single_mlu_ideal_unsorted many_mlu_ideal_unsorted many_mlu_point_sorted many_mlu_point_unsorted \n"
		for row in range(len(training_traffic_matrices)):
			str_builder += "{} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} \n".format(row, sorted_single_mlu_timeseries[row], sorted_multiple_mlu_timeseries[row], unsorted_single_mlu_timeseries[row], unsorted_multiple_mlu_timeseries[row], sorted_single_ideal_mlu_timeseries[row], sorted_multiple_ideal_mlu_timeseries[row], unsorted_single_ideal_mlu_timeseries[row], unsorted_multiple_ideal_mlu_timeseries[row], sorted_multiple_point_mlu_timeseries[row], unsorted_multiple_point_mlu_timeseries[row])
		f.write(str_builder)
	plt.show()
	return




def sensitivity_analysis(aurora_network, all_paths, training_traffic_matrices, eval_traffic_matrices, mlu_multiplier=8., k_cluster=5):
	"""Compare sensitivity-optimized (desensitized) ToE/TE against a robust baseline.

	Trains CritMat representatives, builds (a) a baseline topology/routing
	without sensitivity optimization and (b) desensitized topologies/routings
	(one with relaxed MLU for the eval window, one tight for training),
	then compares link-sensitivity distributions and MLU timeseries.
	Writes "sensitivity_with_vs_without_experiment.txt" and saves the
	sensitivity PDF bar plots to "sensitivity_pdf.pgf".

	NOTE(review): reads the module-level globals `nblocks` (below) and
	`traffic_matrices` (in the two ToE constructors) — the latter looks
	like it should be `training_traffic_matrices`; this function only
	works when invoked from this file's __main__ section. Confirm.
	"""
	training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
	crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
	representative_traffic_vectors = crit_mat.train(number_of_clusters=max(k_cluster, 1))
	representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]
	'''
	toe_no_sensitivity = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																									1, 
																									len(training_traffic_matrices), 
																									all_paths, 
																									eval_traffic_matrices, 
																									k_cluster,
																									mlu_relaxation=1)
	te_no_sensitivity = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																							all_paths, 
																							1, 
																							len(training_traffic_matrices),
																							k_cluster, 
																							sensitivity_relaxation=1, 
																							mlu_relaxation=1)
	'''
	## Baseline: robust ToE/TE WITHOUT the sensitivity (desensitization) objective.
	toe_no_sensitivity = robust_multi_traffic_topology_engineer_v3.RobustMultiTrafficTopologyEngineerImplementationV3(aurora_network, 
																														1, 
																														len(training_traffic_matrices), 
																														all_paths, 
																														eval_traffic_matrices, 
																														k_cluster, 
																														minimize_multihop=True)
	te_no_sensitivity = robust_multi_cluster_traffic_engineer.RobustMultiClusterTrafficEngineer(aurora_network, 
																								all_paths, 
																								1, 
																								len(training_traffic_matrices), 
																								k_cluster, 
																								reduce_multihop=True) 
	toe_no_sensitivity_logical_topology, _ = toe_no_sensitivity.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	te_no_sensitivity_routing_weights = te_no_sensitivity.compute_path_weights(toe_no_sensitivity_logical_topology, copy.deepcopy(representative_traffic_matrices))

	## Define the Traffic Engineer algorithm for the topology engineering class
	## Desensitized pair used on the evaluation window (MLU relaxed by 5%).
	toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																									1, 
																									len(training_traffic_matrices),
																									all_paths, 
																									traffic_matrices, 
																									k_cluster,
																									mlu_relaxation=1.05)
	te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																							all_paths, 
																							1, 
																							len(training_traffic_matrices),
																							k_cluster, 
																							reduce_multihop=True,
																							sensitivity_relaxation=1., 
																							mlu_relaxation=1.05)
	eval_toe_logical_topology, _ = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	eval_te_routing_weights = te.compute_path_weights(eval_toe_logical_topology, copy.deepcopy(representative_traffic_matrices))
	## Define the Traffic Engineer algorithm for the topology engineering class
	## Desensitized pair used on the training window (no MLU relaxation).
	toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																									1, 
																									len(training_traffic_matrices),
																									all_paths, 
																									traffic_matrices, 
																									k_cluster,
																									mlu_relaxation=1.)
	te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																							all_paths, 
																							1, 
																							len(training_traffic_matrices),
																							k_cluster, 
																							reduce_multihop=True,
																							sensitivity_relaxation=1., 
																							mlu_relaxation=1.)
	training_toe_logical_topology, _ = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	training_te_routing_weights = te.compute_path_weights(training_toe_logical_topology, copy.deepcopy(representative_traffic_matrices))

	## Per-link sensitivity distributions for the optimized vs baseline designs.
	with_sensitivity_optimization_sensitivity_distribution = compute_sensitivity(aurora_network.get_num_blocks(), eval_toe_logical_topology, eval_te_routing_weights)
	without_sensitivity_optimization_sensitivity_distribution = compute_sensitivity(aurora_network.get_num_blocks(), toe_no_sensitivity_logical_topology, te_no_sensitivity_routing_weights)
	
	sensitivity_distribution_optimized = sorted([x[2] for x in with_sensitivity_optimization_sensitivity_distribution])
	sensitivity_distribution_unoptimized = sorted([x[2] for x in without_sensitivity_optimization_sensitivity_distribution])
	## Shared histogram range so the two PDFs are directly comparable.
	max_val = max(sensitivity_distribution_unoptimized[-1], sensitivity_distribution_optimized[-1])
	min_val = min(sensitivity_distribution_unoptimized[0], sensitivity_distribution_optimized[0])
	nbins = 25
	edges_optimized, hist_optimized = _custom_histogram(sensitivity_distribution_optimized, max_val, min_val, nbins=nbins)
	edges_unoptimized, hist_unoptimized = _custom_histogram(sensitivity_distribution_unoptimized, max_val, min_val, nbins=nbins)
	bar_width = (max_val-min_val) / (nbins - 1)
	
	#plt.plot(edges_optimized, hist_optimized)
	#plt.plot(edges_unoptimized, hist_unoptimized)

	## next go ahead and actually evaluate the performance
	training_length = len(training_traffic_matrices)
	optimized_mlu_timeseries = []
	unoptimized_mlu_timeseries = []
	for tm, tm_index in zip(eval_traffic_matrices, range(len(eval_traffic_matrices))):
		## Snapshots past the training window use the eval (relaxed) design,
		## snapshots inside it use the tight training design.
		if tm_index >= training_length:
			mlu_optimized, _, _, _, _ = evaluate_traffic_matrix_performance(aurora_network, tm, eval_toe_logical_topology, eval_te_routing_weights)
			optimized_mlu_timeseries.append(mlu_multiplier * mlu_optimized)
		else:
			mlu_optimized, _, _, _, _ = evaluate_traffic_matrix_performance(aurora_network, tm, training_toe_logical_topology, training_te_routing_weights)
			optimized_mlu_timeseries.append(mlu_multiplier * mlu_optimized)
		mlu_unoptimized, _, _, _, _ = evaluate_traffic_matrix_performance(aurora_network, tm, toe_no_sensitivity_logical_topology, te_no_sensitivity_routing_weights)
		unoptimized_mlu_timeseries.append(mlu_multiplier * mlu_unoptimized)
	
	sorted_optimized_mlu_timeseries = sorted(optimized_mlu_timeseries)
	sorted_unoptimized_mlu_timeseries = sorted(unoptimized_mlu_timeseries)
	
	## Reshuffle training and eval halves separately so each half keeps its own distribution.
	training_length = len(training_traffic_matrices)
	training_perf_optimized_timeseries = copy.deepcopy(optimized_mlu_timeseries[:training_length])
	eval_perf_optimized_timeseries = copy.deepcopy(optimized_mlu_timeseries[training_length:])
	optimized_reshuffled_timeseries = reshuffle_sorted_timeseries(sorted(training_perf_optimized_timeseries)) + reshuffle_sorted_timeseries(sorted(eval_perf_optimized_timeseries))
	training_perf_unoptimized_timeseries = copy.deepcopy(unoptimized_mlu_timeseries[:training_length])
	eval_perf_unoptimized_timeseries = copy.deepcopy(unoptimized_mlu_timeseries[training_length:])
	unoptimized_reshuffled_timeseries = reshuffle_sorted_timeseries(sorted(training_perf_unoptimized_timeseries)) + reshuffle_sorted_timeseries(sorted(eval_perf_unoptimized_timeseries))
	##
	fig = plt.figure()
	plt.plot(optimized_reshuffled_timeseries)
	plt.plot(unoptimized_reshuffled_timeseries)
	
	## Dump all columns as whitespace-separated text for external plotting tools.
	with open("sensitivity_with_vs_without_experiment.txt", 'w+') as f:
		str_builder = "x_range mlu_optimized_sorted mlu_unoptimized_sorted mlu_optimized_unsorted mlu_unoptimized_unsorted \n"
		for row in range(len(eval_traffic_matrices)):
			str_builder += "{} {:.4f} {:.4f} {:.4f} {:.4f} \n".format(row, sorted_optimized_mlu_timeseries[row], sorted_unoptimized_mlu_timeseries[row], optimized_reshuffled_timeseries[row], unoptimized_reshuffled_timeseries[row])
		f.write(str_builder)
	## Exporting pgf plot 
	#mpl.use("pgf")
	mpl.rcParams.update({"pgf.texsystem": "pdflatex", 'font.family': 'serif', 'text.usetex': True, 'pgf.rcfonts': False, 'text.latex.preamble': r'\newcommand{\mathdefault}[1][]{}'})
	#rcParams['text.latex.preamble'] = r'\newcommand{\mathdefault}[1][]{}'
	## Figure sized to fit a fraction of a LaTeX \linewidth in the paper.
	latex_linewidth_length_inches = 5.472
	fig_width = 0.30 * latex_linewidth_length_inches
	fig_height = 1.6
	fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(fig_width, fig_height))
	#fig, ax = plt.subplots()
	#fig.set_size_inches(w=fig_width, h=fig_height)
	# Set outlier marker properties
	#flierprops = dict(marker='d', markersize=1.5)
	axes[0].set_ylabel("PDF", fontsize=6.7, labelpad=1.1)
	axes[1].set_ylabel("PDF", fontsize=6.7, labelpad=1.1)
	#axes[0].set_xlabel("Sensitivity", fontsize=7)
	axes[1].set_xlabel("Sensitivity", fontsize=6.7, labelpad=1.1)
	axes[0].set_yticks([0, 0.05, 0.1, 0.15])
	axes[0].tick_params(axis='x', labelsize=6)
	axes[0].tick_params(axis='y', labelsize=6)
	axes[1].tick_params(axis='x', labelsize=6)
	axes[1].tick_params(axis='y', labelsize=6)
	axes[0].set_xlim(xmin=0, xmax=0.15)
	axes[1].set_xlim(xmin=0, xmax=0.15)
	axes[0].bar(edges_unoptimized, hist_unoptimized, width=bar_width, color=(0.2, 0.4, 0.6), align='edge')
	axes[1].bar(edges_optimized, hist_optimized, width=bar_width, color=(0.2, 0.4, 0.6), align='edge')
	axes[0].set_title('Not Desensitized', fontsize=6.8, pad=2)
	axes[1].set_title('Desensitized', fontsize=6.8, pad=2)
	axes[0].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
	#plt.tight_layout()
	plt.subplots_adjust(left=0.25, bottom=0.18, right=0.94, top=0.92, wspace=0.40, hspace=0.22)
	
	plt.savefig('sensitivity_pdf.pgf')
	plt.show()
	return


if __name__ == "__main__":
	print("Evaluation suite for FB's DCN interpod traces")

	## decide on the cluster name here
	cluster_name = "C"
	aggregation_window = 1
	cluster_alias, tm_snapshots_protobuf_filename, valid_network_ids_filename = derive_traffic_pbfilenames(cluster_name)

	reconfiguration_period = 300 # reconfiguration periodicity in seconds
	assert(reconfiguration_period >= 1)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	## Drop the first and last snapshot (presumably partial aggregation windows — confirm).
	traffic_matrices = traffic_matrices[1:-1]
	timestamps = timestamps[1:-1]
	print("Reading facebook cluster... COMPLETE")
	number_of_pods = len(valid_network_ids)
	## NOTE: `nblocks` is a module-level global read by the experiment functions above.
	nblocks = number_of_pods

	## Topology link number 
	per_node_pair_num_links = 15
	link_capacity = 5

	## Per-block parameters: every block gets enough links for
	## per_node_pair_num_links links to each of the other pods.
	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()

	# first check if we need to override all files, if not then check if this file already existed
	dcn_name = aurora_network.get_dcn_name()

	## now start forming the reconfiguration windows
	num_traffic_snapshots = len(traffic_matrices)
	## Start the window 5/24 of the way into the trace (presumably a time-of-day offset — confirm).
	starting_point = int((float(12 - 7) / 24) * num_traffic_snapshots)
	training_length = 80
	eval_length = 80
	training_traffic_matrices = traffic_matrices[starting_point : starting_point + training_length + 1]
	## Eval window includes the training window plus eval_length additional snapshots.
	eval_traffic_matrices = traffic_matrices[starting_point : starting_point + training_length + eval_length + 1]
	mlu_multiplier = 14.
	## Do the sensitivity analysis here
	#single_vs_many_routing_weights_experiment(aurora_network, all_paths, training_traffic_matrices, mlu_multiplier=mlu_multiplier, k_cluster=5)
	#exit()
	sensitivity_analysis(aurora_network, all_paths, training_traffic_matrices, eval_traffic_matrices, mlu_multiplier=mlu_multiplier, k_cluster=5)
	
