'''
# Responsible for reading in timeseries files from each fabric, plotting them, and exporting them as pgfplots
'''
## 3 main types of plots for mlu that we need just in case: decay rate, cdf of performances, percentile (sorted) 
import os, sys
import matplotlib.pyplot as plt
import numpy as np

sys.path.append("../")
from aurora_network import *
import proto.timeseries_performance_reader as performance_reader

'''
Utility functions
(START)
'''

## given an array of numbers, returns its CDF from 0 to 1
def get_normalized_cdf(array_of_values, nbins):
	# bin_edges
	sorted_input_array = sorted(array_of_values)
	bin_edges = [0] * nbins
	bin_edges[nbins - 1] = sorted_input_array[-1]
	bin_edges[0] = sorted_input_array[0]
	diff = sorted_input_array[-1] - sorted_input_array[0]
	if diff == 0:
		bin_edges = [sorted_input_array[0]] * nbins
		normalized_hist = [0] * nbins
		normalized_hist[-1] = 1
		return (bin_edges, normalized_hist)
	for i in range(1, nbins - 1, 1):
		bin_edges[i] = (float(i) / (nbins - 1) *  diff) + sorted_input_array[0]
	hist = [0] * nbins
	for value in sorted_input_array[1:-1]:
		index = int((value - sorted_input_array[0]) / diff * (nbins - 1))
		hist[index] += 1
	hist[-1] = 1
	histogram_sum = float(len(array_of_values))
	hist_cumsum = np.cumsum(hist)
	normalized_hist_cumsum = list([float(x)/histogram_sum for x in hist_cumsum])
	return (bin_edges, normalized_hist_cumsum)

#### Writes the timeseries to a variety of txt formats understandable by pgfplots
## Given a timeseries, downsamples it by taking every few samples
## Given a timeseries, downsamples it by taking every few samples
def downsample(timeseries, nsamples):
	"""Downsamples timeseries to nsamples evenly-strided entries.

	Always keeps the first and last entries. Raises ValueError when the
	input is shorter than nsamples (a real error even under python -O,
	unlike the old assert).
	"""
	orig_len = len(timeseries)
	if orig_len < nsamples:
		raise ValueError("Cannot downsample {} samples to {}".format(orig_len, nsamples))
	if nsamples == 1:
		## Avoids the division-by-zero in the stride computation below.
		return [timeseries[0]]
	factor = float(orig_len - 1) / float(nsamples - 1)
	return [timeseries[int(i * factor)] for i in range(nsamples)]


'''
Reads in the network id txt files for topology
'''
def read_network_id_files(filename):
	"""Reads a network-id txt file (one integer id per line, blanks ignored).

	Returns the set of ids as ints. Uses int() rather than long(): python 2
	ints auto-promote to arbitrary precision, and long() does not exist in
	python 3, so this is the portable spelling.
	"""
	network_id_set = set()
	with open(filename, 'r') as f:
		for line in f:
			content = line.strip()
			if len(content) > 0:
				network_id_set.add(int(content))
	return network_id_set

'''
Utility functions
(END)
'''

'''
Exporting common stats like mlu, lu90, lu50, hop count to txt files
Note that competitive ratio is not included at the moment
(START)
'''
## exports the sorted percentile file
def export_percentile_pgffile(export_filename, performance_timeseries, nbins=101):
	# first sort through each performance
	# the format of performance timeseries is : mlu, lu90, lu50, ahc, link_utilization_distribution
	str_builder = "percentile mlu lu90 lu50 ave_hop_count \n"
	mlu_timeseries = [x[0] for x in performance_timeseries]
	lu90_timeseries = [x[1] for x in performance_timeseries]
	lu50_timeseries = [x[2] for x in performance_timeseries]
	ahc_timeseries = [x[3] for x in performance_timeseries]
	competitive_ratio_timeseries = [x[4] for x in performance_timeseries]
	mlu_timeseries = downsample(sorted(mlu_timeseries), nbins)
	lu90_timeseries = downsample(sorted(lu90_timeseries), nbins)
	lu50_timeseries = downsample(sorted(lu50_timeseries), nbins)
	ahc_timeseries = downsample(sorted(ahc_timeseries), nbins)
	competitive_ratio_timeseries = downsample(sorted(competitive_ratio_timeseries), nbins)
	xbins = range(nbins)
	str_builder = "x_range mlu lu90 lu50 ahc competitive_ratio \n"
	for i in range(nbins):
		str_builder += "{} {} {} {} {} {} \n".format(xbins[i], mlu_timeseries[i], lu90_timeseries[i], lu50_timeseries[i], ahc_timeseries[i], competitive_ratio_timeseries[i])
	with open(export_filename, "w+") as f:
		f.write(str_builder)
	return

## decay rate for mlu only
def export_decay_rate_pgffile(export_filename, performance_timeseries, nbins=101):
	## export non log for now
	mlu_timeseries = [x[0] for x in performance_timeseries]
	#lu90_timeseries = [x[1] for x in performance_timeseries]
	#lu50_timeseries = [x[2] for x in performance_timeseries]
	#ahc_timeseries = [x[3] for x in performance_timeseries]
	#competitive_ratio_timeseries = [x[4] for x in performance_timeseries]
	## step 1 : find out the CDF of performances
	mlu_edges, mlu_cdf  = get_normalized_cdf(mlu_timeseries, nbins)
	mlu_decay_rate = [1. - x for x in mlu_cdf]
	#lu90_edges, lu90_cdf = get_normalized_cdf(lu90_timeseries, nbins)
	#lu90_decay_rate = [1. - x for x in lu90_cdf]
	#lu50_edges, lu50_cdf = get_normalized_cdf(lu50_timeseries, nbins)
	#lu50_decay_rate = [1. - x for x in lu50_cdf]
	#ahc_edges, ahc_cdf = get_normalized_cdf(ahc_timeseries, nbins)
	#ahc_decay_rate = [1. - x for x in ahc_cdf]
	#competitive_ratio_edges, competitive_ratio_cdf = get_normalized_cdf(competitive_ratio_timeseries, nbins)
	#competitive_ratio_decay_rate = [1. - x for x in competitive_ratio_cdf]
	#str_builder = "mlu_x mlu_y lu90_x lu90_y lu50_x lu50_y ahc_x ahc_y competitive_ratio_x competitive_ratio_y \n"
	str_builder = "mlu_x mlu_y \n"
	#for row in range(len(mlu_edges - 1)): ## remove the final entry, which could be zero and that forces the log value of a 0 to be weird
	#	str_builder += "{} {} \n".format(mlu_edges[row], mlu_decay_rate[row], lu90_edges[row], lu90_decay_rate[row], lu50_edges[row], lu50_decay_rate[row], ahc_edges[row], ahc_decay_rate[row], competitive_ratio_edges[row], competitive_ratio_decay_rate[row])
	for row in range(len(mlu_edges)): ## remove the final entry, which could be zero and that forces the log value of a 0 to be weird
		str_builder += "{} {} \n".format(mlu_edges[row], mlu_decay_rate[row])
	with open(export_filename, "w+") as f:
		f.write(str_builder)
	return


## jason :: what about performance ratio?
def export_performance_cdf_pgffile(export_filename, performance_timeseries, nbins=101):
	mlu_timeseries = [x[0] for x in performance_timeseries]
	lu90_timeseries = [x[1] for x in performance_timeseries]
	lu50_timeseries = [x[2] for x in performance_timeseries]
	ahc_timeseries = [x[3] for x in performance_timeseries]
	competitive_ratio_timeseries = [x[4] for x in performance_timeseries]

	## step 1 : find out the CDF of performances
	mlu_edges, mlu_cdf  = get_normalized_cdf(mlu_timeseries, nbins)
	mlu_cdf[-1] = 1.
	lu90_edges, lu90_cdf = get_normalized_cdf(lu90_timeseries, nbins)
	lu90_cdf[-1] = 1.
	lu50_edges, lu50_cdf = get_normalized_cdf(lu50_timeseries, nbins)
	lu50_cdf[-1] = 1.
	ahc_edges, ahc_cdf = get_normalized_cdf(ahc_timeseries, nbins)
	ahc_cdf[-1] = 1.
	competitive_ratio_edges, competitive_ratio_cdf = get_normalized_cdf(competitive_ratio_timeseries, nbins)
	competitive_ratio_cdf[-1] = 1.
	str_builder = "mlu_x mlu_y lu90_x lu90_y lu50_x lu50_y ahc_x ahc_y competitive_ratio_x competitive_ratio_y \n"
	for row in range(len(mlu_edges)):
		str_builder += "{} {} {} {} {} {} {} {} {} {} \n".format(mlu_edges[row], mlu_cdf[row], lu90_edges[row], lu90_cdf[row], lu50_edges[row], lu50_cdf[row], ahc_edges[row], ahc_cdf[row], competitive_ratio_edges[row], competitive_ratio_cdf[row])
	with open(export_filename, "w+") as f:
		f.write(str_builder)
	return

'''
(END)
Exporting common stats like mlu, lu90, lu50, hop count to txt files
'''
def process_performance_timeseries(performance_timeseries, performance_timestamps, ideal_performance_timeseries, ideal_performance_timestamps, aggregation_window, mlu_shift=0, ahc_shift=0, lu90_shift=0, lu50_shift=0):
	"""Aligns a performance timeseries against the ideal timeseries and
	recomputes each sample's mlu competitive ratio.

	Each input sample is a 5-tuple (mlu, lu90, lu50, ahc, _); the fifth
	element is discarded and replaced with mlu / ideal_mlu at the aligned
	timestamp. The optional *_shift values are added to the corresponding
	metric (used for per-experiment calibration offsets).

	Returns a new list of 5-tuples; the inputs are not mutated.
	"""
	num_samples = len(performance_timeseries)
	assert(len(performance_timestamps) == num_samples)
	assert(len(ideal_performance_timeseries) == len(ideal_performance_timestamps))
	assert(len(ideal_performance_timestamps) >= num_samples)
	## Sample offset between this series' start and the ideal series' start.
	index_offset = int((performance_timestamps[0] - ideal_performance_timestamps[0]) / aggregation_window)
	reshifted = []
	for sample_index, (mlu, lu90, lu50, ahc, _) in enumerate(performance_timeseries):
		ideal_mlu = ideal_performance_timeseries[index_offset + sample_index][0]
		reshifted.append((mlu + mlu_shift, lu90 + lu90_shift, lu50 + lu50_shift, ahc + ahc_shift, mlu / ideal_mlu))
	return reshifted




if __name__ == "__main__":
	## Driver script: loads per-experiment performance timeseries protobufs for
	## one Facebook DCN cluster, aligns them against the ideal timeseries,
	## exports percentile / decay-rate / CDF pgfplots txt tables, and shows
	## quick-look matplotlib plots.

	## decide on the cluster name here
	#cluster_name = "combined"
	cluster_name = "C"
	cluster_alias = "database"
	aggregation_window = 1
	## simply change the clusters here
	## Maps the cluster short name to its human-readable alias used in the
	## traffic-matrix filenames.
	if cluster_name == "A":
		cluster_alias = "database"
	elif cluster_name == "B":
		cluster_alias = "web"
	elif cluster_name == "C":
		cluster_alias = "hadoop"
	elif cluster_name == "combined":
		cluster_alias = "combined"
	elif cluster_name == "combinedclique":
		cluster_alias = "combinedclique"
	else:
		## NOTE(review): message is truncated — probably meant to include the
		## unrecognized cluster_name.
		print("Unrecognized ")
		sys.exit()
	## Hard-coded local paths to the traffic matrices and the valid pod list.
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)
	print("Running Analysis Tool for DCN : {}".format(cluster_name))
	valid_network_ids = read_network_id_files(valid_network_ids_filename)
	number_of_pods = len(valid_network_ids)
	nblocks = number_of_pods

	## Topology link number 
	per_node_pair_num_links = 15
	link_capacity = 5

	## Per-block-type topology parameters fed to AuroraNetwork: every block
	## gets enough links to reach each other pod with per_node_pair_num_links.
	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	dcn_name = aurora_network.get_dcn_name()
	## First candidate experiment list (fat-tree tapering sweep).
	## NOTE(review): this whole list is clobbered by the reassignment below —
	## only the second pb_files_to_import_raw takes effect; confirm intended.
	pb_files_to_import_raw = [#'agg1_crit_rmtoesensecrit_r1t7200k1_rmtesenscrit_r1t7200k1',
								#'agg1_crit_rmtoesensecrit_r1t7200k3_rmtesenscrit_r1t7200k3',
								#'agg1_crit_rmtoesensecrit_r1t7200k5_rmtesenscrit_r1t7200k5',
								#'agg1_crit_rmtoesensecrit_r1t7200k7_rmtesenscrit_r1t7200k7',
								#'agg1_crit_rmtoesensecrit_r1t7200k9_rmtesenscrit_r1t7200k9',
								#'agg1_crit_static_rmtesenscrit_r1t7200k1',
								#'agg1_crit_static_rmtesenscrit_r1t7200k3',
								#'agg1_crit_static_rmtesenscrit_r1t7200k5',
								#'agg1_crit_static_rmtesenscrit_r1t7200k7',
								#'agg1_crit_static_rmtesenscrit_r1t7200k9',
								#'agg1_crit_static_vlb',
								#'agg1_static_directpathonly',
								#'agg1_critk1_directpathonly',
								#'agg1_critk3_directpathonly',
								#'agg1_critk5_directpathonly',
								#'agg1_critk7_directpathonly',
								#'agg1_critk9_directpathonly',
								'fattree_ecmp_taper0p100',
								'fattree_ecmp_taper0p200',
								'fattree_ecmp_taper0p250',
								'fattree_ecmp_taper0p500',
								'fattree_ecmp_taper1p000',
								'ideal_performance_direct_hop_agg1.pb',
								]
	## Criticality parameter used to format the (commented-out) online entries.
	k = 5
	
	## Active experiment list (direct-path-only sweep over k).
	pb_files_to_import_raw = [#'online/reconfiglat500ms_rmtoesensecrit_r3600t300k{}_rmtesenscrit_r3600t300k{}'.format(k,k),
								#'online/reconfiglat500ms_rmtoesensecrit_r3600t300k{}_rmtesenscrit_r3600t300k{}'.format(k,k),
								#'online/reconfiglat500ms_rmtoesensecrit_r7200t300k{}_rmtesenscrit_r7200t300k{}'.format(k,k),
								#'online/reconfiglat000s_rmtoesensecrit_r3600t300k{}_rmtesenscrit_r3600t300k{}'.format(k,k),
								#'online/reconfiglat000s_rmtoesensecrit_r30t300k{}_rmtesenscrit_r30t300k{}'.format(k,k),
								#'online/reconfiglat000s_rmtoesensecrit_r3600t300k{}_rmtesenscrit_r3600t300k{}'.format(k,k),
								#'online/reconfiglat000s_rmtoesensecrit_r7200t300k{}_rmtesenscrit_r7200t300k{}'.format(k,k),
								#'online/reconfiglat000s_direct_toe_r300t300k1',
								#'agg1_crit_rmtoesensecrit_r1t7200k3_rmtesenscrit_r1t7200k3',
								#'agg1_crit_rmtoesensecrit_r300t300k1_rmtesenscrit_r300t300k1',
								#'agg1_crit_rmtoesensecrit_r300t300k3_rmtesenscrit_r300t300k3',
								#'agg1_crit_rmtoesensecrit_r300t300k5_rmtesenscrit_r300t300k5',
								#'agg1_crit_rmtoesensecrit_r300t300k7_rmtesenscrit_r300t300k7',
								#'agg1_crit_rmtoesensecrit_r300t300k9_rmtesenscrit_r300t300k9',
								#'agg1_crit_static_rmtesenscrit_r1t7200k1',
								#'agg1_crit_static_rmtesenscrit_r1t7200k3',
								#'agg1_crit_static_rmtesenscrit_r1t7200k5',
								#'agg1_crit_static_rmtesenscrit_r1t7200k7',
								#'agg1_crit_static_rmtesenscrit_r1t7200k9',
								#'agg1_crit_static_vlb',
								'agg1_static_directpathonly',
								'agg1_critk1_directpathonly',
								'agg1_critk3_directpathonly',
								'agg1_critk5_directpathonly',
								'agg1_critk7_directpathonly',
								'agg1_critk9_directpathonly',
								]
	## The ideal timeseries must always be loaded last: the processing loop
	## below takes loaded_performance_timeseries[-1] as the ideal baseline.
	ideal_performance_pb_filename = 'ideal_performance_direct_hop_agg{}.pb'.format(aggregation_window)
	if ideal_performance_pb_filename not in pb_files_to_import_raw:
		print("Manually including ideal performance")
		pb_files_to_import_raw.append(ideal_performance_pb_filename)

	## processed = raw filename prefixed with the per-DCN directory; fail fast
	## if any expected protobuf is missing.
	pb_files_to_import_processed = ["./{}/{}".format(dcn_name, x) for x in pb_files_to_import_raw]
	for processed_pb_filename in pb_files_to_import_processed:
		if not os.path.isfile(processed_pb_filename):
			raise Exception("File is not present : {}".format(processed_pb_filename))
	
	## Per-experiment calibration offsets added to each metric during
	## processing (empty dict means no shift for any experiment).
	pb_file_mlu_shift_options = {}
	# this version for combinedclique
	## NOTE(review): the ahc shift dict is reassigned several times below; only
	## the final (empty) assignment takes effect — confirm intended.
	pb_file_ahc_shift_options = {'agg1_crit_rmtoesensecrit_r1t7200k1_rmtesenscrit_r1t7200k1' : -0.51,
								'agg1_crit_rmtoesensecrit_r1t7200k3_rmtesenscrit_r1t7200k3' : -0.49,
								'agg1_crit_rmtoesensecrit_r1t7200k5_rmtesenscrit_r1t7200k5' : -0.48,
								'agg1_crit_rmtoesensecrit_r1t7200k7_rmtesenscrit_r1t7200k7' : -0.49,
								'agg1_crit_rmtoesensecrit_r1t7200k9_rmtesenscrit_r1t7200k9' : -0.49,
								'agg1_crit_static_rmtesenscrit_r1t7200k1' : -0.20,
								'agg1_crit_static_rmtesenscrit_r1t7200k3' : -0.22,
								'agg1_crit_static_rmtesenscrit_r1t7200k5' : -0.23,
								'agg1_crit_static_rmtesenscrit_r1t7200k7' : -0.23,
								'agg1_crit_static_rmtesenscrit_r1t7200k9' : -0.23,
								}
	# this version for A
	pb_file_ahc_shift_options = {'agg1_crit_rmtoesensecrit_r1t7200k1_rmtesenscrit_r1t7200k1' : 0.06,
								'agg1_crit_rmtoesensecrit_r1t7200k3_rmtesenscrit_r1t7200k3' : 0.07,
								'agg1_crit_rmtoesensecrit_r1t7200k5_rmtesenscrit_r1t7200k5' : 0.07,
								'agg1_crit_rmtoesensecrit_r1t7200k7_rmtesenscrit_r1t7200k7' : 0.08,
								'agg1_crit_rmtoesensecrit_r1t7200k9_rmtesenscrit_r1t7200k9' : 0.075,
								'agg1_crit_static_rmtesenscrit_r1t7200k1' : 0.24,
								'agg1_crit_static_rmtesenscrit_r1t7200k3' : 0.25,
								'agg1_crit_static_rmtesenscrit_r1t7200k5' : 0.256,
								'agg1_crit_static_rmtesenscrit_r1t7200k7' : 0.246,
								'agg1_crit_static_rmtesenscrit_r1t7200k9' : 0.267,}
	# this version for C
	
	pb_file_ahc_shift_options = {'agg1_crit_static_rmtesenscrit_r1t7200k1' : 0.15,
								'agg1_crit_static_rmtesenscrit_r1t7200k3' : 0.17,
								'agg1_crit_static_rmtesenscrit_r1t7200k5' : 0.18,
								'agg1_crit_static_rmtesenscrit_r1t7200k7' : 0.185,
								'agg1_crit_static_rmtesenscrit_r1t7200k9' : 0.18,}		
	pb_file_ahc_shift_options = {}			
	# this version for combined
	'''
	pb_file_ahc_shift_options = {'agg1_crit_rmtoesensecrit_r1t7200k1_rmtesenscrit_r1t7200k1' : -0.08,
								'agg1_crit_rmtoesensecrit_r1t7200k3_rmtesenscrit_r1t7200k3' : -0.06,
								'agg1_crit_rmtoesensecrit_r1t7200k5_rmtesenscrit_r1t7200k5' : -0.07,
								'agg1_crit_rmtoesensecrit_r1t7200k7_rmtesenscrit_r1t7200k7' : -0.06,
								'agg1_crit_rmtoesensecrit_r1t7200k9_rmtesenscrit_r1t7200k9' : -0.05,
								'agg1_crit_static_rmtesenscrit_r1t7200k1' : 0.06,
								'agg1_crit_static_rmtesenscrit_r1t7200k3' : 0.07,
								'agg1_crit_static_rmtesenscrit_r1t7200k5' : 0.09,
								'agg1_crit_static_rmtesenscrit_r1t7200k7' : 0.09,
								'agg1_crit_static_rmtesenscrit_r1t7200k9' : 0.11,}			
	'''		
	pb_file_lu90_shift_options = {}
	pb_file_lu50_shift_options = {}
	## Plot legend labels; the reassignment below makes the raw filenames the
	## effective labels.
	aliases = ['Every Day',
				'Every hour',
				'Every 5 mins',
				'Ideal',
				]
	aliases = pb_files_to_import_raw
	loaded_performance_timeseries = []
	loaded_timestamps = []
	## processed files indicate those with the directories attached, while raw does not
	## Start by loading files
	for pb_file_processed, pb_file_raw in zip(pb_files_to_import_processed, pb_files_to_import_raw):
		print("Loading... {}".format(pb_file_raw))
		performance_timeseries, timestamps = performance_reader.import_timeseries_protobuf(pb_file_processed, aurora_network)
		loaded_performance_timeseries.append(performance_timeseries)
		loaded_timestamps.append(timestamps)
		print("Completed Loading... {}".format(pb_file_raw))

	print("\n\nFinished loading all files...\n\n")
	## The last-loaded series is the ideal baseline (guaranteed by the
	## "manually including ideal performance" append above).
	ideal_performance_timeseries = loaded_performance_timeseries[-1]
	ideal_performance_timestamps = loaded_timestamps[-1]
	processed_performance_timeseries = []
	## Next, process files by loading files
	## Aligns each experiment against the ideal series and applies any
	## per-experiment calibration shifts.
	for pb_file_processed, pb_file_raw, performance_timeseries, performance_timestamps in zip(pb_files_to_import_processed, pb_files_to_import_raw, loaded_performance_timeseries, loaded_timestamps):
		print("Processing... {}".format(pb_file_raw))
		mlu_shift_val = 0
		if pb_file_raw in pb_file_mlu_shift_options:
			mlu_shift_val = pb_file_mlu_shift_options[pb_file_raw]
		lu90_shift_val = 0
		if pb_file_raw in pb_file_lu90_shift_options:
			lu90_shift_val = pb_file_lu90_shift_options[pb_file_raw]
		lu50_shift_val = 0
		if pb_file_raw in pb_file_lu50_shift_options:
			lu50_shift_val = pb_file_lu50_shift_options[pb_file_raw]
		ahc_shift_val = 0
		if pb_file_raw in pb_file_ahc_shift_options:
			ahc_shift_val = pb_file_ahc_shift_options[pb_file_raw]
		processed_and_shifted_performance_timeseries = process_performance_timeseries(performance_timeseries,
																					performance_timestamps,
																					ideal_performance_timeseries,
																					ideal_performance_timestamps,
																					aggregation_window,
																					mlu_shift=mlu_shift_val, 
																					ahc_shift=ahc_shift_val, 
																					lu90_shift=lu90_shift_val, 
																					lu50_shift=lu50_shift_val)
		processed_performance_timeseries.append(processed_and_shifted_performance_timeseries)
		print("Done.")
	
	## Finally process the files
	## Exports the three pgfplots txt tables (percentile, decay rate, CDF)
	## for each experiment under ./fb_summary_results/<dcn_name>/.
	if not os.path.isdir("./fb_summary_results"):
		os.mkdir("./fb_summary_results")
	if not os.path.isdir("./fb_summary_results/{}".format(dcn_name)):
		os.mkdir("./fb_summary_results/{}".format(dcn_name))
	export_txt_directory = "./fb_summary_results/{}".format(dcn_name)
	for pb_file_processed, pb_file_raw, performance_timeseries in zip(pb_files_to_import_processed, pb_files_to_import_raw, processed_performance_timeseries):
		## Drop the .pb extension (if any) to form the export file basename.
		export_txt_filename = pb_file_raw.split('.')[0]
		export_txt_filename = export_txt_filename.strip()
		export_percentile_pgffile(export_txt_directory + "/" + export_txt_filename + "_percentile.txt", performance_timeseries)
		export_decay_rate_pgffile(export_txt_directory + "/" + export_txt_filename + "_decayrate.txt", performance_timeseries)
		export_performance_cdf_pgffile(export_txt_directory + "/" + export_txt_filename + "_cdf.txt", performance_timeseries)
	

	
	## Quick-look plot 1: sorted mlu (percentile curve) per experiment.
	fig = plt.figure()
	index = 0
	for performance_timeseries in processed_performance_timeseries:
		mlu_timeseries = None
		mlu_timeseries = [x[0] for x in performance_timeseries]
		plt.plot(sorted(mlu_timeseries))
		#plt.plot(mlu_timeseries)
		index += 1
	plt.legend(aliases)

	## Quick-look plot 2: p99 mlu bar chart per experiment.
	## NOTE(review): p999_val is computed but never plotted — confirm intended.
	fig = plt.figure()
	bar_vals = []
	x_vals = range(len(processed_performance_timeseries))
	for performance_timeseries in processed_performance_timeseries:
		mlu_timeseries = sorted([x[0] for x in performance_timeseries])
		p99_val = mlu_timeseries[int(0.99 * len(mlu_timeseries))]
		p999_val = mlu_timeseries[int(0.999 * len(mlu_timeseries))]
		bar_vals.append(p99_val)
	plt.bar(x_vals, bar_vals)

	## Quick-look plot 3: sorted mlu competitive ratio per experiment.
	fig = plt.figure()
	for performance_timeseries in processed_performance_timeseries:
		competitive_ratio = sorted([x[4] for x in performance_timeseries])
		plt.plot(sorted(competitive_ratio))
	plt.legend(aliases)


	## Quick-look plot 4: sorted average hop count per experiment.
	fig = plt.figure()
	index = 0
	for performance_timeseries in processed_performance_timeseries:
		ahc_timeseries = None
		#if index in (2,3): 
		#	mlu_timeseries = [x[0] * 1.05 for x in performance_timeseries]
		#else:
		ahc_timeseries = [x[3] for x in performance_timeseries]
		plt.plot(sorted(ahc_timeseries))
		index += 1
	plt.legend(aliases)
	

	plt.show()

	## Redundant: the script ends here anyway once plt.show() returns.
	exit()




	