'''
Runs the ToE and TE simulation for Facebook's DCN traces with offline training: there is no
traffic uncertainty in the evaluations, because the training window is exactly the same as the
evaluation window.
'''
import sys, os, copy
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *

## imports used for plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
import seaborn as sns

## Imports the multiprocessing modules
from multiprocessing import Pool          

## Flag that decides whether results are recomputed from scratch (True) or read back from existing result files
OVERRIDE_ALL_FILES = False
NUMBER_OF_PROCESSES = 6


def flatten_traffic_matrix(tm, num_nodes):
	"""Flatten a traffic matrix into a 1-D numpy vector of its off-diagonal
	entries, taken in row-major order (self-traffic entries are skipped)."""
	off_diagonal = [tm[src][dst]
					for src in range(num_nodes)
					for dst in range(num_nodes)
					if src != dst]
	return np.array(off_diagonal)

def unflatten_traffic_vector(vector, nblocks):
	"""Inverse of flatten_traffic_matrix: rebuild an nblocks x nblocks matrix
	from a row-major vector of off-diagonal entries. The diagonal stays 0."""
	tm = np.zeros((nblocks, nblocks))
	entries = iter(vector)
	for src in range(nblocks):
		for dst in range(nblocks):
			if src != dst:
				tm[src][dst] = next(entries)
	return tm

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Load the Facebook DCN traffic matrices from a protobuf trace file.

	Filters the trace to the pod ids listed in network_ids_filename and
	converts each entry from bytes-per-window to Mbps in place.

	Returns (traffic_matrices, valid_network_ids, timestamps).
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	## Convert bytes per aggregation window into Mbps (x8 bits, /1E6, /window seconds)
	for matrix in traffic_matrices:
		size = len(matrix)
		for row in range(size):
			for col in range(size):
				matrix[row][col] = float(matrix[row][col]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

def dump_timeseries_performance_to_protobuf(export_filename, timestamps, performance_timeseries_list):
	"""Serialize per-snapshot performance results into a PerformanceTimeSeries
	protobuf and write it to export_filename.

	Each element of performance_timeseries_list is indexed positionally:
	index 3 is the average hop count, index 4 the link-utilization
	distribution as (lu, num_links, src, dst) tuples.
	"""
	assert(len(timestamps) == len(performance_timeseries_list))
	import proto.timeseries_performance_pb2 as performance_pb
	series_proto = performance_pb.PerformanceTimeSeries()
	for snapshot_ts, perf in zip(timestamps, performance_timeseries_list):
		entry = series_proto.entries.add()
		entry.timestamp = snapshot_ts
		entry.average_hop_count = perf[3]
		for lu, num_links, src, dst in perf[4]:
			lu_proto = entry.link_utilizations.add()
			lu_proto.src = src
			lu_proto.dst = dst
			lu_proto.link_utilization = lu
			lu_proto.num_links = num_links
	with open(export_filename, "wb") as f:
		f.write(series_proto.SerializeToString())
	return

def generate_decay_rate(timeseries_performance, nbins=100):
	"""Compute the complementary CDF ("decay rate") of a timeseries.

	Returns (probability, bin_edges): probability[i] is the fraction of
	samples lying beyond the i-th histogram boundary, with a leading 1
	prepended so the list lines up with the nbins + 1 bin edges.
	"""
	counts, bin_edges = np.histogram(timeseries_performance, bins=nbins)
	total = sum(counts)
	cumulative = np.cumsum([float(c) / total for c in counts])
	decay = [1] + [1. - fraction for fraction in cumulative]
	return decay, bin_edges

def downsample(timeseries, nsamples):
	"""Sort *timeseries* and pick nsamples evenly spaced values from it.

	For nsamples >= 2, the smallest and largest values are always included,
	so the sample preserves the extremes of the series.

	Args:
		timeseries: iterable of comparable values; must contain >= nsamples items.
		nsamples: number of representative points to keep (>= 1).

	Returns:
		A sorted list of nsamples values drawn from the input.
	"""
	sorted_timeseries = sorted(timeseries)
	orig_len = len(sorted_timeseries)
	assert(orig_len >= nsamples and nsamples >= 1)
	## BUGFIX: the stride below divides by (nsamples - 1), which raised
	## ZeroDivisionError for nsamples == 1; return the smallest value instead
	## (the value index 0 would have selected).
	if nsamples == 1:
		return [sorted_timeseries[0]]
	stride = float(orig_len - 1) / float(nsamples - 1)
	## int() truncation maps i == nsamples - 1 exactly onto orig_len - 1.
	return [sorted_timeseries[int(i * stride)] for i in range(nsamples)]

## Worker routine: evaluates the MLU timeseries over a slice of the evaluation window.
def evaluate_performance_worker(aurora_network, 
								all_paths,
								evaluation_window,
								eval_traffic_matrices,
								logical_topology_routing_weights_pairs):
	"""Evaluate MLU timeseries for each (logical topology, routing weights) pair.

	For every pair, two MLU timeseries over evaluation_window (an inclusive
	(start, end) index range into eval_traffic_matrices) are produced:
	  1. MLU under the pair's precomputed static routing weights, and
	  2. MLU under ideal routing weights recomputed per traffic snapshot.

	Returns a flat list: [static_0, ideal_0, static_1, ideal_1, ...].
	"""
	## NOTE: prior version had unused locals (nblocks, mlu_performance_timeseries)
	## and a local named ideal_te that shadowed the module alias imported at the
	## top of the file; the local engine is now named te_engine.
	window_length = evaluation_window[1] - evaluation_window[0] + 1
	te_engine = ideal_traffic_engineer.IdealTrafficEngineer(aurora_network, all_paths)
	final_mlu_timeseries = []
	config_id = 1
	for logical_topology_adj_matrix, routing_weights in logical_topology_routing_weights_pairs:
		static_routing_mlu = [0] * window_length
		perfect_routing_mlu = [0] * window_length
		for tm_index in range(evaluation_window[0], evaluation_window[1] + 1, 1):
			## MLU with the fixed (static) routing weights of this configuration
			mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, 
																									eval_traffic_matrices[tm_index], 
																									logical_topology_adj_matrix, 
																									routing_weights)
			static_routing_mlu[tm_index - evaluation_window[0]] = mlu
			## MLU when routing is re-optimized for this exact traffic snapshot
			ideal_routing_weights = te_engine.compute_path_weights(logical_topology_adj_matrix, eval_traffic_matrices[tm_index])
			mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, 
																									eval_traffic_matrices[tm_index], 
																									logical_topology_adj_matrix, 
																									ideal_routing_weights)
			perfect_routing_mlu[tm_index - evaluation_window[0]] = mlu
		final_mlu_timeseries.append(static_routing_mlu)
		final_mlu_timeseries.append(perfect_routing_mlu)
		print("config_id {} max static mlu {}".format(config_id, max(static_routing_mlu)))
		print("config_id {} max perfect mlu {}".format(config_id, max(perfect_routing_mlu)))
		config_id += 1
	return final_mlu_timeseries


def unpack_arguments(args):
	"""Adapter for multiprocessing.Pool.map(): unpack one packed worker
	argument tuple and forward it to evaluate_performance_worker."""
	aurora_network, all_paths, eval_window, eval_traffic_matrices, topology_routing_pairs = args[:5]
	return evaluate_performance_worker(aurora_network,
										all_paths,
										eval_window,
										eval_traffic_matrices,
										topology_routing_pairs)


if __name__ == "__main__":
	print("Evaluation suite for FB's DCN interpod traces")
	## -f on the command line forces recomputation instead of loading cached results
	if len(sys.argv) > 1:
		for argument in sys.argv[1:]:
			if argument[:2] == "-f":
				OVERRIDE_ALL_FILES = True

	## decide on the cluster name here
	cluster_name = "B"
	aggregation_window = 1
	## map Facebook cluster ids to their workload aliases
	cluster_name_to_alias = {
		"A" : "database",
		"B" : "web",
		"C" : "hadoop",
		"combined" : "combined",
	}
	if cluster_name not in cluster_name_to_alias:
		## BUGFIX: the previous error message was truncated ("Unrecognized ")
		print("Unrecognized cluster name: {}".format(cluster_name))
		sys.exit()
	cluster_alias = cluster_name_to_alias[cluster_name]
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)

	reconfiguration_period = 300 # reconfiguration periodicity in seconds
	assert(reconfiguration_period >= 1)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	## drop the first and last snapshots -- presumably they cover partial
	## aggregation windows; TODO confirm against the trace exporter
	traffic_matrices = traffic_matrices[1:-1]
	timestamps = timestamps[1:-1]
	print("Reading facebook cluster... COMPLETE")
	number_of_pods = len(valid_network_ids)
	nblocks = number_of_pods

	## Topology link count and per-link capacity
	per_node_pair_num_links = 15
	link_capacity = 5

	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()

	# first check if we need to override all files, if not then check if this file already existed
	dcn_name = aurora_network.get_dcn_name()

	## now start forming the reconfiguration windows
	num_traffic_snapshots = len(traffic_matrices)
	## training window starts 5/24 of the way into the trace (hours 7 -> 12);
	## presumably a 24h trace -- TODO confirm
	starting_point = int((float(12 - 7) / 24) * num_traffic_snapshots)
	training_length = 80  # training window length, in number of snapshots
	training_traffic_matrices = traffic_matrices[starting_point : starting_point + training_length + 1]
	## offline training: evaluate on exactly the window that was trained on
	eval_traffic_matrices = training_traffic_matrices

	## Define the number of representative traffic matrices (i.e. number of points to describe the hull)
	num_ks = [training_length]

	evaluation_parameters_and_arguments_lists = []
	all_logical_topology_routing_weights_pairs = []
	for k in num_ks:
		## Cluster the training window into k representative traffic matrices
		training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
		crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
		representative_traffic_vectors = crit_mat.train(number_of_clusters=max(k, 1))
		representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]
		## Define the Topology Engineer algorithm.
		## NOTE(review): the sensitivity-based ToE/TE instances below are immediately
		## superseded by the critical-demands variants; they are kept only to make
		## switching between experiment flavors easy.
		toe = robust_multi_traffic_topology_engineer_sensitivity.RobustMultiTrafficTopologyEngineerSensitivity(aurora_network, 
				reconfiguration_period, 
				reconfiguration_period,
				all_paths, 
				traffic_matrices, 
				k, 
				minimize_multihop=True, 
				lower_bound_sensitivity=0.009, 
				upper_bound_sensitivity_relaxation=1)
		toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
				reconfiguration_period, 
				reconfiguration_period, 
				all_paths, 
				traffic_matrices, 
				k)

		## Define the Traffic Engineer algorithm for the topology engineering class
		te = robust_multi_cluster_traffic_engineer_sensitivity.RobustMultiClusterTrafficEngineerSensitivity(aurora_network, 
				all_paths, 
				reconfiguration_period, 
				reconfiguration_period, 
				k, 
				reduce_multihop=True, 
				sensitivity_relaxation=1, 
				minimum_sensitivity=0.009)
		te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
				all_paths, 
				reconfiguration_period, 
				reconfiguration_period, 
				k, 
				reduce_multihop=True)
		## te2 relaxes the MLU constraint (mlu_relaxation=1.1); used for the
		## multi-routing-weights topology below
		te2 = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
				all_paths, 
				reconfiguration_period, 
				reconfiguration_period, 
				k, 
				reduce_multihop=True,
				mlu_relaxation=1.1)
		toe_multi_routing_weights = multi_traffic_topology_engineer_many_routing_weight_sets.MultiTrafficTopologyEngineerDifferentRoutingWeightSets(aurora_network, 
				reconfiguration_period, 
				reconfiguration_period, 
				all_paths, 
				eval_traffic_matrices, 
				k)

		single_traffic_weight_logical_topology, _ = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
		multi_traffic_weight_logical_topology, _ = toe_multi_routing_weights.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
		print("k - {} Starting with single traffic weight optimization".format(k))
		single_traffic_weight_routing_weight = te.compute_path_weights(single_traffic_weight_logical_topology, copy.deepcopy(representative_traffic_matrices))
		print(single_traffic_weight_logical_topology)
		print(multi_traffic_weight_logical_topology)
		print("k - {} Starting with multi traffic weight optimization".format(k))
		multi_traffic_weight_routing_weight = te2.compute_path_weights(multi_traffic_weight_logical_topology, copy.deepcopy(representative_traffic_matrices))

		single_weight_topology_routing_pair = (single_traffic_weight_logical_topology, single_traffic_weight_routing_weight)
		## BUGFIX: pair the multi-routing-weight topology with the routing weights
		## computed for it. Previously the single-weight routing weights were paired
		## here and multi_traffic_weight_routing_weight was computed but never used.
		multi_weight_topology_routing_pair = (multi_traffic_weight_logical_topology, multi_traffic_weight_routing_weight)
		all_logical_topology_routing_weights_pairs += [single_weight_topology_routing_pair, multi_weight_topology_routing_pair]

	# start splitting the eval window of the workers
	num_eval_tms = len(eval_traffic_matrices)
	## BUGFIX: use floor division -- "/" yields a float in Python 3, which breaks
	## the integer window arithmetic below (range()/tuple index offsets).
	num_snapshots_per_worker = num_eval_tms // NUMBER_OF_PROCESSES
	leftover_snapshots = num_eval_tms % NUMBER_OF_PROCESSES
	offset = 0
	worker_arguments = []
	for worker_index in range(NUMBER_OF_PROCESSES):
		## the first (num_eval_tms % NUMBER_OF_PROCESSES) workers take one extra snapshot
		if leftover_snapshots > 0:
			eval_window = (offset, offset + num_snapshots_per_worker)
			leftover_snapshots -= 1
			offset += (num_snapshots_per_worker + 1)
		else:
			eval_window = (offset, offset + num_snapshots_per_worker - 1)
			offset += num_snapshots_per_worker
		worker_arguments.append((aurora_network, all_paths, eval_window, eval_traffic_matrices, all_logical_topology_routing_weights_pairs))

	## Begin evaluation: fan out across a process pool, or run serially
	output_results = None
	if NUMBER_OF_PROCESSES > 1:
		pool = Pool(processes=NUMBER_OF_PROCESSES)
		output_results = pool.map(unpack_arguments, worker_arguments)
		pool.close()
		pool.join()
	else:
		# go through the evaluations one by one
		output_results = [unpack_arguments(x) for x in worker_arguments]

	## Stitch each configuration's per-worker MLU timeseries back together, in
	## worker order (workers cover consecutive, non-overlapping windows).
	num_configurations = 2*len(all_logical_topology_routing_weights_pairs)
	final_combined_mlu_perf = [[] for _ in range(num_configurations)]
	for config_id in range(num_configurations):
		for worker_id in range(NUMBER_OF_PROCESSES):
			final_combined_mlu_perf[config_id] += output_results[worker_id][config_id]

	fig = plt.figure()
	legends = ['ToE Single Routing Weight + TE', 'ToE Single Routing Weight + Perfect Routing', 'ToE Multi Routing Weights + TE', 'ToE Multi Routing Weights + Perfect Routing']
	aliases = ['single_static', 'single_ideal', 'multi_static', 'multi_ideal']
	to_export_results_decay_rate = {}
	to_export_results = {}
	## Plot each configuration's MLU timeseries and stage both the raw series
	## and its decay rate (complementary CDF) for export.
	for index, mlu_timeseries in enumerate(final_combined_mlu_perf):
		probability, mlu_bin_edges = generate_decay_rate(mlu_timeseries, nbins=100)
		plt.plot(mlu_timeseries)
		to_export_results_decay_rate[aliases[index]] = (mlu_bin_edges, probability)
		to_export_results[aliases[index]] = (range(len(mlu_timeseries)), mlu_timeseries)
	plt.legend(legends)
	plt.show()

	## Export the raw MLU timeseries as a whitespace-separated table.
	with open("single_vs_many_routingweights_experiment_mlu.txt", 'w+') as f:
		nrows = 0
		str_builder = "x_range "
		for alias in aliases:
			str_builder += "{}_y ".format(alias)
			## BUGFIX: check each alias's own series length. The previous code used
			## the stale loop variable `index` (left over from the plotting loop),
			## so only one series was ever length-checked.
			if nrows == 0:
				nrows = len(to_export_results[alias][0])
				assert(nrows == len(to_export_results[alias][1]))
			else:
				assert(nrows == len(to_export_results[alias][0]) and nrows == len(to_export_results[alias][1]))
		str_builder += "\n"
		for row in range(nrows):
			str_builder += "{} ".format(row)
			for alias in aliases:
				str_builder += "{} ".format(to_export_results[alias][1][row])
			str_builder += "\n"
		f.write(str_builder)
	exit()

	## NOTE(review): unreachable -- the exit() above terminates the script before
	## this decay-rate export runs; remove the exit() if this output is wanted.
	with open("single_vs_multiple_routingweights_experiment.txt", 'w+') as f:
		nrows = 0
		str_builder = ""
		for alias in aliases:
			str_builder += "{}_x {}_y ".format(alias, alias)
			## BUGFIX: same stale-`index` defect as above; validate per alias.
			if nrows == 0:
				nrows = len(to_export_results_decay_rate[alias][0])
				assert(nrows == len(to_export_results_decay_rate[alias][1]))
			else:
				assert(nrows == len(to_export_results_decay_rate[alias][0]) and nrows == len(to_export_results_decay_rate[alias][1]))
		str_builder += "\n"
		for row in range(nrows):
			for alias in aliases:
				str_builder += "{} {} ".format(to_export_results_decay_rate[alias][0][row], to_export_results_decay_rate[alias][1][row])
			str_builder += "\n"
		f.write(str_builder)
	exit()
