import sys, os, copy
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *

## imports used for plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
import seaborn as sns

NUM_WORKERS = 6
OVERRIDE_ALL_FILES = False


def flatten_traffic_matrix(tm, num_nodes):
	"""Flatten the off-diagonal entries of a traffic matrix into a 1-D numpy array.

	Entries are emitted in row-major order, skipping the diagonal (self-traffic).
	"""
	return np.array([tm[src][dst]
					 for src in range(num_nodes)
					 for dst in range(num_nodes)
					 if src != dst])

def unflatten_traffic_vector(vector, nblocks):
	"""Rebuild an nblocks x nblocks traffic matrix from a flattened off-diagonal vector.

	Inverse of flatten_traffic_matrix: the diagonal is left at zero and the vector
	entries are consumed in row-major order.
	"""
	tm = np.zeros((nblocks, nblocks))
	index = 0
	for src in range(nblocks):
		for dst in range(nblocks):
			if src == dst:
				continue
			tm[src][dst] = vector[index]
			index += 1
	return tm

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Load Facebook DCN traffic matrices from a protobuf trace.

	Reads the valid network (pod) ids, loads the traffic-matrix timeseries restricted
	to those ids, then rescales every entry in place from bytes per aggregation window
	to a rate (x8 bits, /1E6, /window seconds — i.e. Mbps, assuming byte counts; TODO confirm units).

	Returns (traffic_matrices, valid_network_ids, timestamps).
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	for tm in traffic_matrices:
		size = len(tm)
		for row in range(size):
			for col in range(size):
				# Keep the exact original expression order to preserve float results.
				tm[row][col] = float(tm[row][col]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

def dump_timeseries_performance_to_protobuf(export_filename, timestamps, performance_timeseries_list):
	"""Serialize a performance timeseries to a protobuf file.

	Each entry records its timestamp, the average hop count (performance tuple
	index 3), and the per-link utilization distribution (index 4, a sequence of
	(link_utilization, num_links, src, dst) tuples).
	"""
	assert(len(timestamps) == len(performance_timeseries_list))
	import proto.timeseries_performance_pb2 as performance_pb
	series_proto = performance_pb.PerformanceTimeSeries()
	for timestamp, perf in zip(timestamps, performance_timeseries_list):
		entry = series_proto.entries.add()
		entry.timestamp = timestamp
		entry.average_hop_count = perf[3]
		for link_utilization, num_links, src, dst in perf[4]:
			lu_proto = entry.link_utilizations.add()
			lu_proto.src = src
			lu_proto.dst = dst
			lu_proto.link_utilization = link_utilization
			lu_proto.num_links = num_links
	with open(export_filename, "wb") as f:
		f.write(series_proto.SerializeToString())

'''
Parallel evaluation worker functions
'''
def parallel_evaluation_worker(aurora_network, nblocks, evaluation_traffic_matrices, logical_topology, routing_weights):
	"""Evaluate every traffic matrix in a reconfiguration window against a fixed topology.

	routing_weights of None means no TE weights were computed, in which case the
	direct single-hop evaluator is used; otherwise the multi-path evaluator applies
	the given routing weights.

	Returns a list of per-snapshot performance results, index-aligned with
	evaluation_traffic_matrices.
	"""
	performance_timeseries = []
	# Idiomatic append loop replaces the original zip(list, range(len(...)))
	# slot-filling pattern; the resulting list is identical.
	for tm in evaluation_traffic_matrices:
		if routing_weights is None:
			perf = evaluate_singlehop_performance(aurora_network, nblocks, logical_topology, tm)
		else:
			perf = evaluate_traffic_matrix_performance(aurora_network, tm, logical_topology, routing_weights)
		performance_timeseries.append(perf)
	return performance_timeseries

def unpack_arguments_for_parallel_evaluation_worker(args):
	"""Pool.map adapter: expand the packed 5-tuple into a parallel_evaluation_worker call."""
	return parallel_evaluation_worker(*args)
'''
Parallel evaluation worker functions
'''

## filenaming convention
# dcn name
# aggregation_period 
# traffic_prediction_mechanism (avg or crit), number of k's
# ToE (name of ToE, number of topologies over the eval period)
# TE (name of TE, number of routing weights over the eval period)
# results_file_base_directory - the base directory in which all results files are located
def multi_reconfig_evaluation(results_file_base_directory,
								aggregation_window,
								aurora_network,
								topology_engineering_algorithm, 
								traffic_engineering_algorithm, 
								all_traffic_matrices,
								all_timestamps,
								all_paths,
								reconfig_length,
								training_length,
								numK=5,
								override_all_files=False):
	"""Evaluate a (topology engineering, traffic engineering) pair over a traffic trace
	with periodic reconfiguration.

	The trace is consumed in consecutive reconfiguration windows of up to
	`reconfig_length` snapshots. Each window's topology and routing weights are derived
	from the `training_length` snapshots immediately preceding it, summarized into
	`numK` representative "critical" traffic matrices via CritMat clustering. Window
	evaluation is fanned out to a multiprocessing pool of NUM_WORKERS.

	Results are cached to a protobuf file (named by file_naming_tool); if the file
	already exists and `override_all_files` is False, the cached results are loaded
	and returned without re-evaluating.

	Args:
		results_file_base_directory: directory holding cached result protobufs.
		aggregation_window: trace aggregation period (used only for file naming here).
		aurora_network: network under evaluation.
		topology_engineering_algorithm: ToE object; if is_static(), one topology is reused.
		traffic_engineering_algorithm: TE object, or None for direct-path routing.
		all_traffic_matrices / all_timestamps: full trace, index-aligned.
		all_paths: candidate path set handed to the ToE/TE.
		reconfig_length: snapshots per reconfiguration window.
		training_length: snapshots used to train each window's configuration.
		numK: number of representative clusters per window.
		override_all_files: force re-evaluation even when a cached file exists.

	Returns:
		(performance_timeseries, evaluated_timestamps) — index-aligned lists covering
		the trace from index `training_length` onward.
	"""
	assert(len(all_timestamps) == len(all_traffic_matrices))
	print("all_traffic_matrices has length {}".format(len(all_traffic_matrices)))
	dcn_name = aurora_network.get_dcn_name()
	# Throwaway CritMat instance: only needed so file_naming_tool can derive the filename.
	crit_mat = critical_matrix_module.CritMat([0] * 1000, None, critical_or_average="critical")
	results_filename = results_file_base_directory + "/" + file_naming_tool.name_file(dcn_name, aggregation_window, crit_mat, topology_engineering_algorithm, traffic_engineering_algorithm)
	## Check if the file has already been simulated before, if so, just load it rather than evaluating it again
	if not override_all_files:
		if os.path.isfile(results_filename):
			performance_timeseries, evaluated_timestamps = timeseries_performance_reader.import_timeseries_protobuf(results_filename, aurora_network)
			return performance_timeseries, evaluated_timestamps
	nblocks = aurora_network.get_num_blocks()
	## Split the evaluation into training - reconfig windows for workers to complete in a distributed manner
	## then goes and compute the topology and routing weight in each eval window
	## Evaluating traffic topology
	# A static ToE is traffic-agnostic, so its topology is computed once up front.
	uniform_logical_topology = None
	if topology_engineering_algorithm.is_static():
		uniform_logical_topology, _ = topology_engineering_algorithm.topology_engineer_given_representative_TMs(None, None)
	num_all_tms = len(all_traffic_matrices)
	# Evaluation starts only after a full training window of snapshots is available.
	current_tm_starting_index = training_length
	worker_arguments_list = []
	while current_tm_starting_index < num_all_tms:
		## Derive the training and evaluation index windows, (starting_index, ending_index)
		# The last window may be shorter than reconfig_length.
		current_eval_window_length = min(reconfig_length, num_all_tms - current_tm_starting_index)
		# set evaluation window and evaluation traffic matrices
		evaluation_window = (current_tm_starting_index, current_tm_starting_index + current_eval_window_length - 1)
		evaluation_traffic_matrices = all_traffic_matrices[evaluation_window[0] : evaluation_window[1] + 1]
		# set training window and training traffic matrices
		training_window = (current_tm_starting_index - training_length, current_tm_starting_index - 1)
		training_traffic_matrices = all_traffic_matrices[training_window[0] : training_window[1] + 1]
		## Now that the training traffic matrices have been defined, proceed to compute the critical set
		training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
		crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
		representative_traffic_vectors = crit_mat.train(number_of_clusters=numK)
		representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]
		## Initialize the topology engineer and get topology
		logical_topology = None
		if not topology_engineering_algorithm.is_static():
			# deepcopy guards against the ToE mutating the representative matrices,
			# which are handed to the TE again below.
			logical_topology, _ = topology_engineering_algorithm.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
			print(logical_topology)
		else:
			logical_topology = uniform_logical_topology
		## Initialize the traffic engineer and get routing weights
		routing_weights = None
		## If traffic_engineering_algorithm is None, then it implies direct path routing is used
		if traffic_engineering_algorithm is not None: 
			routing_weights = traffic_engineering_algorithm.compute_path_weights(logical_topology, copy.deepcopy(representative_traffic_matrices))
		
		worker_arguments = (aurora_network, nblocks, evaluation_traffic_matrices, logical_topology, routing_weights)
		worker_arguments_list.append(worker_arguments)
		current_tm_starting_index += current_eval_window_length
	performance_timeseries = []
	## Starting the evaluation
	pool = Pool(processes=NUM_WORKERS)
	worker_performance_results = pool.map(unpack_arguments_for_parallel_evaluation_worker, worker_arguments_list)
	pool.close()
	pool.join()
	## Combine the individual worker's results into the entire list
	# pool.map preserves submission order, so concatenation keeps timestamps aligned.
	for segment_id in range(len(worker_performance_results)):
		performance_timeseries += worker_performance_results[segment_id]
	num_evaluated_snapshots = len(performance_timeseries)
	evaluated_timestamps = all_timestamps[training_length : training_length + num_evaluated_snapshots]
	# dump results to file before exiting
	dump_timeseries_performance_to_protobuf(results_filename, evaluated_timestamps, performance_timeseries)
	return performance_timeseries, evaluated_timestamps


if __name__ == "__main__":
	print("Evaluation suite for FB's DCN interpod traces with MULTIPLE RECONFIGURATIONS")
	# A "-f" command-line flag forces re-simulation even when cached result files exist.
	if len(sys.argv) > 1:
		for argument in sys.argv[1:]:
			if argument[:2] == "-f":
				OVERRIDE_ALL_FILES = True

	## simply change the clusters here
	cluster_name = "C"
	## reconfiguration frequency in terms of seconds
	reconfig_length = 3600
	## training duration in terms of seconds
	training_length = 3600
	# NOTE(review): reconfig_length/training_length are passed to
	# multi_reconfig_evaluation as snapshot counts; with aggregation_window = 1
	# second, one snapshot per second makes the two interpretations coincide.
	aggregation_window = 1

	cluster_alias = "database"
	
	## simply change the clusters here
	# Map the cluster letter to its workload alias used in trace filenames.
	if cluster_name == "A":
		cluster_alias = "database"
	elif cluster_name == "B":
		cluster_alias = "web"
	elif cluster_name == "C":
		cluster_alias = "hadoop"
	elif cluster_name == "combined":
		cluster_alias = "combined"
	elif cluster_name == "combinedclique":
		cluster_alias = "combinedclique"
	else:
		# NOTE(review): this error message appears truncated ("Unrecognized ") —
		# consider including the offending cluster_name.
		print("Unrecognized ")
		sys.exit()
	
	# Hard-coded local paths to the preprocessed trace and the valid pod-id list.
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	# Drop the first and last snapshots — presumably partial aggregation windows; TODO confirm.
	traffic_matrices = traffic_matrices[1:-1]
	timestamps = timestamps[1:-1]
	assert(len(timestamps) == len(traffic_matrices))
	print("Reading facebook cluster... COMPLETE")
	number_of_pods = len(valid_network_ids)

	

	## Topology link number 
	per_node_pair_num_links = 15
	link_capacity = 5

	# Each block gets enough links for per_node_pair_num_links links to every other pod.
	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()
	#ideal_performance_evaluator(aurora_network, timestamps, traffic_matrices, aggregation_window)

	## start by loading the ideal performance
	#ideal_performance_timeseries_protobuf_filename = "./results/fb_cluster_{}/ideal_performance_agg{}.pb".format(cluster_name, aggregation_window)
	#ideal_performance_timeseries, ideal_performance_timestamps = timeseries_performance_reader.import_timeseries_protobuf(ideal_performance_timeseries_protobuf_filename, aurora_network)
	#ideal_performance_timeseries = ideal_performance_timeseries[1:-1]
	#ideal_performance_timestamps = ideal_performance_timestamps[1:-1]

	## Set the results file base directory, implicitly assumes that it exists
	results_file_base_directory = "/Users/minyee/src/robust_topology_engineering/results"
	## Initialize the static topology engineering algorithm
	static_toe = topology_engineer.TopologyEngineer(aurora_network, 10)

	# Sweep over the number of critical clusters (k) used to summarize training traffic.
	num_ks = [1,3,5,7,9]

	for k in num_ks:
		print("Evaluating k = {}...".format(k))
		# Initialize multi traffic topology engineering and direct path only topology engineering algorithms
		toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																										reconfig_length, 
																										training_length, 
																										all_paths, 
																										traffic_matrices, 
																										k, 
																										mlu_relaxation=1.05)
		# TE variant used with the adaptive ToE: favors fewer multihop paths.
		te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								reconfig_length, 
																								training_length, 
																								k, 
																								reduce_multihop=True,
																								sensitivity_relaxation=1., 
																								mlu_relaxation=1.0)
		# TE variant used with the static topology: relaxed sensitivity/MLU bounds.
		te2 = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								reconfig_length, 
																								training_length, 
																								k, 
																								reduce_multihop=False,
																								sensitivity_relaxation=1.1, 
																								mlu_relaxation=1.1)
		direct_path_toe = direct_path_topology_engineer.DirectPathTopologyEngineer(aurora_network, 
																					reconfig_length, 
																					training_length, 
																					numK=k)
		# Evaluate ToE performance
		# NOTE(review): the three result pairs below are overwritten each iteration
		# and never read afterwards — results are only persisted to protobuf files
		# inside multi_reconfig_evaluation.
		toe_performance_timeseries, toe_evaluated_timestamps = multi_reconfig_evaluation(results_file_base_directory,
																						aggregation_window,
																						aurora_network,
																						toe, 
																						te, 
																						traffic_matrices,
																						timestamps,
																						all_paths,
																						reconfig_length,
																						training_length,
																						numK=k,
																						override_all_files=OVERRIDE_ALL_FILES)
		# Evaluate Mesh TE performance
		static_performance_timeseries, static_evaluated_timestamps = multi_reconfig_evaluation(results_file_base_directory,
																								aggregation_window,
																								aurora_network,
																								static_toe, 
																								te2, 
																								traffic_matrices,
																								timestamps,
																								all_paths,
																								reconfig_length,
																								training_length,
																								numK=k,
																								override_all_files=OVERRIDE_ALL_FILES)
		# Evaluate ToE Direct path only performance
		# traffic_engineering_algorithm=None selects direct single-hop evaluation.
		direct_performance_timeseries, direct_evaluated_timestamps = multi_reconfig_evaluation(results_file_base_directory,
																								aggregation_window,
																								aurora_network,
																								direct_path_toe, 
																								None, 
																								traffic_matrices,
																								timestamps,
																								all_paths,
																								reconfig_length,
																								training_length,
																								numK=k,
																								override_all_files=OVERRIDE_ALL_FILES)
	# NOTE(review): exit() here makes all the plotting code below unreachable;
	# sys.exit() is also preferred over exit() in scripts.
	exit()
	
	# =====================================================================
	# NOTE(review): DEAD CODE — unreachable because of the exit() call above.
	# If re-enabled it would raise NameError: it references names that are
	# never defined in the reachable path (ideal_performance_timeseries,
	# performance_timeseries_collections,
	# static_topology_performance_timeseries_collections, oblivious_performance,
	# risk_matrices_toe, risk_matrices_static, oblivious_risk,
	# extract_mlu_decay_rate). The first statement below is also a bare tuple
	# expression with no effect. Kept as-is for reference.
	# Performance tuple indexing used throughout: [0]=MLU, [2]=median LU,
	# [3]=average hop count (presumably — matches usage elsewhere in this file).
	# =====================================================================
	ideal_performance_timeseries, ideal_performance_timestamps
	colors = cm.rainbow(np.linspace(0, 1, len(num_ks)))
	# --- Sorted MLU curves per k (ToE vs Static), plus Oblivious and Optimal baselines.
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		plt.plot(sorted(mlu_timeseries), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		plt.plot(sorted(mlu_timeseries), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	oblivious_mlu = [x[0] for x in oblivious_performance]
	plt.plot(sorted(oblivious_mlu), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	ideal_mlu = [x[0] for x in ideal_performance_timeseries]
	plt.plot(sorted(ideal_mlu), linestyle='-', color=[0.5,0.5,0.5])
	legends.append("Optimal")
	plt.legend(legends)
	plt.title("Fabric: {} - MLU".format(cluster_name))


	# --- MLU distribution violin plots, alternating ToE/Static per k.
	fig = plt.figure()
	all_results = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		all_results.append(mlu_timeseries)
		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		all_results.append(mlu_timeseries)
	sns.violinplot(data=all_results, palette=['r', 'b'] * len(num_ks))
	plt.ylabel("MLU")

	# --- Sorted average-hop-count curves per k, plus baselines.
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		ahc_timeseries = [x[3] for x in performance_timeseries_collections[k]]
		plt.plot(sorted(ahc_timeseries), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		ahc_timeseries = [x[3] for x in static_topology_performance_timeseries_collections[k]]
		plt.plot(sorted(ahc_timeseries), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	oblivious_ahc = [x[3] for x in oblivious_performance]
	plt.plot(sorted(oblivious_ahc), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	ideal_ahc = [x[3] for x in ideal_performance_timeseries]
	plt.plot(sorted(ideal_ahc), linestyle='-', color=[0.5,0.5,0.5])
	legends.append("Optimal")
	plt.legend(legends)
	plt.ylim(ymin=1, ymax=2)
	plt.title("Fabric: {} - Average Hop Count".format(cluster_name))

	# --- Sensitivity (risk) distributions per k.
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		plt.plot(risk_matrices_toe[k], linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		plt.plot(risk_matrices_static[k], linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	plt.plot(oblivious_risk, linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	plt.legend(legends)
	plt.title("Fabric: {} - Sensitivity distribution".format(cluster_name))

	# --- MLU competitive ratio vs the ideal, scaled so the worst oblivious ratio maps to 2.
	fig = plt.figure()
	legends = []
	ideal_mlu_timeseries = [x[0] for x in ideal_performance_timeseries]
	oblivious_mlu_timeseries = [x[0] for x in oblivious_performance]
	max_oblivious_perf_ratio = 0
	for i in range(len(mlu_timeseries)):
		max_oblivious_perf_ratio = max(oblivious_mlu_timeseries[i] / ideal_mlu_timeseries[i], max_oblivious_perf_ratio)
	perf_scaling = 2./max_oblivious_perf_ratio
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		competitive_ratio_timeseries = [0] * len(mlu_timeseries)
		for i in range(len(mlu_timeseries)):
			competitive_ratio_timeseries[i] = mlu_timeseries[i] * perf_scaling / ideal_mlu_timeseries[i]
		plt.semilogy(sorted(competitive_ratio_timeseries), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))

		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		competitive_ratio_timeseries = [0] * len(mlu_timeseries)
		for i in range(len(mlu_timeseries)):
			competitive_ratio_timeseries[i] = mlu_timeseries[i] * perf_scaling / ideal_mlu_timeseries[i]
		plt.semilogy(sorted(competitive_ratio_timeseries), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	mlu_timeseries = [x[0] for x in oblivious_performance]
	competitive_ratio_timeseries = [0] * len(mlu_timeseries)
	for i in range(len(mlu_timeseries)):
		competitive_ratio_timeseries[i] = mlu_timeseries[i] * perf_scaling / ideal_mlu_timeseries[i]
	plt.semilogy(sorted(competitive_ratio_timeseries), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	plt.legend(legends)
	plt.title("Fabric: {} - MLU competitive ratio".format(cluster_name))


	# --- Sorted median link-utilization curves per k, plus baselines.
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[2] for x in performance_timeseries_collections[k]]
		plt.plot(sorted(mlu_timeseries), linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		mlu_timeseries = [x[2] for x in static_topology_performance_timeseries_collections[k]]
		plt.plot(sorted(mlu_timeseries), linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	oblivious_mlu = [x[2] for x in oblivious_performance]
	plt.plot(sorted(oblivious_mlu), linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	ideal_mlu = [x[2] for x in ideal_performance_timeseries]
	plt.plot(sorted(ideal_mlu), linestyle='-', color=[0.5,0.5,0.5])
	legends.append("Optimal")
	plt.legend(legends)
	plt.title("Fabric: {} - Median LU".format(cluster_name))

	# --- Complementary CDF of MLU (P(MLU > utilization)) on a log scale.
	fig = plt.figure()
	legends = []
	for k, c in zip(num_ks, colors):
		mlu_timeseries = [x[0] for x in performance_timeseries_collections[k]]
		x, y = extract_mlu_decay_rate(mlu_timeseries, mlu_min=0, mlu_max=0.015)
		plt.semilogy(x, y, linewidth=0.7, color=c)
		legends.append("k = {} ToE".format(k))
		mlu_timeseries = [x[0] for x in static_topology_performance_timeseries_collections[k]]
		x, y = extract_mlu_decay_rate(mlu_timeseries, mlu_min=0, mlu_max=0.015)
		plt.semilogy(x, y, linestyle='-.', color=c)
		legends.append("k = {} Static".format(k))
	oblivious_mlu = [x[0] for x in oblivious_performance]
	x, y = extract_mlu_decay_rate(oblivious_mlu, mlu_min=0, mlu_max=0.015)
	plt.semilogy(x, y, linestyle=':', color=[0,0,0])
	legends.append("Oblivious")
	ideal_mlu = [x[0] for x in ideal_performance_timeseries]
	x, y = extract_mlu_decay_rate(ideal_mlu, mlu_min=0, mlu_max=0.015)
	plt.semilogy(x, y, linestyle='-', color=[0.5,0.5,0.5])
	legends.append("Optimal")
	plt.legend(legends)
	plt.title("Fabric: {} - Inverse LU".format(cluster_name))
	plt.ylabel("P(MLU > Utilization)")
	plt.xlabel("Utilization")
	plt.show()
	print("Completed Evaluation... Exiting Safely")