import sys, os, copy
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *

## import for plotting uses 
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
import seaborn as sns

# Number of parallel worker processes used by the evaluation Pool (see
# online_evaluation_zero_reconfig_latency).
NUM_WORKERS = 6
# When True, cached result files on disk are ignored and recomputed; toggled
# by a "-f"-prefixed command line argument in the __main__ section.
OVERRIDE_ALL_FILES = False

'''
Reconfig epoch predicted MLU file writer utility functions.
(START)
'''
def write_predicted_mlu_sequence_to_file(filename, predicted_mlu_sequence):
	"""Write one predicted MLU value per line to `filename`, overwriting it.

	Counterpart of read_predicted_mlu_sequence_from_file.
	"""
	# Build the payload with a single join instead of repeated string
	# concatenation (the original `+=` loop is quadratic in the worst case),
	# then write it out in one call.
	payload = "".join("{}\n".format(predicted_mlu) for predicted_mlu in predicted_mlu_sequence)
	with open(filename, 'w+') as f:
		f.write(payload)
	return

def read_predicted_mlu_sequence_from_file(filename):
	"""Read a predicted-MLU sequence (one float per line) from `filename`."""
	with open(filename, 'r') as f:
		return [float(line) for line in f]
'''
Reconfig epoch predicted MLU file writer utility functions.
(END)
'''

def sum_traffic_matrix(tm, nblocks):
	"""Return the total off-diagonal volume of the nblocks x nblocks matrix `tm`."""
	# Walk the strict upper triangle once, counting both directions of each
	# node pair; float() matches the original's float accumulator start value.
	pair_volumes = (tm[row][col] + tm[col][row]
					for row in range(nblocks - 1)
					for col in range(row + 1, nblocks))
	return float(sum(pair_volumes))

def scale_matrix(nblocks, traffic_matrix, multiplying_factor):
	"""Scale every off-diagonal entry of `traffic_matrix` in place.

	Diagonal entries are left untouched.  Returns the (mutated) matrix for
	convenience so it can be used in list comprehensions.
	"""
	# j always starts at i + 1, so i != j holds by construction; the original
	# inner `if i != j` guard was dead and has been removed.
	for i in range(nblocks - 1):
		for j in range(i + 1, nblocks):
			traffic_matrix[i][j] *= multiplying_factor
			traffic_matrix[j][i] *= multiplying_factor
	return traffic_matrix

def subsecond_reconfig_latency_string(latency):
	"""Format a sub-second latency (seconds) as a zero-padded unit string.

	Examples: 0 -> "000s", 0.5 -> "500ms", 0.0005 -> "500us".  Latencies
	below the nanosecond decade abort via the assert.
	"""
	assert(latency <= 1)
	if latency == 0:
		return "000s"
	exponent = math.log10(latency)
	# Guard-clause form of the original if/elif ladder: pick the decade band
	# and truncate to an integer count of that unit.
	if exponent >= -3:
		return str(int(latency * 1000)).zfill(3) + "ms"
	if -6 <= exponent < -3:
		return str(int(latency * 1000000)).zfill(3) + "us"
	if -9 <= exponent < -6:
		return str(int(latency * 1000000000)).zfill(3) + "ns"
	print("Latency : {} is out of range".format(latency))
	assert(False)


def flatten_traffic_matrix(tm, num_nodes):
	"""Flatten the off-diagonal entries of `tm` (row-major) into a 1-D numpy array."""
	# Diagonal (self-traffic) entries are skipped, matching
	# unflatten_traffic_vector's inverse layout.
	return np.array([tm[src][dst]
					 for src in range(num_nodes)
					 for dst in range(num_nodes)
					 if src != dst])

def unflatten_traffic_vector(vector, nblocks):
	"""Inverse of flatten_traffic_matrix: rebuild an nblocks x nblocks matrix.

	Off-diagonal entries are filled row-major from `vector`; the diagonal
	stays zero.
	"""
	tm = np.zeros((nblocks, nblocks))
	# Consume the vector through an iterator instead of a manual offset.
	values = iter(vector)
	for src in range(nblocks):
		for dst in range(nblocks):
			if src != dst:
				tm[src][dst] = next(values)
	return tm

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Load Facebook DCN traffic matrices and timestamps from a protobuf trace.

	Only pods listed in `network_ids_filename` are considered.  Each entry is
	rescaled in place (x8 / 1E6 / window), presumably converting bytes per
	aggregation window into Mbps -- TODO confirm units against the trace format.
	Returns (traffic_matrices, valid_network_ids, timestamps).
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	# Rescale every entry of every snapshot in place.
	for matrix in traffic_matrices:
		dimension = len(matrix)
		for row in range(dimension):
			for col in range(dimension):
				matrix[row][col] = float(matrix[row][col]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

def dump_timeseries_performance_to_protobuf(export_filename, timestamps, performance_timeseries_list):
	"""Serialize a performance timeseries to a PerformanceTimeSeries protobuf file.

	Each snapshot tuple is read positionally: index 3 is the average hop
	count, index 4 the link-utilization distribution of
	(lu, num_links, src, dst) tuples.
	"""
	assert(len(timestamps) == len(performance_timeseries_list))
	import proto.timeseries_performance_pb2 as performance_pb
	series_proto = performance_pb.PerformanceTimeSeries()
	for timestamp, snapshot in zip(timestamps, performance_timeseries_list):
		entry = series_proto.entries.add()
		entry.timestamp = timestamp
		entry.average_hop_count = snapshot[3]
		for lu, num_links, src, dst in snapshot[4]:
			link_proto = entry.link_utilizations.add()
			link_proto.src = src
			link_proto.dst = dst
			link_proto.link_utilization = lu
			link_proto.num_links = num_links
	with open(export_filename, "wb") as f:
		f.write(series_proto.SerializeToString())

'''
Parallel evaluation worker functions.
(START)
'''
def parallel_evaluation_worker(aurora_network, 
								nblocks, 
								evaluation_traffic_matrices, 
								logical_topology, 
								routing_weights):
	"""Evaluate a list of traffic matrices against one engineered topology.

	Runs inside a multiprocessing worker.  When `routing_weights` is None,
	direct (single-hop) routing is evaluated; otherwise the supplied routing
	weights are used.  Returns one performance tuple per traffic matrix, in
	input order.
	"""
	# Replaced the original zip(matrices, range(len(...))) + preallocated
	# list with a plain append loop -- same order, same results.
	performance_timeseries = []
	for tm in evaluation_traffic_matrices:
		if routing_weights is None:
			perf = evaluate_singlehop_performance(aurora_network, nblocks, logical_topology, tm)
		else:
			perf = evaluate_traffic_matrix_performance(aurora_network, tm, logical_topology, routing_weights)
		performance_timeseries.append(perf)
	return performance_timeseries

def unpack_arguments_for_parallel_evaluation_worker(args):
	"""Adapter for Pool.map: expand the packed argument tuple and delegate."""
	return parallel_evaluation_worker(*args)
'''
Parallel evaluation worker functions.
(END)
'''

'''
Rescales a zero-reconfig-latency performance timeseries to account for a nonzero
reconfiguration latency, inflating link utilizations in the snapshots that
overlap each reconfiguration epoch.
'''
def rescale_nonzero_reconfig_latency(results_file_base_directory,
									topology_engineering_algorithm, 
									traffic_engineering_algorithm, 
									zero_reconfig_latency_performance_timeseries, 
									zero_reconfig_latency_performance_timestamps,
									predicted_mlu_sequence,
									reconfig_frequency,
									reconfig_latency,
									aggregation_window,
									override_all_files=False):
	"""Derive the performance timeseries for a nonzero reconfiguration latency.

	Starting from the zero-latency timeseries, inflates the utilizations of
	the snapshots immediately following each reconfiguration epoch, modeling
	the capacity taken down while the topology reconfigures in stages.  The
	per-epoch `predicted_mlu_sequence` determines how much capacity can be
	degraded per stage, hence how many stages (and seconds) the
	reconfiguration takes.  Results are cached to a protobuf file under
	`results_file_base_directory` and reloaded on later runs unless
	`override_all_files` is set.

	Returns (performance_timeseries, timestamps).
	"""
	## Deriving the filename
	assert(len(zero_reconfig_latency_performance_timeseries) == len(zero_reconfig_latency_performance_timestamps))
	assert(reconfig_latency > 0)
	reconfig_latency_string = subsecond_reconfig_latency_string(reconfig_latency)
	results_filename = "reconfiglat{}".format(reconfig_latency_string) + "_" + topology_engineering_algorithm.get_filename_param()
	if traffic_engineering_algorithm is not None:
		results_filename += "_" + traffic_engineering_algorithm.get_filename_param()
	results_filename = results_file_base_directory + "/" + results_filename

	## Check if file already exists, and if so just read the results in
	if not override_all_files:
		if os.path.isfile(results_filename):
			# NOTE(review): `aurora_network` and `timeseries_performance_reader`
			# are not parameters or imports named in this function; this path
			# relies on them existing as module-level globals (the __main__
			# section / wildcard imports).  It raises NameError if this module
			# is imported elsewhere -- confirm.
			performance_timeseries, evaluated_timestamps = timeseries_performance_reader.import_timeseries_protobuf(results_filename, aurora_network)
			return performance_timeseries, evaluated_timestamps

	timeseries_length = len(zero_reconfig_latency_performance_timeseries)
	# Deep-copy every entry so the rescaling below cannot mutate the caller's
	# zero-latency timeseries.
	rescaled_performance_timeseries = [None] * timeseries_length
	for i in range(timeseries_length):
		rescaled_performance_timeseries[i] = copy.deepcopy(zero_reconfig_latency_performance_timeseries[i])
	## Now going to scale the entries following reconfiguration epochs
	for reconfig_epoch_index, predicted_mlu in zip(range(len(predicted_mlu_sequence)), predicted_mlu_sequence):
		# Index of the first snapshot of this epoch within the timeseries.
		current_time_index = reconfig_epoch_index * reconfig_frequency
		##  multiply the first couple of eval snapshots in this segment, which were affected by the act of reconfiguration
		# first, find out how long reconfiguration of entire topology is going to take, by figuring out the amount of leftover capacity 
		# headroom first
		# At least 10% of capacity is taken down per stage, even when the
		# predicted MLU leaves less headroom than that.
		allowed_capacity_degradation_per_stage = max(1. - predicted_mlu, 0.1)
		num_of_reconfiguration_stages = int(math.ceil(1./allowed_capacity_degradation_per_stage))
		total_reconfiguration_latency = num_of_reconfiguration_stages * reconfig_latency

		# With a fraction of capacity down, surviving links carry
		# proportionally more load.
		per_snapshot_lu_scaling_factor = 1./(1. - allowed_capacity_degradation_per_stage)
		assert(per_snapshot_lu_scaling_factor >= 1)
		affected_snapshots = int(math.ceil(float(total_reconfiguration_latency) / aggregation_window))
		current_post_epoch_index = 0
		# Fully scale every snapshot entirely covered by the reconfiguration
		# period...
		while affected_snapshots > 1:
			mlu, lu90, lu50, ahc, lu_distribution = rescaled_performance_timeseries[current_post_epoch_index + current_time_index]
			# NOTE(review): field 0 (link utilization) is multiplied by the
			# factor while field 1 (num_links, per the lu_distribution layout
			# used in dump_timeseries_performance_to_protobuf) is DIVIDED by
			# it -- confirm the division is intentional.
			rescaled_lu = [(x[0] * per_snapshot_lu_scaling_factor, x[1] / per_snapshot_lu_scaling_factor, x[2], x[3]) for x in lu_distribution]
			rescaled_performance_timeseries[current_post_epoch_index + current_time_index] = (mlu * per_snapshot_lu_scaling_factor, lu90 * per_snapshot_lu_scaling_factor, lu50 * per_snapshot_lu_scaling_factor, ahc, rescaled_lu)
			affected_snapshots -= 1
			current_post_epoch_index += 1
		# ...then scale the last snapshot by the fraction of its aggregation
		# window that overlaps the reconfiguration.
		if affected_snapshots == 1:
			final_snapshot_lu_scaling_factor = ((total_reconfiguration_latency - (current_post_epoch_index * aggregation_window)) * per_snapshot_lu_scaling_factor) / aggregation_window
			assert(final_snapshot_lu_scaling_factor > 0)
			mlu, lu90, lu50, ahc, lu_distribution = rescaled_performance_timeseries[current_post_epoch_index + current_time_index]
			rescaled_lu = [(x[0] * final_snapshot_lu_scaling_factor, x[1] / final_snapshot_lu_scaling_factor, x[2], x[3]) for x in lu_distribution]
			rescaled_performance_timeseries[current_post_epoch_index + current_time_index] = (mlu * final_snapshot_lu_scaling_factor, lu90 * final_snapshot_lu_scaling_factor, lu50 * final_snapshot_lu_scaling_factor, ahc, rescaled_lu)
			affected_snapshots -= 1
			current_post_epoch_index += 1
	# Cache the rescaled results before returning.
	dump_timeseries_performance_to_protobuf(results_filename, zero_reconfig_latency_performance_timestamps, rescaled_performance_timeseries)
	return rescaled_performance_timeseries, zero_reconfig_latency_performance_timestamps
	
	


## Runs the online evaluation, the reconfig latency is set to zero for now.
def online_evaluation_zero_reconfig_latency(results_file_base_directory,
											aggregation_window,
											aurora_network, 
											topology_engineering_algorithm, 
											traffic_engineering_algorithm, 
											all_paths, 
											reconfig_period, 
											basic_training_length,
											all_traffic_matrices,
											all_timestamps,
											numK=5,
											override_all_files=False):
	"""Run the online ToE/TE evaluation assuming zero reconfiguration latency.

	Slides over the trace in steps of `reconfig_period`: each step clusters
	all preceding traffic into `numK` representative matrices (CritMat),
	engineers a topology (and, if `traffic_engineering_algorithm` is given,
	routing weights) from them, then evaluates the next `reconfig_period`
	snapshots in parallel worker processes.  Results and the per-epoch
	predicted-MLU sequence are cached to files under
	`results_file_base_directory` and reloaded unless `override_all_files`.

	Returns (performance_timeseries, evaluated_timestamps, predicted_mlu_sequence).
	"""
	assert(len(all_timestamps) == len(all_traffic_matrices))

	## Deriving the filename
	reconfig_latency_string = subsecond_reconfig_latency_string(0)
	results_filename = "reconfiglat{}".format(reconfig_latency_string) + "_" + topology_engineering_algorithm.get_filename_param()
	if traffic_engineering_algorithm is not None:
		results_filename += "_" + traffic_engineering_algorithm.get_filename_param()
	results_filename = results_file_base_directory + "/" + results_filename

	## Check if file already exists, and if so just read the results in
	if not override_all_files:
		if os.path.isfile(results_filename):
			# NOTE(review): `timeseries_performance_reader` is not imported by
			# name in this file; presumably provided by one of the wildcard
			# imports -- confirm.
			performance_timeseries, evaluated_timestamps = timeseries_performance_reader.import_timeseries_protobuf(results_filename, aurora_network)
			predicted_mlu_sequence = read_predicted_mlu_sequence_from_file(results_filename + "_predicted_mlus.txt")
			assert(len(predicted_mlu_sequence) > 0)
			return performance_timeseries, evaluated_timestamps, predicted_mlu_sequence

	## If we get here, that means that we do need to run the evaluations
	current_index = basic_training_length
	worker_arguments_list = []
	nblocks = aurora_network.get_num_blocks()

	# A static topology engineer ignores traffic, so its topology is computed
	# once up front and reused for every epoch.
	uniform_logical_topology = None
	if topology_engineering_algorithm.is_static():
		uniform_logical_topology, _ = topology_engineering_algorithm.topology_engineer_given_representative_TMs(None, None)
	predicted_mlu_sequence = []
	while current_index < len(all_traffic_matrices):
		# Train on everything seen so far; evaluate on the next reconfig period.
		training_window = (0, current_index - 1)
		eval_window = (current_index, current_index + reconfig_period - 1)
		training_traffic_matrices = all_traffic_matrices[training_window[0] : training_window[1] + 1]
		training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
		# Cluster the training traffic into numK representative (critical) matrices.
		crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
		representative_traffic_vectors = crit_mat.train(number_of_clusters=numK)
		representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]

		## Initialize the topology engineer and get topology
		logical_topology = None
		toe_predicted_mlu = 0
		te_predicted_mlu = 0
		if not topology_engineering_algorithm.is_static():
			logical_topology, _, toe_predicted_mlu = topology_engineering_algorithm.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
			print(logical_topology)
		else:
			logical_topology = uniform_logical_topology
		## Initialize the traffic engineer and get routing weights
		routing_weights = None
		## If traffic_engineering_algorithm is None, then it implies direct path routing is used
		if traffic_engineering_algorithm is not None: 
			routing_weights, te_predicted_mlu = traffic_engineering_algorithm.compute_path_weights(logical_topology, copy.deepcopy(representative_traffic_matrices))
		# Keep the worst predicted MLU across ToE and TE for this epoch; the
		# nonzero-latency rescaling step consumes this sequence later.
		predicted_mlu = max(toe_predicted_mlu, te_predicted_mlu)
		assert(predicted_mlu > 0)
		predicted_mlu_sequence.append(predicted_mlu)

		## Setup the evaluation traffic matrices
		# Python slicing truncates at the end of the trace, so the final
		# window may contain fewer than reconfig_period snapshots.
		evaluation_traffic_matrices = all_traffic_matrices[eval_window[0] : eval_window[1] + 1]
		worker_arguments = (aurora_network, nblocks, evaluation_traffic_matrices, logical_topology, routing_weights)
		worker_arguments_list.append(worker_arguments)
		current_index += reconfig_period

	performance_timeseries = []
	## Starting the evaluation
	# Fan one task per epoch out over NUM_WORKERS processes; map preserves
	# epoch order.
	pool = Pool(processes=NUM_WORKERS)
	worker_performance_results = pool.map(unpack_arguments_for_parallel_evaluation_worker, worker_arguments_list)
	pool.close()
	pool.join()
	## Combine the individual worker's results into the entire list
	for segment_id in range(len(worker_performance_results)):
		performance_timeseries += worker_performance_results[segment_id]
	num_evaluated_snapshots = len(performance_timeseries)
	evaluated_timestamps = all_timestamps[basic_training_length : ]
	print("timestamps : {}".format(len(evaluated_timestamps)))
	print("evaluated traffic : {}".format(num_evaluated_snapshots))
	assert(num_evaluated_snapshots == len(evaluated_timestamps))
	# dump results to file before exiting, and then dump the reconfig epoch predicted MLU 	
	dump_timeseries_performance_to_protobuf(results_filename, evaluated_timestamps, performance_timeseries)
	write_predicted_mlu_sequence_to_file(results_filename + "_predicted_mlus.txt", predicted_mlu_sequence)
	return performance_timeseries, evaluated_timestamps, predicted_mlu_sequence

if __name__ == "__main__":
	print("Evaluation suite for FB's DCN interpod traces with MULTIPLE RECONFIGURATIONS")
	# A "-f"-prefixed command line argument forces recomputation instead of
	# reusing cached result files (this rebinds the module-level global).
	if len(sys.argv) > 1:
		for argument in sys.argv[1:]:
			if argument[:2] == "-f":
				OVERRIDE_ALL_FILES = True

	## simply change the clusters here
	cluster_name = "combined"
	## reconfiguration frequency in terms of seconds
	reconfig_frequency = 3600
	## training duration in terms of seconds
	base_training_set_length = 300 ## the base training convex set
	# Traffic matrix aggregation window, in seconds.
	aggregation_window = 1
	# Nonzero reconfiguration latencies (seconds) to rescale results for.
	list_of_reconfig_latency_in_seconds = [0.5, 0.0005]

	cluster_alias = "database"
	
	## simply change the clusters here
	# Map the cluster short name to the human-readable alias used in trace
	# filenames; exit on an unknown name.
	if cluster_name == "A":
		cluster_alias = "database"
	elif cluster_name == "B":
		cluster_alias = "web"
	elif cluster_name == "C":
		cluster_alias = "hadoop"
	elif cluster_name == "combined":
		cluster_alias = "combined"
	elif cluster_name == "combinedclique":
		cluster_alias = "combinedclique"
	else:
		print("Unrecognized ")
		sys.exit()
	
	# NOTE(review): absolute, machine-specific paths -- these only work on the
	# original author's machine; consider making them configurable.
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)
	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	# Drop the first and last snapshots (presumably partial aggregation
	# windows -- confirm).
	traffic_matrices = traffic_matrices[1:-1]
	timestamps = timestamps[1:-1]
	assert(len(timestamps) == len(traffic_matrices))
	print("Reading facebook cluster... COMPLETE")

	number_of_pods = len(valid_network_ids)

	### scale the traffic matrices based on average utilization

	## todo(jason) : scale traffic matrices such that the average total traffic per snapshot leads to about 0.5 utilization

	## Topology link number 
	per_node_pair_num_links = 15
	link_capacity = 5

	# Per-block parameters: every block gets enough links to reach each other
	# pod with per_node_pair_num_links links each.
	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()
	#ideal_performance_evaluator(aurora_network, timestamps, traffic_matrices, aggregation_window)


	## Figure out how to scale the traffic matrices so that average MLU is not overly low or overly high
	# Target average utilization level; per-cluster overrides below were
	# presumably tuned empirically -- confirm before changing.
	average_mlu_level = 0.5
	if cluster_name == "A":
		average_mlu_level = 0.6
	elif cluster_name == "C":
		average_mlu_level = 4. * 0.5
	elif cluster_name == "combined":
		average_mlu_level = 4.5 * 0.5
		#average_mlu_level = 0.5
	print("Compute scaling factor for traffic matrices")
	# Scale so the average total per-snapshot volume lands at
	# average_mlu_level of the total network bandwidth.
	per_snapshot_sum = [sum_traffic_matrix(x, number_of_pods) for x in traffic_matrices]
	average_total_volume = float(sum(per_snapshot_sum))/len(per_snapshot_sum)
	total_network_bandwidth = (number_of_pods - 1) * per_node_pair_num_links * link_capacity
	per_tm_snapshot_scaling_factor = average_mlu_level / (average_total_volume / total_network_bandwidth)
	print("Per TM scaling factor : {} ".format(per_tm_snapshot_scaling_factor))

	## get only first 7 hours of traffic for now 
	total_snapshots = base_training_set_length + 6 * 3600 # base training period plus 6 hours of eval
	traffic_matrices = traffic_matrices[:total_snapshots]
	timestamps = timestamps[:total_snapshots]

	## scale traffic matrices
	# scale_matrix mutates in place and returns its argument, so this rebind
	# is effectively an in-place scaling of every snapshot.
	traffic_matrices = [scale_matrix(number_of_pods, traffic_matrix, per_tm_snapshot_scaling_factor) for traffic_matrix in traffic_matrices]

	## start by loading the ideal performance
	#ideal_performance_timeseries_protobuf_filename = "./results/fb_cluster_{}/ideal_performance_agg{}.pb".format(cluster_name, aggregation_window)
	#ideal_performance_timeseries, ideal_performance_timestamps = timeseries_performance_reader.import_timeseries_protobuf(ideal_performance_timeseries_protobuf_filename, aurora_network)
	#ideal_performance_timeseries = ideal_performance_timeseries[1:-1]
	#ideal_performance_timestamps = ideal_performance_timestamps[1:-1]

	## Set the results file base directory, implicitly assumes that it exists
	results_file_base_directory = "/Users/minyee/src/robust_topology_engineering/results"
	## Initialize the static topology engineering algorithm
	print("dcn name : {}".format(aurora_network.get_dcn_name()))
	if not os.path.isdir(results_file_base_directory + "/" + aurora_network.get_dcn_name() + "/" + "online"):
		os.mkdir(results_file_base_directory + "/" + aurora_network.get_dcn_name() + "/" + "online")

	results_file_base_directory = results_file_base_directory + "/" + aurora_network.get_dcn_name() + "/" + "online" 
	#static_toe = topology_engineer.TopologyEngineer(aurora_network, 10)
	# NOTE(review): the first num_ks assignment is immediately overwritten --
	# the [1,3,5,7] sweep is dead code; confirm narrowing to k=1 is intended.
	num_ks = [1,3,5,7,]
	num_ks = [1,]

	## First order of business is to either load 

	for k in num_ks:
		print("Evaluating k = {}...".format(k))
		# Initialize multi traffic topology engineering and direct path only topology engineering algorithms
		# NOTE(review): these first `toe`/`te` assignments (sensitivity-based
		# variants) are immediately overwritten by the "robust" variants below,
		# so only their constructors run -- confirm this experiment toggle is
		# intentional or delete the dead instantiations.
		toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																										reconfig_frequency, 
																										base_training_set_length, 
																										all_paths, 
																										traffic_matrices, 
																										k, 
																										mlu_relaxation=1.05,
																										return_predicted_mlu=True)
		te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								reconfig_frequency, 
																								base_training_set_length, 
																								k, 
																								reduce_multihop=True,
																								sensitivity_relaxation=1., 
																								mlu_relaxation=1.0,
																								return_predicted_mlu=True)
		toe = robust_multi_traffic_topology_engineer_v3.RobustMultiTrafficTopologyEngineerImplementationV3(aurora_network, 
																										reconfig_frequency, 
																										base_training_set_length, 
																										all_paths, 
																										traffic_matrices, 
																										k, 
																										minimize_multihop=True,
																										return_predicted_mlu=True)
		te = robust_multi_cluster_traffic_engineer.RobustMultiClusterTrafficEngineer(aurora_network, 
																					all_paths, 
																					reconfig_frequency, 
																					base_training_set_length, 
																					k, 
																					reduce_multihop=True,
																					return_predicted_mlu=True)
		direct_path_toe = direct_path_topology_engineer.DirectPathTopologyEngineer(aurora_network, 
																					reconfig_frequency, 
																					base_training_set_length, 
																					numK=k,
																					return_predicted_mlu=True)
		# Evaluate ToE performance
		toe_performance_timeseries, toe_evaluated_timestamps, toe_predicted_mlus = online_evaluation_zero_reconfig_latency(results_file_base_directory,
																															aggregation_window,
																															aurora_network, 
																															toe, 
																															te, 
																															all_paths, 
																															reconfig_frequency, 
																															base_training_set_length,
																															traffic_matrices,
																															timestamps,
																															numK=k,
																															override_all_files=OVERRIDE_ALL_FILES)
		# Evaluate ToE Direct path only performance
		# (traffic engineer is None => direct path routing in the evaluator)
		direct_performance_timeseries, direct_evaluated_timestamps, direct_predicted_mlus = online_evaluation_zero_reconfig_latency(results_file_base_directory,
																																aggregation_window,
																																aurora_network, 
																																direct_path_toe, 
																																None, 
																																all_paths, 
																																reconfig_frequency, 
																																base_training_set_length,
																																traffic_matrices,
																																timestamps,
																																numK=k,
																																override_all_files=OVERRIDE_ALL_FILES)
		# Rescale the zero-latency results for each nonzero reconfiguration
		# latency (zero entries would violate rescale's assert and are skipped).
		for reconfig_latency_in_seconds in list_of_reconfig_latency_in_seconds:
			if reconfig_latency_in_seconds == 0:
				continue
			else:
				rescale_nonzero_reconfig_latency(results_file_base_directory,
													toe, 
													te, 
													toe_performance_timeseries, 
													toe_evaluated_timestamps,
													toe_predicted_mlus,
													reconfig_frequency,
													reconfig_latency_in_seconds,
													aggregation_window,
													override_all_files=OVERRIDE_ALL_FILES)
				rescale_nonzero_reconfig_latency(results_file_base_directory,
													direct_path_toe, 
													None, 
													direct_performance_timeseries, 
													direct_evaluated_timestamps,
													direct_predicted_mlus,
													reconfig_frequency,
													reconfig_latency_in_seconds,
													aggregation_window,
													override_all_files=OVERRIDE_ALL_FILES)
	print("Completed Evaluation... Exiting Safely")
	# NOTE(review): bare exit() relies on the site module; sys.exit() is the
	# conventional choice in scripts.
	exit()
	
	