'''
Handles the ToE and TE simulation for Facebook's DCN traces with offline training, meaning that no
traffic uncertainty is involved in the evaluations: the training window is exactly the same as the
evaluation window.
'''
import sys, os, copy, random
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import CritMat.critmat as critical_matrix_module
import facebook_dcn_traffic.utility
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *
from traffic_generator import *

## imports used for plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
      



def scale_matrix(nblocks, traffic_matrix, multiplying_factor):
	"""Scale every off-diagonal entry of `traffic_matrix` in place by `multiplying_factor`.

	Diagonal entries (self-traffic) are left untouched. The matrix is mutated
	in place and also returned for call-site convenience.

	:param nblocks: number of rows/columns of the (square) matrix to scale
	:param traffic_matrix: square, indexable traffic matrix (list-of-lists or ndarray)
	:param multiplying_factor: scalar applied to each off-diagonal entry
	:return: the same `traffic_matrix` object, scaled
	"""
	for i in range(nblocks - 1):
		# The inner loop starts at i + 1, so j > i always holds and no
		# explicit diagonal check is needed; scale both (i, j) and (j, i).
		for j in range(i + 1, nblocks):
			traffic_matrix[i][j] *= multiplying_factor
			traffic_matrix[j][i] *= multiplying_factor
	return traffic_matrix

def flatten_traffic_matrix(tm, num_nodes):
	"""Flatten the off-diagonal entries of a square traffic matrix into a 1-D numpy array.

	Entries are emitted in row-major order with the diagonal skipped, so the
	result has num_nodes * (num_nodes - 1) elements.
	"""
	off_diagonal = [tm[src][dst]
	                for src in range(num_nodes)
	                for dst in range(num_nodes)
	                if src != dst]
	return np.array(off_diagonal)

def unflatten_traffic_vector(vector, nblocks):
	"""Rebuild an nblocks x nblocks traffic matrix from a flattened off-diagonal vector.

	Inverse of flatten_traffic_matrix: entries are consumed in row-major
	order, and the diagonal is left at zero.
	"""
	tm = np.zeros((nblocks, nblocks))
	entries = iter(vector)
	for row in range(nblocks):
		for col in range(nblocks):
			if row != col:
				tm[row][col] = next(entries)
	return tm

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Read Facebook DCN traffic snapshots from a protobuf trace and rescale each entry.

	Each matrix entry is converted from a byte count over the aggregation
	window into a rate (times 8, divided by 1E6 and by the window length).
	Returns (traffic_matrices, valid_network_ids, timestamps).
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(
		trace_file_name,
		considered_network_id=valid_network_ids,
		return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	# Rescale every entry in place; matrices are assumed square.
	for tm in traffic_matrices:
		nrows = len(tm)
		for src in range(nrows):
			for dst in range(nrows):
				tm[src][dst] = float(tm[src][dst]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

def read_in_facebook_traces(cluster_name):
	"""Load the preprocessed Facebook DCN traces for a given cluster.

	:param cluster_name: one of "A", "B", "C", "combined", "combinedclique"
	:return: (traffic_matrices, valid_network_ids); the first and last
	         snapshots of the trace are dropped (presumably partial
	         aggregation windows at the trace edges -- TODO confirm).

	Exits the process on an unrecognized cluster name.
	"""
	aggregation_window = 1

	# Map the cluster label used in filenames to the trace alias.
	cluster_aliases = {
		"A": "database",
		"B": "web",
		"C": "hadoop",
		"combined": "combined",
		"combinedclique": "combinedclique",
	}
	if cluster_name not in cluster_aliases:
		# Previously printed just "Unrecognized " -- include the offending name.
		print("Unrecognized cluster name: {}".format(cluster_name))
		sys.exit()
	cluster_alias = cluster_aliases[cluster_name]

	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	traffic_matrices = traffic_matrices[1:-1]
	# NOTE(review): timestamps are trimmed but never returned -- confirm no
	# caller needs them before removing entirely.
	timestamps = timestamps[1:-1]
	return traffic_matrices, valid_network_ids


if __name__ == "__main__":
	# Pipeline: (1) load Facebook DCN traces, (2) build the Aurora network and
	# candidate paths, (3) cluster the traffic into k representative matrices
	# via CritMat, (4) run ToE + TE on the representatives, (5) evaluate every
	# snapshot against the resulting topology/routing and plot MLU and average
	# hop count distributions.
	print("Evaluation suite for synthetically-generated traffic traces")
	cluster_name = "B"
	# NOTE(review): this default is dead -- number_of_pods is overwritten below
	# with len(valid_network_ids) once the traces are loaded.
	number_of_pods = 8
	## Topology link number
	per_node_pair_num_links = 15
	link_capacity = 5  # per-link capacity, in Gbps (see "in gbps" note below)

	traffic_matrices, valid_network_ids = read_in_facebook_traces(cluster_name)
	number_of_pods=len(valid_network_ids)
	# Per-block-type parameters: each block gets enough links to connect to
	# every other pod with per_node_pair_num_links parallel links.
	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("artifical_cluster_{}".format(number_of_pods), block_params, block_names_list)
	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()

	
	print("Generating the synthetic traffic matrices")
	#traffic_matrices = []
	nsnapshots = 1000
	ncliques = 3
	# NOTE(review): the bipartite generator is immediately overwritten by the
	# clique generator, and neither is used below (the commented-out call is
	# the only consumer) -- confirm whether synthetic generation is intended.
	traffic_matrix_generator = bipartite_traffic_generator.BipartiteTrafficGenerator(number_of_pods)
	traffic_matrix_generator = clique_traffic_generator.CliqueTrafficGenerator(number_of_pods, ncliques)
	
	#traffic_matrices += traffic_matrix_generator.generate_probability_traffic_matrices(nsnapshots)
	# Scale each trace matrix up to the network's aggregate capacity scale.
	tm_scaleup_factor = 1. * float(number_of_pods * per_node_pair_num_links * link_capacity)
	traffic_matrices = [scale_matrix(number_of_pods, x, tm_scaleup_factor) for x in traffic_matrices]
	# Fixed seed (10) so the shuffle -- and hence the training split -- is reproducible.
	random.Random(10).shuffle(traffic_matrices)
	# NOTE(review): the training slice size is nsnapshots/10 = 100, based on the
	# synthetic snapshot count rather than len(traffic_matrices) -- confirm this
	# is the intended training fraction for the trace-driven matrices.
	training_traffic_matrices = traffic_matrices[:int(nsnapshots/10)]
	# Offline setting: one reconfiguration window spanning the entire trace.
	reconfiguration_period = len(traffic_matrices)

	## now start forming the reconfiguration windows
	num_traffic_snapshots = len(traffic_matrices)


	# Declare the static ToE uniform mesh
	# NOTE(review): static_toe is constructed but unused (its only call is
	# commented out) -- likely left in for comparison runs.
	static_toe = topology_engineer.TopologyEngineer(aurora_network, reconfiguration_period)
	#uniform_adj_matrix, _ = static_toe.topology_engineer_given_representative_TMs(None, all_paths)
	
	
	# Number of representative traffic clusters for CritMat.
	k = 5

	# Cluster the flattened training matrices into k representative demand
	# vectors ("critical" mode), then reshape them back into matrices.
	training_traffic_vectors = [flatten_traffic_matrix(x, number_of_pods) for x in training_traffic_matrices]
	crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
	representative_traffic_vectors = crit_mat.train(number_of_clusters=k)
	representative_traffic_matrices = [unflatten_traffic_vector(vector, number_of_pods) for vector in representative_traffic_vectors]
	toe = multi_cluster_toe_sensitivity_critical_demands.MultiTrafficTOESensitivityCriticalDemands(aurora_network, 
																									reconfiguration_period, 
																									reconfiguration_period, 
																									all_paths, 
																									traffic_matrices, 
																									k,
																									mlu_relaxation=1.0)
	te = multi_cluster_sensitivity_critical_demands.MultiClusterTESensitivityCriticalDemands(aurora_network, 
																								all_paths, 
																								reconfiguration_period, 
																								reconfiguration_period, 
																								k, 
																								reduce_multihop=True,
																								sensitivity_relaxation=2, 
																								mlu_relaxation=1.0)
	# Engineer the logical topology from the representatives, then compute the
	# TE path weights on that topology. Deep copies guard against in-place
	# mutation of the representatives by the engines.
	toe_logical_topology, _ = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	print(toe_logical_topology)
	toe_routing_weights = te.compute_path_weights(toe_logical_topology, copy.deepcopy(representative_traffic_matrices))

	performance_timeseries = [0] * len(traffic_matrices)
	## Start evaluation - check the ToE case or no ToE case, if no ToE 
	# Evaluate every snapshot against the single offline topology/routing.
	for tm, time_index in zip(traffic_matrices, range(len(traffic_matrices))):
		mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, tm, toe_logical_topology, toe_routing_weights)
		performance_timeseries[time_index] = (mlu, lu90, lu50, ave_hop_count, lu_distribution)	

	### Finally export results
	## At this point, all of the evaluations have succesfully run
	# Plot the sorted MLU values (i.e. an empirical CDF-style curve).
	fig = plt.figure()
	print("length of output_results : {}".format(len(performance_timeseries)))
	
	mlu_timeseries = sorted([x[0]for x in performance_timeseries])
	print("largest mlu is : {}".format(mlu_timeseries[-1]))
	plt.plot(sorted(mlu_timeseries))
	#print(sorted(mlu_timeseries))
	plt.ylabel('MLU')

	# Plot the sorted average hop counts in a second figure.
	fig = plt.figure()
	ahc_timeseries = sorted([x[3]for x in performance_timeseries])
	plt.plot(sorted(ahc_timeseries))
	plt.ylabel('Avg Hop Count')

	plt.show()
	print("Completed Evaluation... Exiting Safely")