import sys, os, copy
sys.path.append("..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *

## import for plotting uses 
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm

def flatten_traffic_matrix(tm, num_nodes):
	"""Collapse an n x n traffic matrix into a 1-D numpy vector holding the
	off-diagonal entries in row-major order (diagonal entries are dropped)."""
	off_diagonal = [tm[src][dst]
					for src in range(num_nodes)
					for dst in range(num_nodes)
					if src != dst]
	return np.array(off_diagonal)

def unflatten_traffic_vector(vector, nblocks):
	"""Inverse of flatten_traffic_matrix: scatter a row-major off-diagonal
	vector back into an nblocks x nblocks matrix with a zero diagonal."""
	entries = iter(vector)
	tm = np.zeros((nblocks, nblocks))
	for src in range(nblocks):
		for dst in range(nblocks):
			if src != dst:
				tm[src][dst] = next(entries)
	return tm

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Load Facebook DCN traffic matrices from a protobuf trace file.

	Reads the set of valid pod/network ids, loads every traffic-matrix
	snapshot (with timestamps), and rescales each entry in place from a
	byte count to Mbps averaged over the aggregation window.
	Returns (traffic_matrices, valid_network_ids, timestamps).
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	# bytes -> bits (x8) -> Mb (/1E6), averaged over the window duration.
	for tm in traffic_matrices:
		size = len(tm)
		for row in range(size):
			for col in range(size):
				tm[row][col] = float(tm[row][col]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

## returns the results and 
def online_evaluation(aurora_network, toe, te, training_traffic_matrices, evaluation_traffic_matrices, numK=1):
	## Initially find the representative traffic matrices
	numK = min(numK, len(training_traffic_matrices))
	nblocks = aurora_network.get_num_blocks()
	training_traffic_vectors = [flatten_traffic_matrix(x, nblocks) for x in training_traffic_matrices]
	crit_mat = critical_matrix_module.CritMat(training_traffic_vectors, None, critical_or_average="critical")
	representative_traffic_vectors = crit_mat.train(number_of_clusters=numK)
	representative_traffic_matrices = [unflatten_traffic_vector(vector, nblocks) for vector in representative_traffic_vectors]

	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()
	path_selector = PathSelector(aurora_network, use_multihop=False)
	all_direct_paths = path_selector.get_all_paths()

	## Initialize the topology engineer and get topology
	logical_topology, routing_weight = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths, minimize_multihop=True)
	plt.figure()
	plt.imshow(logical_topology)
	logical_topology = aurora_network.round_fractional_topology_giant_switch(logical_topology, [], rounding_simple=False)

	## Initialize the traffic engineer and get routing weights
	routing_weights = te.compute_path_weights(logical_topology, copy.deepcopy(representative_traffic_matrices))

	performance_timseries = [0] * len(evaluation_traffic_matrices)
	
	# evaluate performance
	for tm, tm_index in zip(evaluation_traffic_matrices, range(len(evaluation_traffic_matrices))):
		mlu, lu90, lu50, ave_hop_count, lu_distribution = evaluate_traffic_matrix_performance(aurora_network, tm, logical_topology, routing_weights)
		performance_timseries[tm_index] = (mlu, lu90, lu50, ave_hop_count, lu_distribution)
	return performance_timseries

def compute_routing_weight_stats(nblocks, routing_weights):
	"""Print per-pair routing-weight statistics.

	routing_weights maps a path tuple (src, ..., dst) to its weight. A
	two-node path is a direct hop; prints the direct-hop weight matrix and
	the total (direct + indirect) weight per source/destination pair.
	"""
	total_weights = np.zeros((nblocks, nblocks))
	direct_only = np.zeros((nblocks, nblocks))
	for path, weight in routing_weights.items():
		endpoints = (path[0], path[-1])
		if len(path) == 2:
			direct_only[endpoints] = weight
		total_weights[endpoints] += weight
	print("Printing direct weight paths: \n{}".format(direct_only))
	print("Printing routing weight paths sum: \n{}".format(total_weights))

def analyze_risk(nblocks, topology, routing_weights, traffic_matrices):
	"""Compute a per-pair risk matrix.

	For every ordered pod pair (i, j), risk = direct-path routing weight
	divided by the elementwise-maximum observed demand for that pair,
	divided again by the number of links the topology provides for it.
	Diagonal entries remain zero.
	"""
	## Elementwise maximum demand over all snapshots (off-diagonal only),
	## equivalent to max-ing the flattened traffic vectors.
	max_traffic_matrix = np.zeros((nblocks, nblocks))
	for src in range(nblocks):
		for dst in range(nblocks):
			if src != dst:
				max_traffic_matrix[src][dst] = max(tm[src][dst] for tm in traffic_matrices)
	risk_matrix = np.zeros((nblocks, nblocks))
	for src in range(nblocks):
		for dst in range(nblocks):
			if src == dst:
				continue
			risk_matrix[src][dst] = routing_weights[(src, dst)] / max_traffic_matrix[src][dst]
			risk_matrix[src][dst] /= topology[src][dst]
	return risk_matrix


if __name__ == "__main__":
	print("Evaluation suite for FB's DCN interpod traces")
	## simply change the clusters here
	cluster_name = "B"
	cluster_alias = "database"
	aggregation_window = 1
	## simply change the clusters here
	if cluster_name == "A":
		cluster_alias = "database"
	elif cluster_name == "B":
		cluster_alias = "web"
	elif cluster_name == "C":
		cluster_alias = "hadoop"
	elif cluster_name == "combined":
		cluster_alias = "combined"
	else:
		print("Unrecognized ")
		sys.exit()
	
	tm_snapshots_protobuf_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, timestamps = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	traffic_matrices = traffic_matrices[1:-1]
	traffic_matrix = traffic_matrices[10]
	print("Reading facebook cluster... COMPLETE")
	#traffic_matrices = traffic_matrices[1:30]
	#timestamps = timestamps[1:30]
	number_of_pods = len(valid_network_ids)

	## Topology link number 
	per_node_pair_num_links = 15
	link_capacity = 5

	# Uniform Topology
	uniform_topology = [0] * number_of_pods
	for i in range(number_of_pods):
		uniform_topology[i] = [0] * number_of_pods
		for j in range(number_of_pods):
			if i != j:
				uniform_topology[i][j] = per_node_pair_num_links



	block_params = {BlockType.SUPERBLOCK : {}, BlockType.BORDER_ROUTER : {}}
	block_params[BlockType.SUPERBLOCK]["link capacity"] = float(link_capacity)
	block_params[BlockType.SUPERBLOCK]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params[BlockType.BORDER_ROUTER]["link capacity"] = float(link_capacity) # in gbps
	block_params[BlockType.BORDER_ROUTER]["num links"] = float((number_of_pods - 1) * per_node_pair_num_links)

	## Declare the aurora network
	block_names_list = ["ju{}".format(x) for x in range(1, number_of_pods + 1, 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()
	#ideal_performance_evaluator(aurora_network, timestamps, traffic_matrices, aggregation_window)

	toe = robust_multi_traffic_topology_engineer_v3.RobustMultiTrafficTopologyEngineerImplementationV3(aurora_network, 10,
																10, all_paths, 
																[traffic_matrix,], 1)
	static_toe = topology_engineer.TopologyEngineer(aurora_network, 10)
	te = robust_multi_cluster_traffic_engineer.RobustMultiClusterTrafficEngineer(aurora_network, all_paths, 10, 10, 1, reduce_multihop=True)
	static_te = robust_multi_cluster_traffic_engineer.RobustMultiClusterTrafficEngineer(aurora_network, all_paths, 10, 10, 1, reduce_multihop=False)
	representative_traffic_matrices = [traffic_matrix, ]


	logical_topology, routing_weight = toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths, minimize_multihop=True)
	#logical_topology = aurora_network.round_fractional_topology_giant_switch(logical_topology, [], rounding_simple=False)
	uniform_topology, _ = static_toe.topology_engineer_given_representative_TMs(copy.deepcopy(representative_traffic_matrices), all_paths)
	
	## Initialize the traffic engineer and get routing weights
	routing_weights_toe = te.compute_path_weights(logical_topology, copy.deepcopy(representative_traffic_matrices))
	routing_weights_static = static_te.compute_path_weights(uniform_topology, copy.deepcopy(representative_traffic_matrices))


	analyze_risk(number_of_pods, logical_topology, routing_weights_toe, representative_traffic_matrices)
	analyze_risk(number_of_pods, uniform_topology, routing_weights_toe, representative_traffic_matrices)
	plt.show()

	mlu_toe, _, _, ahc_toe, _ = evaluate_traffic_matrix_performance(aurora_network, representative_traffic_matrices[0], logical_topology, routing_weights_toe)
	mlu_static, _, _, ahc_static, _ = evaluate_traffic_matrix_performance(aurora_network, representative_traffic_matrices[0], uniform_topology, routing_weights_static)
	compute_routing_weight_stats(number_of_pods, routing_weights_toe)
	compute_routing_weight_stats(number_of_pods, routing_weights_static)
	print("ToE : MLU - {} AHC - {}".format(mlu_toe, ahc_toe))
	print("Static : MLU - {} AHC - {}".format(mlu_static, ahc_static))

	