import numpy as np
## import for plotting uses 
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib import cm
import math

def compute_link_utilization_statistics(nblocks, aurora_network, link_utilization_matrix, adj_matrix):
	"""Computes summary statistics of link utilization across inter-block links.

	Arguments:
		nblocks - number of blocks (nodes) in the topology.
		aurora_network - network object; only get_total_links() is used here.
		link_utilization_matrix - nblocks x nblocks per-direction utilization.
		adj_matrix - nblocks x nblocks matrix of link (trunk) counts.

	Returns (mlu, lu90, lu50, alu, lu_distribution_sorted) where:
		mlu - maximum link utilization,
		lu90 / lu50 - 90th / 50th percentile utilization weighted by link
			count; -1. when the percentile index is never reached before the
			last entry (original sentinel semantics, preserved),
		alu - link-count-weighted average utilization,
		lu_distribution_sorted - (utilization, link_count, src, dst) tuples
			sorted by utilization ascending.
	"""
	total_links = 0.
	lu_distribution = []
	total_weighted_lu = 0.
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j:
				lu_distribution.append( (link_utilization_matrix[i][j], adj_matrix[i][j], i, j) )
				total_links += adj_matrix[i][j]
				total_weighted_lu += (adj_matrix[i][j] * link_utilization_matrix[i][j])
	lu_distribution_sorted = sorted(lu_distribution, key=lambda x: x[0])
	# Degenerate topology (no inter-block links): avoid ZeroDivisionError on
	# alu and IndexError on the empty distribution.
	if not lu_distribution_sorted or total_links == 0:
		return 0., -1., -1., 0., lu_distribution_sorted
	# Links not represented in lu_distribution (get_total_links() minus the
	# counted ones) are assumed to sit below every recorded utilization value.
	cumulative_link_counts = (aurora_network.get_total_links() - total_links)
	p50_index = 0.5 * (aurora_network.get_total_links())
	p90_index = 0.9 * (aurora_network.get_total_links())
	lu50 = -1.
	lu90 = -1.
	mlu = lu_distribution_sorted[-1][0]
	for (lu, num_links, _, _) in lu_distribution_sorted:
		# BUGFIX: these two checks were chained with elif, so when both
		# percentile indices were crossed on the same entry (or on the last
		# entry) lu90 was assigned a later value or stayed -1.
		if lu50 < 0 and cumulative_link_counts >= p50_index:
			lu50 = lu
		if lu90 < 0 and cumulative_link_counts >= p90_index:
			lu90 = lu
		cumulative_link_counts += num_links
	alu = total_weighted_lu / total_links
	return mlu, lu90, lu50, alu, lu_distribution_sorted

# computes the routing performance for a topology with a given set of routing weights
# part of static routing. Returns the following tuple 
def evaluate_traffic_matrix_performance(aurora_network, traffic_matrix, topology_adj_matrix, routing_weights):
	"""Evaluates static-routing performance of a traffic matrix on a topology.

	routing_weights maps a path tuple (src, ..., dst) to the fraction of
	traffic_matrix[src][dst] carried on that path.  Path capacity is computed
	for direct (2-node) and single-intermediate (3-node) paths only;
	NOTE(review): a path longer than 3 nodes would silently fall through to
	the direct-link capacity check -- confirm callers only pass 2- or 3-node
	paths.

	Returns (mlu, lu90, lu50, ave_hop_count, lu_distribution); the utilization
	statistics come from compute_link_utilization_statistics and
	ave_hop_count is the traffic-weighted mean hop count.  Raises
	ZeroDivisionError if nothing is routable (total_traffic stays 0).
	"""
	nblocks = len(topology_adj_matrix)
	link_utilization_matrix = np.zeros((nblocks, nblocks,))
	hop_counter = 0.
	total_traffic = 0.
	#unable_to_route = False
	for path in routing_weights:
		src = path[0]
		dst = path[-1]
		weight = routing_weights[path]
		# Skip paths carrying (effectively) zero routing weight.
		if weight <= 0.000001:
			continue
		# Per-trunk capacity is bounded by the slower endpoint; multiplied by
		# the trunk count in the adjacency matrix for aggregate capacity.
		path_capacity = topology_adj_matrix[src][dst] * min(aurora_network.get_link_capacity(src), aurora_network.get_link_capacity(dst))
		if len(path) == 3:
			intermediate = path[1]
			cap_ik = topology_adj_matrix[src][intermediate] * min(aurora_network.get_link_capacity(src), aurora_network.get_link_capacity(intermediate))
			cap_kj = topology_adj_matrix[intermediate][dst] * min(aurora_network.get_link_capacity(dst), aurora_network.get_link_capacity(intermediate))
			# Two-hop capacity is limited by the tighter of the two segments.
			path_capacity = min(cap_ik, cap_kj)
		# Skip pairs with no demand or no usable capacity on this path.
		if traffic_matrix[src][dst] == 0 or path_capacity == 0:
			continue
		traffic_load = weight * traffic_matrix[src][dst]
		
		total_traffic += traffic_load
		# Hop-count contribution is weighted by the traffic carried.
		hop_counter += (float(len(path) - 1) * traffic_load)
		curr_node = src
		# Accumulate utilization on every hop along the path.
		for index in range(1, len(path), 1):
			next_node = path[index]
			capacity = min(aurora_network.get_link_capacity(curr_node), aurora_network.get_link_capacity(next_node))
			link_capacity = capacity * topology_adj_matrix[curr_node][next_node]
			link_utilization_matrix[curr_node][next_node] += (traffic_load / link_capacity)
			curr_node = next_node
	ave_hop_count = hop_counter / total_traffic
	mlu, lu90, lu50, alu, lu_distribution = compute_link_utilization_statistics(nblocks, aurora_network, link_utilization_matrix, topology_adj_matrix)
	return (mlu, lu90, lu50, ave_hop_count, lu_distribution)


## given a list of MLUs, returns the decay rate.
def extract_mlu_decay_rate(mlu_timeseries, mlu_min=None, mlu_max=None, nbins=100):
	"""Returns the inverse CDF (decay curve) of a list of MLU samples.

	Arguments:
		mlu_timeseries - non-empty sequence of MLU values.
		mlu_min, mlu_max - optional explicit histogram range; when both are
			given they override the observed min/max of the samples.
		nbins - number of histogram bins (nbins + 1 x-axis points are built).

	Returns (x_axis, inverse_cdf), both of length nbins + 1, where
	inverse_cdf[i] is the fraction of samples falling in bins above i.
	"""
	number_of_entries = len(mlu_timeseries)
	constant = 1. / number_of_entries
	sorted_mlu_timeseries = sorted(mlu_timeseries)
	distance = sorted_mlu_timeseries[-1] - sorted_mlu_timeseries[0]
	starting_point = sorted_mlu_timeseries[0]
	if mlu_min is not None and mlu_max is not None:
		distance = mlu_max - mlu_min
		starting_point = mlu_min
	frequency_of_bins = [0] * (nbins + 1)
	x_axis = [0] * (nbins + 1)
	x_axis[0] = starting_point
	for i in range(1, len(x_axis), 1):
		x_axis[i] = x_axis[i - 1] + (float(distance) / nbins)
	for mlu in mlu_timeseries:
		# BUGFIX: a degenerate range (all samples equal, or mlu_min == mlu_max)
		# used to raise ZeroDivisionError, and a sample below an explicit
		# mlu_min produced a negative bin index.  Clamp the index to [0, nbins].
		if distance == 0:
			index = 0
		else:
			index = int(float(mlu - starting_point) / distance * nbins)
		index = max(0, min(index, len(x_axis) - 1))
		frequency_of_bins[index] += constant
	cdf = np.cumsum(frequency_of_bins)
	inverse_cdf = [1. - x for x in cdf]
	assert(len(inverse_cdf) == len(x_axis))
	return x_axis, inverse_cdf

def __extract_critical_podpairs(aurora_network, nblocks, logical_topology_adj_matrix, routing_weights, traffic_matrices, threshold=0.99):
	"""Partitions ordered pod pairs into critical / non-critical sets.

	For every traffic matrix, demand is spread along routing_weights (path
	tuple -> weight) and per-link utilization is accumulated; a pair (i, j)
	is critical when its worst-case utilization over all traffic matrices is
	within `threshold` of the overall MLU.

	Returns (critical_pairs, non_critical_pairs, main_utilization_matrix)
	where main_utilization_matrix[i][j] is the element-wise max utilization
	seen for link (i, j).  Side effect: prints both pair lists.
	"""
	critical_pairs = []
	non_critical_pairs = []
	utilization_matrices = [None] * len(traffic_matrices)
	main_utilization_matrix = np.zeros((nblocks, nblocks))
	mlu = 0
	for tm_index, tm in enumerate(traffic_matrices):
		utilization_matrices[tm_index] = np.zeros((nblocks, nblocks))
		for path in routing_weights:
			src = path[0]
			# BUGFIX: dst was read as path[1], which on a 3-node path is the
			# intermediate hop, so the demand looked up below was
			# tm[src][intermediate] instead of tm[src][dst] (sibling functions
			# use path[-1]).
			dst = path[-1]
			weight = routing_weights[path]
			current_node = src
			for next_node in path[1:]:
				capacity = min(aurora_network.get_link_capacity(current_node), aurora_network.get_link_capacity(next_node))
				utilization_matrices[tm_index][current_node][next_node] += (weight * tm[src][dst] / (logical_topology_adj_matrix[current_node][next_node] * capacity))
				current_node = next_node
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					main_utilization_matrix[i][j] = max(main_utilization_matrix[i][j], utilization_matrices[tm_index][i][j])
					mlu = max(mlu, main_utilization_matrix[i][j])
	# analyze critical pairs
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j:
				if main_utilization_matrix[i][j] >= threshold * mlu:
					critical_pairs.append((i,j))
				else:
					non_critical_pairs.append((i,j))
	print("Non critical pod pairs : {}".format(non_critical_pairs))
	print("Critical pod pairs : {}".format(critical_pairs))
	return critical_pairs, non_critical_pairs, main_utilization_matrix

def evaluate_singlehop_performance(aurora_network, nblocks, logical_topology_adj_matrix, traffic_matrix):
	"""Evaluates performance when every demand uses only its direct link.

	Unroutable demand (positive traffic but zero direct capacity) is given a
	huge sentinel utilization (1E15) so it dominates the MLU; pairs with
	neither demand nor capacity count as zero.  The hop count is 1 by
	construction.  Returns (mlu, lu90, lu50, 1, lu_distribution_sorted).
	"""
	lu_matrix = np.zeros((nblocks, nblocks))
	for src in range(nblocks):
		for dst in range(nblocks):
			if src == dst:
				continue
			per_trunk = min(aurora_network.get_link_capacity(src), aurora_network.get_link_capacity(dst))
			direct_capacity = per_trunk * logical_topology_adj_matrix[src][dst]
			if direct_capacity == 0:
				# No direct capacity: sentinel if there is demand, else zero.
				lu_matrix[src][dst] = float(1E15) if traffic_matrix[src][dst] > 0 else 0
			else:
				lu_matrix[src][dst] = traffic_matrix[src][dst] / float( per_trunk * logical_topology_adj_matrix[src][dst] )
	mlu, lu90, lu50, alu, lu_distribution_sorted = compute_link_utilization_statistics(nblocks, aurora_network, lu_matrix, logical_topology_adj_matrix)
	return (mlu, lu90, lu50, 1, lu_distribution_sorted)

# evaluates the fat tree performance
# tapering - the fraction of link counts between the core and aggregation links, must be in range (0, 1]
def evaluate_fattree_ecmp_performance(aurora_network, traffic_matrix, tapering=1.):
	"""Evaluates ECMP performance on a (possibly tapered) fat tree.

	tapering is the fraction of link counts kept between the core and
	aggregation layers, in (0, 1].  Each block's utilization is the larger of
	its total ingress and egress demand divided by its tapered uplink
	capacity.

	Returns (mlu, lu90, lu50, ahc, lu_output_vector) with the average hop
	count fixed at 2; lu_output_vector holds
	(utilization, link_count, block, block) tuples sorted ascending.
	"""
	nblocks = aurora_network.get_num_blocks()
	per_block_utilization = [0] * nblocks
	lu_output_vector = []
	for block in range(nblocks):
		uplinks = math.floor(aurora_network.get_num_links(block))
		total_capacity = tapering * uplinks * aurora_network.get_link_capacity(block)
		ingress = 0.
		egress = 0.
		for peer in range(nblocks):
			if peer == block:
				continue
			ingress += traffic_matrix[peer][block]
			egress += traffic_matrix[block][peer]
		lu_output_vector.append((egress/total_capacity, aurora_network.get_num_links(block), block, block))
		lu_output_vector.append((ingress/total_capacity, aurora_network.get_num_links(block), block, block))
		per_block_utilization[block] = max(egress, ingress) / total_capacity
	lu_output_vector.sort(key=lambda entry: entry[0])
	ranked = sorted(per_block_utilization)
	mlu = ranked[-1]
	lu90 = ranked[int(0.9 * len(ranked))]
	lu50 = ranked[int(0.5 * len(ranked))]
	return (mlu, lu90, lu50, 2, lu_output_vector)

def analyze_sensitivity_pairwise(aurora_network, nblocks, topology, routing_weights, traffic_matrices):
	"""Computes a per-link sensitivity distribution (max weight per trunk).

	Sensitivity of a link is the largest routing weight any path places on
	it, divided by the link's trunk count in `topology` (the commented-out
	variants below normalized by peak demand or accumulated instead of
	taking the max).  When routing_weights is None an all-direct routing
	with weight 1.0 per ordered pair is assumed.

	Returns the sorted list of per-link sensitivities.  Side effects: prints
	critical / non-critical pod pairs via __extract_critical_podpairs and
	asserts that each pair's path weights sum to 1.
	"""
	if routing_weights is None:
		routing_weights = {}
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					routing_weights[(i, j)] = 1.
	# Element-wise peak demand over all traffic matrices.
	max_traffic_matrix = np.zeros((nblocks, nblocks))
	for tm in traffic_matrices:
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					max_traffic_matrix[i][j] = max(max_traffic_matrix[i][j], tm[i][j])
	sensitivity_matrix = np.zeros((nblocks, nblocks))
	path_weight_sum = np.zeros((nblocks, nblocks))
	for path in routing_weights:
		src = path[0]
		dst = path[-1]
		weight = routing_weights[path]
		path_weight_sum[src][dst] += weight
		traffic_demand = max_traffic_matrix[src][dst]
		if len(path) > 2:
			# Two-hop path: attribute the weight to both of its links.
			# NOTE(review): traffic_demand > 0 only gates the update here --
			# pairs with zero peak demand contribute no sensitivity.
			intermediate = path[1]
			if topology[src][intermediate] > 0 and traffic_demand > 0:
				#sensitivity_matrix[src][intermediate] += (weight/topology[src][intermediate]/traffic_demand)
				sensitivity_matrix[src][intermediate] = max(weight/topology[src][intermediate], sensitivity_matrix[src][intermediate])
				#sensitivity_matrix[src][intermediate] += weight/topology[src][intermediate]
			if topology[intermediate][dst] > 0 and traffic_demand > 0:
				#sensitivity_matrix[intermediate][dst] += (weight/topology[intermediate][dst]/traffic_demand)
				sensitivity_matrix[intermediate][dst] = max(weight/topology[intermediate][dst], sensitivity_matrix[intermediate][dst])
				#sensitivity_matrix[intermediate][dst] += weight/topology[intermediate][dst]
		else:
			# Direct path: only 2-node tuples are expected beyond this point.
			assert(len(path) == 2)
			if topology[src][dst] > 0 and traffic_demand > 0:
				#sensitivity_matrix[src][dst] += (weight/topology[src][dst]/traffic_demand)
				sensitivity_matrix[src][dst] = max(weight/topology[src][dst], sensitivity_matrix[src][dst])
				#sensitivity_matrix[src][dst] += weight/topology[src][dst]
	sensitivity_distribution = []
	utilization_distribution = []
	_, _, utilization_matrix = __extract_critical_podpairs(aurora_network, nblocks, topology, routing_weights, traffic_matrices, threshold=0.98)
	direct_weights = []
	max_entries = []
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j:
				sensitivity_distribution.append(sensitivity_matrix[i][j])
				utilization_distribution.append(utilization_matrix[i][j])
				# Invariant: the routing weights of each pair's paths form a
				# probability distribution (sum to 1 within tolerance).
				assert(abs(1 - path_weight_sum[i][j]) <= 0.00001)
				direct_weights.append(routing_weights[(i,j)])
				max_entries.append(max_traffic_matrix[i][j])
	#fig = plt.figure()
	#plt.scatter(direct_weights, max_entries)
	return sorted(sensitivity_distribution)

def analyze_sensitivity(aurora_network, nblocks, topology, routing_weights, traffic_matrices):
	"""Computes per-path sensitivity scores (weight / capacity / peak demand).

	For each ordered pair (i, j) the direct path (i, j) and every two-hop
	path (i, k, j) receive a score of weight / path_capacity / peak_demand,
	or 0 when the path has no capacity or the pair has no demand.  The ten
	highest-scoring paths are printed, then the sorted scores are returned.
	"""
	# Element-wise peak demand across all traffic matrices.
	peak_demand = np.zeros((nblocks, nblocks))
	for tm in traffic_matrices:
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					peak_demand[i][j] = max(peak_demand[i][j], tm[i][j])
	scored_paths = []
	num_zero_routing_weights = 0
	for i in range(nblocks):
		for j in range(nblocks):
			if i == j:
				continue
			direct_weight = routing_weights[(i, j)]
			direct_score = 0
			if topology[i][j] > 0 and peak_demand[i][j] > 0:
				direct_score = direct_weight / topology[i][j] / peak_demand[i][j]
			scored_paths.append(( direct_score, (i, j) ))
			_risk = direct_weight * peak_demand[i][j]	# computed but unused, kept from original
			for k in range(nblocks):
				if k == i or k == j:
					continue
				indirect_weight = routing_weights[(i, k, j)]
				bottleneck = min(topology[i][k], topology[k][j])
				indirect_score = 0
				if bottleneck > 0 and peak_demand[i][j] > 0:
					indirect_score = indirect_weight / bottleneck / peak_demand[i][j]
				scored_paths.append((indirect_score, (i, k, j)))
	scored_paths.sort(key=lambda entry: entry[0])
	# Report the ten most sensitive paths, walking back from the end.
	for rank in range(1, 11):
		score, hops = scored_paths[-rank]
		if len(hops) == 2:
			a, b = hops
			bottleneck = topology[a][b]
		elif len(hops) == 3:
			a, m, b = hops
			bottleneck = min(topology[a][m], topology[m][b])
		else:
			raise Exception("cannot have this length")
		print("entry : {} x_ij : {} value : {}".format(hops, bottleneck, score))
	return [entry[0] for entry in scored_paths]