import sys, os, copy, math
from topology_engineer import *
sys.path.append("..")
#import aurora_network
from gurobipy import *
from robust_multi_traffic_topology_engineer_v3 import *

'''		
Design fractional topology based on 
traffic clusters using LP formulation

Solves ONE topology for all K traffic matrices without scaling them up first
Also this version solves for the MLU directly, rather than uses the traffic scale up factor
to solve for the ideal topology. Therefore, we can immediately solve for the routing weights 
as well
NOTE: This is the best version for tail MLU so far, when used in conjunction with CritMat to find
		the cluster HEADS that bound all other TMs in said cluster.

The TE that should pair best with this ToE algorithm is the robust_multi_cluster_traffic_engineer.py
class, since it has the exact same implementation except the TE already knows what the logical topology is
'''
#combines multiple fractional topology into one
class RobustMultiTrafficTopologyEngineerSensitivity(RobustMultiTrafficTopologyEngineerImplementationV3):
	def __init__(self, aurora_network, reconfig_length, training_length, all_paths, all_traffic_snapshots, numK, minimize_multihop=True, lower_bound_sensitivity=0, upper_bound_sensitivity_relaxation=1):
		"""Set up the sensitivity-tuned topology engineer.

		Args:
			aurora_network: network description object (block/link counts, capacities).
			reconfig_length: reconfiguration interval length (forwarded to the base class).
			training_length: training window length (forwarded to the base class).
			all_paths: all_paths[i][j] lists the allowed inter-block paths from i to j.
			all_traffic_snapshots: traffic snapshots used for training.
			numK: number of traffic clusters to engineer the topology for.
			minimize_multihop: when True, run the extra LP that minimizes multihop reliance.
			lower_bound_sensitivity: requested lower bound on routing-weight sensitivity.
			upper_bound_sensitivity_relaxation: multiplier applied to the max sensitivity
				found by binary search, relaxing the upper bound in later solves.
		"""
		# Fix: the original computed an unused num_snapshots local and ended with a
		# redundant bare return; both removed.
		RobustMultiTrafficTopologyEngineerImplementationV3.__init__(self, aurora_network, reconfig_length, training_length, all_paths, all_traffic_snapshots, numK)
		self.upper_bound_sensitivity_relaxation = upper_bound_sensitivity_relaxation
		self.lower_bound_sensitivity = lower_bound_sensitivity
		self.minimize_multihop = minimize_multihop

	def get_filename_param(self):
		"""Return the parameter-encoded identifier used when naming output files."""
		suffix = "r{}t{}k{}".format(self.reconfig_length, self.training_length, self.numK)
		return "rmtoesensitive_" + suffix

	# returns a string description of this class
	def get_string(self):
		"""Return a human-readable description of this topology engineer."""
		template = "Robust multi traffic ToE with sensitivity tuning: {}"
		return template.format(self.numK)

	# given an MLU, find the topology/routing weights such that the average hop count in the worst case
	# is minimized. This is an LP formulation that directly minimizes hop count, rather than previously done
	# which is implicit through the use of QP to minimize non-minimal path weights
	def _minimize_multihop_direct(self, traffic_matrices, beta_value, maximum_sensivity, minimum_sensitivity, max_tm, all_paths):
		"""Re-solve the topology/routing LP at a fixed scale-up factor, maximizing
		the worst-case fraction of demand carried on direct (single-hop) paths.

		Args:
			traffic_matrices: list of nblocks x nblocks demand matrices.
			beta_value: fixed traffic scale-up factor from the earlier MLU solve
				(mlu = 1/beta_value); the per-pair routing weights sum to it.
			maximum_sensivity: upper sensitivity bound; each weight is capped at
				maximum_sensivity * path_capacity * max_tm[i][j] * beta_value.
			minimum_sensitivity: matching lower sensitivity bound.
			max_tm: element-wise maximum demand over all representative TMs.
			all_paths: all_paths[i][j] is the list of allowed paths (tuples of
				block ids) from i to j; 2-tuples are direct, 3-tuples indirect.

		Returns:
			(adj_matrix, routing_weights) on success, or None if Gurobi raises.
			NOTE(review): the caller in topology_engineer_given_representative_TMs
			unpacks this result directly, so a None return raises there — confirm.
		"""
		## using QP to reduce multihop reliance
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(traffic_matrices)
		model = Model("minimize multihop directly")
		model.setParam( 'OutputFlag', False )
		interpod_link_counts = [None] * nblocks
		routing_weights_var_hat = {}
		# link_capacity_constraints[m][i][j] accumulates the load TM m places on link (i, j)
		link_capacity_constraints = [None] * num_tm
		for tm_index in range(num_tm):
			link_capacity_constraints[tm_index] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[tm_index][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[tm_index][i][j] = LinExpr()

		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					# a pair can use at most as many links as the smaller endpoint owns
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_paths[i][j]:
						routing_weights_var_hat[path] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weight_sum += routing_weights_var_hat[path]
					# transformed weights for each (i, j) pair must sum to beta_value
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=beta_value)

		## add radix degree constraints
		# egress (row) and ingress (col) link totals per pod cannot exceed its radix
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)

		## add link capacity limit for all paths constraints
		# walk each path and charge its weighted demand to every traversed link
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						for path in all_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var_hat[path] * traffic_matrices[tm_index][i][j])
								curr_node = next_node
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][j] * capacity)

		## Finally, add in the sensitivity constraints
		# version 2 constraining
		# path_ikj[(i,k,j)] models min(link_count[i][k], link_count[k][j]) with big-M
		# constraints; binary_ikj is a continuous relaxation of the selector that
		# indicates which of the two hops is the bottleneck.
		path_ikj = {}
		binary_ikj = {}
		large_constant = 1000 * self.aurora_network.get_num_links(0)
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					# direct-path sensitivity bounds:
					# min * links * demand * beta <= w <= max * links * demand * beta
					model.addConstr(lhs=routing_weights_var_hat[(i,j)], sense=GRB.LESS_EQUAL, rhs=maximum_sensivity * interpod_link_counts[i][j] * max_tm[i][j] * beta_value)
					model.addConstr(lhs=minimum_sensitivity * interpod_link_counts[i][j] * max_tm[i][j] * beta_value , sense=GRB.LESS_EQUAL, rhs=routing_weights_var_hat[(i,j)])
					for k in range(nblocks):
						if k != i and k != j:
							path_ikj[(i,k,j)] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="path_{}_{}_{}".format(i, k, j))
							binary_ikj[(i,k,j)] = model.addVar(lb=0, ub=1, obj=0, vtype=GRB.CONTINUOUS, name="binary_{}_{}_{}".format(i, k, j))
							# first impose the binary constraints
							model.addConstr(lhs=interpod_link_counts[k][j] - interpod_link_counts[i][k], sense=GRB.LESS_EQUAL, rhs=large_constant * binary_ikj[(i,k,j)])
							model.addConstr(lhs=interpod_link_counts[i][k] - interpod_link_counts[k][j], sense=GRB.LESS_EQUAL, rhs=large_constant * (1 - binary_ikj[(i,k,j)]))
							# next, impose the X = min(x1,x2) constraints for X
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][k])
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[k][j])
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][k] + large_constant * (1 - binary_ikj[(i,k,j)]))
							# NOTE(review): the "- large_constant * binary" below pushes
							# path_ikj far negative whenever binary_ikj > 0, unlike the
							# "+ large_constant * (1 - b)" form on the previous line.
							# Verify the intended sign (the same pattern recurs in the
							# other sensitivity solvers in this file).
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[k][j] - large_constant * binary_ikj[(i,k,j)])
							# finally add the path capacity variable into the sensitivity constraint
							model.addConstr(lhs=routing_weights_var_hat[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=maximum_sensivity * path_ikj[(i,k,j)] * max_tm[i][j] * beta_value)
							model.addConstr(lhs=minimum_sensitivity * path_ikj[(i,k,j)] * max_tm[i][j] * beta_value, sense=GRB.LESS_EQUAL, rhs=routing_weights_var_hat[(i,k,j)])
		## add in the sensitivity constraints
		# (older version-1 formulation, kept for reference)
		'''
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					model.addConstr(lhs=routing_weights_var_hat[(i,j)], sense=GRB.LESS_EQUAL, rhs=maximum_sensivity * interpod_link_counts[i][j] * max_tm[i][j] * beta_value)
					model.addConstr(lhs=minimum_sensitivity * interpod_link_counts[i][j] * max_tm[i][j] * beta_value , sense=GRB.LESS_EQUAL, rhs=routing_weights_var_hat[(i,j)])
					for k in range(nblocks):
						if k != i and k != j:
							path_capacity = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="z_{}_{}_{}".format(i, k, j))
							model.addConstr(lhs=path_capacity, sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][k])
							model.addConstr(lhs=path_capacity, sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[k][j])
							model.addConstr(lhs=routing_weights_var_hat[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=maximum_sensivity * path_capacity * max_tm[i][j] * beta_value)
							model.addConstr(lhs=minimum_sensitivity * path_capacity * max_tm[i][j] * beta_value, sense=GRB.LESS_EQUAL, rhs=routing_weights_var_hat[(i,k,j)])
		'''

		## maximum average hop count constraint
		# min_direct_hop lower-bounds, over all TMs, the demand-weighted fraction of
		# traffic routed over direct paths; maximizing it minimizes multihop reliance.
		alpha = 1./beta_value
		min_direct_hop = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="min_direct_hop")
		for tm_index, tm in zip(range(num_tm), traffic_matrices):
			traffic_sum = sum([sum(x) for x in tm])
			hop_count = LinExpr()
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						# (i, j) here keys the direct path; alpha rescales the transformed weight
						hop_count += (routing_weights_var_hat[(i, j)] * (alpha * tm[i][j] / traffic_sum))
			model.addConstr(lhs=min_direct_hop, sense=GRB.LESS_EQUAL, rhs=hop_count)

		# set up the objective function
		model.setObjective(min_direct_hop, GRB.MAXIMIZE)
		try:
			# start optimizing
			model.optimize()
			adj_matrix = np.zeros( (nblocks, nblocks,) )
			routing_weights = {}
			# read back the fractional link counts and the (still transformed) weights
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_paths[i][j]:
							routing_weights[path] = routing_weights_var_hat[path].x
			return adj_matrix, routing_weights
		except GurobiError as e:
			print ("MinimizeMultihop: Error code " + str(e. errno ) + ": " + str(e))
		except AttributeError :
			print ("MinimizeMultihop: Encountered an attribute error ")
		return None

	## could maybe try to have multiple sets of routing weights, each corresponding to a specific TM
	def _minimize_mlu_for_all_TMs_transformed(self, traffic_matrices, all_inter_block_paths):
		"""Solve a single LP for a fractional topology and routing weights that
		minimize the worst-case MLU across all given traffic matrices.

		The LP maximizes beta, the common traffic scale-up factor, so mlu = 1/beta.
		Weights are solved in a transformed space where each (i, j) pair's path
		weights sum to beta; they are rescaled by alpha = 1/beta before returning
		so that each pair's weights sum to 1.

		Args:
			traffic_matrices: list of nblocks x nblocks demand matrices.
			all_inter_block_paths: all_inter_block_paths[i][j] lists the allowed
				paths (tuples of block ids) from i to j.

		Returns:
			(beta, adj_matrix, routing_weights) on success, or None on solver error.
			NOTE(review): the caller unpacks the triple directly, so a None return
			raises at the call site — confirm this is intended.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(traffic_matrices)
		model = Model("minimize mlu")
		model.setParam( 'OutputFlag', False )
		# beta is the only variable with a nonzero objective coefficient
		beta = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="beta")
		fractional_topology_var = [None] * nblocks
		routing_weights_var_hat = {}
		# link_capacity_constraints[m][i][j] accumulates the load TM m places on link (i, j)
		link_capacity_constraints = [None] * num_tm
		for m in range(num_tm):
			link_capacity_constraints[m] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[m][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[m][i][j] = LinExpr()
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					# a pair can use at most as many links as the smaller endpoint owns
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var_hat[path] = var
						routing_weight_sum += var
					# transformed weights for each (i, j) pair must sum to beta
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=beta)

		## add radix degree constraints
		# egress (row) and ingress (col) link totals per pod cannot exceed its radix
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs
		# walk each path and charge its weighted demand to every traversed link
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var_hat[path] * traffic_matrices[tm_index][i][j])
								curr_node = next_node

		## add link capacity limit for all paths constraints
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		# set up the objective function
		model.setObjective(beta, GRB.MAXIMIZE)
		# start optimizing
		try:
			model.optimize()
			alpha = 1. / beta.x
			print("Worst case scale up : {}".format(alpha))

			routing_weights = {}
			# rescale the transformed weights so each (i, j) pair's weights sum to 1
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							routing_weights[path] = routing_weights_var_hat[path].x * alpha
			adj_matrix = np.zeros((nblocks, nblocks))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] = fractional_topology_var[i][j].x
			return beta.x, adj_matrix, routing_weights # returns the max scale up factor
		except GurobiError as e:
			print ("_minimize_mlu_for_all_TMs_transformed: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("_minimize_mlu_for_all_TMs_transformed: Encountered an attribute error ")
			return None

	## given a specific worst_case_mlu to preserve, ensure that the worst case sensitivity is preserved
	## max_tm denotes the traffic matrix such that each element (i,j) is the maximum among all representative TMs
	def _try_maximum_sensitivity(self, representative_tms, all_inter_block_paths, beta_value, max_sensitivity, max_tm):
		"""Feasibility probe: can the MLU implied by beta_value still be achieved
		when every routing weight is capped at
		max_sensitivity * path_capacity * max_tm[i][j] * beta_value?

		The model uses a constant objective — the caller only needs to know
		whether a feasible point exists. If the model is infeasible, reading the
		variable values raises AttributeError, which is caught and mapped to
		None; the binary search in topology_engineer_given_representative_TMs
		relies on that None to tighten or relax its bounds.

		Args:
			representative_tms: cluster-head traffic matrices.
			all_inter_block_paths: all_inter_block_paths[i][j] lists paths from i to j.
			beta_value: fixed scale-up factor to preserve (mlu = 1/beta_value).
			max_sensitivity: candidate upper sensitivity bound being tested.
			max_tm: element-wise maximum demand over all representative TMs.

		Returns:
			(adj_matrix, routing_weights) if feasible, else None.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(representative_tms)
		model = Model("Minimize Max Sensitivity MLU preserved")
		model.setParam( 'OutputFlag', False )
		fractional_topology_var = [None] * nblocks
		routing_weights_var_hat = {}
		# link_capacity_constraints[m][i][j] accumulates the load TM m places on link (i, j)
		link_capacity_constraints = [None] * num_tm
		for m in range(num_tm):
			link_capacity_constraints[m] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[m][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[m][i][j] = LinExpr()
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					# a pair can use at most as many links as the smaller endpoint owns
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var_hat[path] = var
						routing_weight_sum += var
					# transformed weights for each (i, j) pair must sum to beta_value
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=beta_value)

		## Add the ingress/egress radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add link utilization constraints of flows for all i j pairs
		# walk each path and charge its weighted demand to every traversed link
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var_hat[path] * representative_tms[tm_index][i][j])
								curr_node = next_node
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		## Finally, add in the sensitivity constraints
		# version 2 constraining
		# path_ikj[(i,k,j)] models min(link_count[i][k], link_count[k][j]) via big-M;
		# binary_ikj is a continuous relaxation of the bottleneck selector.
		path_ikj = {}
		binary_ikj = {}
		large_constant = 1000 * self.aurora_network.get_num_links(0)
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					# direct-path cap: w <= max_sensitivity * links * demand * beta
					model.addConstr(lhs=routing_weights_var_hat[(i,j)], sense=GRB.LESS_EQUAL, rhs=max_sensitivity * fractional_topology_var[i][j] * max_tm[i][j] * beta_value)
					for k in range(nblocks):
						if k != i and k != j:
							path_ikj[(i,k,j)] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="path_{}_{}_{}".format(i, k, j))
							binary_ikj[(i,k,j)] = model.addVar(lb=0, ub=1, obj=0, vtype=GRB.CONTINUOUS, name="binary_{}_{}_{}".format(i, k, j))
							# first impose the binary constraints
							model.addConstr(lhs=fractional_topology_var[k][j] - fractional_topology_var[i][k], sense=GRB.LESS_EQUAL, rhs=large_constant * binary_ikj[(i,k,j)])
							model.addConstr(lhs=fractional_topology_var[i][k] - fractional_topology_var[k][j], sense=GRB.LESS_EQUAL, rhs=large_constant * (1 - binary_ikj[(i,k,j)]))
							# next, impose the X = min(x1,x2) constraints for X
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][k])
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[k][j])
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][k] + large_constant * (1 - binary_ikj[(i,k,j)]))
							# NOTE(review): "- large_constant * binary" differs in sign
							# from the "+ large_constant * (1 - b)" form above — verify
							# the intended big-M direction (pattern repeats file-wide).
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[k][j] - large_constant * binary_ikj[(i,k,j)])
							# finally add the path capacity variable into the sensitivity constraint
							model.addConstr(lhs=routing_weights_var_hat[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=max_sensitivity * path_ikj[(i,k,j)] * max_tm[i][j] * beta_value)

		# version 1 constraining
		'''
		z_ikj = {}
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					model.addConstr(lhs=routing_weights_var_hat[(i,j)], sense=GRB.LESS_EQUAL, rhs=max_sensitivity * fractional_topology_var[i][j] * max_tm[i][j] * beta_value)
					for k in range(nblocks):
						if k != i and k != j:
							path_capacity = LinExpr()
							# add the constraints for path_capacity
							z_ikj[(i, k, j, 1)] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="z_{}_{}_{}_1".format(i, k, j))
							z_ikj[(i, k, j, 2)] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="z_{}_{}_{}_2".format(i, k, j))
							path_capacity = 0.5 * (fractional_topology_var[i][k] + fractional_topology_var[k][j] - z_ikj[(i, k, j, 1)] - z_ikj[(i, k, j, 2)])
							model.addConstr(lhs=fractional_topology_var[i][k] - fractional_topology_var[k][j], sense=GRB.LESS_EQUAL, rhs=z_ikj[(i, k, j, 1)] - z_ikj[(i, k, j, 2)])
							# finally add the path capacity variable into the sensitivity constraint
							model.addConstr(lhs=routing_weights_var_hat[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=max_sensitivity * max_tm[i][j] * beta_value *  path_capacity)
		'''
		# set up the objective function (constant: this is a pure feasibility check)
		model.setObjective(0, GRB.MAXIMIZE)
		# start optimizing
		try:
			model.optimize()
			routing_weights = {}
			mlu = 1./beta_value
			# rescale the transformed weights so each (i, j) pair's weights sum to 1
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							routing_weights[path] = routing_weights_var_hat[path].x * mlu
			adj_matrix = np.zeros((nblocks, nblocks))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] = fractional_topology_var[i][j].x

			return adj_matrix, routing_weights # returns the max scale up factor
		except GurobiError as e:
			print ("Try Maximum Sensitivity: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			# infeasible model: variable .x attributes are unavailable
			print ("Try Maximum Sensitivity: Encountered an attribute error ")
			return None

	## given a specific worst_case_mlu to preserve, ensure that the worst case sensitivity is preserved
	## max_tm denotes the traffic matrix such that each element (i,j) is the maximum among all representative TMs
	def _try_minimum_sensitivity(self, representative_tms, all_inter_block_paths, beta_value, max_sensitivity, min_sensitivity, max_tm):
		"""Feasibility probe: can the MLU implied by beta_value be met while every
		routing weight is BOTH capped above by
		max_sensitivity * path_capacity * max_tm[i][j] * beta_value and bounded
		below by min_sensitivity * path_capacity * max_tm[i][j] * beta_value?

		The objective is constant — only feasibility matters. Infeasibility
		surfaces as an AttributeError when variable values are read back, which
		is caught and mapped to None; the relaxation binary search in
		topology_engineer_given_representative_TMs keys off that None.

		Args:
			representative_tms: cluster-head traffic matrices.
			all_inter_block_paths: all_inter_block_paths[i][j] lists paths from i to j.
			beta_value: fixed scale-up factor to preserve (mlu = 1/beta_value).
			max_sensitivity: (possibly relaxed) upper sensitivity bound.
			min_sensitivity: candidate lower sensitivity bound being tested.
			max_tm: element-wise maximum demand over all representative TMs.

		Returns:
			(adj_matrix, routing_weights) if feasible, else None.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(representative_tms)
		model = Model("Maximize Min Sensitivity MLU preserved")
		model.setParam( 'OutputFlag', False )
		fractional_topology_var = [None] * nblocks
		routing_weights_var_hat = {}
		# link_capacity_constraints[m][i][j] accumulates the load TM m places on link (i, j)
		link_capacity_constraints = [None] * num_tm

		for m in range(num_tm):
			link_capacity_constraints[m] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[m][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[m][i][j] = LinExpr()
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					# a pair can use at most as many links as the smaller endpoint owns
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var_hat[path] = var
						routing_weight_sum += var
					# transformed weights for each (i, j) pair must sum to beta_value
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=beta_value)

		## Add the ingress/egress radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add link utilization constraints of flows for all i j pairs
		# walk each path and charge its weighted demand to every traversed link
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var_hat[path] * representative_tms[tm_index][i][j])
								curr_node = next_node
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		## Finally, add in the sensitivity constraints
		# version 2 constraining
		# path_ikj[(i,k,j)] models min(link_count[i][k], link_count[k][j]) via big-M;
		# binary_ikj is a continuous relaxation of the bottleneck selector.
		path_ikj = {}
		binary_ikj = {}
		large_constant = 1000 * self.aurora_network.get_num_links(0)
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					# direct-path bounds: min <= w/(links * demand * beta) <= max
					model.addConstr(lhs=routing_weights_var_hat[(i,j)], sense=GRB.LESS_EQUAL, rhs=max_sensitivity * fractional_topology_var[i][j] * max_tm[i][j] * beta_value)
					model.addConstr(lhs=min_sensitivity * fractional_topology_var[i][j] * max_tm[i][j] * beta_value , sense=GRB.LESS_EQUAL, rhs=routing_weights_var_hat[(i,j)])
					for k in range(nblocks):
						if k != i and k != j:
							path_ikj[(i,k,j)] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="path_{}_{}_{}".format(i, k, j))
							binary_ikj[(i,k,j)] = model.addVar(lb=0, ub=1, obj=0, vtype=GRB.CONTINUOUS, name="binary_{}_{}_{}".format(i, k, j))
							# first impose the binary constraints
							model.addConstr(lhs=fractional_topology_var[k][j] - fractional_topology_var[i][k], sense=GRB.LESS_EQUAL, rhs=large_constant * binary_ikj[(i,k,j)])
							model.addConstr(lhs=fractional_topology_var[i][k] - fractional_topology_var[k][j], sense=GRB.LESS_EQUAL, rhs=large_constant * (1 - binary_ikj[(i,k,j)]))
							# next, impose the X = min(x1,x2) constraints for X
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][k])
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[k][j])
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][k] + large_constant * (1 - binary_ikj[(i,k,j)]))
							# NOTE(review): "- large_constant * binary" differs in sign
							# from the "+ large_constant * (1 - b)" form above — verify
							# the intended big-M direction (pattern repeats file-wide).
							model.addConstr(lhs=path_ikj[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[k][j] - large_constant * binary_ikj[(i,k,j)])
							# finally add the path capacity variable into the sensitivity constraint
							model.addConstr(lhs=routing_weights_var_hat[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=max_sensitivity * path_ikj[(i,k,j)] * max_tm[i][j] * beta_value)
							model.addConstr(lhs=min_sensitivity * path_ikj[(i,k,j)] * max_tm[i][j] * beta_value, sense=GRB.LESS_EQUAL, rhs=routing_weights_var_hat[(i,k,j)])

		'''
		# version 1 constraining
		z_ikj = {}
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					model.addConstr(lhs=routing_weights_var_hat[(i,j)], sense=GRB.LESS_EQUAL, rhs=max_sensitivity * fractional_topology_var[i][j] * max_tm[i][j] * beta_value)
					model.addConstr(lhs=min_sensitivity * fractional_topology_var[i][j] * max_tm[i][j] * beta_value , sense=GRB.LESS_EQUAL, rhs=routing_weights_var_hat[(i,j)])
					for k in range(nblocks):
						if k != i and k != j:
							path_capacity = LinExpr()
							# add the constraints for path_capacity
							z_ikj[(i, k, j, 1)] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="z_{}_{}_{}_1".format(i, k, j))
							z_ikj[(i, k, j, 2)] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="z_{}_{}_{}_2".format(i, k, j))
							path_capacity = 0.5 * (fractional_topology_var[i][k] + fractional_topology_var[k][j] - z_ikj[(i, k, j, 1)] - z_ikj[(i, k, j, 2)])
							model.addConstr(lhs=fractional_topology_var[i][k] - fractional_topology_var[k][j], sense=GRB.LESS_EQUAL, rhs=z_ikj[(i, k, j, 1)] - z_ikj[(i, k, j, 2)])
							# finally add the path capacity variable into the sensitivity constraint
							model.addConstr(lhs=routing_weights_var_hat[(i,k,j)], sense=GRB.LESS_EQUAL, rhs=max_sensitivity * path_capacity * max_tm[i][j] * beta_value)
							model.addConstr(lhs=min_sensitivity * path_capacity * max_tm[i][j] * beta_value, sense=GRB.LESS_EQUAL, rhs=routing_weights_var_hat[(i,k,j)])
		'''
		# set up the objective function (constant: this is a pure feasibility check)
		model.setObjective(0, GRB.MAXIMIZE)
		# start optimizing
		try:
			model.optimize()
			mlu = 1./beta_value
			routing_weights = {}
			# rescale the transformed weights so each (i, j) pair's weights sum to 1
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							routing_weights[path] = routing_weights_var_hat[path].x * mlu
			adj_matrix = np.zeros((nblocks, nblocks))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] = fractional_topology_var[i][j].x
			## check validity of the sensitivity solution for indirect paths
			'''
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for k in range(nblocks):
							if k != i and k != j:
								close_to_zero_1 = (z_ikj[(i,k,j, 1)].x <= 0.0001)
								close_to_zero_2 = (z_ikj[(i,k,j, 2)].x <= 0.0001)
								if not close_to_zero_2 and not close_to_zero_1:
									print("pair: {}, zikj1 : {}, zikj2 : {}".format((i, k, j), z_ikj[(i,k,j, 1)].x, z_ikj[(i,k,j, 2)].x))
								assert(close_to_zero_2 or close_to_zero_1)
			'''
			return adj_matrix, routing_weights # returns the max scale up factor
		except GurobiError as e:
			print ("Try Minimum Sensitivity: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			# infeasible model: variable .x attributes are unavailable
			print ("Try Minimum Sensitivity: Encountered an attribute error ")
			return None

	# conducts topology engineering given the representative traffic matrices
	# aurora network - Aurora Network, contains all the parameters
	# training_traffic_snapshots - all the snapshots used for training the topology
	# number of k - which is the number of traffic clusters we use to solve engineer topology
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths, tolerance=0.01):
		"""Engineer one fractional topology + routing for all representative TMs.

		Pipeline:
		  1. Solve the unconstrained LP for the best scale-up factor beta (mlu = 1/beta).
		  2. Measure the resulting routing-weight sensitivities, then binary-search the
		     smallest feasible maximum-sensitivity cap that still attains beta.
		  3. Binary-search the largest achievable minimum-sensitivity bound (starting
		     from self.lower_bound_sensitivity) under the relaxed maximum bound.
		  4. Optionally re-solve to minimize multihop reliance, then round the
		     fractional topology with progressive filling.

		Args:
			representative_tms: cluster-head traffic matrices (nblocks x nblocks each).
			all_paths: all_paths[i][j] lists the allowed paths from block i to j.
			tolerance: relative convergence threshold for both binary searches.

		Returns:
			(adj_matrix, routing_weights): the filled adjacency matrix and per-path
			routing weights.
		"""
		# Initialize by getting the number of blocks, and initializing the adjacency matrix of the logical topology
		nblocks = self.aurora_network.get_num_blocks()
		adj_matrix = np.zeros((nblocks, nblocks))
		#assert(self.numK == len(representative_tms))
		# Element-wise maximum demand over all representative TMs; the diagonal is
		# kept at zero (intra-block demand is never routed), matching the original
		# loop that only updated i != j entries.
		max_tm = np.zeros((nblocks, nblocks))
		for tm in representative_tms:
			max_tm = np.maximum(max_tm, tm)
		np.fill_diagonal(max_tm, 0.)
		## Step 1 : We want to figure out the best case MLU / scale up when there are no restrictions on the
		##          ranges of sensitivity
		beta_value, adj_matrix_backup, routing_weights = self._minimize_mlu_for_all_TMs_transformed(representative_tms, all_paths)
		# note that beta is simply the inverse of mlu, i.e. 1/beta = mlu

		## Now we compute the sensitivity of routing weights returned by the previous function
		sensitivity_distribution = []
		for path in routing_weights.keys():
			src = path[0]
			dst = path[-1]
			path_capacity = adj_matrix_backup[src][dst]
			if len(path) == 3:
				# indirect path: capacity is the bottleneck of its two hops
				intermediate = path[1]
				path_capacity = min(adj_matrix_backup[src][intermediate], adj_matrix_backup[intermediate][dst])
			sensitivity = 0
			# BUGFIX: guard on max_tm[src][dst] (the divisor below); the original
			# tested max_tm[dst][src], which could divide by zero for asymmetric TMs.
			if path_capacity > 0 and max_tm[src][dst] > 0:
				sensitivity = routing_weights[path] / path_capacity / max_tm[src][dst]
			sensitivity_distribution.append(sensitivity)
		max_sensitivity_ub = max(sensitivity_distribution)
		max_sensitivity_lb = 0
		upper_sensitivity_error_bound = tolerance * max_sensitivity_ub
		soln = None
		print("max sensitivity upper bound starts with : {}".format(max_sensitivity_ub))
		# Binary search for the smallest max-sensitivity cap that keeps beta feasible;
		# a None probe result means infeasible (tighten lb), non-None means feasible.
		while True:
			attempted_max_sensitivity = (max_sensitivity_ub + max_sensitivity_lb) / 2.
			soln = self._try_maximum_sensitivity(representative_tms, all_paths, beta_value, attempted_max_sensitivity, max_tm)

			if max_sensitivity_ub - max_sensitivity_lb <= upper_sensitivity_error_bound:
				if soln is None:
					# converged on an infeasible midpoint; fall back to the known-feasible ub
					soln = self._try_maximum_sensitivity(representative_tms, all_paths, beta_value, max_sensitivity_ub, max_tm)
					assert(soln is not None)
					max_sensitivity = max_sensitivity_ub
				else:
					max_sensitivity = attempted_max_sensitivity
				break
			else:
				if soln is None:
					max_sensitivity_lb = attempted_max_sensitivity
				else:
					max_sensitivity_ub = attempted_max_sensitivity

		print("The final max sensitivity is : {}".format(max_sensitivity))
		## Having computed the maximum sensitivity, check whether the requested minimum
		## sensitivity is achievable; if not, relax it gracefully via binary search.
		min_sensitivity_ub = self.lower_bound_sensitivity
		min_sensitivity_lb = 0
		min_sensitivity = min_sensitivity_ub
		min_sensitivity_error_bound = tolerance * min_sensitivity
		soln = self._try_minimum_sensitivity(representative_tms, all_paths, beta_value, self.upper_bound_sensitivity_relaxation * max_sensitivity, min_sensitivity, max_tm)
		if soln is None:
			## graceful relaxation through binary search
			while True:
				attempted_min_sensitivity = (min_sensitivity_ub + min_sensitivity_lb) / 2.
				soln = self._try_minimum_sensitivity(representative_tms, all_paths, beta_value, self.upper_bound_sensitivity_relaxation * max_sensitivity, attempted_min_sensitivity, max_tm)
				if min_sensitivity_ub - min_sensitivity_lb <= min_sensitivity_error_bound:
					if soln is None:
						# converged on an infeasible midpoint; fall back to the known-feasible lb
						soln = self._try_minimum_sensitivity(representative_tms, all_paths, beta_value, self.upper_bound_sensitivity_relaxation * max_sensitivity, min_sensitivity_lb, max_tm)
						assert(soln is not None)
						min_sensitivity = min_sensitivity_lb
					else:
						min_sensitivity = attempted_min_sensitivity
					break
				else:
					if soln is None:
						min_sensitivity_ub = attempted_min_sensitivity
					else:
						min_sensitivity_lb = attempted_min_sensitivity
		assert(soln is not None)
		print("Final min sensitivity is : {}".format(min_sensitivity))

		adj_matrix, _ = soln
		# NOTE(review): only soln's adjacency matrix is kept; routing_weights still
		# comes from the Step-1 solve unless the multihop LP below replaces it —
		# confirm this pairing is intentional.
		## Step 2 : Once the best case MLU/scale_up is found, we want to start introducing constraints to the sensitivity,
		##          while preserving the target sensitivity constraints for as much as we can.
		if self.minimize_multihop:
			multihop_soln = self._minimize_multihop_direct(representative_tms, beta_value, self.upper_bound_sensitivity_relaxation * max_sensitivity, min_sensitivity, max_tm, all_paths)
			# BUGFIX: _minimize_multihop_direct returns None on solver failure; the
			# original unpacked it unconditionally, raising TypeError. Fall back to
			# the sensitivity-constrained solution instead.
			if multihop_soln is not None:
				adj_matrix, routing_weights = multihop_soln
		adj_matrix = self.progressive_filling(adj_matrix)
		return adj_matrix, routing_weights