'''
Contains all the main functions of ToE
'''
import sys, os, copy, math
from gurobipy import *
import numpy as np
import aurora_network
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from enum import Enum
#matplotlib.use('Agg')

class TopologyEngineeringClass(Enum):
	'''Tags which topology-engineering strategy a TopologyEngineer implements.'''
	STATIC_UNIFORM = 1  # static uniform mesh, never reconfigured (base TopologyEngineer)
	ROBUST_TOE = 2      # robust multi-traffic ToE built from clustered traffic history
	IDEAL = 3           # NOTE(review): not used in this file -- semantics defined elsewhere
	AVE_TOE = 4         # NOTE(review): not used in this file -- semantics defined elsewhere
	MAX_TOE = 5         # NOTE(review): not used in this file -- semantics defined elsewhere

# by default it is a static uniform mesh controller
class TopologyEngineer(object):
	'''
	Base topology engineer: serves a single static, uniform mesh topology for
	the whole trace. Subclasses repopulate cached_logical_topologies with one
	adjacency matrix (2D numpy array) per (start_time, end_time) interval.
	'''
	def __init__(self, aurora_network, num_snapshots):
		# num_snapshots tells the engineer how long the trace is, so queries by
		# snapshot index can be mapped to the topology active at that time.
		self.aurora_network = aurora_network
		self.training_length = 0
		self.reconfig_length = num_snapshots
		self.num_snapshots = num_snapshots
		# maps (start_time, end_time) -> adjacency matrix active in that interval
		self.cached_logical_topologies = {}
		self.toe_class = TopologyEngineeringClass.STATIC_UNIFORM
		# the static engineer serves one uniform mesh covering the entire trace
		self.cached_logical_topologies[(0, num_snapshots - 1)] = self._generate_uniform_logical_topology()

	def get_filename_param(self):
		'''Short tag used to label output filenames.'''
		return "static"

	def topology_engineering_class(self):
		'''The TopologyEngineeringClass value of this engineer.'''
		return self.toe_class

	def is_static(self):
		'''True: the base engineer never reconfigures the topology.'''
		return True

	def get_string(self):
		'''Human-readable description of this engineer.'''
		return "Static topology (i.e. no ToE)"

	def get_topology_intervals(self):
		'''All cached (start, end) intervals, ordered by start time.'''
		return sorted(self.cached_logical_topologies.keys(), key=lambda interval: interval[0])

	def get_training_length(self):
		'''Number of history snapshots used to fit each topology.'''
		return self.training_length

	def get_reconfig_length(self):
		'''Number of snapshots each topology stays active.'''
		return self.reconfig_length

	def _generate_uniform_logical_topology(self):
		'''
		Build the uniform mesh: each block spreads its links evenly over the
		other (nblocks - 1) blocks, and each directed pair receives the
		bottleneck of the two endpoints' per-destination shares.
		'''
		nblocks = self.aurora_network.get_num_blocks()
		# per-block even share of links toward each of the other blocks
		even_share = [float(self.aurora_network.get_num_links(b)) / (nblocks - 1) for b in range(nblocks)]
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		for src in range(nblocks):
			for dst in range(nblocks):
				if src != dst:
					adj_matrix[src][dst] = min(even_share[src], even_share[dst])
		return adj_matrix

	def get_logical_topology_at_time(self, time_index):
		'''
		Adjacency matrix (2D numpy array) active at snapshot time_index, or
		None when no cached interval covers that index.
		'''
		assert(time_index < self.num_snapshots)
		for interval, topology in self.cached_logical_topologies.items():
			if interval[0] <= time_index <= interval[1]:
				return topology
		return None

	def get_topology_starting_times(self):
		'''Sorted list of start times, one per cached topology interval.'''
		return sorted(start for (start, _) in self.cached_logical_topologies.keys())

'''		
Design fractional topology based on 
traffic clusters using LP formulation
'''
#combines multiple fractional topology into one
class RobustMultiTrafficTopologyEngineer(TopologyEngineer):
	'''
	Robust multi-traffic ToE. For each reconfiguration interval it clusters
	the recent traffic snapshots into (at most) numK representative traffic
	matrices, engineers one fractional topology per representative through an
	LP/QP pipeline, and combines them into a single robust fractional topology.
	'''
	def __init__(self, aurora_network, training_length, reconfig_length, all_paths, all_traffic_snapshots, numK, ignore_flows=None):
		# aurora_network - network model (block/link counts, capacities, helper routines)
		# training_length - number of history snapshots used to engineer each topology
		# reconfig_length - number of snapshots each engineered topology stays active
		# all_paths - all_paths[i][j] lists candidate paths (node tuples) from block i to j
		# all_traffic_snapshots - the full trace of traffic snapshots
		# numK - number of traffic clusters used by topology engineering
		# ignore_flows - optional collection of (src, dst) pairs exempt from traffic padding
		num_snapshots = len(all_traffic_snapshots)
		TopologyEngineer.__init__(self, aurora_network, num_snapshots)
		self.flow_pairs_to_ignore = []
		if ignore_flows is not None:
			self.flow_pairs_to_ignore = ignore_flows
		self.toe_class = TopologyEngineeringClass.ROBUST_TOE
		self.training_length = training_length
		self.reconfig_length = reconfig_length
		self.numK = numK
		self.num_snapshots = num_snapshots
		self.all_traffic_snapshots = all_traffic_snapshots
		self.all_paths = all_paths
		## NOTE: have to rezero the cached logical topologies here because when we initialize the parent class,
		## 		it will assume that the topology is static (see TopologyEngineer class above).
		self.cached_logical_topologies = {}
		return

	# must be called before running other code
	def cache_logical_topologies(self, round_to_integer=False):
		'''
		Compute and cache the logical topology for every reconfiguration
		interval. The warm-up window [0, training_length) has no history to
		learn from, so it gets the uniform mesh.
		round_to_integer - if True, round each fractional topology to an
		integer link allocation via the network's giant-switch rounding routine.
		'''
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		## Start computing the logical topologies
		current_time = self.training_length
		first_interval = None # first engineered interval, used for the summary printout below
		print("Caching logical topologies for robust ToE.....")
		while current_time < self.num_snapshots:
			ending_time = min(current_time + self.reconfig_length - 1, self.num_snapshots - 1)
			training_snapshots = self.all_traffic_snapshots[current_time - self.training_length : current_time]
			adj_matrix = self._topology_engineer(training_snapshots, self.all_paths)
			if round_to_integer:
				self.cached_logical_topologies[(current_time, ending_time)] = self.aurora_network.round_fractional_topology_giant_switch(adj_matrix, self.flow_pairs_to_ignore)
			else:
				self.cached_logical_topologies[(current_time, ending_time)] = adj_matrix
			if first_interval is None:
				first_interval = (current_time, ending_time)
			current_time += self.reconfig_length
		print("Caching logical topologies: COMPLETE \n")
		## BUGFIX: the summary used the key (training_length, training_length +
		## reconfig_length - 1), which raises KeyError whenever the first
		## interval is clipped at the end of the trace (or no interval was
		## engineered at all). Index with the actual first interval instead.
		if first_interval is not None:
			print("first logical topology: \n{}\n".format(self.cached_logical_topologies[first_interval]))
			self.aurora_network.print_stats(self.cached_logical_topologies[first_interval])
		return

	def get_filename_param(self):
		# Filename tag encoding reconfig length, training length and cluster count.
		return "robusttoe_r{}t{}c{}".format(self.reconfig_length, self.training_length, self.numK)

	# returns a string description of this class
	def get_string(self):
		return "Robust multi traffic ToE with K clusters : {}".format(self.numK)

	def is_static(self):
		# Dynamic: topologies are recomputed every reconfig_length snapshots.
		return False

	# internal method
	# Should not be called by user, it is only called as a subroutine by topology_engineer
	def _combine_fractional_topologies(self, list_of_topologies):
		'''
		LP: find a single fractional topology that dominates alpha * each input
		topology for a common factor alpha, maximizing alpha subject to each
		block's radix limits. Returns the combined adjacency matrix, or None on
		solver failure.
		'''
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("combine fractional topologies")
		model.setParam( 'OutputFlag', False )
		alpha = model.addVar(lb=0., ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="alpha")
		adj_matrix = np.zeros((nblocks, nblocks))
		combined_fractional_topology = {} # stores the fractional link count optimization variables
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					## BUGFIX: every link-count variable was also named "alpha",
					## which made the model impossible to inspect; names do not
					## affect the solution, so give each variable a unique one.
					## (obj=0 here: setObjective below replaces the objective anyway.)
					combined_fractional_topology[(i,j,)] = model.addVar(lb=0., ub=GRB.INFINITY, obj=0., vtype=GRB.CONTINUOUS, name="clc" + str(i) + "_" + str(j))
					# the combined topology must dominate alpha * every input topology
					for topol_id in range(len(list_of_topologies)):
						model.addConstr(lhs=combined_fractional_topology[(i,j,)], sense=GRB.GREATER_EQUAL, rhs=alpha * float(list_of_topologies[topol_id][i][j]))
		## add pod radix constraints
		for block in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(block)
			for target_block in range(nblocks):
				if target_block != block:
					row_constraint.add(combined_fractional_topology[(block, target_block,)], mult=1.)
					col_constraint.add(combined_fractional_topology[(target_block, block,)], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
		model.setObjective(alpha, GRB.MAXIMIZE)
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = combined_fractional_topology[(i, j)].x
			return adj_matrix
		except GurobiError as e:
			print ("CombineFractionalTopologies: Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError :
			print ("CombineFractionalTopologies: Encountered an attribute error ")
			return None

	# given a scale up factor, figure out the routing that minimizes two-hop paths if they exists
	def _minimize_multihop(self, traffic_matrix, scaling_factor, all_paths):
		'''
		QP: among topologies/routings that carry scaling_factor * traffic_matrix,
		pick one minimizing the sum of squared routing weights placed on
		multi-hop (more than 2-node) paths. Returns the fractional adjacency
		matrix, or None when the model is infeasible / the solver fails --
		callers rely on None to retry with a relaxed scaling factor.
		'''
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("minimize multihop")
		model.setParam( 'OutputFlag', False )
		interpod_link_counts = [None] * nblocks
		routing_weights = {} # path tuple -> routing weight variable
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		objective_function = QuadExpr()
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					link_capacity_constraints[i][j] = LinExpr()
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + "_" + str(j))
					for path in all_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights[path] = var
						# if multihop path, then add to objective function
						if len(path) > 2:
							objective_function.add(var * var, mult=1.)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)

		## add achievability of flows for all i j pairs, accumulating each
		## path's weight onto every link it traverses along the way
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					flow_achievability_constr = LinExpr()
					for path in all_paths[i][j]:
						flow_achievability_constr.add(routing_weights[path], mult=1.)
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights[path], mult=1.)
							curr_node = next_node
					model.addConstr(lhs=scaling_factor * traffic_matrix[i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)

		## add link capacity limit for all paths constraints
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					## BUGFIX: both operands previously used get_link_capacity(i),
					## ignoring the destination block's capacity; the second must be
					## the destination j (matching _maximize_traffic_scaleup below).
					capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][j] * capacity)
		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
			return adj_matrix
		except GurobiError as e:
			print ("MinimizeMultihop: Error code " + str(e.errno) + ": " + str(e))
		except AttributeError as e1:
			print ("MinimizeMultihop: Encountered an attribute error : " + str(e1))

	def _reinforce_fractional_topology(self, adj_matrix):
		'''
		Top up a combined fractional topology: distribute each block's leftover
		(unallocated) ingress/egress radix across block pairs by minimizing the
		sum of squared gaps between each pair's reinforcement and its upper
		bound, then add the reinforcement into adj_matrix in place.
		Returns the reinforced matrix, or None on solver failure.
		'''
		model = Model("Reinforce FractionalTopology")
		model.setParam( 'OutputFlag', False )
		nblocks = self.aurora_network.get_num_blocks()
		leftover_incoming = [0.] * nblocks
		leftover_outgoing = [0.] * nblocks
		reinforced_links = [None] * nblocks
		objective_function = QuadExpr()
		for i in range(nblocks):
			reinforced_links[i] = [None] * nblocks
			row_sum = 0.
			col_sum = 0.
			for j in range(nblocks):
				if i != j:
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					reinforced_links[i][j] = model.addVar(lb=0., ub=upper_bound, obj=0., vtype=GRB.CONTINUOUS, name="rlc" + str(i) + "_" + str(j))
					# penalize the squared gap to the upper bound, pushing each
					# pair's reinforcement as high as the leftovers allow
					diff = upper_bound - reinforced_links[i][j]
					objective_function.add(diff * diff, mult=1.)
					row_sum += adj_matrix[i][j]
					col_sum += adj_matrix[j][i]
			leftover_outgoing[i] = self.aurora_network.get_num_links(i) - row_sum
			leftover_incoming[i] = self.aurora_network.get_num_links(i) - col_sum

		## reinforcements must fit within each block's leftover radix
		for i in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for j in range(nblocks):
				if i != j :
					row_constraint.add(reinforced_links[i][j], mult=1.)
					col_constraint.add(reinforced_links[j][i], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_outgoing[i])
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_incoming[i])

		model.setObjective(objective_function, GRB.MINIMIZE)
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] += reinforced_links[i][j].x
			return adj_matrix
		except GurobiError as e:
			print ("ReinforceFractionalTopology: Error code " + str(e.errno) + ": " + str(e))
			return
		except AttributeError :
			print ("ReinforceFractionalTopology: Encountered an attribute error ")
			return

	# first step of topology design which solves the LP that maximizes traffic scale up
	def _maximize_traffic_scaleup(self, traffic_matrix, all_inter_block_paths):
		'''
		LP that jointly chooses a fractional topology and per-path routing
		weights maximizing the factor mu by which traffic_matrix can be scaled
		while staying routable, subject to per-pod radix limits and per-link
		capacity limits. Returns the optimal mu, or None on solver failure.
		'''
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		mu = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="mu")
		fractional_topology_var = [None] * nblocks
		routing_weights_var = [None] * nblocks
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			routing_weights_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weights_var[i][j] = []
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[i][j].append(var)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs, accumulating each
		## path's weight onto every link it traverses along the way
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					flow_achievability_constr = LinExpr()
					for weight_index in range(len(all_inter_block_paths[i][j])):
						flow_achievability_constr.add(routing_weights_var[i][j][weight_index], 1.)
						path = all_inter_block_paths[i][j][weight_index]
						curr_node = path[0]
						for next_node_index in range(1, len(path), 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[i][j][weight_index], 1.)
							curr_node = next_node
					model.addConstr(lhs=mu * traffic_matrix[i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)

		## add link capacity limit for all paths constraints
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		# set up the objective function
		model.setObjective(mu, GRB.MAXIMIZE)
		# start optimizing
		try:
			model.optimize()
			return mu.x # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	## Computes the cluster centroids for a bunch of historical traffic matrices
	## These will then be used for topology engineering
	def _compute_clusters(self, training_points, run_pca_before_clustering=True, num_clustering_components=20):
		'''
		Cluster the non-negative traffic vectors into numK centroids, optionally
		reducing dimensionality with PCA first. If there are no more points
		than clusters, the points themselves are returned unchanged.
		'''
		if len(training_points) <= self.numK:
			return training_points
		traffic_points_for_training = training_points
		principal_components = None
		for point in training_points:
			for entry in point:
				assert(entry >= 0)
		if run_pca_before_clustering:
			pca = PCA(n_components=num_clustering_components)
			traffic_points_for_training = pca.fit_transform(training_points)
			principal_components = pca.components_
		kmeans = KMeans(n_clusters=self.numK, random_state=0).fit(traffic_points_for_training)
		cluster_centroids = kmeans.cluster_centers_
		# transform the clusters (which are in the dimension-reduced space) back to the dimension of traffic points
		# NOTE(review): this reconstruction ignores pca.mean_, so the mapped
		# centroids are in PCA's mean-centered space -- confirm this is intended.
		if run_pca_before_clustering:
			traffic_centroid_points = np.array(np.matmul(cluster_centroids, principal_components))
		else:
			traffic_centroid_points = cluster_centroids
		# reconstruction can introduce small negative entries; clamp them to zero
		for centroid_offset in range(len(traffic_centroid_points)):
			for i in range(len(traffic_centroid_points[centroid_offset])):
				if traffic_centroid_points[centroid_offset][i] < 0:
					traffic_centroid_points[centroid_offset][i] = 0.
		return traffic_centroid_points

	## Debug helper: clusters the training points twice -- once after reducing to
	## num_clustering_components PCA dimensions and once after a full-rank PCA --
	## and prints the resulting centroids side by side for comparison.
	def _compute_clusters_compare(self, training_points, num_clustering_components=20):
		nblocks = self.aurora_network.get_num_blocks()

		pca1 = PCA(n_components=num_clustering_components)
		reduced_points = pca1.fit_transform(training_points)
		kmeans = KMeans(n_clusters=self.numK, random_state=0).fit(reduced_points)
		# map centroids from the reduced space back to traffic-vector space
		traffic_centroid_points1 = np.array(np.matmul(kmeans.cluster_centers_, pca1.components_))

		pca2 = PCA(n_components=nblocks * (nblocks - 1))
		full_points = pca2.fit_transform(training_points)
		kmeans = KMeans(n_clusters=self.numK, random_state=0).fit(full_points)
		traffic_centroid_points2 = np.array(np.matmul(kmeans.cluster_centers_, pca2.components_))

		for ind in range(self.numK):
			print("")
			print("traffic centroid point (PCA) : {}".format(traffic_centroid_points1[ind]))
			print("traffic centroid point (no PCA) : {}".format(traffic_centroid_points2[ind]))
			print("")
		## BUGFIX: this method previously returned the undefined name
		## traffic_centroid_points, raising NameError on every call. Return the
		## reduced-PCA centroids, matching the contract of _compute_clusters.
		return traffic_centroid_points1

	# conducts topology engineering
	# training_traffic_snapshots - all the snapshots used for training the topology
	# all_paths - candidate paths between every block pair
	# spare_traffic - fraction of total traffic redistributed to near-zero entries for robustness
	# fraction_threshold - entries below fraction_threshold * total traffic count as near-zero
	def _topology_engineer(self, training_traffic_snapshots, all_paths, spare_traffic=0.1, fraction_threshold=0.002):
		# first get number of blocks
		nblocks = self.aurora_network.get_num_blocks()
		# run k-means clustering on the normalized traffic vectors
		traffic_points = np.array([self.aurora_network.get_traffic_vector_from_traffic_snapshot(x, normalize_to=1.) for x in training_traffic_snapshots])
		traffic_cluster_centroid_points = self._compute_clusters(traffic_points, run_pca_before_clustering=False)
		nrows = traffic_cluster_centroid_points.shape[0]
		list_of_fractional_topologies = [None] * nrows
		for topol_id in range(nrows):
			traffic_matrix = self.aurora_network.transform_traffic_point_to_traffic_matrix(traffic_cluster_centroid_points[topol_id])
			## Robustness padding: spread spare_traffic worth of demand across
			## the (non-ignored) entries below the threshold, so the engineered
			## topology keeps some capacity for currently-idle block pairs.
			traffic_sum = sum([sum(x) for x in traffic_matrix])
			reset_num_entries = 0
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j and self.flow_pairs_to_ignore is not None and (i,j) not in self.flow_pairs_to_ignore:
						if traffic_matrix[i][j] < fraction_threshold * traffic_sum:
							reset_num_entries += 1
			if reset_num_entries > 0:
				scratch_traffic = (spare_traffic * traffic_sum) / (1 - spare_traffic) / float(reset_num_entries)
				for i in range(nblocks):
					for j in range(nblocks):
						if i != j and self.flow_pairs_to_ignore is not None and (i,j) not in self.flow_pairs_to_ignore:
							if traffic_matrix[i][j] < fraction_threshold * traffic_sum:
								traffic_matrix[i][j] = scratch_traffic
			mu = self._maximize_traffic_scaleup(traffic_matrix, all_paths)
			## NOTE(review): mu is None when the scale-up LP failed; the retry
			## loop below would then raise. Assumed not to happen in practice.
			filtered_logical_topology = self._minimize_multihop(traffic_matrix, mu, all_paths)
			# if the QP is infeasible at the full scale-up, retry with a
			# progressively relaxed scaling factor (0.99 * mu, 0.98 * mu, ...)
			relaxation = 0.99
			while filtered_logical_topology is None:
				filtered_logical_topology = self._minimize_multihop(traffic_matrix, relaxation * mu, all_paths)
				relaxation -= 0.01
			list_of_fractional_topologies[topol_id] = filtered_logical_topology
		## combine the per-cluster topologies, then top up any leftover radix
		adj_matrix = self._combine_fractional_topologies(list_of_fractional_topologies)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix

	# conducts topology engineering given precomputed representative traffic
	# matrices (skips the clustering step of _topology_engineer)
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths):
		num_representative_tms = len(representative_tms)
		list_of_fractional_topologies = [None] * num_representative_tms
		for topol_id in range(num_representative_tms):
			traffic_matrix = representative_tms[topol_id]
			mu = self._maximize_traffic_scaleup(traffic_matrix, all_paths)
			filtered_logical_topology = self._minimize_multihop(traffic_matrix, mu, all_paths)
			# relax the scale-up factor until the multihop QP becomes feasible
			relaxation = 0.99
			while filtered_logical_topology is None:
				filtered_logical_topology = self._minimize_multihop(traffic_matrix, relaxation * mu, all_paths)
				relaxation -= 0.01
			list_of_fractional_topologies[topol_id] = filtered_logical_topology
		adj_matrix = self._combine_fractional_topologies(list_of_fractional_topologies)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix

'''		
Design fractional topology based on 
traffic clusters using LP formulation
'''
#combines multiple fractional topology into one
class RobustMultiTrafficTopologyEngineerImplementationV2(TopologyEngineer):
	def __init__(self, aurora_network, training_length, reconfig_length, all_paths, all_traffic_snapshots, numK):
		# A V2 robust engineer recomputes its topology schedule from scratch,
		# one topology per reconfig_length-snapshot interval, fitted on the
		# preceding training_length snapshots and numK traffic clusters.
		snapshot_count = len(all_traffic_snapshots)
		TopologyEngineer.__init__(self, aurora_network, snapshot_count)
		self.toe_class = TopologyEngineeringClass.ROBUST_TOE
		self.training_length = training_length
		self.reconfig_length = reconfig_length
		self.numK = numK
		self.num_snapshots = snapshot_count
		self.all_traffic_snapshots = all_traffic_snapshots
		self.all_paths = all_paths
		## NOTE: the parent constructor caches a static uniform topology; drop
		## it here -- cache_logical_topologies() repopulates this dictionary.
		self.cached_logical_topologies = {}

		# must be called before running other code 
	def cache_logical_topologies(self):
		'''Precompute the logical topology for every time interval; must be
		called once before any topology queries.'''
		# The warm-up window [0, training_length) has no history to learn
		# from, so it is served by the uniform mesh.
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		print("Caching logical topologies for robust ToE Implementation V2.....")
		start = self.training_length
		while start < self.num_snapshots:
			stop = min(start + self.reconfig_length, self.num_snapshots) - 1
			history = self.all_traffic_snapshots[start - self.training_length : start]
			self.cached_logical_topologies[(start, stop)] = self._topology_engineer(history, self.all_paths)
			start += self.reconfig_length
		print("Caching logical topologies: COMPLETE \n")

	def get_filename_param(self):
		# Filename tag encoding reconfig length, training length and cluster count.
		params = (self.reconfig_length, self.training_length, self.numK)
		return "robusttoev2_r{}t{}c{}".format(*params)

	# returns a string description of this class
	# returns a string description of this class
	def get_string(self):
		description = "Robust multi traffic ToE (V2) with K clusters : {}"
		return description.format(self.numK)

	def is_static(self):
		# Dynamic engineer: topologies are recomputed per reconfiguration interval.
		return False

	# given a scale up factor, figure out the routing that minimizes two-hop paths if they exists
	## Todo(jason) : fix this and debug
	def _minimize_multihop(self, scaled_traffic_matrices, scaling_factors, all_paths):
		'''
		QP stage of V2 topology engineering: choose fractional inter-pod link
		counts and a single set of per-path routing weights that carry
		scaling_factors[k] * scaled_traffic_matrices[k] for every traffic
		matrix k simultaneously, minimizing the sum of squared weights placed
		on multi-hop (more than 2-node) paths.

		Unlike the V1 counterpart, one routing-weight variable per path is
		shared across all traffic matrices, and on solver failure the zero (or
		partially filled) adjacency matrix -- not None -- is returned.
		'''
		## using QP to reduce multihop reliance
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(scaled_traffic_matrices)
		model = Model("minimize multihop")
		model.setParam( 'OutputFlag', False )
		interpod_link_counts = [None] * nblocks
		routing_weight_vars = {}  # path tuple -> routing weight variable (shared by all TMs)
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		objective_function = QuadExpr()
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					link_capacity_constraints[i][j] = LinExpr()
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					for path in all_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weight_vars[path] = var
						# if multihop path, then add to objective function
						if len(path) > 2: 
							objective_function += (var * var)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)

		## add achievability of flows for all i j pairs: the shared routing
		## weights must cover every scaled traffic matrix
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						flow_achievability_constr = LinExpr()
						for path in all_paths[i][j]:
							flow_achievability_constr.add(routing_weight_vars[path], 1.)
						model.addConstr(lhs=scaling_factors[tm_index] * scaled_traffic_matrices[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## accumulate each path's weight onto every link it traverses
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					for path in all_paths[i][j]:
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weight_vars[path], mult=1.)
							curr_node = next_node

		## add link capacity limit for all paths constraints: total weight on
		## (i, j) cannot exceed the provisioned link count times the bottleneck
		## endpoint capacity
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][j] * capacity)
		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		try: 
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
		except GurobiError as e:
			print ("MinimizeMultihop: Error code " + str(e. errno ) + ": " + str(e))
		except AttributeError :
			print ("MinimizeMultihop: Encountered an attribute error ")
		return adj_matrix

	def _maximize_scaleup_factor_for_all_traffic_matrices(self, scaled_traffic_matrices, all_inter_block_paths):
		'''
		LP stage of V2 topology engineering: find the largest common factor mu
		such that mu * scaled_traffic_matrices[k] is simultaneously routable
		for every traffic matrix k on a single fractional topology, subject to
		per-pod radix limits and per-link capacity limits.

		Returns the optimal mu (the worst-case scale-up across all matrices),
		or None if the solver raised an error.
		'''
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(scaled_traffic_matrices)
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		mu = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="mu")
		fractional_topology_var = [None] * nblocks
		routing_weights_var = {}  # path tuple -> routing weight variable (shared by all TMs)
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[path] = var
		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs: the shared routing
		## weights must cover mu times every traffic matrix
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						flow_achievability_constr = LinExpr()
						for path in all_inter_block_paths[i][j]:
							flow_achievability_constr.add(routing_weights_var[path], 1.)
						model.addConstr(lhs=mu * scaled_traffic_matrices[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## accumulate each path's weight onto every link it traverses
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					for path in all_inter_block_paths[i][j]:
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[path], 1.)
							curr_node = next_node
					

		## add link capacity limit for all paths constraints
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)
		# set up the objective function
		model.setObjective(mu, GRB.MAXIMIZE)
		# start optimizing
		try: 
			model.optimize()
			print("Worst case scale up : {}".format(mu.x))
			return mu.x # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	## TODO(jason): fix this and debug CRITICAL
	def _reinforce_fractional_topology(self, adj_matrix):
		"""Top up adj_matrix with any leftover per-block link budget.

		Solves a QP whose variables are additional ("reinforcement") link
		counts per block pair; minimizing sum (upper_bound - extra)^2 pushes
		every pair as close to its radix upper bound as the unused budget
		allows. The extras are added into adj_matrix in place.

		adj_matrix -- fractional topology to reinforce (mutated on success)
		Returns the reinforced adj_matrix, or None on a solver error.
		"""
		model = Model("Reinforce FractionalTopology")
		model.setParam( 'OutputFlag', False )
		nblocks = self.aurora_network.get_num_blocks()
		leftover_incoming = [0.] * nblocks
		leftover_outgoing = [0.] * nblocks
		reinforced_links = [None] * nblocks
		objective_function = QuadExpr()
		for i in range(nblocks):
			reinforced_links[i] = [None] * nblocks
			row_sum = 0.
			col_sum = 0.
			for j in range(nblocks):
				if i != j:
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					reinforced_links[i][j] = model.addVar(lb=0., ub=upper_bound, obj=0., vtype=GRB.CONTINUOUS, name="rlc" + str(i) + "_" + str(j))
					# Penalize the shortfall from each pair's upper bound.
					# (Removed a dead "diff = LinExpr()" that was immediately overwritten.)
					diff = upper_bound - reinforced_links[i][j]
					objective_function.add(diff * diff, mult=1.)
					row_sum += adj_matrix[i][j]
					col_sum += adj_matrix[j][i]
			# Link budget still unused by adj_matrix, in each direction.
			leftover_outgoing[i] = self.aurora_network.get_num_links(i) - row_sum
			leftover_incoming[i] = self.aurora_network.get_num_links(i) - col_sum

		for i in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for j in range(nblocks):
				if i != j :
					row_constraint.add(reinforced_links[i][j], mult=1.)
					col_constraint.add(reinforced_links[j][i], mult=1.)
			# Reinforcement may only consume the leftover budget.
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_outgoing[i])
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_incoming[i])

		model.setObjective(objective_function, GRB.MINIMIZE)
		try: 
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] += reinforced_links[i][j].x
			return adj_matrix
		except GurobiError as e:
			print ("ReinforceFractionalTopology: Error code " + str(e.errno) + ": " + str(e))
			return
		except AttributeError :
			print ("ReinforceFractionalTopology: Encountered an attribute error ")
			return
	
	## done - ish
	# first step of topology design which solves the LP that maximizes traffic scale up
	def _compute_maximum_traffic_scaleup(self, traffic_matrix, all_inter_block_paths):
		"""Maximize the achievable scale-up factor mu for ONE traffic matrix.

		Same LP structure as the multi-TM variant, but routing weights are
		stored per (i, j) pair in parallel lists and only a single demand
		matrix constrains mu.

		traffic_matrix -- nblocks x nblocks demand matrix
		all_inter_block_paths -- all_inter_block_paths[i][j] yields candidate
			paths (sequences of block ids) from block i to block j
		Returns mu (float) on success, or None on a solver error.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		mu = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="mu")
		fractional_topology_var = [None] * nblocks
		routing_weights_var = [None] * nblocks
		link_capacity_constraints = [0] * nblocks
		## set up the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			routing_weights_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					# A pair (i, j) can use at most the smaller endpoint's radix.
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weights_var[i][j] = [] 
					weight_index = 0
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[i][j].append(var)
						weight_index += 1

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs
		## (this loop also accumulates, per directed link, the weight of every path crossing it)
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					flow_achievability_constr = LinExpr()
					for weight_index in range(len(all_inter_block_paths[i][j])):
						flow_achievability_constr.add(routing_weights_var[i][j][weight_index], 1.)
						path_len = len(all_inter_block_paths[i][j][weight_index])
						curr_node = all_inter_block_paths[i][j][weight_index][0]
						for next_node_index in range(1, path_len, 1):
							next_node = all_inter_block_paths[i][j][weight_index][next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[i][j][weight_index], 1.)
							curr_node = all_inter_block_paths[i][j][weight_index][next_node_index]
					# Total weight routed for (i, j) must cover the scaled demand.
					model.addConstr(lhs=mu * traffic_matrix[i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## add link capacity limit for all paths constraints
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		# set up the objective function
		model.setObjective(mu, GRB.MAXIMIZE)
		# start optimizing
		try: 
			model.optimize()
			return mu.x # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp (single): Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp (single): Encountered an attribute error ")
			return None

	## Computes the cluster centroids for a bunch of historical traffic matrices
	## These will then be used for topology engineering
	def _compute_clusters(self, training_points, run_pca_before_clustering=True, num_clustering_components=20):
		"""Cluster historical traffic points into self.numK centroids.

		training_points -- iterable of flattened, non-negative traffic vectors
		run_pca_before_clustering -- when True, reduce to
			num_clustering_components dimensions with PCA before k-means, then
			map the centroids back to traffic-vector space
		Returns a numpy array of self.numK non-negative centroid vectors.
		NOTE(review): requires self.numK, which is set by subclasses.
		"""
		traffic_points_for_training = training_points
		principal_components = None
		# All traffic entries must be non-negative demands.
		for point in training_points:
			for entry in point:
				assert(entry >= 0)
		if run_pca_before_clustering:
			# Reduce dimensionality first so k-means operates in a smaller space.
			pca = PCA(n_components=num_clustering_components)
			traffic_points_for_training = pca.fit_transform(training_points)
			principal_components = pca.components_
		kmeans = KMeans(n_clusters=self.numK, random_state=0).fit(traffic_points_for_training)
		cluster_centroids = kmeans.cluster_centers_
		# Transform the centroids (which live in the dimension-reduced space)
		# back to the dimension of the traffic points.
		if run_pca_before_clustering:
			traffic_centroid_points = np.array(np.matmul(cluster_centroids, principal_components))
		else:
			traffic_centroid_points = cluster_centroids
		# PCA reconstruction can yield slightly negative entries; clamp to zero.
		traffic_centroid_points = np.clip(traffic_centroid_points, 0., None)
		return traffic_centroid_points

	# conducts topology engineering
	# aurora network - Aurora Network, contains all the parameters
	# training_traffic_snapshots - all the snapshots used for training the topology
	# spare_traffic - fraction of total demand spread over near-zero entries for robustness
	def _topology_engineer(self, training_traffic_snapshots, all_paths, spare_traffic=0.1):
		"""Engineer one robust fractional topology from historical snapshots.

		Pipeline: cluster the historical traffic into self.numK centroid TMs,
		inject a spare_traffic fraction of demand into (near-)zero entries,
		scale each centroid TM to its individual maximum, compute the common
		scale-up across all of them, then build the topology by minimizing
		multi-hop reliance and reinforcing with leftover links.
		Returns the fractional adjacency matrix.
		"""
		# first get number of blocks and number of links and link capacity
		nblocks = self.aurora_network.get_num_blocks()
		adj_matrix = np.zeros((nblocks, nblocks))
		# first, run k means clustering and get the clusters, and normalize to 1
		traffic_points = np.array([self.aurora_network.get_traffic_vector_from_traffic_snapshot(x, normalize_to=1.) for x in training_traffic_snapshots])
		# collect the k traffic matrices
		#traffic_matrices = [self.aurora_network.transform_traffic_point_to_traffic_matrix(x) for x in traffic_points]
		traffic_cluster_centroid_points = self._compute_clusters(traffic_points, run_pca_before_clustering=False)
		list_of_fractional_topologies = [0] * self.numK
		scaled_traffic_matrices = []
		for topol_id in range(self.numK):
			traffic_matrix = self.aurora_network.transform_traffic_point_to_traffic_matrix(traffic_cluster_centroid_points[topol_id])

			## Robustness: spread a spare_traffic fraction of the total demand
			## evenly across entries that are (near) zero, so the engineered
			## topology keeps some capacity for unforeseen flows.
			traffic_sum = sum([sum(x) for x in traffic_matrix])
			reset_num_entries = 0
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						if traffic_matrix[i][j] < 0.0001:
							reset_num_entries += 1
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						if traffic_matrix[i][j] < 0.0001:
							traffic_matrix[i][j] = (spare_traffic * traffic_sum) / (1 - spare_traffic) / float(reset_num_entries)
			## newly added for robustness

			# NOTE(review): mu is None if the solver failed -- the multiply below would then raise.
			mu = self._compute_maximum_traffic_scaleup(traffic_matrix, all_paths)
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						traffic_matrix[i][j] *= mu
			scaled_traffic_matrices.append(traffic_matrix)
		scale_up = self._maximize_scaleup_factor_for_all_traffic_matrices(scaled_traffic_matrices, all_paths)
		adj_matrix = self._minimize_multihop(scaled_traffic_matrices, [scale_up] * len(scaled_traffic_matrices), all_paths)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix

	def _test_individual_scaleup_factor_for_all_TMs_uniform_topology(self, scaled_traffic_matrices, minimal_scale_up, all_inter_block_paths):
		"""Evaluate per-TM scale-up factors on a fixed UNIFORM mesh topology.

		Diagnostic counterpart of _maximize_individual_scaleup_factor_for_all_TMs:
		the topology is NOT a variable here -- every block pair gets
		num_links / (nblocks - 1) links -- and the QP only chooses routing
		weights plus per-TM factors mu_m, minimizing sum (1 - mu_m)^2.
		Returns the list of per-TM scale-up factors, or None on solver error.

		NOTE(review): uses get_num_links(0) for every block, i.e. assumes all
		blocks have the same radix -- confirm for heterogeneous networks.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(scaled_traffic_matrices)
		model = Model("maximize individual traffic scale up")
		model.setParam( 'OutputFlag', False )
		individual_scaleups = [None] * num_tm
		objective_function = QuadExpr()

		num_links_per_block = self.aurora_network.get_num_links(0)
		uniform_topology = np.zeros((nblocks, nblocks,))
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					uniform_topology[i][j] = float(num_links_per_block) / (nblocks - 1)

		for index in range(num_tm):
			# Each TM's factor lives in [~minimal_scale_up, 1]; objective pulls it toward 1.
			mu = model.addVar(lb=minimal_scale_up * 0.999, ub=1, obj=0, vtype=GRB.CONTINUOUS, name="u_{}".format(index))
			objective_function += ((1 - mu) * (1 - mu))
			individual_scaleups[index] = mu
		
		routing_weights_var = {}
		link_capacity_constraints = [None] * nblocks
		## set up the link constraints and all optimization variables,
		## which are the routing variables (topology is fixed here)
		for i in range(nblocks):
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[path] = var

		## add achievability of flows for all i j pairs
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						flow_achievability_constr = LinExpr()
						for path in all_inter_block_paths[i][j]:
							flow_achievability_constr.add(routing_weights_var[path], 1.)
						model.addConstr(lhs=individual_scaleups[tm_index] * scaled_traffic_matrices[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## accumulate, per directed link, the routing weight of every path that traverses it
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					for path in all_inter_block_paths[i][j]:
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[path], 1.) 
							curr_node = next_node

		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					# Carried weight is bounded by the FIXED uniform topology's capacity.
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=uniform_topology[i][j] * capacity)

		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		try: 
			model.optimize()
			scaleups = [0] * num_tm
			for mu, index in zip(individual_scaleups, range(num_tm)):
				scaleups[index] = mu.x
			return scaleups # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	def _maximize_individual_scaleup_factor_for_all_TMs(self, scaled_traffic_matrices, minimal_scale_up, all_inter_block_paths):
		"""Jointly maximize per-TM scale-up factors on ONE shared topology.

		One variable mu_m per traffic matrix, bounded in
		[minimal_scale_up * 0.999, 1]; the QP minimizes sum (1 - mu_m)^2,
		pushing every TM's factor as close to 1 as possible, subject to radix,
		flow-achievability and link-capacity constraints on a single
		fractional topology with routing weights shared by all TMs.

		Returns (scaleups, adj_matrix, routing_weights) on success, where
		scaleups lists the per-TM factors, adj_matrix is the fractional
		topology and routing_weights maps path -> unnormalized weight.
		Returns None on a solver error (NOTE(review): callers that unpack the
		3-tuple will then raise).
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(scaled_traffic_matrices)
		model = Model("maximize individual traffic scale up")
		model.setParam( 'OutputFlag', False )
		individual_scaleups = [None] * num_tm
		objective_function = QuadExpr()
		for index in range(num_tm):
			mu = model.addVar(lb=minimal_scale_up * 0.999, ub=1, obj=0, vtype=GRB.CONTINUOUS, name="u_{}".format(index))
			objective_function += ((1 - mu) * (1 - mu))
			individual_scaleups[index] = mu
		
		fractional_topology_var = [None] * nblocks
		routing_weights_var = {}
		link_capacity_constraints = [None] * nblocks
		## set up the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[path] = var
		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						flow_achievability_constr = LinExpr()
						for path in all_inter_block_paths[i][j]:
							flow_achievability_constr.add(routing_weights_var[path], 1.)
						model.addConstr(lhs=individual_scaleups[tm_index] * scaled_traffic_matrices[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## accumulate, per directed link, the routing weight of every path that traverses it
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					for path in all_inter_block_paths[i][j]:
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[path], 1.)
							curr_node = next_node

		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)
		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		try: 
			model.optimize()
			scaleups = [0] * num_tm
			for mu, index in zip(individual_scaleups, range(num_tm)):
				scaleups[index] = mu.x
			adj_matrix = [0] * nblocks
			for i in range(nblocks):
				adj_matrix[i] = [0] * nblocks
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] = fractional_topology_var[i][j].x
			routing_weights = {}
			for path in routing_weights_var.keys():
				routing_weights[path] = routing_weights_var[path].x
			# Fixed: was a Python 2 print statement ("print adj_matrix"),
			# which is a syntax error under Python 3.
			print(adj_matrix)
			return scaleups, adj_matrix, routing_weights # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	def __normalize_routing_weights(self, nblocks, unnormalized_routing_weights):
		"""Normalize path weights so each (src, dst) pair's weights sum to 1.

		Paths are sequences of block ids: path[0] is the source block and
		path[-1] the destination block.

		nblocks -- number of blocks (sizes the per-pair accumulator)
		unnormalized_routing_weights -- dict mapping path -> raw weight
		Returns a new dict of normalized weights. Pairs whose total weight is
		zero are left unnormalized -- previously the unconditional division
		produced NaN/inf (or ZeroDivisionError) for such pairs.
		"""
		normalized_routing_weights = {}
		weight_sum = np.zeros((nblocks, nblocks,))
		for path in unnormalized_routing_weights:
			src = path[0]
			dst = path[-1]
			weight_sum[src][dst] += unnormalized_routing_weights[path]
			normalized_routing_weights[path] = unnormalized_routing_weights[path]
		for path in unnormalized_routing_weights:
			src = path[0]
			dst = path[-1]
			total = weight_sum[src][dst]
			if total > 0:
				normalized_routing_weights[path] /= total
		return normalized_routing_weights

	# conducts topology engineering given the representative traffic matrices
	# representative_tms - the self.numK representative (cluster-centroid) demand matrices
	# all_paths - candidate paths per (i, j) block pair
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths, minimize_multihop=True):
		"""Engineer one fractional topology for the given representative TMs.

		Scales each TM to its individual maximum, finds the common worst-case
		scale-up, then jointly maximizes the per-TM factors on one shared
		topology; optionally re-solves routing to minimize multi-hop reliance
		before reinforcing the topology with leftover link budget.
		Returns (adj_matrix, normalized_routing_weights).

		NOTE(review): an unreachable diagnostic tail that followed the return
		statement (uniform-topology comparison prints) has been removed, along
		with locals only that dead code consumed.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		assert(self.numK == len(representative_tms))
		scaled_traffic_matrices = []
		for topol_id in range(self.numK):
			traffic_matrix = np.zeros((nblocks, nblocks,))
			# Scale each representative TM up to its individually achievable maximum.
			mu = self._compute_maximum_traffic_scaleup(representative_tms[topol_id], all_paths)
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						traffic_matrix[i][j] = mu * representative_tms[topol_id][i][j]
			scaled_traffic_matrices.append(traffic_matrix)
		scale_up = self._maximize_scaleup_factor_for_all_traffic_matrices(scaled_traffic_matrices, all_paths)
		individual_scaleups, adj_matrix, routing_weights = self._maximize_individual_scaleup_factor_for_all_TMs(scaled_traffic_matrices, scale_up, all_paths)
		normalized_routing_weights = self.__normalize_routing_weights(nblocks, routing_weights)
		if minimize_multihop:
			adj_matrix = self._minimize_multihop(scaled_traffic_matrices, individual_scaleups, all_paths)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix, normalized_routing_weights


'''
Designs a fractional topology from multiple traffic
clusters using an LP/QP formulation.
'''
# Combines multiple fractional topologies into one.
class RobustMultiTrafficTopologyEngineerImplementationV3(TopologyEngineer):
	"""Robust multi-traffic topology engineer (V3).

	Designs one fractional topology that is robust across numK representative
	traffic matrices: maximizes a transformed common scale-up factor (beta),
	optionally minimizes reliance on multi-hop paths, then reinforces the
	topology with any leftover per-block link budget.
	"""
	def __init__(self, aurora_network, training_length, reconfig_length, all_paths, all_traffic_snapshots, numK):
		"""
		aurora_network -- network description object
		training_length -- number of snapshots used to train each topology
		reconfig_length -- number of snapshots each computed topology stays in use
		all_paths -- all_paths[i][j] yields candidate paths from block i to j
		all_traffic_snapshots -- full sequence of traffic snapshots
		numK -- number of traffic clusters / representative TMs
		"""
		num_snapshots = len(all_traffic_snapshots)
		TopologyEngineer.__init__(self, aurora_network, num_snapshots)
		self.toe_class = TopologyEngineeringClass.ROBUST_TOE
		self.training_length = training_length
		self.reconfig_length = reconfig_length
		self.numK = numK
		self.num_snapshots = len(all_traffic_snapshots)
		self.all_traffic_snapshots = all_traffic_snapshots
		self.all_paths = all_paths
		## NOTE: have to rezero the cached logical topologies here because when we initialize the parent class,
		## 		it will assume that the topology is static (see TopologyEngineer class above).
		self.cached_logical_topologies = {}
		return

	# must be called before running other code
	def cache_logical_topologies(self):
		"""Precompute and cache the logical topology for every time interval.

		The first training_length snapshots use a uniform mesh (no history to
		train on yet); afterwards each window of reconfig_length snapshots
		gets a topology engineered from the training_length snapshots that
		precede it.
		"""
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		## Start computing the logical topologies
		current_time = self.training_length
		# Fixed: message previously said "V2" even though this is the V3 implementation.
		print("Caching logical topologies for robust ToE Implementation V3.....")
		while current_time < self.num_snapshots:
			ending_time = min(current_time + self.reconfig_length - 1, self.num_snapshots - 1)
			# Train on the window immediately preceding the reconfiguration point.
			training_snapshots = self.all_traffic_snapshots[current_time - self.training_length : current_time]
			adj_matrix = self._topology_engineer(training_snapshots, self.all_paths)
			self.cached_logical_topologies[(current_time, ending_time)] = adj_matrix
			current_time += self.reconfig_length
		print("Caching logical topologies: COMPLETE \n")
		return

	def get_filename_param(self):
		"""Tag embedded in output filenames for this ToE configuration."""
		return "robusttoev3_r{}t{}c{}".format(self.reconfig_length, self.training_length, self.numK)

	# returns a string description of this class
	def get_string(self):
		return "Robust multi traffic ToE (V3) with K clusters : {}".format(self.numK)

	def is_static(self):
		"""Topologies are recomputed over time, so this engineer is not static."""
		return False

	# given a scale up factor, figure out the routing that minimizes multi-hop paths if they exist
	## TODO(jason): fix this and debug
	def _minimize_multihop(self, traffic_matrices, beta_value, all_paths):
		"""Pick a topology/routing that meets beta_value with minimal multi-hop weight.

		QP: for every (i, j) pair the routing weights over all candidate
		paths must sum to exactly beta_value; the objective minimizes the
		squared weight placed on paths longer than one hop. Link capacity is
		enforced separately for every traffic matrix.

		Returns (adj_matrix, routing_weights) where routing_weights maps
		path -> weight normalized by beta_value; on solver error the
		returned matrix/weights may be zeros/empty.

		NOTE(review): beta_value == 0 would divide by zero in the
		normalization below -- confirm callers guarantee beta_value > 0.
		"""
		## using QP to reduce multihop reliance
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(traffic_matrices)
		model = Model("minimize multihop")
		model.setParam( 'OutputFlag', False )
		interpod_link_counts = [None] * nblocks
		routing_weights_var_hat = {}
		link_capacity_constraints = [None] * num_tm
		for tm_index in range(num_tm):
			link_capacity_constraints[tm_index] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[tm_index][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[tm_index][i][j] = LinExpr()

		## set up the link constraints and all optimization variables,
		## which are the link counts and routing variables
		objective_function = QuadExpr()
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var_hat[path] = var
						routing_weight_sum += var
						# if multihop path, then add to objective function
						if len(path) > 2: 
							objective_function += (var * var)
					# Per-pair weights must sum to exactly beta_value.
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=beta_value)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
		
		## accumulate, per TM and per directed link, the traffic carried by every path crossing it
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						for path in all_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var_hat[path] * traffic_matrices[tm_index][i][j])
								curr_node = next_node
		## add link capacity limit for all paths constraints
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][j] * capacity)

		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		routing_weights = {}
		try: 
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_paths[i][j]:
							routing_weights[path] = routing_weights_var_hat[path].x / beta_value
		except GurobiError as e:
			print ("MinimizeMultihop: Error code " + str(e.errno) + ": " + str(e))
		except AttributeError :
			print ("MinimizeMultihop: Encountered an attribute error ")
		return adj_matrix, routing_weights

	def _reinforce_fractional_topology(self, adj_matrix):
		"""Top up adj_matrix with any leftover per-block link budget.

		QP over additional link counts per pair; minimizing
		sum (upper_bound - extra)^2 pushes every pair toward its radix upper
		bound within the unused budget. The extras are added into adj_matrix
		in place. Returns the reinforced matrix, or None on a solver error.
		"""
		model = Model("Reinforce FractionalTopology")
		model.setParam( 'OutputFlag', False )
		nblocks = self.aurora_network.get_num_blocks()
		leftover_incoming = [0.] * nblocks
		leftover_outgoing = [0.] * nblocks
		reinforced_links = [None] * nblocks
		objective_function = QuadExpr()
		for i in range(nblocks):
			reinforced_links[i] = [None] * nblocks
			row_sum = 0.
			col_sum = 0.
			for j in range(nblocks):
				if i != j:
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					reinforced_links[i][j] = model.addVar(lb=0., ub=upper_bound, obj=0., vtype=GRB.CONTINUOUS, name="rlc" + str(i) + "_" + str(j))
					# Penalize the shortfall from each pair's upper bound.
					# (Removed a dead "diff = LinExpr()" that was immediately overwritten.)
					diff = upper_bound - reinforced_links[i][j]
					objective_function.add(diff * diff, mult=1.)
					row_sum += adj_matrix[i][j]
					col_sum += adj_matrix[j][i]
			# Link budget still unused by adj_matrix, in each direction.
			leftover_outgoing[i] = self.aurora_network.get_num_links(i) - row_sum
			leftover_incoming[i] = self.aurora_network.get_num_links(i) - col_sum

		for i in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for j in range(nblocks):
				if i != j :
					row_constraint.add(reinforced_links[i][j], mult=1.)
					col_constraint.add(reinforced_links[j][i], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_outgoing[i])
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_incoming[i])

		model.setObjective(objective_function, GRB.MINIMIZE)
		try: 
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] += reinforced_links[i][j].x
			return adj_matrix
		except GurobiError as e:
			print ("ReinforceFractionalTopology: Error code " + str(e.errno) + ": " + str(e))
			return
		except AttributeError :
			print ("ReinforceFractionalTopology: Encountered an attribute error ")
			return

	def _maximize_scaleup_for_all_TMs_transformed(self, traffic_matrices, all_inter_block_paths):
		"""Maximize beta (transformed common scale-up) over all TMs at once.

		LP: per-pair routing weights must sum to exactly beta; per-TM link
		loads must fit the fractional topology's capacity. The worst-case
		scale-up alpha equals 1 / beta.

		Returns (beta, adj_matrix, routing_weights) on success -- weights are
		already rescaled by alpha -- or None on a solver error.
		NOTE(review): callers that unpack the 3-tuple will crash on None.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(traffic_matrices)
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		beta = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="beta")
		fractional_topology_var = [None] * nblocks
		routing_weights_var_hat = {}
		link_capacity_constraints = [None] * num_tm
		for m in range(num_tm):
			link_capacity_constraints[m] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[m][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[m][i][j] = LinExpr()
		## set up the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var_hat[path] = var
						routing_weight_sum += var
					# Per-pair weights must sum to exactly beta.
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=beta)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## accumulate, per TM and per directed link, the traffic carried by every path crossing it
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var_hat[path] * traffic_matrices[tm_index][i][j])
								curr_node = next_node
		
		## add link capacity limit for all paths constraints
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		# set up the objective function
		model.setObjective(beta, GRB.MAXIMIZE)
		# start optimizing
		try: 
			model.optimize()
			# NOTE(review): beta.x == 0 would divide by zero here -- confirm infeasible/zero cases.
			alpha = 1. / beta.x
			print("Worst case scale up : {}".format(alpha))
			
			routing_weights = {}
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							routing_weights[path] = routing_weights_var_hat[path].x * alpha
			adj_matrix = np.zeros((nblocks, nblocks))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] = fractional_topology_var[i][j].x
			return beta.x, adj_matrix, routing_weights # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	# conducts topology engineering given the representative traffic matrices
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths, minimize_multihop=True):
		"""Engineer a topology for the given representative TMs (V3 variant).

		representative_tms -- list of self.numK demand matrices
		all_paths -- candidate paths per (i, j) block pair
		minimize_multihop -- when True, re-solve topology/routing to reduce
			multi-hop reliance before reinforcement
		Returns (adj_matrix, routing_weights).
		"""
		# first get number of blocks and number of links and link capacity
		nblocks = self.aurora_network.get_num_blocks()
		adj_matrix = np.zeros((nblocks, nblocks))
		assert(self.numK == len(representative_tms))
		beta_value, adj_matrix, routing_weights = self._maximize_scaleup_for_all_TMs_transformed(representative_tms, all_paths)
		print("beta : {}, mlu : {}".format(beta_value, 1./beta_value))
		if minimize_multihop:
			adj_matrix, routing_weights = self._minimize_multihop(representative_tms, beta_value, all_paths)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix, routing_weights


'''
Ideal ("oracle") topology engineering: a fresh fractional
topology is engineered for every traffic snapshot.
'''
# Recomputes the topology per snapshot instead of per training window.
class IdealTopologyEngineer(TopologyEngineer):
	"""Ideal ("oracle") ToE.

	Engineers a fractional topology separately for every traffic snapshot,
	i.e. it assumes perfect knowledge of the traffic at each time index.
	"""

	def __init__(self, aurora_network, all_paths, all_traffic_snapshots):
		# all_paths[i][j] - candidate paths from block i to block j
		# all_traffic_snapshots - one traffic snapshot per time index
		num_snapshots = len(all_traffic_snapshots)
		TopologyEngineer.__init__(self, aurora_network, num_snapshots)
		self.toe_class = TopologyEngineeringClass.IDEAL
		## NOTE: have to rezero the cached logical topologies here because when we initialize the parent class,
		## 		it will assume that the topology is static (see TopologyEngineer class above).
		self.all_traffic_snapshots = all_traffic_snapshots
		self.cached_logical_topologies = {}
		self.all_paths = all_paths
		return

	# engineers the topology on demand from the snapshot at time_index and
	# rounds the resulting fractional topology to an integral one
	def get_logical_topology_at_time(self, time_index):
		assert(time_index < self.num_snapshots)
		adj_matrix = self._topology_engineer(self.all_traffic_snapshots[time_index], self.all_paths)
		return self.aurora_network.round_fractional_topology_giant_switch(adj_matrix, [])

	def get_filename_param(self):
		return "perfect"

	# returns a string description of this class
	def get_string(self):
		return "Perfect ToE"

	def is_static(self):
		return False

	# given a scale up factor, figure out the routing that minimizes two-hop paths if they exist
	def _minimize_multihop(self, traffic_matrix, scaling_factor, all_paths):
		"""Solve a QP that supports scaling_factor * traffic_matrix while
		minimizing reliance on multi-hop (length > 2) paths.

		Returns the fractional adjacency matrix, or None on solver failure."""
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("minimize multihop")
		model.setParam( 'OutputFlag', False )
		interpod_link_counts = [None] * nblocks
		routing_weights = {}
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		objective_function = QuadExpr()
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					link_capacity_constraints[i][j] = LinExpr()
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					for path in all_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights[path] = var
						# only multihop paths are penalized in the objective
						if len(path) > 2:
							objective_function.add(var * var, mult=1.)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)

		## add achievability of flows for all i j pairs
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					flow_achievability_constr = LinExpr()
					for path in all_paths[i][j]:
						flow_achievability_constr.add(routing_weights[path], 1.)
						path_len = len(path)
						curr_node = path[0]
						# accumulate this path's weight on every link it traverses
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights[path], mult=1.)
							curr_node = next_node
					model.addConstr(lhs=scaling_factor * traffic_matrix[i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)

		## add link capacity limit for all paths constraints
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					# BUGFIX: this used to take min(capacity(i), capacity(i)), ignoring the
					# destination block's link capacity (cf. _maximize_traffic_scaleup below)
					capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][j] * capacity)
		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
			return adj_matrix
		except GurobiError as e:
			print ("MinimizeMultihop: Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError as e1:
			print ("MinimizeMultihop: Encountered an attribute error " + ": " + str(e1))
			return None

	def _reinforce_fractional_topology(self, adj_matrix):
		"""Hand out each block's leftover ingress/egress link budget on top of
		adj_matrix; the QP spreads the extra links as evenly as possible.
		Mutates and returns adj_matrix; returns None on solver failure."""
		model = Model("Reinforce FractionalTopology")
		model.setParam( 'OutputFlag', False )
		nblocks = self.aurora_network.get_num_blocks()
		leftover_incoming = [0.] * nblocks
		leftover_outgoing = [0.] * nblocks
		reinforced_links = [None] * nblocks
		objective_function = QuadExpr()
		for i in range(nblocks):
			reinforced_links[i] = [None] * nblocks
			row_sum = 0.
			col_sum = 0.
			for j in range(nblocks):
				if i != j:
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					reinforced_links[i][j] = model.addVar(lb=0., ub=upper_bound, obj=0., vtype=GRB.CONTINUOUS, name="rlc" + str(i) + "_" + str(j))
					# penalize the squared distance from the upper bound so the
					# reinforcement is spread as evenly as possible
					diff = upper_bound - reinforced_links[i][j]
					objective_function.add(diff * diff, mult=1.)
					row_sum += adj_matrix[i][j]
					col_sum += adj_matrix[j][i]
			# link budget that adj_matrix has not consumed yet
			leftover_outgoing[i] = self.aurora_network.get_num_links(i) - row_sum
			leftover_incoming[i] = self.aurora_network.get_num_links(i) - col_sum

		for i in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for j in range(nblocks):
				if i != j :
					row_constraint.add(reinforced_links[i][j], mult=1.)
					col_constraint.add(reinforced_links[j][i], mult=1.)
			# the added links must fit within the leftover budgets
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_outgoing[i])
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_incoming[i])

		model.setObjective(objective_function, GRB.MINIMIZE)
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] += reinforced_links[i][j].x
			return adj_matrix
		except GurobiError as e:
			print ("ReinforceFractionalTopology: Error code " + str(e.errno) + ": " + str(e))
			return
		except AttributeError :
			print ("ReinforceFractionalTopology: Encountered an attribute error ")
			return

	# first step of topology design which solves the LP that maximizes traffic scale up
	def _maximize_traffic_scaleup(self, traffic_matrix, all_inter_block_paths):
		"""Solve the LP maximizing the factor mu by which traffic_matrix can be
		scaled while remaining routable on some fractional topology.

		Returns (mu, adj_matrix), or None on solver failure."""
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		mu = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="mu")
		fractional_topology_var = [None] * nblocks
		routing_weights = {}
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights[path] = var

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					flow_achievability_constr = LinExpr()
					for path in all_inter_block_paths[i][j]:
						flow_achievability_constr.add(routing_weights[path], 1.)
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights[path], 1.)
							curr_node = next_node
					model.addConstr(lhs=mu * traffic_matrix[i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)

		## add link capacity limit for all paths constraints
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		# set up the objective function
		model.setObjective(mu, GRB.MAXIMIZE)
		# start optimizing
		try:
			model.optimize()
			adj_matrix = np.zeros((nblocks, nblocks))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] = fractional_topology_var[i][j].x
			return mu.x, adj_matrix # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	# conducts topology engineering for a single traffic snapshot
	def _topology_engineer(self, traffic_snapshot, all_paths):
		"""Two-stage design: maximize the scale-up factor of the snapshot's TM,
		then (with progressively relaxed scale-up) minimize multihop reliance,
		and finally reinforce the topology with the leftover link budget."""
		traffic_matrix = self.aurora_network.get_traffic_matrix_from_traffic_snapshot(traffic_snapshot)
		# NOTE(review): assumes stage 1 succeeded; _maximize_traffic_scaleup
		# returns None on failure and this unpack would raise a TypeError
		mu, _ = self._maximize_traffic_scaleup(traffic_matrix, all_paths)
		adj_matrix_stage_2 = None
		relaxation = 1.
		# progressively relax the required scale-up until the QP is feasible
		while adj_matrix_stage_2 is None and relaxation > 0:
			adj_matrix_stage_2 = self._minimize_multihop(traffic_matrix, relaxation * mu, all_paths)
			relaxation -= 0.01
		adj_matrix = self._reinforce_fractional_topology(adj_matrix_stage_2)
		# fall back to the unreinforced topology if reinforcement failed
		if adj_matrix is None:
			adj_matrix = adj_matrix_stage_2
		return adj_matrix

'''		
Design fractional topology based on an expected traffic matrix, which is obtained as an average
of historical traffic matrices
'''
class HistoricalAveTrafficTopologyEngineer(RobustMultiTrafficTopologyEngineer):
	"""ToE that trains on the element-wise average of the historical traffic
	matrices in the training window (numK is fixed to 1 in the parent)."""

	def __init__(self, aurora_network, training_length, reconfig_length, all_paths, all_traffic_snapshots):
		RobustMultiTrafficTopologyEngineer.__init__(self, aurora_network, training_length, reconfig_length, all_paths, all_traffic_snapshots, 1)
		self.toe_class = TopologyEngineeringClass.AVE_TOE
		## NOTE: have to rezero the cached logical topologies here because when we initialize the parent class,
		## 		it will assume that the topology is static (see TopologyEngineer class above).
		self.cached_logical_topologies = {}
		return

	# must be called before running other code
	def cache_logical_topologies(self, round_to_integer=False):
		"""Precompute and cache the logical topology for every reconfiguration
		interval; the initial training window gets a uniform topology."""
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		## Start computing the logical topologies
		current_time = self.training_length
		print("Caching logical topologies for AVE ToE.....")
		while current_time < self.num_snapshots:
			ending_time = min(current_time + self.reconfig_length - 1, self.num_snapshots - 1)
			# train on the window of snapshots immediately preceding current_time
			training_snapshots = self.all_traffic_snapshots[current_time - self.training_length : current_time]
			adj_matrix = self._topology_engineer(training_snapshots, self.all_paths)
			if round_to_integer:
				adj_matrix = self.aurora_network.round_fractional_topology_giant_switch(adj_matrix, [])
			self.cached_logical_topologies[(current_time, ending_time)] = adj_matrix
			current_time += self.reconfig_length
		print("Caching logical topologies: COMPLETE \n")
		return

	def get_filename_param(self):
		return "avetoe_r{}t{}".format(self.reconfig_length, self.training_length)

	# returns a string description of this class
	def get_string(self):
		return "AVE traffic ToE"

	# training_traffic_snapshots - all the snapshots used for training the topology
	def _topology_engineer(self, training_traffic_snapshots, all_paths):
		# convert the snapshots into traffic matrices, then engineer for their average
		traffic_matrices = np.array([self.aurora_network.get_traffic_matrix_from_traffic_snapshot(x) for x in training_traffic_snapshots])
		return self.topology_engineer_given_TMs(traffic_matrices, all_paths)

	# traffic_matrices - all the traffic matrices used for training the topology
	def topology_engineer_given_TMs(self, traffic_matrices, all_paths):
		"""Engineer a fractional topology for the element-wise mean of
		traffic_matrices. Returns None if the multihop QP never succeeds."""
		expected_tm = np.mean(np.asarray(traffic_matrices, dtype=float), axis=0)
		np.fill_diagonal(expected_tm, 0.)	# no intra-block demand
		## NOTE(review): assumes the inherited _maximize_traffic_scaleup returns a
		## scalar scale-up factor -- verify against the parent class.
		mu = self._maximize_traffic_scaleup(expected_tm, all_paths)
		adj_matrix = None
		relaxation = 1.
		# BUGFIX: bound the relaxation so a persistently infeasible QP cannot
		# loop forever (cf. IdealTopologyEngineer._topology_engineer)
		while adj_matrix is None and relaxation > 0:
			adj_matrix = self._minimize_multihop(expected_tm, relaxation * mu, all_paths)
			relaxation -= 0.01
		return adj_matrix
'''		
Design fractional topology based on an expected traffic matrix, which is obtained as a component-wise max
of historical traffic matrices
'''
class HistoricalMaxTrafficTopologyEngineer(RobustMultiTrafficTopologyEngineer):
	"""ToE that trains on the element-wise maximum of the historical traffic
	matrices in the training window (numK is fixed to 1 in the parent)."""

	def __init__(self, aurora_network, training_length, reconfig_length, all_paths, all_traffic_snapshots):
		RobustMultiTrafficTopologyEngineer.__init__(self, aurora_network, training_length, reconfig_length, all_paths, all_traffic_snapshots, 1)
		# BUGFIX: this used to be AVE_TOE, making this class indistinguishable
		# from HistoricalAveTrafficTopologyEngineer via topology_engineering_class()
		self.toe_class = TopologyEngineeringClass.MAX_TOE
		## NOTE: have to rezero the cached logical topologies here because when we initialize the parent class,
		## 		it will assume that the topology is static (see TopologyEngineer class above).
		self.cached_logical_topologies = {}
		return

	# must be called before running other code
	def cache_logical_topologies(self, round_to_integer=False):
		"""Precompute and cache the logical topology for every reconfiguration
		interval; the initial training window gets a uniform topology."""
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		## Start computing the logical topologies
		current_time = self.training_length
		print("Caching logical topologies for MAX ToE.....")
		while current_time < self.num_snapshots:
			ending_time = min(current_time + self.reconfig_length - 1, self.num_snapshots - 1)
			# train on the window of snapshots immediately preceding current_time
			training_snapshots = self.all_traffic_snapshots[current_time - self.training_length : current_time]
			adj_matrix = self._topology_engineer(training_snapshots, self.all_paths)
			if round_to_integer:
				adj_matrix = self.aurora_network.round_fractional_topology_giant_switch(adj_matrix, [])
			self.cached_logical_topologies[(current_time, ending_time)] = adj_matrix
			current_time += self.reconfig_length
		print("Caching logical topologies: COMPLETE \n")
		return

	def get_filename_param(self):
		return "maxtoe_r{}t{}".format(self.reconfig_length, self.training_length)

	# returns a string description of this class
	def get_string(self):
		return "MAX traffic ToE"

	# training_traffic_snapshots - all the snapshots used for training the topology
	def _topology_engineer(self, training_traffic_snapshots, all_paths):
		# convert the snapshots into traffic matrices, then engineer for their
		# component-wise maximum
		traffic_matrices = np.array([self.aurora_network.get_traffic_matrix_from_traffic_snapshot(x) for x in training_traffic_snapshots])
		return self.topology_engineer_given_TMs(traffic_matrices, all_paths)

	# traffic_matrices - all the traffic matrices used for training the topology
	def topology_engineer_given_TMs(self, traffic_matrices, all_paths):
		"""Engineer a fractional topology for the component-wise maximum of
		traffic_matrices. Returns None if the multihop QP never succeeds."""
		# clamp at 0 to preserve the original semantics of max(0, entries)
		expected_tm = np.maximum(np.asarray(traffic_matrices, dtype=float).max(axis=0), 0.)
		np.fill_diagonal(expected_tm, 0.)	# no intra-block demand
		## NOTE(review): assumes the inherited _maximize_traffic_scaleup returns a
		## scalar scale-up factor -- verify against the parent class.
		mu = self._maximize_traffic_scaleup(expected_tm, all_paths)
		adj_matrix = None
		relaxation = 1.
		# BUGFIX: bound the relaxation so a persistently infeasible QP cannot
		# loop forever (cf. IdealTopologyEngineer._topology_engineer)
		while adj_matrix is None and relaxation > 0:
			adj_matrix = self._minimize_multihop(expected_tm, relaxation * mu, all_paths)
			relaxation -= 0.01
		return adj_matrix


'''
Bounded-WCMP ToE: designs a single fractional topology that covers multiple
representative (clustered) traffic matrices, using an LP formulation.
'''
# combines multiple fractional topologies into one
class BoundedTopologyEngineer(TopologyEngineer):
	def __init__(self, aurora_network, training_length, reconfig_length, all_paths, all_traffic_snapshots, numK):
		"""Bounded-WCMP ToE: retrains a fractional topology every
		reconfig_length snapshots from the preceding training_length
		snapshots, clustering them into numK representative TMs."""
		TopologyEngineer.__init__(self, aurora_network, len(all_traffic_snapshots))
		self.toe_class = TopologyEngineeringClass.ROBUST_TOE
		self.training_length = training_length
		self.reconfig_length = reconfig_length
		self.numK = numK
		self.num_snapshots = len(all_traffic_snapshots)
		self.all_traffic_snapshots = all_traffic_snapshots
		self.all_paths = all_paths
		## the parent constructor caches a static uniform topology; drop it
		## because this engineer recomputes topologies per interval
		self.cached_logical_topologies = {}

	# must be called before any code that queries the cached topologies
	def cache_logical_topologies(self):
		"""Precompute the logical topology for every reconfiguration interval;
		the initial training window is served by a uniform topology."""
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		print("Caching logical topologies for robust ToE Implementation V2.......")
		for start_time in range(self.training_length, self.num_snapshots, self.reconfig_length):
			ending_time = min(start_time + self.reconfig_length - 1, self.num_snapshots - 1)
			# train on the window of snapshots immediately preceding start_time
			training_snapshots = self.all_traffic_snapshots[start_time - self.training_length : start_time]
			self.cached_logical_topologies[(start_time, ending_time)] = self._topology_engineer(training_snapshots, self.all_paths)
		print("Caching logical topologies: COMPLETE \n")

	def get_filename_param(self):
		"""Return the parameter tag used in output filenames."""
		return f"boundedtoe_r{self.reconfig_length}t{self.training_length}c{self.numK}"

	# human-readable description of this ToE class
	def get_string(self):
		return f"Bounded ToE based ToE : {self.numK}"

	def is_static(self):
		# this engineer reconfigures the topology over time, so it is not static
		return False

	def _reinforce_fractional_topology(self, adj_matrix):
		"""Hand out each block's leftover ingress/egress link budget on top of
		adj_matrix; the QP spreads the extra links as evenly as possible.
		Mutates and returns adj_matrix; returns None on solver failure."""
		model = Model("Reinforce FractionalTopology")
		model.setParam( 'OutputFlag', False )
		nblocks = self.aurora_network.get_num_blocks()
		leftover_incoming = [0.] * nblocks
		leftover_outgoing = [0.] * nblocks
		reinforced_links = [None] * nblocks
		objective_function = QuadExpr()
		for i in range(nblocks):
			reinforced_links[i] = [None] * nblocks
			row_sum = 0.
			col_sum = 0.
			for j in range(nblocks):
				if i != j:
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					reinforced_links[i][j] = model.addVar(lb=0., ub=upper_bound, obj=0., vtype=GRB.CONTINUOUS, name="rlc" + str(i) + "_" + str(j))
					# penalize the squared distance from the upper bound so the
					# reinforcement is spread as evenly as possible
					# (removed a dead `diff = LinExpr()` that was immediately overwritten)
					diff = upper_bound - reinforced_links[i][j]
					objective_function.add(diff * diff, mult=1.)
					row_sum += adj_matrix[i][j]
					col_sum += adj_matrix[j][i]
			# link budget that adj_matrix has not consumed yet
			leftover_outgoing[i] = self.aurora_network.get_num_links(i) - row_sum
			leftover_incoming[i] = self.aurora_network.get_num_links(i) - col_sum
		for i in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for j in range(nblocks):
				if i != j :
					row_constraint.add(reinforced_links[i][j], mult=1.)
					col_constraint.add(reinforced_links[j][i], mult=1.)
			# the added links must fit within the leftover budgets
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_outgoing[i])
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_incoming[i])
		model.setObjective(objective_function, GRB.MINIMIZE)
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] += reinforced_links[i][j].x
			return adj_matrix
		except GurobiError as e:
			print ("ReinforceFractionalTopology: Error code " + str(e.errno) + ": " + str(e))
			return
		except AttributeError :
			print ("ReinforceFractionalTopology: Encountered an attribute error ")
			return

	# conducts topology engineering
	# training_traffic_snapshots - all the snapshots used for training the topology
	# spare_traffic - fraction of total demand spread over near-zero TM entries for robustness
	def _topology_engineer(self, training_traffic_snapshots, all_paths, spare_traffic=0.1):
		"""Cluster the training snapshots into numK representative traffic
		matrices, scale each to its own feasibility limit, design one topology
		covering them all, then reinforce it with the leftover link budget."""
		nblocks = self.aurora_network.get_num_blocks()
		# run k-means clustering on the normalized traffic vectors
		traffic_points = np.array([self.aurora_network.get_traffic_vector_from_traffic_snapshot(x, normalize_to=1.) for x in training_traffic_snapshots])
		traffic_cluster_centroid_points = self._compute_clusters(traffic_points, run_pca_before_clustering=False)
		scaled_traffic_matrices = []
		for topol_id in range(self.numK):
			traffic_matrix = self.aurora_network.transform_traffic_point_to_traffic_matrix(traffic_cluster_centroid_points[topol_id])
			## robustness: spread spare_traffic of the total demand uniformly over
			## the near-zero entries so the topology keeps some capacity for them
			traffic_sum = sum([sum(x) for x in traffic_matrix])
			reset_num_entries = 0
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						if traffic_matrix[i][j] < 0.0001:
							reset_num_entries += 1
			# reset_num_entries > 0 whenever the fill loop below writes anything,
			# so the division cannot be by zero
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						if traffic_matrix[i][j] < 0.0001:
							traffic_matrix[i][j] = (spare_traffic * traffic_sum) / (1 - spare_traffic) / float(reset_num_entries)
			# scale the representative TM up to its individual feasibility limit
			mu = self._compute_maximum_traffic_scaleup(traffic_matrix, all_paths)
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						traffic_matrix[i][j] *= mu
			scaled_traffic_matrices.append(traffic_matrix)
		# one common scale-up factor that all representative TMs can sustain
		scale_up = self._maximize_scaleup_factor_for_all_traffic_matrices(scaled_traffic_matrices, all_paths)
		adj_matrix = self._minimize_multihop(scaled_traffic_matrices, [scale_up] * len(scaled_traffic_matrices), all_paths)
		# BUGFIX: _reinforce_fractional_topology returns None on solver failure;
		# fall back to the unreinforced topology instead of returning None
		# (cf. IdealTopologyEngineer._topology_engineer)
		reinforced = self._reinforce_fractional_topology(adj_matrix)
		if reinforced is not None:
			adj_matrix = reinforced
		return adj_matrix

	def __normalize_routing_weights(self, nblocks, unnormalized_routing_weights):
		"""Normalize path weights so that, for every (src, dst) block pair, the
		weights of all paths between that pair sum to one. A path's src/dst are
		its first and last nodes. Returns a new dict keyed by path."""
		totals = np.zeros((nblocks, nblocks,))
		for path, weight in unnormalized_routing_weights.items():
			totals[path[0]][path[-1]] += weight
		return {path: weight / totals[path[0]][path[-1]]
				for path, weight in unnormalized_routing_weights.items()}

	# conducts topology engineering given the representative traffic matrices
	# representative_tms - the numK representative traffic matrices
	# all_paths - candidate paths between every block pair
	# minimize_multihop - kept for interface compatibility; unused here
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths, minimize_multihop=True):
		"""Design a fractional topology from the representative TMs via the
		bounded-WCMP LP, scale it to the link budget, then reinforce it with
		the leftover budget. Returns the fractional adjacency matrix."""
		assert(self.numK == len(representative_tms))
		nblocks = self.aurora_network.get_num_blocks()
		# assumes homogeneous link capacity across blocks (uses block 0's) -- TODO confirm
		link_capacity = self.aurora_network.get_link_capacity(0)
		adj_matrix = self._bounded_wcmp_path_weights_v2(nblocks, link_capacity, representative_tms)
		adj_matrix = self._scale_up_matrix(adj_matrix)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix

	def _scale_up_matrix(self, adj_matrix):
		"""Uniformly scale adj_matrix by the largest factor that keeps every
		block's egress and ingress within its link count (solved as a tiny LP).
		Falls back to a factor of 1 if the solver fails."""
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("Bounded WCMP ToE Scale up process")
		model.setParam( 'OutputFlag', False )
		mu = model.addVar(lb=0., ub=GRB.INFINITY, obj=1, vtype=GRB.CONTINUOUS, name="scale up")
		## per-pod degree constraints: scaled row/column sums must fit the radix
		for pod in range(nblocks):
			pod_num_links = self.aurora_network.get_num_links(pod)
			egress_total = sum(adj_matrix[pod][other] for other in range(nblocks) if other != pod)
			ingress_total = sum(adj_matrix[other][pod] for other in range(nblocks) if other != pod)
			model.addConstr(lhs=egress_total * mu, sense=GRB.LESS_EQUAL, rhs=pod_num_links)
			model.addConstr(lhs=ingress_total * mu, sense=GRB.LESS_EQUAL, rhs=pod_num_links)
		model.setObjective(mu, GRB.MAXIMIZE)
		scale_up = 1.
		try:
			model.optimize()
			scale_up = mu.x
			print("Bounded ToE scale up : {}".format(scale_up))
		except GurobiError as e:
			print ("Bounded ToE WCMP Scale up- Error code " + str(e.errno) + ": " + str(e))
		except AttributeError :
			print ("Bounded ToE WCMP Scale up- Encountered an attribute error ")
		scaled_up_matrix = np.zeros((nblocks, nblocks))
		for src in range(nblocks):
			for dst in range(nblocks):
				if src != dst:
					scaled_up_matrix[src][dst] = adj_matrix[src][dst] * scale_up
		print("Printing scaled up matrix of topology: \n{}".format(scaled_up_matrix))
		return scaled_up_matrix

	def _bounded_wcmp_path_weights(self, nblocks, link_capacity, traffic_matrices):
		"""Solve the bounded-WCMP LP with a single shared set of weights:
		one direct-path weight w0 plus one transit weight per block.

		The per-pair utilization bound uses each pod's worst-case egress/ingress
		(Zi/Zj) over all traffic_matrices and the component-wise max demand Uij.
		Minimizes the maximum link utilization (mlu) and returns the resulting
		per-pair direct utilization matrix, or None on solver failure.
		NOTE(review): presumably that matrix is later interpreted as a fractional
		topology (see topology_engineer_given_representative_TMs) -- confirm."""
		egress_bound = [0] * nblocks
		ingress_bound = [0] * nblocks
		## Step 1 : compute the pod egress and ingress bounds
		# max_tm[i][j] is the component-wise maximum demand over all TMs
		max_tm = np.zeros((nblocks, nblocks,))
		for tm in traffic_matrices:
			for pod in range(nblocks):
				pod_egress = sum(tm[pod])
				pod_ingress = 0
				for target_pod in range(nblocks):
					pod_ingress += tm[target_pod][pod]
					max_tm[pod][target_pod] = max(max_tm[pod][target_pod], tm[pod][target_pod])
				egress_bound[pod] = max(egress_bound[pod], pod_egress)
				ingress_bound[pod] = max(ingress_bound[pod], pod_ingress)


		# Step 2 : Initialize the optimization variables
		# index 0 is the direct-path weight; index k+1 is transit via block k
		model = Model("Bounded WCMP ToE")
		model.setParam( 'OutputFlag', False )
		mlu = model.addVar(lb=0., ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="mlu")
		routing_weights_var = [None] * (nblocks + 1)
		for index in range(nblocks + 1):
			routing_weights_var[index] = model.addVar(lb=0., ub=1, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(index))

		## Step 3 : Start programming the constraints
		## Constraint Type 1 : weights must sum to 1
		weight_constraint = LinExpr()
		for index in range(nblocks + 1):
			weight_constraint += routing_weights_var[index]
		model.addConstr(lhs=weight_constraint,sense=GRB.EQUAL,rhs=1)
		## Constraint Type 2 : mlu constraint
		# worst-case load on link (i, j): transit towards j, transit from i,
		# plus the direct share of the (i, j) demand
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					mlu_constraint = LinExpr()
					Zi = egress_bound[i]
					Zj = ingress_bound[j]
					Uij = max_tm[i][j]
					w0 = routing_weights_var[0]
					wi = routing_weights_var[i + 1]
					wj = routing_weights_var[j + 1]
					mlu_constraint = Zi * wj + Zj * wi + w0 * Uij
					model.addConstr(lhs=mlu_constraint, sense=GRB.LESS_EQUAL, rhs=mlu * link_capacity)
		
		## Step 4 : insert the objective function
		model.setObjective(mlu, GRB.MINIMIZE)
		try:
			model.optimize()
			# same expression as the mlu constraint, evaluated at the optimum
			block_pair_utilization = np.zeros((nblocks, nblocks))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						Zi = egress_bound[i]
						Zj = ingress_bound[j]
						Uij = max_tm[i][j]
						direct_utilization = routing_weights_var[0].x * Uij + routing_weights_var[i + 1].x * Zj + routing_weights_var[j + 1].x * Zi
						block_pair_utilization[i][j] = direct_utilization
			#print(block_pair_utilization)
			return block_pair_utilization
		except GurobiError as e:
			print ("Bounded ToE WCMP - Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("Bounded ToE WCMP - Encountered an attribute error ")
			return None


	def _bounded_wcmp_path_weights_v2(self, nblocks, link_capacity, traffic_matrices):
		'''
		Computes worst-case block-pair utilizations under bounded WCMP routing.

		Solves an LP that chooses, for each ordered block pair (i, j), a
		direct-path weight plus per-node transit weights so that the maximum
		link utilization (mlu) is minimized, given per-pod ingress/egress
		bounds derived from the supplied traffic matrices.

		Args:
			nblocks: number of blocks (pods) in the network.
			link_capacity: capacity of a single inter-block link.
			traffic_matrices: iterable of nblocks x nblocks demand matrices.

		Returns:
			An nblocks x nblocks numpy array where entry (i, j) is the
			worst-case utilization placed on the direct link between i and j,
			or None if the solver raises an error.
		'''
		## Step 1 : compute the per-pod egress/ingress bounds and the
		## element-wise maximum traffic matrix across all snapshots.
		egress_bound = [0] * nblocks
		ingress_bound = [0] * nblocks
		max_tm = np.zeros((nblocks, nblocks,))
		for tm in traffic_matrices:
			for pod in range(nblocks):
				pod_egress = sum(tm[pod])
				pod_ingress = 0
				for target_pod in range(nblocks):
					pod_ingress += tm[target_pod][pod]
					max_tm[pod][target_pod] = max(max_tm[pod][target_pod], tm[pod][target_pod])
				egress_bound[pod] = max(egress_bound[pod], pod_egress)
				ingress_bound[pod] = max(ingress_bound[pod], pod_ingress)

		# Variables [0, node_offset) are per-pair direct-path weights (one per
		# ordered pair i != j); variables [node_offset, node_offset + nblocks)
		# are per-node transit weights shared across all pairs.
		node_offset = nblocks * (nblocks - 1)

		# Step 2 : Initialize the optimization variables
		model = Model("Bounded WCMP ToE")
		model.setParam('OutputFlag', False)
		mlu = model.addVar(lb=0., ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="mlu")
		routing_weights_var = [None] * (node_offset + nblocks)
		for index in range(len(routing_weights_var)):
			routing_weights_var[index] = model.addVar(lb=0., ub=1, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(index))

		## Step 3 : Start programming the constraints
		## Constraint Type 1 : for every ordered pair, the direct weight plus
		## all per-node transit weights must sum to 1.
		offset = 0
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					weight_constraint = LinExpr()
					weight_constraint += routing_weights_var[offset]
					for index in range(nblocks):
						weight_constraint += routing_weights_var[index + node_offset]
					model.addConstr(lhs=weight_constraint, sense=GRB.EQUAL, rhs=1)
					offset += 1

		## Constraint Type 2 : the worst-case load on direct link (i, j) —
		## direct demand w0 * Uij plus bounded transit traffic Zi * wj and
		## Zj * wi — must stay within mlu * link_capacity.
		offset = 0
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					Zi = egress_bound[i]
					Zj = ingress_bound[j]
					Uij = max_tm[i][j]
					w0 = routing_weights_var[offset]
					wi = routing_weights_var[i + node_offset]
					wj = routing_weights_var[j + node_offset]
					mlu_constraint = Zi * wj + Zj * wi + w0 * Uij
					model.addConstr(lhs=mlu_constraint, sense=GRB.LESS_EQUAL, rhs=mlu * link_capacity)
					offset += 1

		## Step 4 : insert the objective function and solve
		model.setObjective(mlu, GRB.MINIMIZE)
		try:
			model.optimize()
			# Recover the per-pair utilization implied by the optimal weights,
			# mirroring the expression used in Constraint Type 2.
			block_pair_utilization = np.zeros((nblocks, nblocks))
			offset = 0
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						Zi = egress_bound[i]
						Zj = ingress_bound[j]
						Uij = max_tm[i][j]
						w0 = routing_weights_var[offset].x
						wi = routing_weights_var[i + node_offset].x
						wj = routing_weights_var[j + node_offset].x
						block_pair_utilization[i][j] = w0 * Uij + wi * Zj + wj * Zi
						offset += 1
			return block_pair_utilization
		except GurobiError as e:
			print ("Bounded ToE WCMP - Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError :
			print ("Bounded ToE WCMP - Encountered an attribute error ")
			return None
