import sys, os, copy, math
from topology_engineer import *
sys.path.append("..")
#import aurora_network
from gurobipy import *

'''		
This one solves for the scale up factor of the smallest traffic matrix.

Also first scales up each TM to its max scale up factor

Does not solve for MLU directly, and thus routing weights are not by default normalized.
'''
#combines multiple fractional topologies into one
class RobustMultiTrafficTopologyEngineerImplementationV2(TopologyEngineer):
	def __init__(self, aurora_network, reconfig_length, training_length, all_paths, all_traffic_snapshots, numK):
		"""Set up a robust multi-traffic topology engineer.

		aurora_network -- network model providing block/link parameters
		reconfig_length -- number of snapshots between topology reconfigurations
		training_length -- number of historical snapshots used to train each topology
		all_paths -- all_paths[i][j] is the list of candidate paths from block i to block j
		all_traffic_snapshots -- full sequence of traffic snapshots
		numK -- number of traffic clusters used when engineering a topology
		"""
		num_snapshots = len(all_traffic_snapshots)
		TopologyEngineer.__init__(self, aurora_network, num_snapshots)
		self.toe_class = TopologyEngineeringClass.ROBUST_TOE
		self.training_length = training_length
		self.reconfig_length = reconfig_length
		self.numK = numK
		# reuse the length computed above instead of calling len() a second time
		self.num_snapshots = num_snapshots
		self.all_traffic_snapshots = all_traffic_snapshots
		self.all_paths = all_paths
		## NOTE: have to rezero the cached logical topologies here because when we initialize the parent class,
		## 		it will assume that the topology is static (see TopologyEngineer class above).
		self.cached_logical_topologies = {}
		return

		# must be called before running other code 
	def cache_logical_topologies(self):
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		## Start computing the logical topologies
		current_time = self.training_length
		print("Caching logical topologies for robust ToE Implementation V2.....")
		while current_time < self.num_snapshots:
			ending_time = min(current_time + self.reconfig_length - 1, self.num_snapshots - 1)
			training_snapshots = self.all_traffic_snapshots[current_time - self.training_length : current_time]
			adj_matrix = self._topology_engineer(training_snapshots, self.all_paths)
			self.cached_logical_topologies[(current_time, ending_time)] = adj_matrix
			current_time += self.reconfig_length
		print("Caching logical topologies: COMPLETE \n")
		return

	def get_filename_param(self):
		"""Return the parameter string used to tag output filenames."""
		params = (self.reconfig_length, self.training_length, self.numK)
		return "robusttoev2_r{}t{}k{}".format(*params)

	# returns a string description of this class
	def get_string(self):
		"""Return a human-readable description of this engineer."""
		description = "Robust multi traffic ToE (V2) with K clusters : {}"
		return description.format(self.numK)

	def is_static(self):
		"""This engineer reconfigures the topology over time, so it is never static."""
		return False

	# given a scale up factor, figure out the routing that minimizes two-hop paths if they exists
	## Todo(jason) : fix this and debug
	def _minimize_multihop(self, scaled_traffic_matrices, scaling_factors, all_paths):
		"""QP: find a fractional topology and routing that serve every scaled
		traffic matrix while minimizing the squared weight placed on multihop
		(more than one hop) paths.

		scaled_traffic_matrices -- list of traffic matrices that must all be supported
		scaling_factors -- per-matrix multiplier applied to each demand entry
		all_paths -- all_paths[i][j] lists candidate paths (node sequences) from i to j
		Returns an nblocks x nblocks numpy array of fractional inter-block link
		counts; entries not filled in before a solver error remain zero.
		"""
		## using QP to reduce multihop reliance
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(scaled_traffic_matrices)
		model = Model("minimize multihop")
		model.setParam( 'OutputFlag', False )
		interpod_link_counts = [None] * nblocks
		routing_weight_vars = {}
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables, 
		## which are the link counts and routing variables
		objective_function = QuadExpr()
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					link_capacity_constraints[i][j] = LinExpr()
					# a directed link count can be at most the smaller endpoint's radix
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					for path in all_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weight_vars[path] = var
						# if multihop path, then add to objective function
						if len(path) > 2: 
							objective_function += (var * var)

		## add radix degree constraints
		# each pod's total outgoing (row) and incoming (col) link counts are capped by its radix
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)

		## add achievability of flows for all i j pairs
		# for every TM, the total path weight between (i, j) must cover the scaled demand
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						flow_achievability_constr = LinExpr()
						for path in all_paths[i][j]:
							flow_achievability_constr.add(routing_weight_vars[path], 1.)
						model.addConstr(lhs=scaling_factors[tm_index] * scaled_traffic_matrices[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## add link capacity limit for all paths constraints
		# accumulate, for every directed link, the weight of every path that traverses it
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					for path in all_paths[i][j]:
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weight_vars[path], mult=1.)
							curr_node = next_node

		# total traffic on each link must fit within the provisioned link count times capacity
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][j] * capacity)
		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		try: 
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
			#print adj_matrix
		except GurobiError as e:
			print ("MinimizeMultihop: Error code " + str(e. errno ) + ": " + str(e))
		except AttributeError :
			print ("MinimizeMultihop: Encountered an attribute error ")
		return adj_matrix

	def _maximize_scaleup_factor_for_all_traffic_matrices(self, scaled_traffic_matrices, all_inter_block_paths):
		"""LP: maximize a single factor mu such that mu times EVERY traffic matrix
		is simultaneously routable on one shared fractional topology.

		scaled_traffic_matrices -- traffic matrices that must all be supported
		all_inter_block_paths -- all_inter_block_paths[i][j] lists candidate paths i -> j
		Returns the worst-case scale up factor mu, or None on solver failure.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(scaled_traffic_matrices)
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		# mu is the common scale-up factor being maximized
		mu = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="mu")
		fractional_topology_var = [None] * nblocks
		routing_weights_var = {}
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables, 
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					# a directed link count can be at most the smaller endpoint's radix
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[path] = var
		## add radix degree constraints
		# each pod's total outgoing (row) and incoming (col) link counts are capped by its radix
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs
		# for every TM, the total path weight between (i, j) must cover mu * demand
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						flow_achievability_constr = LinExpr()
						for path in all_inter_block_paths[i][j]:
							flow_achievability_constr.add(routing_weights_var[path], 1.)
						model.addConstr(lhs=mu * scaled_traffic_matrices[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## add link capacity limit for all paths constraints
		# accumulate, for every directed link, the weight of every path that traverses it
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					for path in all_inter_block_paths[i][j]:
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[path], 1.)
							curr_node = next_node
					

		# total traffic on each link must fit within the provisioned link count times capacity
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)
		# set up the objective function
		model.setObjective(mu, GRB.MAXIMIZE)
		# start optimizing
		try: 
			model.optimize()
			print("Worst case scale up : {}".format(mu.x))
			return mu.x # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	## done - ish
	# first step of topology design which solves the LP that maximizes traffic scale up
	def _compute_maximum_traffic_scaleup(self, traffic_matrix, all_inter_block_paths):
		"""LP: maximize the scale-up factor mu for a SINGLE traffic matrix, i.e.
		the largest mu for which mu * traffic_matrix is routable on some
		fractional topology respecting radix and capacity limits.

		traffic_matrix -- the demand matrix to scale
		all_inter_block_paths -- all_inter_block_paths[i][j] lists candidate paths i -> j
		Returns mu, or None on solver failure.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		mu = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="mu")
		fractional_topology_var = [None] * nblocks
		# NOTE: unlike the multi-TM variants, routing weights here are kept per-pair
		# lists indexed in the same order as all_inter_block_paths[i][j]
		routing_weights_var = [None] * nblocks
		link_capacity_constraints = [0] * nblocks
		## setup the link constraints and all optimization variables, 
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			routing_weights_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					# a directed link count can be at most the smaller endpoint's radix
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weights_var[i][j] = [] 
					weight_index = 0
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[i][j].append(var)
						weight_index += 1

		## add radix degree constraints
		# each pod's total outgoing (row) and incoming (col) link counts are capped by its radix
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs
		# also accumulates, per directed link, the weight of every path crossing it
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					flow_achievability_constr = LinExpr()
					for weight_index in range(len(all_inter_block_paths[i][j])):
						flow_achievability_constr.add(routing_weights_var[i][j][weight_index], 1.)
						path_len = len(all_inter_block_paths[i][j][weight_index])
						curr_node = all_inter_block_paths[i][j][weight_index][0]
						for next_node_index in range(1, path_len, 1):
							next_node = all_inter_block_paths[i][j][weight_index][next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[i][j][weight_index], 1.)
							curr_node = all_inter_block_paths[i][j][weight_index][next_node_index]
					model.addConstr(lhs=mu * traffic_matrix[i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## add link capacity limit for all paths constraints
		# total traffic on each link must fit within the provisioned link count times capacity
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		# set up the objective function
		model.setObjective(mu, GRB.MAXIMIZE)
		# start optimizing
		try: 
			model.optimize()
			return mu.x # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp (single): Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp (single): Encountered an attribute error ")
			return None

	## Computes the cluster centroids for a bunch of historical traffic matrices
	## These will then be used for topology engineering
	def _compute_clusters(self, training_points, run_pca_before_clustering=True, num_clustering_components=20):
		"""Cluster historical traffic points into self.numK centroids via k-means.

		training_points -- array-like of nonnegative traffic vectors
		run_pca_before_clustering -- if True, reduce dimensionality with PCA before
			clustering and project the centroids back afterward
		num_clustering_components -- number of PCA components when PCA is enabled
		Returns a (numK x D) array of centroids in the original traffic-point
		space, with negative entries (a PCA back-projection artifact) clamped to 0.
		"""
		# traffic demands must be nonnegative; fail fast on bad input
		for point in training_points:
			for entry in point:
				assert(entry >= 0)
		traffic_points_for_training = training_points
		principal_components = None
		if run_pca_before_clustering:
			pca = PCA(n_components=num_clustering_components)
			traffic_points_for_training = pca.fit_transform(training_points)
			principal_components = pca.components_
		kmeans = KMeans(n_clusters=self.numK, random_state=0).fit(traffic_points_for_training)
		cluster_centroids = kmeans.cluster_centers_
		# transform the clusters (which are in the dimension-reduced space) back to the dimension of traffic points
		if run_pca_before_clustering:
			traffic_centroid_points = np.array(np.matmul(cluster_centroids, principal_components))
		else:
			traffic_centroid_points = cluster_centroids
		# clamp negative entries to zero in one vectorized pass
		# (replaces the original element-by-element double loop)
		traffic_centroid_points = np.maximum(traffic_centroid_points, 0.)
		return traffic_centroid_points

	# conducts topology engineering
	# aurora network - Aurora Network, contains all the parameters
	# training_traffic_snapshots - all the snapshots used for training the topology
	# number of k - which is the number of traffic clusters we use to solve engineer topology
	def _topology_engineer(self, training_traffic_snapshots, all_paths, spare_traffic=0.1):
		"""Engineer a fractional topology robust to the traffic clusters found in
		the training snapshots.

		training_traffic_snapshots -- historical snapshots used for clustering
		all_paths -- all_paths[i][j] lists candidate paths from block i to block j
		spare_traffic -- fraction of total demand spread across near-zero entries
			so the topology keeps some capacity for unseen flows
		Returns the reinforced fractional adjacency matrix.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		# run k-means clustering on the traffic vectors (each normalized to 1)
		traffic_points = np.array([self.aurora_network.get_traffic_vector_from_traffic_snapshot(x, normalize_to=1.) for x in training_traffic_snapshots])
		traffic_cluster_centroid_points = self._compute_clusters(traffic_points, run_pca_before_clustering=False)
		scaled_traffic_matrices = []
		for topol_id in range(self.numK):
			traffic_matrix = self.aurora_network.transform_traffic_point_to_traffic_matrix(traffic_cluster_centroid_points[topol_id])
			## robustness: spread spare_traffic of the demand over near-zero entries
			traffic_sum = sum([sum(x) for x in traffic_matrix])
			# single pass to collect the near-zero off-diagonal entries
			# (the original scanned the matrix twice with identical conditions)
			reset_entries = []
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j and traffic_matrix[i][j] < 0.0001:
						reset_entries.append((i, j))
			if reset_entries:
				fill_value = (spare_traffic * traffic_sum) / (1 - spare_traffic) / float(len(reset_entries))
				for i, j in reset_entries:
					traffic_matrix[i][j] = fill_value
			# scale each cluster's matrix up to its own maximum achievable factor
			mu = self._compute_maximum_traffic_scaleup(traffic_matrix, all_paths)
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						traffic_matrix[i][j] *= mu
			scaled_traffic_matrices.append(traffic_matrix)
		# worst-case scale up across all clusters, then route to minimize multihop reliance
		scale_up = self._maximize_scaleup_factor_for_all_traffic_matrices(scaled_traffic_matrices, all_paths)
		adj_matrix = self._minimize_multihop(scaled_traffic_matrices, [scale_up] * len(scaled_traffic_matrices), all_paths)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix

	def _test_individual_scaleup_factor_for_all_TMs_uniform_topology(self, scaled_traffic_matrices, minimal_scale_up, all_inter_block_paths):
		"""Diagnostic QP: on a FIXED uniform topology (each pod's links spread
		evenly across all other pods), find per-TM scale-ups in
		[minimal_scale_up * 0.999, 1] minimizing sum (1 - mu_t)^2. Used to compare
		against the engineered topology.

		Returns the list of per-TM scale ups, or None on solver failure.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(scaled_traffic_matrices)
		model = Model("maximize individual traffic scale up")
		model.setParam( 'OutputFlag', False )
		individual_scaleups = [None] * num_tm
		objective_function = QuadExpr()

		# build the uniform topology: every pod splits its links evenly across peers
		# NOTE(review): uses pod 0's radix for all pods — assumes homogeneous radix, confirm
		num_links_per_block = self.aurora_network.get_num_links(0)
		uniform_topology = np.zeros((nblocks, nblocks,))
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					uniform_topology[i][j] = float(num_links_per_block) / (nblocks - 1)

		# one scale-up variable per TM; the objective pushes each toward 1
		for index in range(num_tm):
			mu = model.addVar(lb=minimal_scale_up * 0.999, ub=1, obj=0, vtype=GRB.CONTINUOUS, name="u_{}".format(index))
			objective_function += ((1 - mu) * (1 - mu))
			individual_scaleups[index] = mu
		
		routing_weights_var = {}
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables, 
		## which are the link counts and routing variables
		for i in range(nblocks):
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[path] = var

		## add achievability of flows for all i j pairs
		# for every TM, the total path weight between (i, j) must cover mu_t * demand
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						flow_achievability_constr = LinExpr()
						for path in all_inter_block_paths[i][j]:
							flow_achievability_constr.add(routing_weights_var[path], 1.)
						model.addConstr(lhs=individual_scaleups[tm_index] * scaled_traffic_matrices[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## add link capacity limit for all paths constraints
		# accumulate, for every directed link, the weight of every path that traverses it
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					for path in all_inter_block_paths[i][j]:
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[path], 1.) 
							curr_node = next_node

		# link loads are capped by the FIXED uniform topology's provisioned capacity
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=uniform_topology[i][j] * capacity)

		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		try: 
			model.optimize()
			scaleups = [0] * num_tm
			for mu, index in zip(individual_scaleups, range(num_tm)):
				scaleups[index] = mu.x
			return scaleups # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	def _maximize_individual_scaleup_factor_for_all_TMs(self, scaled_traffic_matrices, minimal_scale_up, all_inter_block_paths):
		"""QP: jointly choose one fractional topology, routing weights, and per-TM
		scale-ups mu_t in [minimal_scale_up * 0.999, 1], minimizing
		sum (1 - mu_t)^2 so each TM is served as close to fully as possible.

		scaled_traffic_matrices -- per-cluster traffic matrices
		minimal_scale_up -- worst-case common scale up (lower bound for each mu_t)
		all_inter_block_paths -- all_inter_block_paths[i][j] lists candidate paths i -> j
		Returns (scaleups, adj_matrix, routing_weights), or None on solver failure.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(scaled_traffic_matrices)
		model = Model("maximize individual traffic scale up")
		model.setParam( 'OutputFlag', False )
		individual_scaleups = [None] * num_tm
		objective_function = QuadExpr()
		# one scale-up variable per TM; the objective pushes each toward 1
		for index in range(num_tm):
			mu = model.addVar(lb=minimal_scale_up * 0.999, ub=1, obj=0, vtype=GRB.CONTINUOUS, name="u_{}".format(index))
			objective_function += ((1 - mu) * (1 - mu))
			individual_scaleups[index] = mu
		
		fractional_topology_var = [None] * nblocks
		routing_weights_var = {}
		link_capacity_constraints = [None] * nblocks
		## setup the link constraints and all optimization variables, 
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					# a directed link count can be at most the smaller endpoint's radix
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[path] = var
		## add radix degree constraints
		# each pod's total outgoing (row) and incoming (col) link counts are capped by its radix
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs
		# for every TM, the total path weight between (i, j) must cover mu_t * demand
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						flow_achievability_constr = LinExpr()
						for path in all_inter_block_paths[i][j]:
							flow_achievability_constr.add(routing_weights_var[path], 1.)
						model.addConstr(lhs=individual_scaleups[tm_index] * scaled_traffic_matrices[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## add link capacity limit for all paths constraints
		# accumulate, for every directed link, the weight of every path that traverses it
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					for path in all_inter_block_paths[i][j]:
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[path], 1.)
							curr_node = next_node

		# total traffic on each link must fit within the provisioned link count times capacity
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)
		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		try: 
			model.optimize()
			scaleups = [0] * num_tm
			for mu, index in zip(individual_scaleups, range(num_tm)):
				scaleups[index] = mu.x
			adj_matrix = [0] * nblocks
			for i in range(nblocks):
				adj_matrix[i] = [0] * nblocks
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] = fractional_topology_var[i][j].x
			routing_weights = {}
			for path in routing_weights_var.keys():
				routing_weights[path] = routing_weights_var[path].x
			# bug fix: was the Python 2 statement `print adj_matrix`, which is a
			# SyntaxError under Python 3 (every other print in this file uses parens)
			print(adj_matrix)
			return scaleups, adj_matrix, routing_weights # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	def __normalize_routing_weights(self, nblocks, unnormalized_routing_weights):
		"""Normalize routing weights so that, for every (src, dst) pair, the
		weights over all paths between that pair sum to 1.

		nblocks -- number of blocks (dimension of the per-pair accumulator)
		unnormalized_routing_weights -- dict mapping path -> raw weight
		Returns a new dict mapping path -> normalized weight.
		"""
		# first pass: total weight routed between each (src, dst) pair
		pair_totals = np.zeros((nblocks, nblocks,))
		for path, weight in unnormalized_routing_weights.items():
			pair_totals[path[0]][path[-1]] += weight
		# second pass: divide each path's weight by its pair total
		normalized = {}
		for path, weight in unnormalized_routing_weights.items():
			normalized[path] = weight / pair_totals[path[0]][path[-1]]
		return normalized

	# conducts topology engineering given the representative traffic matrices
	# aurora network - Aurora Network, contains all the parameters
	# representative_tms - the numK representative traffic matrices (one per cluster)
	# minimize_multihop - if True, re-solve routing to minimize multihop reliance
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths, minimize_multihop=True):
		"""Engineer a topology directly from self.numK representative traffic matrices.

		Returns (adj_matrix, normalized_routing_weights).
		Note: the original implementation carried an unreachable tail of debug
		prints after the return statement; that dead code has been removed.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		assert(self.numK == len(representative_tms))
		scaled_traffic_matrices = []
		for topol_id in range(self.numK):
			# scale each representative TM up to its individual maximum
			mu = self._compute_maximum_traffic_scaleup(representative_tms[topol_id], all_paths)
			traffic_matrix = np.zeros((nblocks, nblocks,))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						traffic_matrix[i][j] = mu * representative_tms[topol_id][i][j]
			scaled_traffic_matrices.append(traffic_matrix)
		# worst-case common scale up, then per-TM scale ups on one shared topology
		scale_up = self._maximize_scaleup_factor_for_all_traffic_matrices(scaled_traffic_matrices, all_paths)
		individual_scaleups, adj_matrix, routing_weights = self._maximize_individual_scaleup_factor_for_all_TMs(scaled_traffic_matrices, scale_up, all_paths)
		normalized_routing_weights = self.__normalize_routing_weights(nblocks, routing_weights)
		if minimize_multihop:
			adj_matrix = self._minimize_multihop(scaled_traffic_matrices, individual_scaleups, all_paths)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix, normalized_routing_weights