import sys, os, copy, math
from topology_engineer import *
sys.path.append("..")
#import aurora_network
from gurobipy import *

'''		
Design fractional topology based on 
traffic clusters using LP formulation
'''
#combines multiple fractional topology into one
class RobustMultiTrafficTopologyEngineer(TopologyEngineer):
	"""Designs robust fractional topologies from clustered traffic snapshots.

	Historical traffic is clustered into numK representative matrices; one
	fractional topology is engineered per cluster via LP/QP, and the results
	are combined into a single topology per reconfiguration window.
	"""

	def __init__(self, aurora_network, reconfig_length, training_length, all_paths, all_traffic_snapshots, numK, ignore_flows=None):
		# aurora_network        - network object holding all topology parameters
		# reconfig_length       - number of snapshots between reconfigurations
		# training_length       - number of past snapshots used to train each topology
		# all_paths             - candidate inter-block paths, indexed [src][dst]
		# all_traffic_snapshots - full sequence of traffic snapshots
		# numK                  - number of traffic clusters used by k-means
		# ignore_flows          - optional collection of (src, dst) pairs excluded
		#                         from the spare-traffic robustness padding
		num_snapshots = len(all_traffic_snapshots)
		TopologyEngineer.__init__(self, aurora_network, num_snapshots)
		self.flow_pairs_to_ignore = [] if ignore_flows is None else ignore_flows
		self.toe_class = TopologyEngineeringClass.ROBUST_TOE
		self.training_length = training_length
		self.reconfig_length = reconfig_length
		self.numK = numK
		self.num_snapshots = num_snapshots  # reuse; was recomputed with a second len() call
		self.all_traffic_snapshots = all_traffic_snapshots
		self.all_paths = all_paths
		## NOTE: have to rezero the cached logical topologies here because the
		## 		parent class initializer assumes the topology is static
		## 		(see the TopologyEngineer class in topology_engineer).
		self.cached_logical_topologies = {}

		# must be called before running other code 
	def cache_logical_topologies(self, round_to_integer=False):
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		## Start computing the logical topologies
		current_time = self.training_length
		print("Caching logical topologies for robust ToE.....")
		while current_time < self.num_snapshots:
			ending_time = min(current_time + self.reconfig_length - 1, self.num_snapshots - 1)
			training_snapshots = self.all_traffic_snapshots[current_time - self.training_length : current_time]
			adj_matrix = self._topology_engineer(training_snapshots, self.all_paths)
			if round_to_integer:
				self.cached_logical_topologies[(current_time, ending_time)] = self.aurora_network.round_fractional_topology_giant_switch(adj_matrix, self.flow_pairs_to_ignore)
			else:
				self.cached_logical_topologies[(current_time, ending_time)] = adj_matrix
			current_time += self.reconfig_length
		print("Caching logical topologies: COMPLETE \n")
		print("first logical topology: \n{}\n".format(self.cached_logical_topologies[(self.training_length, self.training_length + self.reconfig_length - 1)]))
		self.aurora_network.print_stats(self.cached_logical_topologies[(self.training_length, self.training_length + self.reconfig_length - 1)])
		return

	def get_filename_param(self):
		"""Short parameter tag used when composing output file names."""
		params = (self.reconfig_length, self.training_length, self.numK)
		return "robusttoe_r{}t{}k{}".format(*params)

	# returns a string description of this class
	def get_string(self):
		"""Human-readable description of this topology engineer."""
		template = "Robust multi traffic ToE with K clusters : {}"
		return template.format(self.numK)

	def is_static(self):
		"""This engineer reconfigures the topology over time, so never static."""
		return False

	# internal method
	# Should not be called by user, it is only called as a subroutine by topology_engineer
	def _combine_fractional_topologies(self, list_of_topologies):
		"""LP that merges several fractional topologies into a single one.

		Maximizes a common scale factor alpha such that the combined topology
		dominates alpha * T entrywise for every input topology T, subject to
		per-block radix (ingress/egress link-count) constraints.

		Returns the combined adjacency matrix, or None on solver failure.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("combine fractional topologies")
		model.setParam('OutputFlag', False)
		alpha = model.addVar(lb=0., ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="alpha")
		adj_matrix = np.zeros((nblocks, nblocks))
		combined_fractional_topology = {} # stores the fractional link count optimization variables
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					## BUG FIX: every link-count variable was created with
					## name="alpha" (copy-paste), colliding with the scale
					## variable's name in the Gurobi model.
					combined_fractional_topology[(i,j,)] = model.addVar(lb=0., ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="lc{}_{}".format(i, j))
					## the combined topology must support each input topology scaled by alpha
					for topol_id in range(len(list_of_topologies)):
						model.addConstr(lhs=combined_fractional_topology[(i,j,)], sense=GRB.GREATER_EQUAL, rhs=alpha * float(list_of_topologies[topol_id][i][j]))
		## add pod radix constraints: total egress (row) and ingress (col)
		## fractional links per block are bounded by that block's link budget
		for block in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(block)
			for target_block in range(nblocks):
				if target_block != block:
					row_constraint.add(combined_fractional_topology[(block, target_block,)], mult=1.)
					col_constraint.add(combined_fractional_topology[(target_block, block,)], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
		model.setObjective(alpha, GRB.MAXIMIZE)
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = combined_fractional_topology[(i, j)].x
			return adj_matrix
		except GurobiError as e:
			print("CombineFractionalTopologies: Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError:
			# .x raises AttributeError when the model has no solution loaded
			print("CombineFractionalTopologies: Encountered an attribute error ")
			return None

	# given a scale up factor, figure out the routing that minimizes multi-hop paths if they exist
	def _minimize_multihop(self, traffic_matrix, scaling_factor, all_paths):
		"""QP that routes scaling_factor * traffic_matrix while minimizing
		reliance on multi-hop (length > 2) paths.

		Jointly picks fractional inter-pod link counts and per-path routing
		weights; the objective is the sum of squared weights on multi-hop
		paths, subject to radix, flow-achievability, and link-capacity
		constraints.  Returns the resulting adjacency matrix, or None on
		solver failure (callers retry with a relaxed scaling_factor).
		"""
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("minimize multihop")
		model.setParam('OutputFlag', False)
		interpod_link_counts = [None] * nblocks
		routing_weights = {}
		link_capacity_constraints = [None] * nblocks
		## set up link-capacity accumulators and all optimization variables,
		## which are the link counts and per-path routing weights
		objective_function = QuadExpr()
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					link_capacity_constraints[i][j] = LinExpr()
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + "_" + str(j))
					for path in all_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights[path] = var
						# penalize (quadratically) any traffic placed on multi-hop paths
						if len(path) > 2:
							objective_function.add(var * var, mult=1.)

		## add radix degree constraints: bound egress (row) and ingress (col)
		## fractional link totals by each pod's link budget
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)

		## add achievability of flows for all i j pairs: the routing weights on
		## (i, j) paths must carry the (scaled) demand; also accumulate each
		## path's weight onto the capacity expression of every link it crosses
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					flow_achievability_constr = LinExpr()
					for path in all_paths[i][j]:
						flow_achievability_constr.add(routing_weights[path], mult=1.)
						path_len = len(path)
						curr_node = path[0]
						for next_node_index in range(1, path_len, 1):
							next_node = path[next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights[path], mult=1.)
							curr_node = next_node
					model.addConstr(lhs=scaling_factor * traffic_matrix[i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)

		## add link capacity limit for all paths constraints
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					## BUG FIX: capacity was min(get_link_capacity(i), get_link_capacity(i)) --
					## the second argument must be j (matches _maximize_traffic_scaleup)
					capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=interpod_link_counts[i][j] * capacity)
		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
			return adj_matrix
		except GurobiError as e:
			print("MinimizeMultihop: Error code " + str(e.errno) + ": " + str(e))
			return None
		except AttributeError as e1:
			# .x raises AttributeError when the model has no solution loaded
			print("MinimizeMultihop: Encountered an attribute error : " + str(e1))
			return None
	
	## done - ish
	# first step of topology design which solves the LP that maximizes traffic scale up
	def _maximize_traffic_scaleup(self, traffic_matrix, all_inter_block_paths):
		"""LP for the largest mu such that mu * traffic_matrix is routable.

		Jointly chooses a fractional topology (link counts per ordered block
		pair) and per-path routing weights, subject to per-block radix
		constraints and per-link capacity constraints, maximizing the uniform
		demand scale-up factor mu.

		Parameters:
			traffic_matrix        - demand between every (src, dst) block pair
			all_inter_block_paths - candidate paths indexed [src][dst], each a node sequence
		Returns:
			the optimal mu on success, or None if the solver raises.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		# mu is the uniform scale factor applied to every demand entry
		mu = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="mu")
		fractional_topology_var = [None] * nblocks
		routing_weights_var = [None] * nblocks
		link_capacity_constraints = [0] * nblocks  # placeholder values; each row is replaced below
		## setup the link constraints and all optimization variables,
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			routing_weights_var[i] = [None] * nblocks
			link_capacity_constraints[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					link_capacity_constraints[i][j] = LinExpr()
					# a block pair can use no more links than the smaller endpoint's budget
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weights_var[i][j] = [] 
					weight_index = 0
					# one routing-weight variable per candidate (i, j) path
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[i][j].append(var)
						weight_index += 1

		## add radix degree constraints: egress (row) and ingress (col) fractional
		## link totals per pod are bounded by that pod's link budget
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs: routing weights on the
		## (i, j) paths must carry mu * demand; while walking each path, also
		## accumulate its weight onto the capacity expression of every link used
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					flow_achievability_constr = LinExpr()
					for weight_index in range(len(all_inter_block_paths[i][j])):
						flow_achievability_constr.add(routing_weights_var[i][j][weight_index], 1.)
						path_len = len(all_inter_block_paths[i][j][weight_index])
						curr_node = all_inter_block_paths[i][j][weight_index][0]
						for next_node_index in range(1, path_len, 1):
							next_node = all_inter_block_paths[i][j][weight_index][next_node_index]
							link_capacity_constraints[curr_node][next_node].add(routing_weights_var[i][j][weight_index], 1.)
							curr_node = all_inter_block_paths[i][j][weight_index][next_node_index]
					model.addConstr(lhs=mu * traffic_matrix[i][j], sense=GRB.LESS_EQUAL, rhs=flow_achievability_constr)
		
		## add link capacity limit for all paths constraints: total weight routed
		## across link (i, j) cannot exceed its fractional link count * capacity
		for i in range(nblocks):
			for j in range(nblocks):
				if (i != j):
					capacity = min(self.aurora_network.get_link_capacity(i),self.aurora_network.get_link_capacity(j))
					model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		# set up the objective function
		model.setObjective(mu, GRB.MAXIMIZE)
		# start optimizing
		try: 
			model.optimize()
			return mu.x # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			# mu.x raises AttributeError when the model has no solution loaded
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	## Computes the cluster centroids for a bunch of historical traffic matrices
	## These will then be used for topology engineering
	def _compute_clusters(self, training_points, run_pca_before_clustering=True, num_clustering_components=20):
		"""Cluster historical traffic vectors with k-means into numK centroids.

		If there are no more training points than clusters, the points are
		returned unchanged.  When run_pca_before_clustering is True the points
		are first projected onto num_clustering_components principal
		components, clustered there, and the centroids are mapped back to
		traffic space.  Negative centroid entries (an artifact of the PCA
		back-projection) are clamped to zero.
		"""
		if len(training_points) <= self.numK:
			return training_points
		traffic_points_for_training = training_points
		principal_components = None
		# sanity check: traffic demands must be non-negative
		for point in training_points:
			for entry in point:
				assert(entry >= 0)
		if run_pca_before_clustering:
			pca = PCA(n_components=num_clustering_components)
			traffic_points_for_training = pca.fit_transform(training_points)
			principal_components = pca.components_
		kmeans = KMeans(n_clusters=self.numK, random_state=0).fit(traffic_points_for_training)
		cluster_centroids = kmeans.cluster_centers_
		# transform the centroids (which live in the dimension-reduced space)
		# back to the dimension of the traffic points
		if run_pca_before_clustering:
			traffic_centroid_points = np.array(np.matmul(cluster_centroids, principal_components))
		else:
			traffic_centroid_points = cluster_centroids
		## clamp negative entries to zero (vectorized; was an element-wise loop)
		traffic_centroid_points = np.maximum(traffic_centroid_points, 0.)
		return traffic_centroid_points



	## Computes the cluster centroids for a bunch of historical traffic matrices
	## These will then be used for topology engineering
	def _compute_clusters_compare(self, training_points, num_clustering_components=20):
		traffic_points_for_training = training_points
		principle_components = None
		nblocks = self.aurora_network.get_num_blocks()
		#if False:
		
		pca1 = PCA(n_components=num_clustering_components)
		traffic_points_for_training = pca1.fit_transform(training_points)
		principle_components = pca1.components_
		kmeans = KMeans(n_clusters=self.numK, random_state=0).fit(traffic_points_for_training)
		cluster_centroids1 = kmeans.cluster_centers_
		# transform the clusters (which are in the dimension-reduced space) back to the dimension of traffic points
		traffic_centroid_points1 = np.matmul(cluster_centroids1 , principle_components)
		traffic_centroid_points1 = np.array(traffic_centroid_points1)
		


		pca2 = PCA(n_components=nblocks * (nblocks - 1))
		traffic_points_for_training = pca2.fit_transform(training_points)
		principle_components = pca2.components_
		kmeans = KMeans(n_clusters=self.numK, random_state=0).fit(traffic_points_for_training)
		cluster_centroids2 = kmeans.cluster_centers_
		# transform the clusters (which are in the dimension-reduced space) back to the dimension of traffic points
		traffic_centroid_points2 = np.matmul(cluster_centroids2 , principle_components)
		traffic_centroid_points2 = np.array(traffic_centroid_points2)

		#assert(traffic_centroid_points.shape == (self.numK, self.aurora_network.get_num_blocks() * (self.aurora_network.get_num_blocks() - 1)))
		for ind in range(self.numK):
			print("")
			print("traffic centroid point (PCA) : {}".format(traffic_centroid_points1[ind]))
			print("traffic centroid point (no PCA) : {}".format(traffic_centroid_points2[ind]))
			print("")
		return traffic_centroid_points

	# conducts topology engineering
	# training_traffic_snapshots - all the snapshots used for training the topology
	# all_paths - candidate inter-block paths indexed [src][dst]
	# spare_traffic - fraction of total demand redistributed over near-zero entries
	# fraction_threshold - entries below fraction_threshold * total demand count as near-zero
	def _topology_engineer(self, training_traffic_snapshots, all_paths, spare_traffic=0.1, fraction_threshold=0.002):
		"""Engineer a robust fractional topology from a window of traffic snapshots.

		Pipeline: normalize snapshots to traffic vectors, cluster them into
		representative traffic matrices, pad near-zero demands with spare
		traffic (robustness against flows absent from the training window),
		design one fractional topology per cluster (maximize scale-up, then
		minimize multi-hop reliance), and finally combine + reinforce the
		per-cluster topologies into one adjacency matrix.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		# run k-means clustering on the snapshots normalized to unit total traffic
		traffic_points = np.array([self.aurora_network.get_traffic_vector_from_traffic_snapshot(x, normalize_to=1.) for x in training_traffic_snapshots])
		traffic_cluster_centroid_points = self._compute_clusters(traffic_points, run_pca_before_clustering=False)
		(nrows, ncols) = traffic_cluster_centroid_points.shape
		list_of_fractional_topologies = [0] * nrows
		for topol_id in range(nrows):
			traffic_matrix = self.aurora_network.transform_traffic_point_to_traffic_matrix(traffic_cluster_centroid_points[topol_id])
			## robustness padding: spread spare_traffic worth of demand evenly over
			## entries below the threshold (previously this scanned the matrix twice
			## with identical conditions; collect the qualifying pairs once instead)
			traffic_sum = sum([sum(x) for x in traffic_matrix])
			low_traffic_pairs = []
			if self.flow_pairs_to_ignore is not None:
				low_traffic_pairs = [(i, j) for i in range(nblocks) for j in range(nblocks)
									 if i != j and (i, j) not in self.flow_pairs_to_ignore
									 and traffic_matrix[i][j] < fraction_threshold * traffic_sum]
			if len(low_traffic_pairs) > 0:
				scratch_traffic = (spare_traffic * traffic_sum) / (1 - spare_traffic) / float(len(low_traffic_pairs))
				for (i, j) in low_traffic_pairs:
					traffic_matrix[i][j] = scratch_traffic
			## design this cluster's topology: maximize the scale-up factor, then
			## re-solve routing at that scale to minimize multi-hop reliance
			mu = self._maximize_traffic_scaleup(traffic_matrix, all_paths)
			filtered_logical_topology = self._minimize_multihop(traffic_matrix, mu, all_paths)
			relaxation = 0.99
			while filtered_logical_topology is None:
				# the multihop QP may be infeasible at exactly mu; relax progressively
				filtered_logical_topology = self._minimize_multihop(traffic_matrix, relaxation * mu, all_paths)
				relaxation -= 0.01
			list_of_fractional_topologies[topol_id] = filtered_logical_topology
		adj_matrix = self._combine_fractional_topologies(list_of_fractional_topologies)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix

	# conducts topology engineering given the representative traffic matrices
	# representative_tms - traffic matrices to design topologies for (one each)
	# all_paths - candidate inter-block paths indexed [src][dst]
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths):
		"""Design one fractional topology per representative TM, then combine them."""
		nblocks = self.aurora_network.get_num_blocks()
		num_representative_tms = len(representative_tms)
		adj_matrix = np.zeros((nblocks, nblocks))
		list_of_fractional_topologies = [0] * num_representative_tms
		scale_up = 0
		for topol_id, traffic_matrix in enumerate(representative_tms):
			# maximize the achievable scale-up for this TM, then re-solve routing
			# at that scale to minimize reliance on multi-hop paths
			mu = self._maximize_traffic_scaleup(traffic_matrix, all_paths)
			per_tm_topology = self._minimize_multihop(traffic_matrix, mu, all_paths)
			relaxation = 0.99
			while per_tm_topology is None:
				# the multihop QP may be infeasible at exactly mu; relax step by step
				per_tm_topology = self._minimize_multihop(traffic_matrix, relaxation * mu, all_paths)
				relaxation -= 0.01
			list_of_fractional_topologies[topol_id] = per_tm_topology
		adj_matrix = self._combine_fractional_topologies(list_of_fractional_topologies)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix