import sys, os, copy, math
from topology_engineer import *
sys.path.append("..")
#import aurora_network
from gurobipy import *

'''		
Design fractional topology based on 
traffic clusters using LP formulation

Solves ONE topology for all K traffic matrices without scaling them up first
Also this version solves for the MLU directly, rather than uses the traffic scale up factor
to solve for the ideal topology. Therefore, we can immediately solve for the routing weights 
as well
NOTE: This is the best version for tail MLU so far, when used in conjunction with CritMat to find
		the cluster HEADS that bound all other TMs in said cluster.

The TE that should pair best with this ToE algorithm is the robust_multi_cluster_traffic_engineer.py
class, since it has the exact same implementation except the TE already knows what the logical topology is
'''
#combines multiple fractional topology into one
class RobustMultiTrafficTopologyEngineerImplementationV3(TopologyEngineer):
	def __init__(self, aurora_network, reconfig_length, training_length, all_paths, all_traffic_snapshots, numK, minimize_multihop=True, return_predicted_mlu=False):
		"""Configure a robust multi-traffic topology engineer.

		aurora_network -- network object queried for block/link parameters
		reconfig_length -- length (in snapshots) of each reconfiguration window
		training_length -- number of preceding snapshots used to train a topology
		all_paths -- candidate inter-block paths, indexed [src][dst]
		all_traffic_snapshots -- the full sequence of traffic matrices
		numK -- number of traffic clusters used when engineering the topology
		minimize_multihop -- when True, run the multihop-minimizing QP afterwards
		return_predicted_mlu -- when True, also return the predicted MLU to callers
		"""
		snapshot_count = len(all_traffic_snapshots)
		TopologyEngineer.__init__(self, aurora_network, snapshot_count)
		self.toe_class = TopologyEngineeringClass.ROBUST_TOE
		self.reconfig_length = reconfig_length
		self.training_length = training_length
		self.numK = numK
		self.num_snapshots = snapshot_count
		self.all_traffic_snapshots = all_traffic_snapshots
		self.all_paths = all_paths
		self.minimize_multihop = minimize_multihop
		self.return_predicted_mlu = return_predicted_mlu
		## NOTE: the parent class initializer assumes a static topology
		##		(see TopologyEngineer), so the cached logical topologies must be
		##		cleared out again here.
		self.cached_logical_topologies = {}

	# NOTE: must be called up front, before any other code queries the cached topologies
	def cache_logical_topologies(self):
		"""Precompute and cache one logical topology per reconfiguration window.

		The initial window [0, training_length - 1] gets a uniform topology;
		every later window is engineered from the training_length snapshots
		immediately preceding it. Keys are (start, end) snapshot-index tuples.
		"""
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		print("Caching logical topologies for robust ToE Implementation V3.....")
		## windows start every reconfig_length snapshots after the training prefix
		for window_start in range(self.training_length, self.num_snapshots, self.reconfig_length):
			window_end = min(window_start + self.reconfig_length - 1, self.num_snapshots - 1)
			history = self.all_traffic_snapshots[window_start - self.training_length : window_start]
			self.cached_logical_topologies[(window_start, window_end)] = self._topology_engineer(history, self.all_paths)
		print("Caching logical topologies: COMPLETE \n")

	def get_filename_param(self):
		"""Return the compact parameter tag used when naming output files."""
		return f"rmtoev3_r{self.reconfig_length}t{self.training_length}k{self.numK}"

	# returns a string description of this class
	def get_string(self):
		"""Return a human-readable description including the cluster count."""
		return f"Robust multi traffic ToE (V3) with K clusters : {self.numK}"

	def is_static(self):
		"""The engineered topology changes every reconfiguration window, so never static."""
		return False

	# given a scale up factor, figure out the routing that minimizes two-hop paths if they exists
	## Todo(jason) : fix this and debug
	def _minimize_multihop(self, traffic_matrices, mlu, all_paths):
		"""At a fixed MLU, solve a QP for a fractional topology and routing weights
		that penalize weight placed on multihop (len(path) > 2) paths.

		traffic_matrices -- list of traffic matrices, each nblocks x nblocks
		mlu -- maximum link utilization bound enforced per link, per TM
		all_paths -- candidate paths indexed [src][dst]; each path is a node sequence
		Returns (adj_matrix, routing_weights); if Gurobi raises, adj_matrix stays
		all-zero and routing_weights stays empty.
		"""
		## using QP to reduce multihop reliance
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(traffic_matrices)
		model = Model("minimize multihop")
		model.setParam( 'OutputFlag', False )
		interpod_link_counts = [None] * nblocks
		routing_weights_var = {}
		# one LinExpr per (tm, src pod, dst pod): accumulates the traffic each
		# directed inter-pod link must carry under the chosen routing weights
		link_capacity_constraints = [None] * num_tm
		for tm_index in range(num_tm):
			link_capacity_constraints[tm_index] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[tm_index][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[tm_index][i][j] = LinExpr()

		## setup the link constraints and all optimization variables, 
		## which are the link counts and routing variables
		objective_function = QuadExpr()
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					# a pair cannot have more links than either endpoint's radix
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_paths[i][j]:
						var = model.addVar(lb=0, ub=1, obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var[path] = var
						routing_weight_sum += var
						# if multihop path, then add to objective function
						if len(path) > 2: 
							objective_function += (var * var)
					# weights for each src-dst pair form a convex combination
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=1)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			# egress and ingress link totals are each capped by the pod's radix
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
		
		## add link capacity limit for all paths constraints
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						for path in all_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							# walk the path edge by edge, charging this pair's
							# weighted demand to every traversed link
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var[path] * traffic_matrices[tm_index][i][j])
								curr_node = next_node
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						# traffic on link (i,j) must fit within mlu * provisioned capacity
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=mlu * interpod_link_counts[i][j] * capacity)

		# set up the objective function
		model.setObjective(objective_function, GRB.MINIMIZE)
		# start optimizing
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		routing_weights = {}
		try: 
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_paths[i][j]:
							routing_weights[path] = routing_weights_var[path].x
			#print adj_matrix
		except GurobiError as e:
			print ("MinimizeMultihop: Error code " + str(e. errno ) + ": " + str(e))
		except AttributeError :
			# .x is only available when the model solved; fall through with defaults
			print ("MinimizeMultihop: Encountered an attribute error ")
		return adj_matrix, routing_weights

	# given an MLU, find the topology/routing weights such that the average hop count in the worst case
	# is minimized. This is an LP formulation that directly minimizes hop count, rather than previously done
	# which is implicit through the use of QP to minimize non-minimal path weights
	def _minimize_multihop_direct(self, traffic_matrices, mlu, all_paths):
		"""Directly minimize the worst-case average hop count (AHC) at a fixed MLU.

		An LP chooses a fractional topology and routing weights subject to the
		same degree and link-capacity constraints as the QP variant, while the
		objective is the maximum over all TMs of the traffic-weighted average
		hop count.

		traffic_matrices -- list of traffic matrices, each nblocks x nblocks
		mlu -- maximum link utilization bound enforced per link, per TM
		all_paths -- candidate paths indexed [src][dst]; each path is a node sequence
		Returns (adj_matrix, routing_weights); if Gurobi raises, adj_matrix stays
		all-zero and routing_weights stays empty.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(traffic_matrices)
		model = Model("minimize multihop directly")
		model.setParam( 'OutputFlag', False )
		interpod_link_counts = [None] * nblocks
		routing_weights_var = {}
		# worst-case average hop count; candidate paths have at most 2 hops, hence ub=2
		max_ahc = model.addVar(lb=1, ub=2, obj=0, vtype=GRB.CONTINUOUS, name="max_ahc")
		# one LinExpr per (tm, src pod, dst pod): traffic carried by that directed link
		link_capacity_constraints = [None] * num_tm
		for tm_index in range(num_tm):
			link_capacity_constraints[tm_index] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[tm_index][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[tm_index][i][j] = LinExpr()

		## setup the link constraints and all optimization variables, 
		## which are the link counts and routing variables
		for i in range(nblocks):
			interpod_link_counts[i] = [None] * nblocks
			for j in range(nblocks):
				if (i != j):
					# a pair cannot have more links than either endpoint's radix
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					interpod_link_counts[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_paths[i][j]:
						routing_weights_var[path] = model.addVar(lb=0, ub=GRB.INFINITY, obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weight_sum += routing_weights_var[path]
					# weights for each src-dst pair form a convex combination
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=1)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			nlinks = self.aurora_network.get_num_links(pod)
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(interpod_link_counts[pod][target_pod], mult=1.)
					col_constraint.add(interpod_link_counts[target_pod][pod], mult=1.)
			# egress and ingress link totals are each capped by the pod's radix
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=nlinks)
		
		## add link capacity limit for all paths constraints
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						for path in all_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							# walk the path edge by edge, charging this pair's
							# weighted demand to every traversed link
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var[path] * traffic_matrices[tm_index][i][j])
								curr_node = next_node
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						# traffic on link (i,j) must fit within mlu * provisioned capacity
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=mlu * interpod_link_counts[i][j] * capacity)

		## maximum average hop count constraint (one per TM)
		for tm_index in range(num_tm):
			hop_count = LinExpr()
			# Total demand of this TM: the fixed normalizer for the weighted hop count.
			# BUGFIX: this sum was previously also incremented per (i, j) entry inside
			# the loop below, double-counting traffic and skewing the per-pair weights.
			traffic_matrix_sum = sum([sum(x) for x in traffic_matrices[tm_index]])
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_paths[i][j]:
							if len(path) > 2:
								# a two-hop path contributes twice its traffic share
								hop_count += ((2. * float(traffic_matrices[tm_index][i][j]) / traffic_matrix_sum) * routing_weights_var[path])
							else:
								hop_count += ((float(traffic_matrices[tm_index][i][j]) / traffic_matrix_sum) * routing_weights_var[path])
			model.addConstr(lhs=hop_count, sense=GRB.LESS_EQUAL, rhs=max_ahc)

		# set up the objective function
		model.setObjective(max_ahc, GRB.MINIMIZE)
		# start optimizing
		adj_matrix = np.zeros( (nblocks, nblocks,) )
		routing_weights = {}
		try: 
			model.optimize()
			print("worst case Average Hop Count : {}".format(max_ahc.x))
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						adj_matrix[i][j] = interpod_link_counts[i][j].x
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_paths[i][j]:
							routing_weights[path] = routing_weights_var[path].x
			#print adj_matrix
		except GurobiError as e:
			print ("MinimizeMultihop: Error code " + str(e.errno) + ": " + str(e))
		except AttributeError :
			# .x is only available when the model solved; fall through with defaults
			print ("MinimizeMultihop: Encountered an attribute error ")
		return adj_matrix, routing_weights

	## could maybe try to have multiple sets of routing weights, each corresponding to a specific TM
	def _maximize_scaleup_for_all_TMs_transformed(self, traffic_matrices, all_inter_block_paths):
		"""Jointly maximize the traffic scale-up factor beta over all TMs.

		Transformed LP: the per-pair routing weights are scaled by beta (each
		src-dst weight sum equals beta rather than 1), so maximizing beta finds
		the largest uniform scale-up every TM can sustain; MLU is then 1/beta.

		traffic_matrices -- list of traffic matrices, each nblocks x nblocks
		all_inter_block_paths -- candidate paths indexed [src][dst]
		Returns (beta, adj_matrix, routing_weights) on success.
		NOTE(review): on GurobiError/AttributeError a bare None is returned,
		but the caller unpacks three values — confirm that failure path is
		acceptable (it would raise a TypeError at the call site).
		"""
		nblocks = self.aurora_network.get_num_blocks()
		num_tm = len(traffic_matrices)
		model = Model("maximize traffic scale up")
		model.setParam( 'OutputFlag', False )
		# beta is the scale-up factor being maximized
		beta = model.addVar(lb=0, ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="beta")
		fractional_topology_var = [None] * nblocks
		# "hat" weights: routing weights pre-multiplied by beta (the LP transform)
		routing_weights_var_hat = {}
		# one LinExpr per (tm, src pod, dst pod): traffic carried by that directed link
		link_capacity_constraints = [None] * num_tm
		for m in range(num_tm):
			link_capacity_constraints[m] = [None] * nblocks
			for i in range(nblocks):
				link_capacity_constraints[m][i] = [None] * nblocks
				for j in range(nblocks):
					if i != j:
						link_capacity_constraints[m][i][j] = LinExpr()
		## setup the link constraints and all optimization variables, 
		## which are the link counts and routing variables
		for i in range(nblocks):
			fractional_topology_var[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					# a pair cannot have more links than either endpoint's radix
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					fractional_topology_var[i][j] = model.addVar(lb=0, ub=upper_bound, obj=0, vtype=GRB.CONTINUOUS, name="lc" + str(i) + ":" + str(j))
					routing_weight_sum = LinExpr()
					for path in all_inter_block_paths[i][j]:
						var = model.addVar(lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						routing_weights_var_hat[path] = var
						routing_weight_sum += var
					# scaled weights sum to beta instead of 1 (transformed constraint)
					model.addConstr(lhs=routing_weight_sum, sense=GRB.EQUAL, rhs=beta)

		## add radix degree constraints
		for pod in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_constraint.add(fractional_topology_var[pod][target_pod], mult=1.)
					col_constraint.add(fractional_topology_var[target_pod][pod], mult=1.)
			# egress and ingress link totals are each capped by the pod's radix
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=float(self.aurora_network.get_num_links(pod)))

		## add achievability of flows for all i j pairs
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							path_len = len(path)
							curr_node = path[0]
							# walk the path edge by edge, charging this pair's
							# weighted demand to every traversed link
							for next_node_index in range(1, path_len, 1):
								next_node = path[next_node_index]
								link_capacity_constraints[tm_index][curr_node][next_node] += (routing_weights_var_hat[path] * traffic_matrices[tm_index][i][j])
								curr_node = next_node
		
		## add link capacity limit for all paths constraints
		for tm_index in range(num_tm):
			for i in range(nblocks):
				for j in range(nblocks):
					if (i != j):
						capacity = min(self.aurora_network.get_link_capacity(i), self.aurora_network.get_link_capacity(j))
						# traffic on link (i,j) must fit within the provisioned capacity
						model.addConstr(lhs=link_capacity_constraints[tm_index][i][j], sense=GRB.LESS_EQUAL, rhs=fractional_topology_var[i][j] * capacity)

		# set up the objective function
		model.setObjective(beta, GRB.MAXIMIZE)
		# start optimizing
		try: 
			model.optimize()
			# NOTE(review): raises ZeroDivisionError if beta solves to 0 — not
			# caught by the handlers below; confirm beta > 0 is always feasible
			alpha = 1. / beta.x
			print("Worst case scale up : {}".format(alpha))
			
			routing_weights = {}
			# undo the transform: true routing weights = hat weights * (1 / beta)
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						for path in all_inter_block_paths[i][j]:
							routing_weights[path] = routing_weights_var_hat[path].x * alpha
			adj_matrix = np.zeros((nblocks, nblocks))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] = fractional_topology_var[i][j].x
			return beta.x, adj_matrix, routing_weights # returns the max scale up factor
		except GurobiError as e:
			print ("MaximizeTrafficScaleUp: Error code " + str(e. errno ) + ": " + str(e))
			return None
		except AttributeError :
			print ("MaximizeTrafficScaleUp: Encountered an attribute error ")
			return None

	# conducts topology engineering given the representative traffic matrices
	# aurora network - Aurora Network, contains all the parameters
	# training_traffic_snapshots - all the snapshots used for training the topology
	# number of k - which is the number of traffic clusters we use to solve engineer topology
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths):
		"""Engineer a single topology (and routing weights) covering all representative TMs.

		representative_tms -- the cluster-head traffic matrices to cover jointly
		all_paths -- candidate inter-block paths indexed [src][dst]
		Returns (adj_matrix, routing_weights), plus the predicted MLU (1/beta)
		as a third element when self.return_predicted_mlu is True.
		"""
		# first get number of blocks and number of links and link capacity
		nblocks = self.aurora_network.get_num_blocks()
		# NOTE(review): this zero matrix is only overwritten when minimize_multihop
		#		is True; otherwise the zeros are fed to progressive_filling below —
		#		confirm that is intended rather than using adj_matrix_backup.
		adj_matrix = np.zeros((nblocks, nblocks))
		#assert(self.numK == len(representative_tms))
		# NOTE(review): the scale-up solver returns a bare None on failure, which
		#		would make this 3-way unpack raise TypeError — confirm handled upstream.
		beta_value, adj_matrix_backup, routing_weights = self._maximize_scaleup_for_all_TMs_transformed(representative_tms, all_paths)
		print("beta : {}, mlu : {}".format(beta_value, 1./beta_value))
		if self.minimize_multihop:
			# re-solve at the achieved MLU (1/beta) while penalizing multihop paths
			adj_matrix, routing_weights = self._minimize_multihop(representative_tms, 1./beta_value, all_paths)
			#adj_matrix_backup, routing_weights = self._minimize_multihop_direct(representative_tms, 1./beta_value, all_paths)
		#adj_matrix = self._reinforce_fractional_topology(adj_matrix_backup)
		# presumably converts the fractional topology into a feasible one (defined
		# in the parent class) — falls back to the LP solution when it returns None
		adj_matrix = self.progressive_filling(adj_matrix)
		if adj_matrix is None:
			adj_matrix = adj_matrix_backup
		if self.return_predicted_mlu:
			return adj_matrix, routing_weights, 1./beta_value
		return adj_matrix, routing_weights