import sys, os, copy, math
from topology_engineer import *
sys.path.append("..")
#import aurora_network
from gurobipy import *
'''		
Design fractional topology based on 
traffic clusters using LP formulation
'''
## Provides a weaker guarantee using the Cauchy–Schwarz inequality. Should perform better than the strong Bounded Topology Engineer
## if the traffic matrices are within the bounded hull
class BoundedTopologyEngineerWeak(TopologyEngineer):
	"""Design a fractional topology from traffic clusters via an LP formulation.

	Uses a Cauchy-Schwarz style relaxation, which gives a weaker robustness
	guarantee than the strong bounded topology engineer but should perform
	better when the observed traffic matrices stay within the bounded hull.
	"""

	def __init__(self, aurora_network, reconfig_length, training_length, all_paths, all_traffic_snapshots, numK):
		# aurora_network        - Aurora network model (blocks, links, capacities)
		# reconfig_length       - number of snapshots between topology reconfigurations
		# training_length       - number of trailing snapshots used to train each topology
		# all_paths             - candidate routing paths between block pairs
		# all_traffic_snapshots - full timeline of traffic snapshots
		# numK                  - number of traffic clusters (representative TMs)
		num_snapshots = len(all_traffic_snapshots)
		TopologyEngineer.__init__(self, aurora_network, num_snapshots)
		self.toe_class = TopologyEngineeringClass.ROBUST_TOE
		self.training_length = training_length
		self.reconfig_length = reconfig_length
		self.numK = numK
		self.num_snapshots = num_snapshots
		self.all_traffic_snapshots = all_traffic_snapshots
		self.all_paths = all_paths
		## NOTE: have to rezero the cached logical topologies here because when we initialize the parent class,
		## 		it will assume that the topology is static (see TopologyEngineer class above).
		self.cached_logical_topologies = {}

	# must be called before running other code
	def cache_logical_topologies(self):
		"""Precompute and cache the logical topology for every reconfiguration window."""
		# The first training_length snapshots have no history to train on yet,
		# so fall back to a uniform topology for that initial window.
		self.cached_logical_topologies[(0, self.training_length - 1)] = self._generate_uniform_logical_topology()
		## Start computing the logical topologies
		current_time = self.training_length
		print("Caching logical topologies for robust ToE Implementation V2.......")
		while current_time < self.num_snapshots:
			# Each cached entry covers [current_time, ending_time], clipped to the timeline.
			ending_time = min(current_time + self.reconfig_length - 1, self.num_snapshots - 1)
			# Train on the trailing window of snapshots immediately preceding this epoch.
			training_snapshots = self.all_traffic_snapshots[current_time - self.training_length : current_time]
			adj_matrix = self._topology_engineer(training_snapshots, self.all_paths)
			self.cached_logical_topologies[(current_time, ending_time)] = adj_matrix
			current_time += self.reconfig_length
		print("Caching logical topologies: COMPLETE \n")
		return

	def get_filename_param(self):
		# Short tag used when composing result file names.
		return "bwcmptoeweak_r{}t{}".format(self.reconfig_length, self.training_length)

	# returns a string description of this class
	def get_string(self):
		return "Bounded ToE based ToE : {}".format(self.numK)

	def is_static(self):
		# The topology is recomputed every reconfiguration window.
		return False

	# conducts topology engineering
	# training_traffic_snapshots - all the snapshots used for training the topology
	# all_paths - candidate routing paths between block pairs
	# spare_traffic - fraction of total demand spread uniformly over near-zero
	#                 entries so no block pair is left completely unprovisioned
	# returns the reinforced fractional adjacency matrix
	def _topology_engineer(self, training_traffic_snapshots, all_paths, spare_traffic=0.1):
		# first get number of blocks and number of links and link capacity
		nblocks = self.aurora_network.get_num_blocks()
		# first, run k means clustering and get the clusters, and normalize to 1
		traffic_points = np.array([self.aurora_network.get_traffic_vector_from_traffic_snapshot(x, normalize_to=1.) for x in training_traffic_snapshots])
		# collect the k traffic matrices (cluster centroids)
		traffic_cluster_centroid_points = self._compute_clusters(traffic_points, run_pca_before_clustering=False)
		scaled_traffic_matrices = []
		for topol_id in range(self.numK):
			traffic_matrix = self.aurora_network.transform_traffic_point_to_traffic_matrix(traffic_cluster_centroid_points[topol_id])

			## Robustness padding: redistribute a spare_traffic fraction of the
			## total demand uniformly across the near-zero off-diagonal entries.
			traffic_sum = sum([sum(x) for x in traffic_matrix])
			reset_num_entries = 0
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						if traffic_matrix[i][j] < 0.0001:
							reset_num_entries += 1
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						if traffic_matrix[i][j] < 0.0001:
							# This branch only runs when reset_num_entries >= 1,
							# so the division below cannot be by zero.
							traffic_matrix[i][j] = (spare_traffic * traffic_sum) / (1 - spare_traffic) / float(reset_num_entries)

			# Scale each centroid TM up to its maximum admissible load.
			mu = self._compute_maximum_traffic_scaleup(traffic_matrix, all_paths)
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						traffic_matrix[i][j] *= mu
			scaled_traffic_matrices.append(traffic_matrix)
		# Find one common scale-up factor feasible for all k matrices, then build
		# the topology minimizing multi-hop traffic under that load.
		scale_up = self._maximize_scaleup_factor_for_all_traffic_matrices(scaled_traffic_matrices, all_paths)
		adj_matrix = self._minimize_multihop(scaled_traffic_matrices, [scale_up] * len(scaled_traffic_matrices), all_paths)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix

	def __normalize_routing_weights(self, nblocks, unnormalized_routing_weights):
		"""Normalize per-(src, dst) path weights so each pair's weights sum to 1.

		NOTE(review): if every path weight for some (src, dst) pair is zero the
		division below divides by zero; callers are assumed to supply at least
		one positive weight per pair -- verify against callers.
		"""
		normalized_routing_weights = {}
		weight_sum = np.zeros((nblocks, nblocks,))
		# Paths are keyed by their node sequence; endpoints identify the pair.
		for path in unnormalized_routing_weights:
			src = path[0]
			dst = path[-1]
			weight_sum[src][dst] += unnormalized_routing_weights[path]
			normalized_routing_weights[path] = unnormalized_routing_weights[path]
		for path in unnormalized_routing_weights:
			src = path[0]
			dst = path[-1]
			normalized_routing_weights[path] /= weight_sum[src][dst]
		return normalized_routing_weights

	# conducts topology engineering given the representative traffic matrices
	# representative_tms - list of numK representative traffic matrices
	# all_paths - candidate routing paths between block pairs
	# minimize_multihop, scale_up_tm - currently unused; kept for interface compatibility
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths, minimize_multihop=True, scale_up_tm=False):
		nblocks = self.aurora_network.get_num_blocks()
		assert(self.numK == len(representative_tms))
		# NOTE(review): assumes all links share the capacity of link 0 -- confirm.
		link_capacity = self.aurora_network.get_link_capacity(0)
		adj_matrix = self._bounded_wcmp_path_weights(nblocks, link_capacity, representative_tms)
		adj_matrix = self._scale_up_matrix(adj_matrix)
		adj_matrix = self._reinforce_fractional_topology(adj_matrix)
		return adj_matrix

	def _scale_up_matrix(self, adj_matrix):
		"""Uniformly scale adj_matrix by the largest factor keeping every pod's
		egress (row sum) and ingress (column sum) within its link count."""
		nblocks = self.aurora_network.get_num_blocks()
		scaled_up_matrix = np.zeros((nblocks, nblocks))
		# Fall back to no scaling if the LP fails for any reason.
		scale_up = 1.
		model = Model("Bounded WCMP ToE Scale up process")
		model.setParam('OutputFlag', False)
		mu = model.addVar(lb=0., ub=GRB.INFINITY, obj=1, vtype=GRB.CONTINUOUS, name="scale up")
		## Step 1 : add the row sum and col sum constraints
		for pod in range(nblocks):
			pod_num_links = self.aurora_network.get_num_links(pod)
			row_sum_constraint = 0
			col_sum_constraint = 0
			for target_pod in range(nblocks):
				if target_pod != pod:
					row_sum_constraint += adj_matrix[pod][target_pod]
					col_sum_constraint += adj_matrix[target_pod][pod]
			model.addConstr(lhs=row_sum_constraint * mu, sense=GRB.LESS_EQUAL, rhs=pod_num_links)
			model.addConstr(lhs=col_sum_constraint * mu, sense=GRB.LESS_EQUAL, rhs=pod_num_links)
		model.setObjective(mu, GRB.MAXIMIZE)
		try:
			model.optimize()
			scale_up = mu.x
			print("Bounded ToE scale up : {}".format(scale_up))
		except GurobiError as e:
			print("Bounded ToE WCMP Scale up- Error code " + str(e.errno) + ": " + str(e))
		except AttributeError:
			# mu.x raises AttributeError when no solution is available.
			print("Bounded ToE WCMP Scale up- Encountered an attribute error ")
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					scaled_up_matrix[i][j] = adj_matrix[i][j] * scale_up
		print("Printing scaled up matrix of topology: \n{}".format(scaled_up_matrix))
		return scaled_up_matrix

	def _bounded_wcmp_path_weights(self, nblocks, link_capacity, traffic_matrices):
		"""Solve the weak bounded-WCMP LP and return the per-pair utilization matrix.

		Minimizes the maximum link utilization (mlu) over the hull described by
		per-pod egress/ingress bounds, per-pair maxima and per-pair variances of
		the given traffic matrices. Returns None if the LP fails.
		"""
		egress_bound = [0] * nblocks
		ingress_bound = [0] * nblocks
		## Step 1 : compute the pod egress and ingress bounds
		max_tm = np.zeros((nblocks, nblocks,))
		for tm in traffic_matrices:
			for pod in range(nblocks):
				pod_egress = sum(tm[pod])
				pod_ingress = 0
				for target_pod in range(nblocks):
					pod_ingress += tm[target_pod][pod]
					max_tm[pod][target_pod] = max(max_tm[pod][target_pod], tm[pod][target_pod])
				egress_bound[pod] = max(egress_bound[pod], pod_egress)
				ingress_bound[pod] = max(ingress_bound[pod], pod_ingress)
		var_ij_matrix = self.__find_variance(traffic_matrices)
		# Step 2 : Initialize the optimization variables
		model = Model("Bounded WCMP ToE Weak")
		model.setParam('OutputFlag', False)
		mlu = model.addVar(lb=0., ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="mlu")
		# One weight per pod plus one extra (index 0) direct-path weight.
		routing_weights_var = [None] * (1 + nblocks)
		for index in range(len(routing_weights_var)):
			routing_weights_var[index] = model.addVar(lb=0., ub=1, obj=0, vtype=GRB.CONTINUOUS, name="w_{}".format(index))
		w = model.addVar(lb=0., ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="w")
		## Step 3 : Start programming the constraints
		## Constraint Type 1 : weights must sum to w
		routing_weight_sum_constr = LinExpr()
		for index in range(len(routing_weights_var)):
			routing_weight_sum_constr += routing_weights_var[index]
		model.addConstr(lhs=routing_weight_sum_constr, sense=GRB.EQUAL, rhs=w)

		## Constraint Type 1.5 : the (w + ...)^+ = max(0, w) type constraints,
		## linearized via auxiliary variables z_ij >= |absl_eqn|.
		z_vars = [None] * nblocks
		for i in range(nblocks):
			z_vars[i] = [None] * nblocks
			for j in range(nblocks):
				if i != j:
					## TODO (jason) : the constraints bound has to be paid attention to, especially the lower bound
					# BUGFIX: the format string previously had a single placeholder
					# ("z_{}") so all z variables in row i shared the name "z_i".
					z_vars[i][j] = model.addVar(lb=0., ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="z_{}_{}".format(i, j))

		## Constraint Type 2 : mlu constraint
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					Zi = egress_bound[i]
					Zj = ingress_bound[j]
					Uij = max_tm[i][j]
					wi = routing_weights_var[i + 1]
					wj = routing_weights_var[j + 1]
					mlu_constraint = Zi * wj + Zj * wi
					# term = Uij / var_ij; degenerate variance cases handled explicitly.
					term = Uij
					if var_ij_matrix[i][j] == 0:
						if Uij == 0:
							term = 0
						else:
							term = GRB.INFINITY
					else:
						term /= var_ij_matrix[i][j]
					absl_eqn = w * term - Uij
					neg_absl_eqn = Uij - w * term
					# max(0, -absl_eqn) rewritten as -0.5 * (absl_eqn - |absl_eqn|).
					mlu_constraint += -0.5 * (absl_eqn - z_vars[i][j])
					model.addConstr(lhs=mlu_constraint, sense=GRB.LESS_EQUAL, rhs=mlu * link_capacity)
					## z_vars constraints: z_ij >= |absl_eqn|
					model.addConstr(lhs=absl_eqn, sense=GRB.LESS_EQUAL, rhs=z_vars[i][j])
					model.addConstr(lhs=neg_absl_eqn, sense=GRB.LESS_EQUAL, rhs=z_vars[i][j])

		## Step 4 : insert the objective function
		model.setObjective(mlu, GRB.MINIMIZE)
		try:
			model.optimize()
			# Recover the per-block-pair utilization from the optimal weights.
			block_pair_utilization = np.zeros((nblocks, nblocks))
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						Zi = egress_bound[i]
						Zj = ingress_bound[j]
						Uij = max_tm[i][j]
						term = Uij
						if var_ij_matrix[i][j] == 0:
							if Uij == 0:
								term = 0
							else:
								term = GRB.INFINITY
						else:
							term /= var_ij_matrix[i][j]
						direct_utilization = max(Uij - w.x * term, 0) + routing_weights_var[i + 1].x * Zj + routing_weights_var[j + 1].x * Zi
						block_pair_utilization[i][j] = direct_utilization
			return block_pair_utilization
		except GurobiError as e:
			print("Bounded ToE WCMP Weaker - Error code " + str(e.errno) + ": " + str(e))
		except AttributeError:
			# Accessing .x raises AttributeError when no solution is available.
			print("Bounded ToE WCMP Weaker - Encountered an attribute error ")
		return None

	## finds the variance of each traffic matrix's i,j entry along the time domain
	def __find_variance(self, traffic_matrices):
		nblocks = self.aurora_network.get_num_blocks()
		var_ij_matrix = np.zeros((nblocks, nblocks))
		num_snapshots = len(traffic_matrices)
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					# Variance of entry (i, j) across the snapshot time series.
					timeseries = [0] * num_snapshots
					for snapshot_index in range(num_snapshots):
						timeseries[snapshot_index] = traffic_matrices[snapshot_index][i][j]
					var_ij = np.var(timeseries)
					var_ij_matrix[i][j] = var_ij

		print(var_ij_matrix)
		return var_ij_matrix

