import sys, os, copy, math
from gurobipy import *
import numpy as np
from sets import Set
sys.path.append("..")
#import aurora_network
from topology_engineering_class import *

# Base class for topology. Essentially generates a uniform topology
# Base class for topology engineering. Essentially generates a single uniform
# logical topology that is reused for every snapshot (i.e. no reconfiguration).
class TopologyEngineer(object):
	"""Static/uniform topology engineer (the no-ToE baseline).

	A logical topology is an nblocks x nblocks adjacency matrix (2D numpy
	array) of fractional link counts between blocks. This base class computes
	one uniform topology at construction time and serves it for the whole
	interval [0, num_snapshots - 1]; subclasses override the engineering
	entry points to produce time-varying topologies.
	"""

	# takes in a num_snapshots argument so that it will know what is the logical
	# topology based on the snapshot used
	def __init__(self, aurora_network, num_snapshots):
		self.aurora_network = aurora_network
		self.training_length = 0
		self.reconfig_length = num_snapshots
		self.num_snapshots = num_snapshots
		# Maps inclusive (start_time, end_time) intervals -> adjacency matrix.
		self.cached_logical_topologies = {}
		self.toe_class = TopologyEngineeringClass.STATIC_UNIFORM
		# The static baseline uses one topology covering the full horizon.
		self.cached_logical_topologies[(0, num_snapshots - 1)] = self._generate_uniform_logical_topology()

	def get_filename_param(self):
		# Short tag used when composing output filenames.
		return "static"

	def topology_engineering_class(self):
		return self.toe_class

	def is_static(self):
		# The base class never reconfigures.
		return True

	# returns a string description of this class
	def get_string(self):
		return "Static topology (i.e. no ToE)"

	def get_topology_intervals(self):
		# All cached (start, end) intervals, ordered by starting timeslot.
		return sorted(self.cached_logical_topologies.keys(), key=lambda interval: interval[0])

	def get_training_length(self):
		return self.training_length

	def get_reconfig_length(self):
		return self.reconfig_length

	def _generate_uniform_logical_topology(self):
		"""Spread each block's links evenly across all other blocks.

		Entry (i, j) is min(links_i, links_j) / (nblocks - 1) minus one
		link: a directed pair's share is limited by its less-provisioned
		endpoint. NOTE(review): the trailing `-= 1.` appears to reserve
		one link per directed pair -- confirm against the downstream
		reinforcement/filling steps before changing it.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		adj_matrix = np.zeros((nblocks, nblocks,))
		for i in range(nblocks):
			src_num_links = self.aurora_network.get_num_links(i)
			for j in range(nblocks):
				if i == j:
					continue
				dst_num_links = self.aurora_network.get_num_links(j)
				adj_matrix[i][j] = min(float(src_num_links) / (nblocks - 1), float(dst_num_links) / (nblocks - 1))
				adj_matrix[i][j] -= 1.
		return adj_matrix

	# get the logical topology at time index t. Logical topology is represented
	# as an adjacency matrix (i.e. 2D numpy array). Returns None when no cached
	# interval covers time_index.
	def get_logical_topology_at_time(self, time_index):
		assert(time_index < self.num_snapshots)
		for (start_time, end_time), topology in self.cached_logical_topologies.items():
			if start_time <= time_index <= end_time:
				return topology
		return None

	# returns a sorted array of starting times, one per cached topology interval
	def get_topology_starting_times(self):
		return sorted(start_time for (start_time, _) in self.cached_logical_topologies.keys())

	def _reinforce_fractional_topology(self, adj_matrix):
		"""Top up adj_matrix using each block's leftover link budget.

		Solves a QP that adds fractional links rlc_ij on top of adj_matrix,
		bounded by each block's leftover egress/ingress budget, minimizing
		sum((upper_bound_ij - rlc_ij)^2) -- i.e. pushing every pair as close
		as possible to its per-pair cap. Returns the reinforced matrix
		(adj_matrix is mutated in place), or None on a Gurobi failure.
		"""
		model = Model("Reinforce FractionalTopology")
		model.setParam('OutputFlag', False)
		nblocks = self.aurora_network.get_num_blocks()
		leftover_incoming = [0.] * nblocks
		leftover_outgoing = [0.] * nblocks
		reinforced_links = [None] * nblocks
		objective_function = QuadExpr()
		for i in range(nblocks):
			reinforced_links[i] = [None] * nblocks
			row_sum = 0.
			col_sum = 0.
			for j in range(nblocks):
				if i != j:
					# A directed pair can never carry more links than its
					# less-provisioned endpoint owns.
					upper_bound = min(self.aurora_network.get_num_links(i), self.aurora_network.get_num_links(j))
					reinforced_links[i][j] = model.addVar(lb=0., ub=upper_bound, obj=0., vtype=GRB.CONTINUOUS, name="rlc" + str(i) + "_" + str(j))
					diff = upper_bound - reinforced_links[i][j]
					objective_function.add(diff * diff, mult=1.)
					row_sum += adj_matrix[i][j]
					col_sum += adj_matrix[j][i]
			# Clamp at zero: fractional rounding can push the consumed budget
			# slightly past the physical link count.
			leftover_outgoing[i] = max(0, self.aurora_network.get_num_links(i) - row_sum)
			leftover_incoming[i] = max(0, self.aurora_network.get_num_links(i) - col_sum)
		for i in range(nblocks):
			row_constraint = LinExpr()
			col_constraint = LinExpr()
			for j in range(nblocks):
				if i != j:
					row_constraint.add(reinforced_links[i][j], mult=1.)
					col_constraint.add(reinforced_links[j][i], mult=1.)
			model.addConstr(lhs=row_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_outgoing[i])
			model.addConstr(lhs=col_constraint, sense=GRB.LESS_EQUAL, rhs=leftover_incoming[i])
		model.setObjective(objective_function, GRB.MINIMIZE)
		try:
			model.optimize()
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j:
						adj_matrix[i][j] += reinforced_links[i][j].x
			return adj_matrix
		except GurobiError as e:
			print("ReinforceFractionalTopology: Error code " + str(e.errno) + ": " + str(e))
		except AttributeError:
			print("ReinforceFractionalTopology: Encountered an attribute error ")
		return None

	def __approx_equal(self, val1, val2):
		# Float comparison with an absolute tolerance of 1e-5.
		return abs(val1 - val2) <= 1E-5

	def __scale_up_matrix(self, matrix, valid_pairs, outgoing_links, outgoing_links_available, incoming_links, incoming_links_available):
		"""Uniformly scale the still-growable entries of `matrix`.

		Finds the largest alpha such that multiplying every entry (i, j)
		with spare egress at i and spare ingress at j by alpha keeps each
		block within its physical link count; saturated entries contribute
		fixed amounts. Returns a scaled deep copy of `matrix`, or None on a
		Gurobi failure. `outgoing_links` / `incoming_links` are currently
		unused but kept for interface stability.
		"""
		if len(valid_pairs) == 0:
			return matrix
		nblocks = self.aurora_network.get_num_blocks()

		block_outgoing_constraints = {}
		block_incoming_constraints = {}
		for pod in range(nblocks):
			block_outgoing_constraints[pod] = LinExpr()
			block_incoming_constraints[pod] = LinExpr()

		model = Model("Progressive Filling")
		model.setParam('OutputFlag', False)
		alpha = model.addVar(lb=0, ub=GRB.INFINITY, obj=1, vtype=GRB.CONTINUOUS, name="scale_up")
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					if outgoing_links_available[i] and incoming_links_available[j]:
						# Growable entry: scales with alpha.
						block_outgoing_constraints[i] += (matrix[i][j] * alpha)
						block_incoming_constraints[j] += (matrix[i][j] * alpha)
					else:
						# Saturated entry: fixed contribution.
						block_outgoing_constraints[i] += (matrix[i][j])
						block_incoming_constraints[j] += (matrix[i][j])

		## The scaled-up matrix must still respect each block's total egress
		## and ingress link counts.
		for pod in range(nblocks):
			num_links = self.aurora_network.get_num_links(pod)
			if outgoing_links_available[pod]:
				model.addConstr(lhs=block_outgoing_constraints[pod], sense=GRB.LESS_EQUAL, rhs=num_links)
			if incoming_links_available[pod]:
				model.addConstr(lhs=block_incoming_constraints[pod], sense=GRB.LESS_EQUAL, rhs=num_links)

		model.setObjective(alpha, GRB.MAXIMIZE)
		try:
			model.optimize()
			adj_matrix = copy.deepcopy(matrix)
			multiplier = alpha.x
			# Progressive filling must only ever grow the matrix.
			assert(multiplier >= 1)
			for i, j in valid_pairs:
				adj_matrix[i][j] = adj_matrix[i][j] * multiplier
			return adj_matrix
		except GurobiError as e:
			print("ScalingUpMatrix: Error code " + str(e.errno) + ": " + str(e))
		except AttributeError:
			print("ScalingUpMatrix: Encountered an attribute error ")
		return None

	def progressive_filling(self, current_adj_matrix):
		"""Water-fill current_adj_matrix up to the physical link budgets.

		Repeatedly scales up every (src, dst) entry whose endpoints both
		have spare capacity until no such pair remains (or a solver call
		fails). The result dominates current_adj_matrix entrywise.
		"""
		nblocks = self.aurora_network.get_num_blocks()
		## Work on a private copy so the caller's matrix is untouched.
		adj_matrix = copy.deepcopy(current_adj_matrix)
		outgoing_links_num = [0] * nblocks
		incoming_links_num = [0] * nblocks
		for pod in range(nblocks):
			for target_pod in range(nblocks):
				if target_pod != pod:
					outgoing_links_num[pod] += current_adj_matrix[pod][target_pod]
					incoming_links_num[pod] += current_adj_matrix[target_pod][pod]

		while True:
			outgoing_links = [0] * nblocks
			incoming_links = [0] * nblocks
			outgoing_links_available = [True] * nblocks
			incoming_links_available = [True] * nblocks
			for pod in range(nblocks):
				for target_pod in range(nblocks):
					if target_pod != pod:
						outgoing_links[pod] += adj_matrix[pod][target_pod]
						incoming_links[pod] += adj_matrix[target_pod][pod]
				# A block is saturated once its egress/ingress usage reaches
				# its physical link count (within tolerance).
				if self.__approx_equal(outgoing_links[pod], self.aurora_network.get_num_links(pod)):
					outgoing_links_available[pod] = False
				if self.__approx_equal(incoming_links[pod], self.aurora_network.get_num_links(pod)):
					incoming_links_available[pod] = False
			# Builtin set replaces the deprecated Python 2 `sets.Set`
			# (removed in Python 3).
			valid_pairs = set()
			for i in range(nblocks):
				for j in range(nblocks):
					if i != j and outgoing_links_available[i] and incoming_links_available[j]:
						valid_pairs.add((i, j))
			if len(valid_pairs) == 0:
				break
			adj_matrix_tmp = self.__scale_up_matrix(adj_matrix, valid_pairs, outgoing_links, outgoing_links_available, incoming_links, incoming_links_available)
			## A solver failure returns None; keep the best matrix so far.
			if adj_matrix_tmp is None:
				break
			adj_matrix = adj_matrix_tmp
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					assert(adj_matrix[i][j] >= current_adj_matrix[i][j])
		return adj_matrix

	# conducts topology engineering given the representative traffic matrices.
	# representative_tms - representative traffic clusters used to engineer
	#                      the topology (ignored by the static baseline)
	# all_paths - candidate paths (ignored by the static baseline)
	# Returns (uniform adjacency matrix, None) -- no auxiliary routing info.
	def topology_engineer_given_representative_TMs(self, representative_tms, all_paths):
		return self._generate_uniform_logical_topology(), None
