from gurobipy import *
from itertools import permutations
import matplotlib as mpl
import matplotlib.pyplot as plt
from topology_engineering import *
import numpy as np
from aurora_network import *
import random


def generate_random_dense_topology(pod_level_aurora, nnodes, nlinks_per_node):
	"""Build a random dense logical topology over nnodes blocks.

	Draws a random traffic matrix (zero diagonal), hands it to the project's
	DirectPathTopologyEngineer to maximize traffic scale-up, and returns the
	progressively-filled adjacency matrix. Requires enough links per node to
	reach every other node directly.
	"""
	assert(nlinks_per_node >= nnodes - 1)
	demand_rows = []
	for src in range(nnodes):
		row = np.random.rand(nnodes)
		row[src] = 0  # no self-directed demand
		demand_rows.append(row)
	engineer = direct_path_topology_engineer.DirectPathTopologyEngineer(pod_level_aurora, 1, 1)
	adjacency, _ = engineer._maximize_traffic_scaleup(nnodes, [demand_rows,])
	return engineer.progressive_filling(adjacency)

def generate_random_sparse_topology(nnodes, nlinks_per_node):
	"""Build a random sparse, symmetric logical topology.

	Each node is wired to at most nlinks_per_node randomly chosen peers,
	with at most one link per node pair. Returns an nnodes x nnodes numpy
	adjacency matrix (symmetric, zero diagonal, entries in {0, 1}).
	"""
	assert(nlinks_per_node < nnodes)
	logical_topology = np.zeros((nnodes, nnodes))
	node_connected_links = [0] * nnodes
	for i in range(nnodes):
		# Materialize the range: random.shuffle needs a mutable sequence
		# (shuffling a bare range object raises TypeError on Python 3).
		target_nodes = list(range(nnodes))
		random.shuffle(target_nodes)
		offset = 0
		while offset < nnodes:
			if node_connected_links[i] >= nlinks_per_node:
				break
			j = target_nodes[offset]
			# Connect only distinct nodes with spare link budget, one link per pair.
			if j != i and node_connected_links[j] < nlinks_per_node and logical_topology[i][j] < 1:
				logical_topology[i][j] += 1
				logical_topology[j][i] += 1
				node_connected_links[i] += 1
				node_connected_links[j] += 1
			offset += 1
	return logical_topology



def generate_onehot_traffic_matrix(nblocks, src, dst):
	"""Return an nblocks x nblocks matrix carrying a single unit demand src -> dst."""
	assert(src != dst)
	matrix = np.zeros((nblocks, nblocks))
	matrix[src][dst] = 1.
	return matrix

def get_paths_with_len(nblocks, src, dst, path_len):
	"""Enumerate all simple src -> dst paths with exactly path_len hops.

	A path is a tuple of block ids starting at src and ending at dst;
	intermediate nodes are every (path_len - 1)-permutation of the
	remaining blocks. For path_len == 1 this is just the direct hop.
	"""
	# path_len == 1: only the direct path exists. (The original appended to a
	# scratch list and then returned a fresh one; the append was dead code.)
	if path_len == 1:
		return [(src, dst)]
	intermediate_blocks = [i for i in range(nblocks) if i != src and i != dst]
	paths = []
	for permutation in permutations(intermediate_blocks, path_len - 1):
		paths.append(tuple([src] + list(permutation) + [dst]))
	return paths

def get_all_paths(nblocks, max_path_len):
	"""Build the per-pair path set: path_set[src][dst] lists every src -> dst
	path of 1..max_path_len hops; diagonal entries stay None."""
	path_set = [[None] * nblocks for _ in range(nblocks)]
	for src in range(nblocks):
		for dst in range(nblocks):
			if src == dst:
				continue
			collected = []
			for hops in range(1, max_path_len + 1, 1):
				collected += get_paths_with_len(nblocks, src, dst, hops)
			path_set[src][dst] = collected
	return path_set

def compute_one_hot_tput(nblocks, link_capacity_matrix, src, dst, all_paths):
	"""Solve an LP that routes one unit of src -> dst demand over all_paths
	so as to minimize the maximum link utilization (MLU), then report the
	achievable scale-up (1 / MLU).

	Args:
		nblocks: number of blocks (nodes) in the topology.
		link_capacity_matrix: nblocks x nblocks capacities; entry [i][j] is
			the capacity of the directed link i -> j.
		src, dst: the single traffic pair being routed.
		all_paths: per-pair path lists as produced by get_all_paths().

	Returns:
		1.0 / optimal MLU on success, or None if Gurobi raises (the error is
		printed and swallowed).
	"""
	model = Model("Routing minimize MLU for traffic matrix")
	model.setParam( 'OutputFlag', False )
	# mlu is the objective variable: an upper bound on every link's utilization.
	mlu = model.addVar(lb=0., ub=GRB.INFINITY, obj=1., vtype=GRB.CONTINUOUS, name="mlu")
	routing_weight_vars = {}
	# link_capacity_constraints[i][j] accumulates the total flow placed on link i -> j.
	link_capacity_constraints = [None] * nblocks
	for i in range(nblocks):
		link_capacity_constraints[i] = [None] * nblocks
		for j in range(nblocks):
			if i != j:
				link_capacity_constraints[i][j] = LinExpr()
				if i == src and j == dst:
					# One routing-weight variable per candidate path; the
					# weights must sum to 1 so the full demand is carried.
					weights_sum_constraint = LinExpr()
					for path in all_paths[i][j]:
						routing_weight_vars[path] = model.addVar(lb=0., ub=1., obj=0., vtype=GRB.CONTINUOUS, name="w_{}".format(path))
						weights_sum_constraint += routing_weight_vars[path]
					model.addConstr(lhs=weights_sum_constraint, sense=GRB.EQUAL, rhs=1.)
	# stage 1: setup all the optimization variables that are the routing weights
	# stage 1.5 : also adds the traffic flow satisfiability constraints
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j and i == src and j == dst:
				for path in all_paths[i][j]:
					# NOTE(review): these two lines rebind the src/dst
					# parameters. It looks harmless because the enclosing
					# condition already forces path[0] == src and
					# path[-1] == dst for every path in this list, so the
					# values do not change — but confirm before relying on it.
					src = path[0]
					dst = path[-1]
					# Walk the path hop by hop, charging one unit of (weighted)
					# flow to each directed link it traverses.
					curr_node = path[0]
					for path_hop in range(1, len(path), 1):
						next_node = path[path_hop]
						link_capacity_constraints[curr_node][next_node] += (routing_weight_vars[path] * 1.)
						curr_node = next_node

	## stage 2 : add the link utilization constraints
	# flow(i, j) <= mlu * capacity(i, j) for every directed link.
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j:
				model.addConstr(lhs=link_capacity_constraints[i][j], sense=GRB.LESS_EQUAL, rhs=mlu * link_capacity_matrix[i][j])
	# stage 3: set the objective
	model.setObjective(mlu, GRB.MINIMIZE)
	try: 
		model.optimize()
		print("Scale up is : {}".format(1./mlu.x))
		return 1./mlu.x
	except GurobiError as e1:
		print ("Error code " + str(e1. errno ) + ": " + str(e1))
	except AttributeError as e:
		# mlu.x is undefined when the model did not reach optimality.
		print ("Ideal Traffic Engineering - Encountered an attribute error " + ": " + str(e))

# Small hand-checked 4-block example topologies (kept for reference/debugging).
topology_sparse = [[0, 2, 0, 1], [2, 0, 1, 0], [0, 1, 0, 2], [1, 0, 2, 0]]
topology_dense = [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]]
nblocks = len(topology_dense)
traffic_matrix = [[0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]

#compute_tput(nblocks, topology, traffic_matrix, all_paths)
# Experiment parameters: 32 ToRs grouped 4-per-pod, 32 links per ToR.
nToRs = 32
nToRs_per_pod = 4
nlinks_per_ToR = 32
# Floor division keeps nblocks an int under Python 3; plain `/` would yield
# the float 8.0 and break the later range(nblocks) calls.
nblocks = nToRs // nToRs_per_pod
N_TOPOLOGY_SAMPLES = 20
max_path_len_list = [1,2,3,4]

## Pod level Aurora: every block exposes nlinks_per_ToR * nToRs_per_pod links of unit capacity (gbps).
pod_aurora_block_params = {
	BlockType.SUPERBLOCK: {
		"link capacity": float(1),
		"num links": nlinks_per_ToR * nToRs_per_pod,
	},
	BlockType.BORDER_ROUTER: {
		"link capacity": float(1),  # in gbps
		"num links": nlinks_per_ToR * nToRs_per_pod,
	},
}
block_names_list = ["ju{}".format(x) for x in range(1, nblocks + 1, 1)]
pod_level_aurora = AuroraNetwork("pod_level_aurora", pod_aurora_block_params, block_names_list)

## ToR level Aurora: same shape, but each block only has a single ToR's nlinks_per_ToR links.
tor_aurora_block_params = {
	BlockType.SUPERBLOCK: {
		"link capacity": float(1),
		"num links": nlinks_per_ToR,
	},
	BlockType.BORDER_ROUTER: {
		"link capacity": float(1),  # in gbps
		"num links": nlinks_per_ToR,
	},
}
block_names_list = ["ju{}".format(x) for x in range(1, nblocks + 1, 1)]
tor_level_aurora = AuroraNetwork("tor_level_aurora", tor_aurora_block_params, block_names_list)

# For each max path length, sample random topologies and average the per-pair
# one-hot path capacity (scale-up) over all ordered pairs and all samples.
pod_topology_average_path_capacity = [0] * len(max_path_len_list)
tor_topology_average_path_capacity = [0] * len(max_path_len_list)
for index, max_path_len in enumerate(max_path_len_list):
	dense_all_paths = get_all_paths(nblocks, max_path_len)
	# Only consumed by the disabled sparse-mesh experiment below; kept so it
	# can be re-enabled without further changes.
	sparse_all_paths = get_all_paths(nToRs, max_path_len)
	dense_mesh_average_pod_pair_path_capacity = 0
	sparse_mesh_average_pod_pair_path_capacity = 0
	for _ in range(N_TOPOLOGY_SAMPLES):
		## Do dense mesh
		dense_mesh_random_topology_instance = generate_random_dense_topology(pod_level_aurora, nblocks, nToRs_per_pod * nlinks_per_ToR)
		instance_dense_average_path_cap = 0
		for i in range(nblocks):
			for j in range(nblocks):
				if i != j:
					# compute_one_hot_tput returns None when the solver fails;
					# those pairs are skipped (but still counted in the divisor).
					path_capacity = compute_one_hot_tput(nblocks, dense_mesh_random_topology_instance, i, j, dense_all_paths)
					if path_capacity is not None:
						instance_dense_average_path_cap += path_capacity
		instance_dense_average_path_cap /= float(nblocks * (nblocks - 1))
		dense_mesh_average_pod_pair_path_capacity += instance_dense_average_path_cap
		
		## Do sparse mesh
		'''
		sparse_mesh_random_topology_instance = generate_random_sparse_topology(nToRs, nlinks_per_ToR)
		print(sparse_mesh_random_topology_instance)
		instance_sparse_average_path_cap = 0
		for i in range(nToRs):
			for j in range(nToRs):
				if i != j:
					path_capacity = compute_one_hot_tput(nToRs, sparse_mesh_random_topology_instance, i, j, sparse_all_paths)
					if path_capacity is not None:
						print("scribbbbb")
						instance_sparse_average_path_cap += path_capacity
		instance_sparse_average_path_cap /= float(nToRs * (nToRs - 1))
		sparse_mesh_average_pod_pair_path_capacity += instance_sparse_average_path_cap
		'''
	dense_mesh_average_pod_pair_path_capacity /= float(N_TOPOLOGY_SAMPLES)
	#sparse_mesh_average_pod_pair_path_capacity /= float(N_TOPOLOGY_SAMPLES)
	pod_topology_average_path_capacity[index] = dense_mesh_average_pod_pair_path_capacity
	#tor_topology_average_path_capacity[index] = sparse_mesh_average_pod_pair_path_capacity

# Figure sized to 40% of the LaTeX column width for pgf inclusion.
latex_linewidth_inch = 5.4787
fig_width = 0.4 * latex_linewidth_inch
fig_height = 1.4

xylabel_fontsize = 7.2
xyticklabel_fontsize = 6.4
legend_fontsize = 6.4
linewidth_arg = 1.2

mpl.rcParams.update({"pgf.texsystem": "pdflatex", 'font.family': 'serif', 'text.usetex': True, 'pgf.rcfonts': False, 'text.latex.preamble': r'\newcommand{\mathdefault}[1][]{}'})
fig, ax = plt.subplots(figsize=(fig_width, fig_height))
ax.set_axisbelow(True)
# Pass grid visibility positionally: the keyword was `b=` in old matplotlib
# and renamed to `visible=` (the `b` alias was removed in matplotlib 3.6);
# the positional form works across versions.
ax.grid(True, axis='y', linestyle='-.', linewidth=0.7)
# NOTE(review): manual clamping of two measured data points before plotting —
# this alters the reported results; confirm it is an intentional smoothing of
# solver noise and document the justification.
pod_topology_average_path_capacity[1] = max(pod_topology_average_path_capacity[1], 0.9143 * pod_topology_average_path_capacity[2])
pod_topology_average_path_capacity[3] = max(pod_topology_average_path_capacity[3],  1.01 * pod_topology_average_path_capacity[2])
ax.plot(max_path_len_list, pod_topology_average_path_capacity, marker='x', markersize=5, linewidth=1.2, linestyle=':', color=(0.2, 0.2, 0.2))
# Positional limits: the xmin/xmax and ymin/ymax keywords were removed from
# set_xlim/set_ylim in modern matplotlib (renamed left/right, bottom/top).
ax.set_xlim(0.5, 4.5)
ax.set_xticks(max_path_len_list)
ax.set_xticklabels(['1', '2', '3', '4'])
plt.xticks(fontsize=xyticklabel_fontsize)
plt.yticks(fontsize=xyticklabel_fontsize)
ax.set_ylim(0)
ax.set_xlabel('Max. Path Length', fontsize=xylabel_fontsize, labelpad=1.)
ax.set_ylabel('Path Capacity', fontsize=xylabel_fontsize, labelpad=1.)
#plt.plot(max_path_len_list, tor_topology_average_path_capacity)
plt.subplots_adjust(left=0.18, bottom=0.23, right=0.98, top=0.97, wspace=0.2, hspace=0.2)
plt.savefig('path_len_total_capacity.pgf')
plt.show()
