from decimal import *
from ast import literal_eval
import random
import json
import math
import numpy

# setup the required precision for the problem

def set_precision (g, p):
	"""Set the decimal context precision for the problem.

	The precision scales with the number of decimal digits of the failure
	probability p and with the number of edges of the graph g, so that
	products over many edge probabilities keep enough significant digits.

	g -- the graph (only len(g.es) is used)
	p -- the elementary failure probability, e.g. 0.001; assumed to print
	     in plain decimal form (scientific notation yields no "." digits)
	"""

# count the digits after the decimal point of p; find returns -1 when p
# has no decimal point at all (e.g. an integer probability of 0 or 1)

	decimals = str(p)[::-1].find(".")

# clamp to at least 1: decimal rejects a non-positive precision, which the
# unclamped product would produce whenever decimals is -1 or the graph is empty

	getcontext().prec = max(1, decimals*len(g.es)*10)

# compute a demand between every pair of two distinct nodes

def compute_integral_demand (g, d):
	"""Map every ordered pair of distinct vertices of g to the constant demand d."""
	order = len(g.vs)
	return {(s, t): d for s in range(order) for t in range(order) if s != t}

# compute a demand for a fat tree

def compute_tacas_tree_demand (g, d):
	"""Build the demand dict for a tacas tree: demand d between every core
	node and every one of its leaves, in both directions.

	The number of core nodes is recovered as the integer square root of the
	vertex count (the tree has n cores and n*n leaves)."""

	cores = int(math.sqrt(len(g.vs)))
	demand = dict()

# each core i talks to leaves cores*(i+1) .. cores*(i+1)+cores-1 and back

	for core in range(cores):
		for leaf in range(cores):
			node = cores*(core+1) + leaf
			demand[core, node] = d
			demand[node, core] = d
	return demand

# use phi to compute all probability weights from 0 to max_k (included) in a fixed set of edges

def compute_probabilities (phi, edges, max_k):
	"""Evaluate phi on the fixed edge set for every failure count 0..max_k
	(included) and return the list of the resulting probability weights."""
	weights = []
	for k in range(max_k + 1):
		weights.append(phi(edges, range(k)))
	return weights


# generate Pascal's (Yang Hui's) triangle
def compute_binomial_coefficients (n):
	"""Build Pascal's triangle up to row n.

	Returns a list of rows; row i holds the binomial coefficients
	C(i, 0) .. C(i, i). Rows 0 and 1 are always present, so for n < 2 the
	result is [[1], [1, 1]]."""
	rows = [[1], [1, 1]]
	for size in range(2, n + 1):
		prev = rows[-1]
		# each interior entry is the sum of the two entries above it
		rows.append([1] + [prev[k - 1] + prev[k] for k in range(1, size)] + [1])
	return rows


# compute the complement weight within n edges with a failure scenario f and at most k link failures

def compute_complement_weight (coefficients, probabilities, n, f, k):
	"""Complement weight within n edges given failure scenario f and at most
	k link failures: sum over extra failure counts i of C(n, i) times the
	probability weight for len(f)+i total failures."""
	failed = len(f)
	extra_counts = range(1 + min(n, k - failed))
	return sum(coefficients[n][i] * probabilities[failed + i] for i in extra_counts)

# compute the probability of a given failure scenario

def compute_scenario_probability (edges, f, p) :
	"""Probability that exactly the edges in f fail: each failed edge fails
	with probability p, every remaining edge survives with probability 1-p."""
	failed = len(f)
	surviving = len(edges) - failed
	return (p ** failed) * ((1 - p) ** surviving)

# compute the weight of the scenario as the inverse of its number of edges

def compute_scenario_weight (g, f) :
	"""Weight of scenario f as 10 to the minus its number of edges (g unused)."""
	return pow(10, -len(f))

# order edges by depth first search

def order_edges_by_dfs (g, edges, s, t):
	"""Return the edges of the given subset ordered by a depth first search
	from s; exploration does not continue past the target node t.

	Only edge ids listed in edges are followed, each at most once."""

	allowed = set(edges)
	visited = set()
	order = []
	stack = [s]

# keep expanding the node on top of the stack while it still has an
# unvisited allowed incident edge; otherwise backtrack

	while stack:
		node = stack[-1]
		candidates = [e for e in g.incident(node) if e in allowed and e not in visited]
		if not candidates:
			stack.pop()
		else:
			edge = candidates[0]
			order.append(edge)
			visited.add(edge)
			if g.es[edge].target != t:
				stack.append(g.es[edge].target)
	return order

# order edges by breadth first search

def order_edges_by_bfs (g, edges, s, t):
	"""Return the edges of the given subset ordered by a breadth first
	search from s; exploration does not continue past the target node t.

	Only edge ids listed in edges are followed, each at most once."""

	allowed = set(edges)
	taken = set()
	order = []
	queue = [s]

# exhaust the allowed incident edges of the node at the head of the queue,
# enqueueing discovered endpoints at the tail, then dequeue it

	while queue:
		node = queue[0]
		candidates = [e for e in g.incident(node) if e in allowed and e not in taken]
		if not candidates:
			queue.pop(0)
		else:
			edge = candidates[0]
			order.append(edge)
			taken.add(edge)
			if g.es[edge].target != t:
				queue.append(g.es[edge].target)

	return order

# load a list of demands from a given file

def load_demands (filename):
	"""Load a list of demand groups from a json file.

	The file must contain a json list of demands, each demand being a list
	of [source, target] pairs. Each demand is returned as a frozenset of
	(source, target) tuples.

	Raises TypeError when the top level is not a list, when a demand is not
	a list, or when a pair is not a list; raises ValueError when a pair does
	not have exactly 2 items.
	"""

# load the content of the given file; the with-block guarantees the file is
# closed even when json.load raises (the original leaked the handle then)

	with open(filename) as f:
		demands = json.load(f)
	if not isinstance(demands, list): raise TypeError("Demands must be a json list.")
	D = list()

# convert each demand to a frozen set, validating the json structure

	for demand in demands:
		if not isinstance(demand, list): raise TypeError("Demand "+str(demand)+" is not a list.")
		d = set()

		for pair in demand:
			if not isinstance(pair, list): raise TypeError(str(pair)+" is not a list")
			elif len(pair)!=2: raise ValueError(str(pair)+" must contain 2 items.")
			else: d.add(tuple(pair))

		D.append(frozenset(d))
	return D

# load a reward function from a json file

def load_rewards (filename):
	"""Load a reward function from a json file.

	The file must contain a json dict whose keys are string representations
	of python tuples (e.g. "(0, 1)") and whose values are the rewards.
	Returns a dict mapping each evaluated tuple key to its reward.

	Raises TypeError when the top level is not a json dict.
	"""

# open the provided file and load its content; the with-block guarantees the
# file is closed even when json.load raises (the original leaked the handle)

	with open(filename) as f:
		reward = json.load(f)
	if not isinstance(reward, dict): raise TypeError("The reward function is not a json dict.")

# convert each key of the reward function to a python tuple; literal_eval
# only evaluates python literals, so an untrusted file cannot run code

	return {literal_eval(key): value for key, value in reward.items()}

# compute the demand groups for a fat tree of size n

def compute_fat_tree_demand_groups (n, f):
	"""Compute the demand groups and rewards for a fat tree of size n.

	For every leaf (edge) node, one grouped demand towards all core routers
	is produced plus one singleton demand to each sibling leaf of the same
	unit. Generation stops as soon as 1+int(f*2*n**3) demands exist.
	Rewards are seeded deterministically from n.

	Returns the pair (demands, rewards)."""

	demands = []
	rewards = {}
	random.seed(5 * n ** 2)
	quota = 1 + int(f * 2 * n ** 3)
	leaf_base = 3 * n ** 2

	for unit in range(2 * n):
		for src in range(n):
			group = set()
			src_node = leaf_base + unit * n + src
			for dst in range(n):

				# demands towards the dst-th block of core routers

				for cor in range(n * dst, n * dst + n):
					group.add((src_node, cor))
					rewards[src_node, cor] = 1 + int(random.uniform(1, n + 1))

				# singleton demand from src to a distinct sibling leaf

				if src != dst:
					dst_node = leaf_base + n * unit + dst
					demands.append(frozenset({(src_node, dst_node)}))
					rewards[src_node, dst_node] = n + 1
					if len(demands) == quota: return demands, rewards

			demands.append(frozenset(group))
			if len(demands) == quota: return demands, rewards
	return demands, rewards

# compute the demand groups for a tacas tree of size n

def compute_tacas_tree_demand_groups (n):
	"""Compute the demand groups and rewards for a tacas tree of size n.

	For every leaf, one grouped demand towards all n core routers is
	produced plus one singleton demand to each distinct sibling leaf of the
	same unit. Rewards are seeded deterministically from n.

	Returns the pair (demands, rewards)."""

	demands = []
	rewards = {}
	random.seed(n * n)

	for unit in range(1, n + 1):
		for src in range(n):
			group = set()
			src_node = unit * n + src
			for dst in range(n):

				# demand from the leaf to the dst-th core router

				group.add((src_node, dst))
				rewards[src_node, dst] = 1 + int(random.uniform(1, n + 1))

				# singleton demand from src to a distinct sibling leaf

				if src != dst:
					demands.append(frozenset({(src_node, n * unit + dst)}))
					rewards[src_node, n * unit + dst] = n + 1

			demands.append(frozenset(group))
	return demands, rewards

# compute the demand groups for a bcube of size n

def compute_bcube_demand_groups (n):
	"""Compute the demand groups and rewards for a bcube of size n.

	For every leaf, one grouped demand towards all n core routers is
	produced. Leaf-to-sibling pairs receive a reward but — unlike the
	tacas-tree variant — no demand is generated for them. Rewards are
	seeded deterministically from n.

	Returns the pair (demands, rewards)."""

	demands = []
	rewards = {}
	random.seed(n * n)

	for unit in range(2, n + 2):
		for src in range(n):
			group = set()
			src_node = unit * n + src
			for dst in range(n):

				# demand from the leaf to the dst-th core router

				group.add((src_node, dst))
				rewards[src_node, dst] = 1 + int(random.uniform(1, n + 1))

				# sibling pairs are only rewarded, not demanded

				if src != dst:
					rewards[src_node, n * unit + dst] = n + 1

			demands.append(frozenset(group))
	return demands, rewards

# compute the labels of a fat tree topology

def compute_fat_tree_labels (n):
	"""Labels of a fat tree of size n: the first n*n nodes are cores ("C"),
	the next 2*n*n are aggregations ("A"), the last 2*n*n are edges ("E")."""
	labels = []
	for node in range(5 * n ** 2):
		if node < n ** 2: labels.append("C")
		elif node < 3 * n ** 2: labels.append("A")
		else: labels.append("E")
	return labels

# compute the labels of a tacas tree topology

def compute_tacas_tree_labels (n):
	"""Labels of a tacas tree of size n: n core nodes ("C") then n*n leaves ("L")."""
	return ["C"] * n + ["L"] * (n * n)

# compute the labels of a bcube topology

def compute_bcube_labels (n):
	"""Labels of a bcube of size n: n cores ("C"), n secondary cores ("c"),
	then n*n leaves ("L")."""
	return ["C"] * n + ["c"] * n + ["L"] * (n * n)

# compute the labels of a xpander topology

def compute_xpander_labels (d, k):
	"""Labels of an xpander with d+1 units of k+1 nodes: the first node of
	each unit is a core ("C"), the k others are leaves ("L")."""
	labels = []
	for node in range((d + 1) * (k + 1)):
		labels.append("C" if node % (k + 1) == 0 else "L")
	return labels

# compute the farthest possible pair demand groups in a given graph

def compute_farthest_pair_demand_groups(g, d, rate, distances):
	"""Build singleton demand groups for the farthest reachable pairs of g.

	Only pairs at finite distance are considered; after sorting by distance,
	the farthest fraction `rate` of them is kept. Every kept pair (s, t)
	becomes a singleton demand with reward d.

	Returns the pair (D, W)."""

# enumerate ordered pairs of distinct nodes, drop unreachable ones and
# sort the rest from closest to farthest

	size = len(g.vs)
	pairs = [(u, v) for u in range(size) for v in set(range(size)) - {u}]
	pairs = [p for p in pairs if distances[p] != numpy.inf]
	pairs.sort(key=lambda p: distances[p[0], p[1]])

# keep only the farthest rate-fraction of the sorted pairs

	kept = int(len(pairs) * rate)
	pairs = pairs[len(pairs) - kept:]

# build the singleton demands and the constant reward function

	D = []
	W = {}
	random.seed(len(g.vs))
	for s, t in pairs:
		D.append(frozenset({(s, t)}))
		W[s, t] = d
	return D, W

# compute single way pointing labels

def compute_single_way_pointing_labels (g, W, distances):
	"""Pick the best single way point and return the per-vertex labels.

	A candidate vertex is any vertex not appearing in a demand of W. For
	each candidate v its cost is the sum, over the demand pairs (s, t), of
	how far v is from the midpoint of the s-t path (in distance terms).
	The candidate with the minimal total cost is labelled "W", every other
	vertex ".".

	g         -- the graph (only len(g.vs) is used)
	W         -- dict keyed by (s, t) demand pairs
	distances -- matrix/dict indexable as distances[u, v]
	"""

# init the set of candidate vertices

	flowed_vertices = set().union(*W.keys())
	vertices = set(range(len(g.vs))) - flowed_vertices
	way_point = None
	min_distance = numpy.inf

# for each candidate vertex compute its total distance to all flowed pairs

	for v in vertices:
		distance = 0
		for s, t in W:
			half_st = distances[s, t]/2
			distance += abs(distances[s, v]-half_st) + abs(distances[v, t]-half_st)

# compare only once the sum over ALL pairs is complete (the comparison used
# to sit inside the pair loop, registering vertices on partial sums)

		if distance<min_distance:
			way_point = v
			min_distance = distance

# finally compute and return the list of labels

	labels = ["W" if v==way_point else "." for v in range(len(g.vs))]
	return labels

# compute single way pointing labels

def compute_double_way_pointing_labels (g, W, distances):
	"""Pick the two best way points and return the per-vertex labels.

	A candidate vertex is any vertex not appearing in a demand of W. For
	each candidate v its cost is the sum, over the demand pairs (s, t), of
	how far v is from the midpoint of the s-t path. The best candidate is
	labelled "U", the second best "V", every other vertex ".".

	g         -- the graph (only len(g.vs) is used)
	W         -- dict keyed by (s, t) demand pairs
	distances -- matrix/dict indexable as distances[u, v]
	"""

# init the set of candidate vertices

	flowed_vertices = set().union(*W.keys())
	vertices = set(range(len(g.vs))) - flowed_vertices

	way_point1 = None
	way_point2 = None

	min_distance1 = numpy.inf
	min_distance2 = numpy.inf

# for each candidate vertex compute its total distance to all flowed pairs

	for v in vertices:
		distance = 0
		for s, t in W:
			half_st = distances[s, t]/2
			distance += abs(distances[s, v]-half_st) + abs(distances[v, t]-half_st)

# compare only once the sum over ALL pairs is complete, and demote the
# previous best to second place when a new best is found (the old code
# compared partial sums and never demoted the best vertex)

		if distance<min_distance1:
			way_point2, min_distance2 = way_point1, min_distance1
			way_point1, min_distance1 = v, distance
		elif distance<min_distance2:
			way_point2 = v
			min_distance2 = distance

# finally compute and return the list of labels

	labels = ["U" if v==way_point1 else ("V" if v==way_point2 else ".") for v in range(len(g.vs))]
	return labels

# insert a node in a list sorted by distance to a target t

def insert_on_distance (l, v, t, distances):
	"""Insert node v in place into list l, kept sorted by distance to target t.

	l         -- list of node ids; from the comparisons below it appears to
	             be kept in DECREASING order of distances[., t] (nodes
	             closest to t at the end) -- confirm against callers
	v         -- the node to insert
	t         -- the target node the distances refer to
	distances -- matrix/dict indexable as distances[node, t]

	Returns nothing; l is modified in place.
	"""

# first of all if the list is empty just insert the node and return

	if not l:
		l.append(v)
		return

# otherwise search the place where inserting the new node by dichotomy  

	inf = 0
	sup = len(l)
	ind = sup // 2
	d = distances[v, t]

# slot ind is acceptable when l[ind-1] is at least as far from t as v AND
# l[ind] is no farther than v; while that fails, narrow the interval:
# l[ind] farther than v means v belongs to the right (raise inf), otherwise
# v belongs to the left (lower sup)

	while inf<sup and sup>ind and (distances[l[ind-1], t]<d or distances[l[ind], t]>d):
		if distances[l[ind], t]>d: inf = ind
		else: sup = ind
		ind = (inf + sup + 1) // 2

	l.insert(ind, v)