import sys, os, copy, math
import numpy as np
sys.path.append("..")
sys.path.append("../..")
from path_selector import *
from aurora_network import *
import topology_engineering.ideal_topology_engineer as ideal_toe
import traffic_engineering.ideal_traffic_engineer as ideal_te
import facebook_dcn_traffic.utility
import CritMat.critmat as critical_matrix_module
from evaluation.performance_evaluator import *
from multiprocessing import Pool
from proto import *

from topology_engineering import *
from traffic_engineering import *


RUN_NETBENCH_SIMS = False
NETBENCH_DIRECTORY = os.path.expanduser('~') + "/github/mteh/netbench"

def read_in_facebook_traffic(trace_file_name, network_ids_filename, aggregation_window):
	"""Read Facebook DCN traffic matrices from a protobuf trace and rescale
	every entry in place.

	Each cell is converted via bytes * 8 / 1E6 / aggregation_window
	(presumably bytes-per-window -> megabits per second; confirm the
	window unit against the trace producer).

	Returns (traffic_matrices, valid_network_ids, timestamps).
	"""
	valid_network_ids = facebook_dcn_traffic.utility.read_in_network_ids(network_ids_filename)
	traffic_matrices, timestamps = facebook_dcn_traffic.utility.read_traffic_matrix_protobuf(
		trace_file_name, considered_network_id=valid_network_ids, return_timestamps=True)
	print("Number of traffic matrices : {}".format(len(traffic_matrices)))
	for matrix in traffic_matrices:
		dim = len(matrix)
		for row in range(dim):
			for col in range(dim):
				matrix[row][col] = float(matrix[row][col]) * 8 / 1E6 / float(aggregation_window)
	return traffic_matrices, valid_network_ids, timestamps

# Writes the wcmp weights file
def write_wcmp_weight_file(wcmp_path_weights_filename, nblocks, routing_weights, traffic_matrix):
	"""Write the WCMP path-weight file consumed by the NetBench simulator.

	Each emitted line has the format:
		path_len,weight,node1,node2,...,nodeP   (P == path_len)

	A path is skipped when its (src, dst) pair carries no demand in
	``traffic_matrix`` or its weight is below 1e-6.

	Args:
		wcmp_path_weights_filename: destination file path.
		nblocks: number of blocks (unused here; kept for interface
			compatibility with the other writer functions).
		routing_weights: dict mapping a path (sequence of node ids; the
			first element is the source, the last the destination) to its
			routing weight.
		traffic_matrix: 2D demand matrix; entry [src][dst] == 0 means the
			pair is skipped.
	"""
	lines = []
	for path, weight in routing_weights.items():
		src = path[0]
		dst = path[-1]
		# Skip zero-demand pairs and numerically negligible weights.
		if traffic_matrix[src][dst] == 0 or weight < 0.000001:
			continue
		fields = [str(len(path)), str(weight)] + [str(node) for node in path]
		lines.append(",".join(fields))
	# Single join instead of repeated string concatenation (avoids O(n^2)
	# string growth for large weight sets).
	with open(wcmp_path_weights_filename, "w+") as f:
		f.write("".join(line + "\n" for line in lines))
	return

# Writes the traffic communication probability file
def write_traffic_probability_file(traffic_probabilities_filename, nblocks, traffic_matrix):
	"""Write the per-pair traffic probability (PDF) file for NetBench.

	Every positive off-diagonal demand is normalized by the total demand
	and written as one CSV row ``pair_id,src,dst,probability`` (probability
	in %.6E notation), after a header line.  The file ends with a trailing
	blank line, matching the format NetBench expects.

	Args:
		traffic_probabilities_filename: destination file path.
		nblocks: dimension of the (nblocks x nblocks) traffic matrix.
		traffic_matrix: 2D demand matrix; only entries > 0 with i != j are
			emitted.
	"""
	traffic_sum = sum(sum(row) for row in traffic_matrix)
	lines = ["#tor_pair_id,src,dst,pdf_num_bytes"]
	pair = 0
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j and traffic_matrix[i][j] > 0:
				# Normalize on the fly; the original pre-built a full
				# normalized matrix whose diagonal was never used.
				lines.append("{},{},{},{:.6E}".format(pair, i, j, traffic_matrix[i][j] / traffic_sum))
				pair += 1
	with open(traffic_probabilities_filename, "w+") as f:
		f.write("\n".join(lines) + "\n\n")
	return

## Writes the .topology file 
def write_topology_file(topology_filename, adj_matrix):
	"""Write the NetBench .topology file for the given adjacency matrix.

	``adj_matrix[src][dst]`` holds the (possibly fractional) number of
	directed links from src to dst; a fractional count v emits ceil(v)
	parallel "src dst" lines, matching the original accumulate-until loop.
	Every node is declared both a ToR and a server; there are no pure
	switches.

	Args:
		topology_filename: destination file path.
		adj_matrix: square adjacency matrix of link counts.
	"""
	nblocks = len(adj_matrix)
	# |E| is the (truncated) sum of raw link counts, as in the original.
	total_links = sum(sum(row) for row in adj_matrix)
	lines = [
		"# aurora with number of pods: {}".format(nblocks),
		"|V|={}".format(nblocks),
		"|E|={}".format(int(total_links)),
		"ToRs=incl_range(0,{})".format(nblocks - 1),
		"Servers=incl_range(0,{})".format(nblocks - 1),
		"Switches=set()",
		"",
	]
	for src in range(nblocks):
		for dst in range(nblocks):
			# "while lc < v" with unit steps from 0 iterates ceil(v) times
			# for v > 0 and never for v <= 0 -- range(ceil(v)) is identical.
			for _ in range(math.ceil(adj_matrix[src][dst])):
				lines.append("{} {}".format(src, dst))
	with open(topology_filename, "w+") as f:
		f.write("\n".join(lines) + "\n")
	return

# Writes the simulation parameters into the .properties file
def write_netbench_property_file(config_filename, 
								topology_file, 
								traffic_probability_file, 
								wcmp_weight_filename, 
								output_dir, 
								output_base_dir, 
								lambda_flow_arrival_rate, 
								uniform_flow_size_average_bytes=23199798,
								transport_layer="basic",
								network_link_capacity_gbps=40):
	"""Write a NetBench .properties simulation config file.

	The config wires together the topology, traffic probability and WCMP
	weight files, fixes a 1 s run with a fixed seed, configures ECN
	tail-drop ports on perfect-simple links, and drives Poisson flow
	arrivals with a uniform flow-size distribution.

	Args:
		config_filename: path of the .properties file to write.
		topology_file: path to the .topology file.
		traffic_probability_file: path to the traffic PDF file.
		wcmp_weight_filename: path to the WCMP path-weight file.
		output_dir: NetBench run folder name.
		output_base_dir: NetBench run folder base directory.
		lambda_flow_arrival_rate: Poisson flow-arrival rate (flows/s);
			clamped below to 1 and rounded to the nearest integer.
		uniform_flow_size_average_bytes: mean flow size in bytes.
		transport_layer: NetBench transport-layer name.
		network_link_capacity_gbps: link bandwidth in bits per ns (== Gbps).
	"""
	# Arrival rate is clamped to at least one flow per second.
	arrival_rate = int(round(max(lambda_flow_arrival_rate, 1.)))
	content = (
		"# Topology\n"
		f"scenario_topology_file={topology_file}\n"
		"\n"
		"# Run Info\n"
		"run_time_s=1\n"
		f"run_folder_name={output_dir}\n"
		f"run_folder_base_dir={output_base_dir}\n"
		"enable_log_flow_throughput=true\n"
		"enable_smooth_rtt=true\n"
		"seed=8278897294\n"
		"\n"
		"# Network Device\n"
		f"transport_layer={transport_layer}\n"
		"network_device=wcmp_switch\n"
		"network_device_routing=wcmp\n"
		f"wcmp_path_weights_filename={wcmp_weight_filename}\n"
		"network_device_intermediary=identity\n"
		"\n"
		"# Link & output port\n"
		"output_port=ecn_tail_drop_diff_queue_size\n"
		"output_port_max_queue_size_bytes=50000\n"
		"output_port_ecn_threshold_k_bytes=10000\n"
		"link=perfect_simple\n"
		"link_delay_ns=600\n"
		f"link_bandwidth_bit_per_ns={network_link_capacity_gbps}\n"
		"\n"
		"# Traffic\n"
		"traffic=poisson_arrival\n"
		f"traffic_lambda_flow_starts_per_s={arrival_rate}\n"
		"traffic_flow_size_dist=uniform\n"
		f"traffic_flow_size_dist_uniform_mean_bytes={int(uniform_flow_size_average_bytes)}\n"
		f"traffic_probabilities_file={traffic_probability_file}\n"
		"\n"
	)
	with open(config_filename, 'w+') as f:
		f.write(content)
	return



def same_traffic_matrix_different_hop_count(aurora_network, 
											all_interblock_paths, 
											traffic_matrix, 
											list_of_average_hop_counts, 
											simulated_load_levels, 
											transport_layer="basic",
											uniform_flow_size_average_bytes=23199798,
											network_link_capacity_gbps=10):
	"""Generate NetBench inputs for one traffic matrix across several target
	average hop counts (AHC), and optionally run NetBench on each config.

	For every AHC in list_of_average_hop_counts this writes, under
	./diff_ahc_<transport_layer>/ (shared) and ./diff_ahc_<tl>/ahc<AHC>/
	(per AHC): a .topology file, a traffic probability file, a WCMP
	path-weight file, and one .properties config per load level in
	simulated_load_levels.  When the module-level RUN_NETBENCH_SIMS flag is
	True, each generated config is executed with the NetBench jar found in
	NETBENCH_DIRECTORY.

	Args:
		aurora_network: network object; get_num_blocks() is used here and
			the object is handed to the topology/traffic engineers.
		all_interblock_paths: candidate paths for the engineers.
		traffic_matrix: nblocks x nblocks demand matrix (presumably in the
			units produced by read_in_facebook_traffic -- confirm).
		list_of_average_hop_counts: target AHC values to sweep.
		simulated_load_levels: load fractions to sweep per AHC.
		transport_layer: NetBench transport-layer name.
		uniform_flow_size_average_bytes: mean flow size in bytes.
		network_link_capacity_gbps: per-link capacity in Gbps.
	"""
	nblocks = aurora_network.get_num_blocks()
	# Direct-path topology engineer; the commented alternatives below are
	# ideal engineers kept from earlier experiments.
	perfect_toe = direct_path_topology_engineer.DirectPathTopologyEngineer(aurora_network, 1, 1, numK=1)
	#perfect_toe = ideal_toe.IdealTopologyEngineer(aurora_network, all_interblock_paths, [])
	perfect_te = ideal_te.IdealTrafficEngineer(aurora_network, all_interblock_paths)
	#ideal_fractional_topology = perfect_toe._topology_engineer(traffic_snapshots[timestamp_index], all_interblock_paths)
	#fractional_topology = perfect_toe.topology_engineer_given_TM(traffic_matrix, all_interblock_paths)
	fractional_topology, _ = perfect_toe.topology_engineer_given_representative_TMs([traffic_matrix], all_interblock_paths)

	# Round fractional link counts up to whole links, and force at least
	# one link between every distinct block pair.
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j:
				if fractional_topology[i][j] == 0:
					fractional_topology[i][j] = 1.
				else:
					fractional_topology[i][j] = math.ceil(fractional_topology[i][j])
	print(fractional_topology)
	print(traffic_matrix)
	# Average egress link count per block; used below to size offered load.
	average_block_egress_num_links = 0
	block_egress_link = [0] * nblocks
	for i in range(nblocks):
		for j in range(nblocks):
			if i != j:
				block_egress_link[i] += fractional_topology[i][j]
	average_block_egress_num_links = sum(block_egress_link)/float(nblocks)
	average_flow_size_in_gbps = float(uniform_flow_size_average_bytes) * 8 / 1E9
	## Figure out the lambda flow start per sec rate for each load level
	total_flow_arrival_rate_list = []
	for load_level in simulated_load_levels:
		# flows/s per block = egress capacity (Gbps) * load / mean flow size (Gb).
		# NOTE(review): the variable says "per_server" but the computation is
		# per block -- confirm intended semantics.
		per_server_injection_rate = average_block_egress_num_links * network_link_capacity_gbps * load_level / average_flow_size_in_gbps
		total_flow_arrival_rate_list.append(per_server_injection_rate * nblocks)
	### Uniform Topology
	# Topology and traffic probability files are shared across all AHCs.
	if not os.path.isdir("./diff_ahc_{}".format(transport_layer)):
		os.mkdir("./diff_ahc_{}".format(transport_layer))
	topology_filename = "topol_nblock{}.topology".format(nblocks)
	write_topology_file("diff_ahc_{}/".format(transport_layer) + topology_filename, fractional_topology)
	traffic_filename = "traffic_nblock{}.txt".format(nblocks)
	write_traffic_probability_file("diff_ahc_{}/".format(transport_layer) + traffic_filename, nblocks, traffic_matrix)
	current_working_directory = os.getcwd()
	for ahc in list_of_average_hop_counts:
		# e.g. 1.90 -> "1p90"; used in directory names.
		ahc_string = "{:.2f}".format(ahc)
		ahc_string = ahc_string.replace('.', 'p')
		if not os.path.isdir("diff_ahc_{}/ahc{}".format(transport_layer, ahc_string)):
			os.mkdir("diff_ahc_{}/ahc{}".format(transport_layer, ahc_string))
		## Write WCMP weight file for the given AHC
		wcmp_filename = "wcmp_weights_nblock{}.txt".format(nblocks)
		routing_weights = perfect_te.compute_path_weights_fix_avehopcount(fractional_topology, traffic_matrix, ahc)
		write_wcmp_weight_file("diff_ahc_{}/ahc{}/".format(transport_layer, ahc_string) + wcmp_filename, nblocks, routing_weights, traffic_matrix)
		for load_level, arrival_rate in zip(simulated_load_levels, total_flow_arrival_rate_list):	
			load_level_str = "{:.2f}".format(load_level)
			load_level_str = load_level_str.replace('.', 'p')
			property_filename = "sim_parameters_load{}.properties".format(load_level_str)
			# Property files reference the shared topology/traffic files by
			# absolute path so NetBench can run from its own directory.
			write_netbench_property_file("diff_ahc_{}/ahc{}/".format(transport_layer, ahc_string) + property_filename, 
										current_working_directory + "/diff_ahc_{}/".format(transport_layer) + topology_filename, 
										current_working_directory + "/diff_ahc_{}/".format(transport_layer) +  traffic_filename, 
										current_working_directory + "/diff_ahc_{}/ahc{}/".format(transport_layer, ahc_string) + wcmp_filename, 
										"load{}".format(load_level_str),
										current_working_directory + "/diff_ahc_{}/ahc{}".format(transport_layer, ahc_string), 
										arrival_rate,
										uniform_flow_size_average_bytes=uniform_flow_size_average_bytes,
										transport_layer=transport_layer,
										network_link_capacity_gbps=network_link_capacity_gbps)
			if RUN_NETBENCH_SIMS:
				# Run NetBench from its own directory, then switch back.
				os.chdir(NETBENCH_DIRECTORY)
				os.system('java -jar -ea NetBench.jar {}/diff_ahc_{}/ahc{}/sim_parameters_load{}.properties'.format(current_working_directory, transport_layer, ahc_string, load_level_str))
				os.chdir(current_working_directory)
	return

# arrival rate
# total_traffic(gbps) = arrival_rate * ave_flow size

if __name__ == "__main__":
	## Any CLI argument starting with "-r" enables running NetBench after
	## the config files have been generated.
	for flag in sys.argv[1:]:
		if flag[:2] == "-r":
			RUN_NETBENCH_SIMS = True

	transport_layer_protocol_name = "simple_dctcp"
	cluster_name = "B"
	aggregation_window = 1
	list_of_average_hop_counts = [1.9]
	simulated_load_levels = [0.2]
	print(NETBENCH_DIRECTORY)
	## simply change the clusters here
	cluster_alias_map = {
		"A": "database",
		"B": "web",
		"C": "hadoop",
		"combined": "combined",
	}
	if cluster_name not in cluster_alias_map:
		print("Unrecognized ")
		sys.exit()
	cluster_alias = cluster_alias_map[cluster_name]
	tm_snapshots_protobuf_filename = "/home/gdp/github/mteh/facebook_dcn_traffic/traffic_matrices/cluster{}/{}_aggregationwindow_{}.pb".format(cluster_name, cluster_alias, aggregation_window)
	valid_network_ids_filename = "/home/gdp/github/mteh/facebook_dcn_traffic/traffic_matrices/cluster{}/cluster{}_pods.txt".format(cluster_name, cluster_name)

	print("Reading facebook dcn {} cluster traces...".format(cluster_alias))
	traffic_matrices, valid_network_ids, _ = read_in_facebook_traffic(tm_snapshots_protobuf_filename, valid_network_ids_filename, aggregation_window)
	# Drop the first and last snapshots.
	traffic_matrices = traffic_matrices[1:-1]
	print("Reading facebook cluster... COMPLETE")
	nblocks = number_of_pods = len(valid_network_ids)

	# Single representative snapshot used for the whole sweep.
	traffic_matrix = traffic_matrices[10]

	## Topology link parameters
	per_node_pair_num_links = 10
	link_capacity = 10  # gbps

	per_block_num_links = float((number_of_pods - 1) * per_node_pair_num_links)
	block_params = {
		BlockType.SUPERBLOCK: {
			"link capacity": float(link_capacity),
			"num links": per_block_num_links,
		},
		BlockType.BORDER_ROUTER: {
			"link capacity": float(link_capacity),  # in gbps
			"num links": per_block_num_links,
		},
	}

	## Declare the aurora network
	block_names_list = ["ju{}".format(idx) for idx in range(1, number_of_pods + 1)]
	aurora_network = AuroraNetwork("fb_cluster_{}".format(cluster_name), block_params, block_names_list)
	## find the direct and indirect paths
	path_selector = PathSelector(aurora_network, use_multihop=True)
	all_paths = path_selector.get_all_paths()

	same_traffic_matrix_different_hop_count(aurora_network, 
											all_paths, 
											traffic_matrix, 
											list_of_average_hop_counts, 
											simulated_load_levels, 
											transport_layer=transport_layer_protocol_name,
											uniform_flow_size_average_bytes=23199798,
											network_link_capacity_gbps=10)
