import os, sys, wget                                                     
from multiprocessing import Pool                                                
from enum import Enum
import bz2
from bz2 import decompress
import traffic_snapshot_pb2 as ts_pb2
#import utility as util

class Granularity(Enum):
	"""Level at which trace endpoints are aggregated into matrix entries."""
	POD = 0   # aggregate flows by source/destination pod
	IP = 1    # aggregate flows by source/destination IP
	RACK = 2  # aggregate flows by source/destination rack

## global variables (read by the worker functions below; the __main__
## block rebinds several of them before the pool is forked)
aggregation_period_in_seconds = 500  # width of one traffic-matrix timeslot (rebound to 30 in __main__)
starting_timestamp = 1475305200  # trace window start, unix seconds
ending_timestamp = 1475391600  # trace window end, unix seconds
granularity = Granularity.POD  # endpoint aggregation level used while parsing
include_intercluster_traffic = False  # set True in __main__ for the "combined" cluster

def GetBZFilenameFromURL(url):
	"""Extract the .bz2 file name from a trace download URL.

	The name is normally the sixth '/'-separated component with any query
	string stripped; when that component begins with 't' the file name is
	the following component instead.
	"""
	parts = url.split('/')
	if parts[5][0] == 't':
		name_with_query = parts[6]
	else:
		name_with_query = parts[5]
	return name_with_query.split('?')[0]

def parse_single_trace_file(trace_file_name, start_time_stamp, end_time_stamp, number_of_timeslots, granularity):
	"""Parse one bz2-compressed trace file into per-timeslot traffic matrices.

	Each trace line is a tab-separated 15-field record: field 0 is the unix
	timestamp, field 1 the packet size in KB, fields 2-12 hex-encoded
	endpoint identifiers, field 13 an inter-cluster flag and field 14 an
	inter-datacenter flag.  Inter-datacenter lines are always skipped;
	inter-cluster lines are skipped unless the module-level
	include_intercluster_traffic is True.

	Returns (tm_snapshots, min_timestamp): tm_snapshots is a list of
	number_of_timeslots dicts mapping (src, dst) -> total KB, and
	min_timestamp is the smallest timestamp seen (sys.maxsize if none).
	"""
	tm_snapshots = [{} for _ in range(number_of_timeslots)]
	start_to_end_timestamp_diff = end_time_stamp - start_time_stamp
	# sys.maxint is Python-2-only; sys.maxsize exists on both 2 and 3.
	min_timestamp = sys.maxsize
	bz2file = bz2.BZ2File(trace_file_name, "r")
	try:
		for line in bz2file:
			str_vect = line.split("\t")
			# Validate the field count BEFORE indexing fields 13/14 --
			# the original indexed first and raised IndexError on
			# malformed (short) lines.
			if len(str_vect) != 15:
				continue
			if int(str_vect[14]) > 0:  # inter-datacenter: always skipped
				continue
			if int(str_vect[13]) > 0 and not include_intercluster_traffic:
				continue
			timestamp = int(str_vect[0])
			min_timestamp = min(timestamp, min_timestamp)
			packet_size = int(str_vect[1]) # in KB
			# '\\N' marks an unknown endpoint field; skip such records.
			if any(str_vect[i] == "\\N" for i in range(7, 13)):
				continue
			# Pick the endpoint pair for the requested aggregation level.
			# Field layout (hex): 2/3 src/dst IP, 9/10 src/dst rack,
			# 11/12 src/dst pod.
			src = -1
			dst = -1
			if granularity == Granularity.POD:
				src = int(str_vect[11], 16)
				dst = int(str_vect[12], 16)
			elif granularity == Granularity.IP:
				src = int(str_vect[2], 16)
				dst = int(str_vect[3], 16)
			elif granularity == Granularity.RACK:
				src = int(str_vect[9], 16)
				dst = int(str_vect[10], 16)
			# Map the timestamp to a timeslot index, clamped into range.
			time_index = int(float(timestamp - start_time_stamp) / start_to_end_timestamp_diff * number_of_timeslots)
			time_index = min(max(time_index, 0), number_of_timeslots - 1)
			key = (src, dst)
			tm_snapshots[time_index][key] = tm_snapshots[time_index].get(key, 0) + packet_size
	finally:
		# The original never closed the handle; leaked one fd per file.
		bz2file.close()
	return tm_snapshots, min_timestamp

## given a traffic matrix protobuf
def aggregate_trace_from_basic_traces(protobuf_filename, base_protobuf_filename, new_trace_aggregation_window):
	"""Aggregate a base (window = 1) traffic timeseries into coarser windows.

	Reads base_protobuf_filename, sums the flows of every
	new_trace_aggregation_window consecutive snapshots into one snapshot
	(timestamped at the window's first snapshot), and writes the result to
	protobuf_filename.

	NOTE(review): a trailing partial window (fewer than
	new_trace_aggregation_window snapshots) is emitted without flows,
	matching the original behavior -- confirm that is intended.
	"""
	assert(new_trace_aggregation_window >= 1)
	base_traffic_snapshots = ts_pb2.TrafficFlowTimeseries()
	try:
		# Protobuf payloads are binary: read in "rb" (the original used
		# text mode, which corrupts reads on some platforms).
		with open(base_protobuf_filename, "rb") as f:
			base_traffic_snapshots.ParseFromString(f.read())
	except IOError:
		# The original message referenced undefined names (directory,
		# filename) and would itself raise NameError.
		print("Could not open file: {}".format(base_protobuf_filename))
		return
	new_traffic_snapshots = ts_pb2.TrafficFlowTimeseries()
	counter = 0
	aggregated_snapshot_traffic_flows = {}
	for i in range(len(base_traffic_snapshots.snapshots)):
		if counter == 0:
			# Window boundary: start a fresh aggregated snapshot.
			aggregated_snapshot_traffic_flows = {}
			aggregated_snapshot = new_traffic_snapshots.snapshots.add()
			aggregated_snapshot.timestamp = base_traffic_snapshots.snapshots[i].timestamp
			aggregated_snapshot.time_index = len(new_traffic_snapshots.snapshots) - 1
		# Accumulate this snapshot's flows into the current window.
		for flow in base_traffic_snapshots.snapshots[i].flows:
			key = (flow.src, flow.dst)
			aggregated_snapshot_traffic_flows[key] = aggregated_snapshot_traffic_flows.get(key, 0) + flow.size
		if counter == new_trace_aggregation_window - 1:
			# Window complete: materialize the accumulated flows.
			aggregated_snapshot = new_traffic_snapshots.snapshots[-1]
			for (src_id, dst_id), flow_size in aggregated_snapshot_traffic_flows.items():
				flow = aggregated_snapshot.flows.add()
				flow.src = src_id
				flow.dst = dst_id
				flow.size = flow_size
		counter = (counter + 1) % new_trace_aggregation_window
	## finally, export the file
	with open(protobuf_filename, 'wb') as f:
		f.write(new_traffic_snapshots.SerializeToString())
	return


## Parses the files and then returns a list of traffic snapshots
def parse_trace_files_collection(trace_file_urls):
	"""Download, parse and merge a list of trace-file URLs.

	Worker entry point used by the multiprocessing pool.  Reads the
	module-level globals number_of_snapshots, starting_timestamp,
	ending_timestamp and granularity (inherited by the forked workers).

	Returns a list of number_of_snapshots dicts mapping
	(src, dst) -> total KB.
	"""
	tm_snapshots_collection = [{} for _ in range(number_of_snapshots)]
	# sys.maxint is Python-2-only; sys.maxsize exists on both 2 and 3.
	min_timestamp = sys.maxsize
	for trace_file_url in trace_file_urls:
		try:
			wget.download(trace_file_url)
			trace_file_bz_name = GetBZFilenameFromURL(trace_file_url)
			tm_snapshots, timestamp_min = parse_single_trace_file(trace_file_bz_name, starting_timestamp, ending_timestamp, number_of_snapshots, granularity)
			min_timestamp = min(timestamp_min, min_timestamp)
			# Fold this file's matrices into the running per-slot totals.
			for timestamp_index, tm_snapshot in enumerate(tm_snapshots):
				collection_snapshot = tm_snapshots_collection[timestamp_index]
				for key, flow_size in tm_snapshot.items():
					collection_snapshot[key] = collection_snapshot.get(key, 0) + flow_size
			os.remove(trace_file_bz_name)
		except Exception as e:
			# The original bare `except:` swallowed everything (even
			# KeyboardInterrupt) and always blamed the download; report
			# what actually failed instead.
			print("Unable to process trace url: {} ({})".format(trace_file_url, e))
	print("process : {} has min timestamp : {}".format(os.getpid(), min_timestamp))
	return tm_snapshots_collection

def export_to_pb(directory, output_filename, all_tm_snapshots):
	"""Serialize a list of (src, dst) -> size traffic matrices to a protobuf file.

	Snapshot timestamps are derived from the module-level
	starting_timestamp and aggregation_period_in_seconds.
	"""
	timeseries_snapshots = ts_pb2.TrafficFlowTimeseries()
	for time_index, tm_snapshot in enumerate(all_tm_snapshots):
		snapshot = timeseries_snapshots.snapshots.add()
		snapshot.time_index = time_index
		snapshot.timestamp = starting_timestamp + (time_index * aggregation_period_in_seconds)
		for (src_id, dst_id), flow_size in tm_snapshot.items():
			flow = snapshot.flows.add()
			flow.src = src_id
			flow.dst = dst_id
			flow.size = flow_size
	with open(directory + "/" + output_filename, 'wb') as f:
		f.write(timeseries_snapshots.SerializeToString())
	return


def do_combined_clique():
	"""Filter the combined traffic timeseries down to intra-cluster flows.

	Reads the combined window-1 timeseries, keeps only flows whose src and
	dst block ids map to the same cluster ("A"/"B"/"C"), and writes the
	result under traffic_matrices/clustercombinedclique/.  Block ids with
	no known cluster are collected in unseen_set and printed at the end.
	"""
	combined_traffic_snapshots = ts_pb2.TrafficFlowTimeseries()
	combined_traffic_snapshots_filename = "traffic_matrices/clustercombined/combined_aggregationwindow_1.pb"
	try:
		# Protobuf payloads are binary: read in "rb" mode.
		with open(combined_traffic_snapshots_filename, "rb") as f:
			combined_traffic_snapshots.ParseFromString(f.read())
	except IOError:
		# The original message referenced undefined names (directory,
		# filename) and would itself raise NameError.
		print("Could not open file: {}".format(combined_traffic_snapshots_filename))
		return
	if not os.path.isdir("traffic_matrices/clustercombinedclique"):
		os.mkdir("traffic_matrices/clustercombinedclique")
	combined_clique_snapshots = ts_pb2.TrafficFlowTimeseries()
	# Block id -> cluster name.  The Python-2-only 'L' long suffixes were
	# dropped: plain int literals have identical values and parse on both
	# Python 2 and 3.
	block_id_to_clustername_map = {
		3267594696: "A", 3535813097: "A", 3451287067: "A", 748632111: "A",
		1196037794: "B", 3718652260: "B", 2245899653: "B", 1859028304: "B",
		765053137: "B", 2826181397: "B", 2130580533: "B", 3628623295: "B",
		59095618: "C", 3579829636: "C", 94494127: "C", 3204300529: "C",
		2565564243: "C", 431341974: "C", 940823895: "C", 1570068484: "C",
		633437950: "C",
	}
	print("starting")
	unseen_set = set()
	for i in range(len(combined_traffic_snapshots.snapshots)):
		print("snapshot {}".format(i))
		combined_snapshot = combined_traffic_snapshots.snapshots[i]
		combined_clique_snapshot = combined_clique_snapshots.snapshots.add()
		combined_clique_snapshot.timestamp = combined_snapshot.timestamp
		combined_clique_snapshot.time_index = combined_snapshot.time_index
		for traffic_flow in combined_snapshot.flows:
			src_id = traffic_flow.src
			dst_id = traffic_flow.dst
			# -1 sentinel marks a block id with no known cluster.
			src_cluster_id = block_id_to_clustername_map.get(src_id, -1)
			dst_cluster_id = block_id_to_clustername_map.get(dst_id, -1)
			if src_cluster_id == -1:
				unseen_set.add(src_id)
				continue
			if dst_cluster_id == -1:
				unseen_set.add(dst_id)
				continue
			if src_cluster_id == dst_cluster_id:
				print("booyeah")  # debug leftover; kept to preserve output
				added_flow = combined_clique_snapshot.flows.add()
				added_flow.src = src_id
				added_flow.dst = dst_id
				added_flow.size = traffic_flow.size
	print(unseen_set)
	## finally, export the file
	with open("traffic_matrices/clustercombinedclique/combinedclique_aggregationwindow_1.pb", 'wb') as f:
		f.write(combined_clique_snapshots.SerializeToString())

## Script entry point: download the chosen cluster's trace files with a
## process pool, aggregate them into per-timeslot traffic matrices,
## export the result to protobuf, and plot the per-timeslot totals.
if __name__ == "__main__":
	number_of_processes = 3  # worker processes for download/parse

	## arguments - 
	## 1) output directory
	## 2) cluster name
	cluster_name = "combined"
	# Rebinds the module-level value (500 above); forked pool workers
	# inherit this and the other globals set in this block.
	aggregation_period_in_seconds = 30


	# Map the cluster letter to its human-readable alias.
	cluster_alias = ""
	if cluster_name == "A":
		cluster_alias = "database"
	elif cluster_name == "B":
		cluster_alias = "web"
	elif cluster_name == "C":
		cluster_alias = "hadoop"
	elif cluster_name == "combined":
		cluster_alias = "combined"
		include_intercluster_traffic = True  # combined view keeps inter-cluster flows
	output_directory = "/Users/minyee/src/facebook_dcn_traffic/traffic_matrices/cluster{}".format(cluster_name)
	url_list_filename = "cluster{}_url.txt".format(cluster_name)
	all_file_urls = []
	output_filename = cluster_alias + "_aggregationwindow_{}.pb".format(aggregation_period_in_seconds)






	## first check if we are trying to compute snapshots that are already computed
	## or that the prerequisite based sampled is met
	if os.path.isfile(output_directory + "/" + output_filename):
		print("The file already exists")
		exit()
	# If the fine-grained (window = 1) timeseries already exists, derive
	# the coarser aggregation from it instead of re-downloading raw traces.
	if aggregation_period_in_seconds > 1 and os.path.isfile(output_directory + "/" + cluster_alias + "_aggregationwindow_{}.pb".format(1)):
		aggregate_trace_from_basic_traces(output_directory + "/" + output_filename, output_directory + "/" + cluster_alias + "_aggregationwindow_{}.pb".format(1), aggregation_period_in_seconds)
		exit()



	# One URL per non-empty line of the URL list file.
	with open(url_list_filename, 'r') as f:
		for line in f:
			if len(line) > 0:
				all_file_urls.append(line)

	# Number of aggregation windows needed to cover the
	# [starting_timestamp, ending_timestamp] range; read by the pool
	# workers through the module globals.
	number_of_snapshots = 0
	while starting_timestamp + (number_of_snapshots * aggregation_period_in_seconds) <= ending_timestamp:
		number_of_snapshots += 1


	#exit()
	# Split the URL list as evenly as possible across the processes; the
	# first `leftover` processes take one extra file each.
	# NOTE(review): `/` below is Python-2 integer division (this script
	# targets Python 2); use `//` if porting to Python 3.
	total_number_of_files = len(all_file_urls)
	number_of_processes = min(number_of_processes, total_number_of_files)
	number_of_files_to_parse_per_process = total_number_of_files / number_of_processes
	number_of_files_to_parse_per_process_list = [number_of_files_to_parse_per_process] * number_of_processes
	leftover = total_number_of_files - number_of_files_to_parse_per_process * number_of_processes
	for i in range(leftover):
		number_of_files_to_parse_per_process_list[i] += 1
	assert(sum(number_of_files_to_parse_per_process_list) == total_number_of_files)

	# Build each worker's slice of the URL list.
	process_arguments = []
	file_offset = 0
	for process_id in range(number_of_processes):
		url_list = all_file_urls[file_offset : file_offset + number_of_files_to_parse_per_process_list[process_id]]
		process_arguments.append(url_list)
		file_offset += number_of_files_to_parse_per_process_list[process_id]

	#for i in range(len(process_arguments)):
	#	print("\nProcess {}".format(i))
	#	print("args : {}".format(process_arguments[i]))
	#xit()

	# Fan the per-process URL lists out to the pool; each worker returns a
	# list of number_of_snapshots dicts mapping (src, dst) -> KB.
	pool = Pool(processes=number_of_processes)
	print("Done printing results\n\n\n\n\n")
	output_results = pool.map(parse_trace_files_collection, process_arguments)
	pool.close()
	pool.join()
	# Merge the workers' partial traffic matrices, summing per-flow sizes.
	tm_snapshots_collection = []
	for _ in range(number_of_snapshots):
		tm_snapshots_collection.append({})
	for tm_snapshots in output_results:
		for tm_snapshot, timestamp_index in zip(tm_snapshots, range(number_of_snapshots)):
			for src, dst in tm_snapshot:
				if (src, dst) not in tm_snapshots_collection[timestamp_index]:
					tm_snapshots_collection[timestamp_index][(src, dst)] = tm_snapshot[(src, dst)]
				else:
					tm_snapshots_collection[timestamp_index][(src, dst)] += tm_snapshot[(src, dst)]


	output_filename = cluster_alias + "_aggregationwindow_{}.pb".format(aggregation_period_in_seconds)
	export_to_pb(output_directory, output_filename, tm_snapshots_collection)
	print("Completed exporting")

	# Total traffic per timeslot, for the sanity plot below.
	traffic_sum = [0] * len(tm_snapshots_collection)
	for traffic_snapshot, tm_index in zip(tm_snapshots_collection, range(len(tm_snapshots_collection))):
		snapshot_sum = 0
		for (src, dst) in traffic_snapshot:
			snapshot_sum += traffic_snapshot[(src, dst)]
		traffic_sum[tm_index] = snapshot_sum

	import matplotlib.pyplot as plt
	plt.plot(traffic_sum)
	plt.ylabel("Traffic Sum (Kbytes)")
	plt.xlabel("Time Index")
	plt.show()

#pool.map(run_process, other) 
#ah a a captain black cat ah a a 