## contains the various python functions used for parsing the traces of facebook's data centers
import os, sys
import math
from enum import Enum
#import urllib
#import urllib2
#import requests
import wget
import bz2, shutil
import traffic_snapshot_pb2 as ts_pb2
from bz2 import decompress
'''
Note: Cluster-A is for Database, Cluster-B is for Web servers, and Cluster-C is used as Hadoop servers.

Trace file is tsv (tab-separated values) with each line having the following information
	timestamp (unix timestamp in seconds)
	packet length (1)
	anonymized(2) src/dst IP
	anonymized src/dst L4 Port
	IP protocol
	anonymized src/dst hostprefix (3)
	anonymized src/dst Rack
	anonymized src/dst Pod
	intercluster
	interdatacenter
'''

##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################
# 
# Utility Functions (begin)
#
##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################

# The types of a packet can be, whether if it's between datacenters, 
# between pods within the same datacenter, and finally within a pod
class PacketType(Enum):
	"""Locality class of a packet: crossing datacenters, crossing pods
	inside one datacenter, or staying inside a single pod."""
	INTERDCN = 0
	INTERPOD = 1
	INTRAPOD = 2

'''
granularity of the traffic matrix
'''
class PacketGranularity(Enum):
	"""Endpoint granularity used when building a traffic matrix:
	rack-to-rack, IP-to-IP, or pod-to-pod."""
	RACK2RACK = 0
	IP2IP = 1
	POD2POD = 2

'''
Given two lists that are each sorted based on some tuple entry, merges the lists together 
such that the final list is also sorted
'''
def SortTwoSortedLists(list1, list2, sortingKey):
	"""Merge two lists, each already sorted by the element at index
	``sortingKey`` of its tuples, into a single sorted list.

	On ties the element from ``list2`` is emitted first. If either list
	is empty the other list object is returned unchanged.
	"""
	if not list1:
		return list2
	if not list2:
		return list1
	n1 = len(list1)
	n2 = len(list2)
	merged = []
	i = 0
	j = 0
	# Two-pointer merge: always append the smaller head (list2 wins ties).
	while i < n1 and j < n2:
		if list2[j][sortingKey] <= list1[i][sortingKey]:
			merged.append(list2[j])
			j += 1
		else:
			merged.append(list1[i])
			i += 1
	# One of the lists is exhausted; append whatever remains of the other.
	merged.extend(list1[i:])
	merged.extend(list2[j:])
	return merged

def GetBZFilenameFromURL(url):
	"""Extract the .bz2 filename from a CDN download URL.

	The filename normally sits in the sixth '/'-separated component;
	when that component begins with 't' (a type-prefix path segment such
	as 't39...'), the filename is in the following component. Anything
	after a '?' (the query string) is stripped.
	"""
	parts = url.split('/')
	# Pick the component that actually holds the filename.
	index = 6 if parts[5][0] == 't' else 5
	return parts[index].split('?')[0]

##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################
# 
# Utility Functions (end)
#
##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################


def ReadCombinedTxtFile(filename):
	"""Read a combined (space-separated, decimal) trace file.

	Each line is expected to start with eight decimal integers:
	timestamp, packet length, src IP, dst IP, src rack, dst rack,
	src pod, dst pod. Extra trailing fields on a line are ignored.

	Returns:
		(packetsVector, minTS, maxTS) where packetsVector is a list of
		tuples (timestamp, packetLength, srcIP, dstIP, srcRack, dstRack,
		srcPod, dstPod, runningMinTS, runningMaxTS) and minTS/maxTS are
		the smallest/largest timestamps seen across the whole file.
	"""
	# BUGFIX: sys.maxint and the `print f` statement are Python-2-only;
	# sys.maxsize and print(f) behave the same on both 2.6+ and 3.x.
	minTS = sys.maxsize
	maxTS = 0
	packetsVector = []
	with open(filename) as f:
		print(f)
		separator = " "
		for line in f:
			strVect = line.split(separator)
			timestamp = int(strVect[0])
			packetLength = int(strVect[1])
			srcIP = int(strVect[2])
			dstIP = int(strVect[3])
			srcRack = int(strVect[4])
			dstRack = int(strVect[5])
			srcPod = int(strVect[6])
			dstPod = int(strVect[7])
			minTS = min(minTS, timestamp)
			maxTS = max(maxTS, timestamp)
			packetsVector.append((timestamp, packetLength, srcIP, dstIP, srcRack, dstRack, srcPod, dstPod, minTS, maxTS,))
	return packetsVector, minTS, maxTS
'''
Parses a trace file and returns a list of tuples containing, per packet:
(timestamp, packetLength, srcIP, dstIP, srcRack, dstRack, srcPod, dstPod, packetType)
'''
def _ParseTraceLine(line, separator):
	"""Parse one raw trace line into a packet tuple, or None if any of
	the anonymized host/rack/pod fields carries the '\\N' NULL marker.

	Fields (see module header): timestamp and packet length are decimal;
	IPs, ports, host prefixes, racks and pods are hex strings; the last
	two fields are the intercluster / interdatacenter flags.

	Returns:
		(timestamp, packetLength, srcIP, dstIP, srcRack, dstRack,
		srcPod, dstPod, packetType) or None.
	"""
	strVect = line.split(separator)
	timestamp = int(strVect[0])
	packetLength = int(strVect[1])  # in KB
	srcIP = int("0x" + strVect[2], 0)
	dstIP = int("0x" + strVect[3], 0)
	# Ports / protocol are parsed (so malformed lines still raise) but
	# intentionally not kept in the packet tuple.
	srcL4Port = int("0x" + strVect[4], 0)
	dstL4Port = int("0x" + strVect[5], 0)
	ipProtocol = int(strVect[6])
	# Skip packets whose anonymized topology fields are NULL.
	if "\\N" in strVect[7:13]:
		return None
	srcHostPrefix = int("0x" + strVect[7], 0)
	dstHostPrefix = int("0x" + strVect[8], 0)
	srcRack = int("0x" + strVect[9], 0)
	dstRack = int("0x" + strVect[10], 0)
	srcPod = int("0x" + strVect[11], 0)
	dstPod = int("0x" + strVect[12], 0)
	isInterCluster = (int(strVect[13]) > 0)
	isInterDataCenter = (int(strVect[14]) > 0)
	# Classify the packet by how far it travels.
	if isInterDataCenter:
		packetType = PacketType.INTERDCN
	elif isInterCluster:
		packetType = PacketType.INTERPOD
	else:
		packetType = PacketType.INTRAPOD
	return (timestamp, packetLength, srcIP, dstIP, srcRack, dstRack, srcPod, dstPod, packetType)

def ReadTraceFile(filename):
	"""Parse a trace file into a list of packet tuples.

	A '.bz2' file is read as tab-separated compressed text; any other
	file is read as space-separated plain text. Lines with NULL ('\\N')
	topology fields are skipped. See _ParseTraceLine for tuple layout.
	"""
	packetSeries = []
	if filename.endswith(".bz2"):
		# BUGFIX: the BZ2File was never closed; `with` releases it.
		with bz2.BZ2File(filename, "r") as bz2file:
			for line in bz2file:
				# BZ2File yields bytes on Python 3; decode before splitting.
				if isinstance(line, bytes):
					line = line.decode("utf-8")
				packetTuple = _ParseTraceLine(line, "\t")
				if packetTuple is not None:
					packetSeries.append(packetTuple)
	else:
		with open(filename) as f:
			for line in f:
				packetTuple = _ParseTraceLine(line, " ")
				if packetTuple is not None:
					packetSeries.append(packetTuple)
	return packetSeries

'''
snapshotFrequency - the amount of time in seconds between every snapshot
'''
def FormTrafficSnapshotsFromPacketSeries(packetSeries, snapshotFrequency, packetGranularity):
	"""Bin a packet series into per-interval traffic matrices.

	Args:
		packetSeries: list of packet tuples (timestamp, packetLength,
			srcIP, dstIP, srcRack, dstRack, srcPod, dstPod, packetType).
		snapshotFrequency: the amount of time in seconds between every
			snapshot.
		packetGranularity: a PacketGranularity value selecting whether
			the matrices are keyed by IP, rack, or pod.

	Returns:
		(trafficSnapshots, nodes): trafficSnapshots is a list of nested
		dicts, snapshot[src][dst] -> total packet length in that
		interval; nodes is the set of endpoints seen. Inter-datacenter
		packets are excluded.

	Raises:
		Exception: if packetGranularity is not a known granularity.
	"""
	# Guard: no packets means no snapshots.
	if not packetSeries:
		return [], set()
	timestampSortedPacketSeries = sorted(packetSeries, key=lambda tup: tup[0])
	# BUGFIX: this used the undefined name `allTimestamps` and subtracted
	# first-minus-last; the duration is last timestamp minus first.
	totalDuration = timestampSortedPacketSeries[-1][0] - timestampSortedPacketSeries[0][0]
	# BUGFIX: ceil(duration/frequency) under-allocates when the duration
	# is an exact multiple of the frequency (the final packet would index
	# one past the end); floor-division plus one always covers the range.
	nSnapshots = int(totalDuration // snapshotFrequency) + 1
	# Determine the granularity of the traffic matrix.
	if packetGranularity == PacketGranularity.RACK2RACK:
		srcIndexInPacketTuple, dstIndexInPacketTuple = 4, 5
	elif packetGranularity == PacketGranularity.IP2IP:
		srcIndexInPacketTuple, dstIndexInPacketTuple = 2, 3
	elif packetGranularity == PacketGranularity.POD2POD:
		srcIndexInPacketTuple, dstIndexInPacketTuple = 6, 7
	else:
		# BUGFIX: the exception was constructed but never raised.
		raise Exception("Does not recognize this type of packet granularity: {}".format(packetGranularity))
	# Initialize the traffic matrix snapshots.
	trafficSnapshots = [{} for _ in range(nSnapshots)]

	snapshotOffset = 0
	endTimeCurrInterval = timestampSortedPacketSeries[0][0] + snapshotFrequency
	nodes = set()
	for packetTuple in timestampSortedPacketSeries:
		# Ignore packets between datacenters.
		if packetTuple[8] == PacketType.INTERDCN:
			continue
		ts = packetTuple[0]
		# Advance to the interval containing this timestamp.
		while ts >= endTimeCurrInterval:
			endTimeCurrInterval += snapshotFrequency
			snapshotOffset += 1
		src = packetTuple[srcIndexInPacketTuple]
		dst = packetTuple[dstIndexInPacketTuple]
		nodes.add(src)
		nodes.add(dst)
		snapshot = trafficSnapshots[snapshotOffset]
		if src not in snapshot:
			snapshot[src] = {}
		snapshot[src][dst] = snapshot[src].get(dst, 0) + packetTuple[1]
	return trafficSnapshots, nodes

def _ExportFlowTimeseries(aggregatedFlows, totalTimeslots, minTS, perTimeSlotDuration, outputFileName, tag, label):
	"""Serialize one {timeslot: {(src, dst): size}} mapping into a
	TrafficFlowTimeseries protobuf file named
	<outputFileName>_<perTimeSlotDuration>_<tag>.pb."""
	pbts = ts_pb2.TrafficFlowTimeseries()
	for timeslotIndex in range(totalTimeslots):
		snapshot = pbts.snapshots.add()
		snapshot.time_index = timeslotIndex
		snapshot.timestamp = minTS + timeslotIndex * perTimeSlotDuration
		for flow_tuple, size in aggregatedFlows[timeslotIndex].items():
			flow = snapshot.flows.add()
			flow.src = flow_tuple[0]
			flow.dst = flow_tuple[1]
			flow.size = size
	print("Writing to protobuf with name ({}): {}".format(label, outputFileName))
	with open(outputFileName + "_{}_{}".format(perTimeSlotDuration, tag) + ".pb", 'wb') as f:
		f.write(pbts.SerializeToString())

def AggregatePacketsIntoFlows(packetSequence, minTS, maxTS, perTimeSlotDuration, nodeGranularity="IP", outputFileName=None):
	"""Bin packets into per-timeslot flow aggregates at IP, rack, and
	pod granularity simultaneously.

	Args:
		packetSequence: packet tuples (timestamp, packetLength, srcIP,
			dstIP, srcRack, dstRack, srcPod, dstPod, ...).
		minTS / maxTS: inclusive timestamp range of the sequence.
		perTimeSlotDuration: timeslot width in seconds.
		nodeGranularity: kept for interface compatibility; all three
			granularities are always computed.
		outputFileName: when given, the three aggregates are also
			written out as TrafficFlowTimeseries protobuf files.

	Returns:
		(aggregatedFlowP2P, aggregatedFlowR2R, aggregatedFlowI2I), each
		a dict {timeslot: {(src, dst): total length}}. (The original
		returned None; callers that ignore the return are unaffected.)
	"""
	# BUGFIX: the previous `while slot*duration+minTS < maxTS` loop left
	# a packet stamped exactly maxTS without a bin (KeyError); allocate
	# floor((maxTS-minTS)/duration)+1 slots so the full range is covered.
	totalTimeslots = int((maxTS - minTS) // perTimeSlotDuration) + 1
	aggregatedFlowP2P = {}
	aggregatedFlowR2R = {}
	aggregatedFlowI2I = {}
	for timeslot in range(totalTimeslots):
		aggregatedFlowP2P[timeslot] = {}
		aggregatedFlowI2I[timeslot] = {}
		aggregatedFlowR2R[timeslot] = {}
	# Now start binning the packets into flows of each timeslot.
	for packet in packetSequence:
		srcIP, dstIP = packet[2], packet[3]
		srcRack, dstRack = packet[4], packet[5]
		srcPod, dstPod = packet[6], packet[7]
		ts = packet[0]
		# BUGFIX: use floor division; true division yields a float index
		# on Python 3 and would miss the integer dict keys.
		timeslotIndex = (ts - minTS) // perTimeSlotDuration
		for flows, key in (
				(aggregatedFlowI2I, (srcIP, dstIP,)),
				(aggregatedFlowR2R, (srcRack, dstRack,)),
				(aggregatedFlowP2P, (srcPod, dstPod,))):
			flows[timeslotIndex][key] = flows[timeslotIndex].get(key, 0) + packet[1]

	if outputFileName is not None:
		# Export each granularity as its own protobuf timeseries.
		_ExportFlowTimeseries(aggregatedFlowP2P, totalTimeslots, minTS, perTimeSlotDuration, outputFileName, "p2p", "Pod to Pod")
		_ExportFlowTimeseries(aggregatedFlowR2R, totalTimeslots, minTS, perTimeSlotDuration, outputFileName, "r2r", "Rack to Rack")
		_ExportFlowTimeseries(aggregatedFlowI2I, totalTimeslots, minTS, perTimeSlotDuration, outputFileName, "i2i", "IP to IP")
	return aggregatedFlowP2P, aggregatedFlowR2R, aggregatedFlowI2I
'''
Given the subdirectory belonging to a cluster, downloads and appends all traces together
Input parameters:
1) clusterSubDir - a subdirectory with for a given cluster
2) 
'''
def _WriteCombinedPacketFile(filename, packetSeries):
	"""Write packetSeries to `filename`, one space-separated line per
	packet: ts, length, srcIP, dstIP, srcRack, dstRack, srcPod, dstPod,
	packetType."""
	with open(filename, "w+") as f:
		# Join once instead of quadratic `str +=` accumulation.
		lines = []
		for packet in packetSeries:
			lines.append("{} {} {} {} {} {} {} {} {}\n".format(packet[0], packet[1], packet[2], packet[3], packet[4], packet[5], packet[6], packet[7], packet[8]))
		f.write("".join(lines))

def DownloadAndCombineClusterTraces(clusterSubDir, cluster):
	"""Download every trace for a cluster and combine them into
	intermediate text files.

	Args:
		clusterSubDir: subdirectory (under the hard-coded base path)
			holding the cluster's URL list and receiving the output.
		cluster: one of "A", "B", "C".

	Raises:
		Exception: if `cluster` is not A, B, or C.
	"""
	if cluster != "A" and cluster != "B" and cluster != "C":
		# BUGFIX: the exception was constructed but never raised.
		raise Exception("Unrecognized cluster")
	os.chdir("/home/gdp/github/mteh/facebook_dcn_traffic/" + clusterSubDir)
	clusterURLList = []
	# Step 1: read the list of download URLs, one per line.
	with open("fb_altoona_dcn_cluster" + cluster + "_download_url.txt") as f:
		for line in f:
			clusterURLList.append(line)
	# Step 2: download each trace, parse it, and periodically flush the
	# accumulated packets to a numbered combined file.
	packetSeries = []
	writeEvery = 50
	writeID = 1
	written = 0
	for url in clusterURLList:
		if written == writeEvery:
			written = 0
			# NOTE(review): packetSeries is never cleared after an
			# intermediate write, so each write file repeats all packets
			# from earlier ones -- confirm whether that is intended.
			_WriteCombinedPacketFile("cluster{}_combined_write{}.txt".format(cluster, writeID), packetSeries)
			writeID += 1
		try:
			wget.download(url)
			bz2fn = GetBZFilenameFromURL(url)
			print("printing : {}".format(bz2fn))
			packetSeriesTmp = ReadTraceFile(bz2fn)
			orig_len = len(packetSeries)
			packetSeries += packetSeriesTmp
			written += 1
			assert len(packetSeries) == orig_len + len(packetSeriesTmp)
			os.remove(bz2fn)
			print(len(packetSeries))
		except Exception:
			# BUGFIX: was a bare `except:`, which also swallowed
			# KeyboardInterrupt/SystemExit; best-effort skip is kept.
			print("Could not read")
	# Flush whatever remains after the last download.
	_WriteCombinedPacketFile("cluster{}_combined_write{}.txt".format(cluster, writeID), packetSeries)
	return


def main1():
	"""Download and combine all traces for one cluster, then report the
	timestamp range of each combined file produced."""
	cluster = "C"
	# BUGFIX: the message previously ended at ": " without the cluster.
	print("Reading all files from cluster: {}".format(cluster))
	clusterDir = "./traces/cluster{}/".format(cluster)
	DownloadAndCombineClusterTraces("traces/cluster{}".format(cluster), cluster)
	clusterTraceFiles = os.listdir(clusterDir)
	for tracefile in clusterTraceFiles:
		if "cluster{}_combined".format(cluster) in tracefile:
			print(tracefile)
			# NOTE(review): combined files are written in decimal with 9
			# fields, but ReadTraceFile parses hex and expects 15 -- the
			# intended reader here may be ReadCombinedTxtFile; confirm.
			packetSeries = ReadTraceFile(clusterDir + tracefile)
			packetSeries = sorted(packetSeries, key=lambda tup: tup[0])
			print("min timestamp: {}".format(packetSeries[0][0]))
			print("max timestamp: {}".format(packetSeries[-1][0]))

## Reads the combined file
## Reads the combined file
def main2():
	"""Load the previously-combined trace files for one cluster and
	aggregate the packets into 5-minute flow timeseries (written out as
	protobuf files named fb_dcn_*)."""
	cluster = "C"
	clusterDir = "./traces/cluster" + cluster + "/"
	combinedClusterFileName = "cluster" + cluster + "_combined.txt"
	packetsVector = None
	# BUGFIX: sys.maxint is Python-2-only; sys.maxsize exists on 2.6+ and 3.x.
	minTS = sys.maxsize
	maxTS = -1
	index = 1
	if os.path.isfile(clusterDir + combinedClusterFileName):
		# Single pre-combined file.
		packetsVector, minTS, maxTS = ReadCombinedTxtFile(clusterDir + combinedClusterFileName)
	else:
		# Otherwise concatenate the numbered intermediate write files.
		packetsVector = []
		combinedClusterFileName = "cluster" + cluster + "_combined_write{}.txt".format(index)
		while os.path.isfile(clusterDir + combinedClusterFileName):
			packets, currminTS, currmaxTS = ReadCombinedTxtFile(clusterDir + combinedClusterFileName)
			packetsVector += packets
			minTS = min(currminTS, minTS)
			maxTS = max(currmaxTS, maxTS)
			index += 1
			combinedClusterFileName = "cluster" + cluster + "_combined_write{}.txt".format(index)
	print("Finished reading packets")
	print("min timestamp: {}, max timestamp: {}".format(minTS, maxTS))
	OneMin = 60 # in seconds
	AggregatePacketsIntoFlows(packetsVector, minTS, maxTS, 5 * OneMin, nodeGranularity="POD", outputFileName="fb_dcn")
	return

if __name__ == "__main__":
	# Entry point: main2 reads already-combined trace files and
	# aggregates them; switch to main1() to download and combine
	# the raw cluster traces first.
	main2()