#!/usr/bin/python

# visualize shuffled bytes spatial distribution 
# input: task tracker log file

import sys
import re


def getjobid(a):
	"""Return the numeric job id from an attempt id such as
	'attempt_201004131448_3797_m_000029_0' (third '_'-separated field)."""
	return int(a.split('_')[2])

def getmachineid(a):
	"""Map an 'ip:port' log field like '10.0.0.58:42262,' to a zero-based
	machine id: the last octet of the IP address minus one."""
	octet = a.split('.')[3]
	colon = octet.find(":")
	return int(octet[:colon]) - 1


# both the job id and the number of log files must be supplied
if len(sys.argv) < 3:
	print("./COMMAND [job id] [# of log files]?\n")
	sys.exit(-1)


# clienttrace entry format, divided by space
#2010-05-02
#15:28:22,738
#INFO
#org.apache.hadoop.mapred.TaskTracker.clienttrace:
#src:
#10.0.0.2:40060,
#dest:
#10.0.0.58:42262,
#bytes:
#7006193,
#op:
#MAPRED_SHUFFLE,
#cliID:
#attempt_201004131448_3797_m_000029_0


# Tally per-machine shuffle traffic (in MB) for the requested job across
# all task tracker log files.  hosts maps machine id -> {"send", "receive"}.
hosts = {}
jobid = int(sys.argv[1])
for i in range(1, int(sys.argv[2]) + 1):
	filename = "hadoop-global-tasktracker-cloud" + str(i) + ".log"
	print(filename)
	logfile = open(filename)
	try:
		for line in logfile:
			fields = line.split()

			# Only clienttrace entries carry MAPRED_SHUFFLE byte counts;
			# a full entry has 14 space-separated fields (format above).
			# Checking for all 14 also guards the fields[13] access below
			# against truncated lines (the original only checked for 4).
			if len(fields) < 14:
				continue
			if not fields[3].endswith("clienttrace:"):
				continue

			# skip entries belonging to other jobs
			if getjobid(fields[13]) != jobid:
				continue

			src = getmachineid(fields[5])   # sending machine
			dst = getmachineid(fields[7])   # receiving machine

			# shuffled bytes, converted to MB
			mb = float(fields[9].strip(",")) / (1024 * 1024)

			# create counters on first sight of a machine
			# (dict.has_key is deprecated; 'in' works on Python 2 and 3)
			for host in (src, dst):
				if host not in hosts:
					hosts[host] = {"send": 0, "receive": 0}

			# update byte counts
			hosts[src]["send"] += mb
			hosts[dst]["receive"] += mb
	finally:
		# close the handle explicitly; the original leaked it
		logfile.close()


# save bytes count 
# write one "machine send receive" line per host to shuffle.dat
with open('shuffle.dat', 'w') as out:
	for host, counts in hosts.items():
		out.write("%s %s %s\n" % (host, counts["send"], counts["receive"]))


