#!/usr/bin/python

# visualize shuffled bytes spatial distribution
# input: Hadoop tasktracker and datanode log files (one per machine, one day)

import sys
import re


def getjobid(a):
	"""Return the numeric job id from a Hadoop attempt id.

	e.g. "attempt_201004131448_3797_m_000029_0" -> 3797
	"""
	return int(a.split('_')[2])


def gettaskid(a):
	"""Return the numeric task id from a Hadoop attempt id.

	e.g. "attempt_201004131448_3797_m_000029_0" -> 29
	"""
	fields = a.split('_')
	return int(fields[4])

def gettasktype(a):
	"""Return the task type field ('m' or 'r') from a Hadoop attempt id."""
	return a.split('_')[3]

def getmachineid(a):
	"""Map an "a.b.c.N:port" address string to a zero-based machine id (N-1).

	e.g. "10.0.0.58:42262," -> 57
	"""
	host_port = a.split('.')[3]
	colon = host_port.find(":")
	return int(host_port[:colon]) - 1


if len(sys.argv) <= 4:
	print "./COMMAND [job id] [task type] [task id] [# of log files]?\n"
	sys.exit(-1)


# tasktracker log clienttrace entry format, divided by space
#2010-05-02
#15:28:22,738
#INFO
#org.apache.hadoop.mapred.TaskTracker.clienttrace:
#src:
#10.0.0.2:40060,
#dest:
#10.0.0.58:42262,
#bytes:
#7006193,
#op:
#MAPRED_SHUFFLE,
#cliID:
#attempt_201004131448_3797_m_000029_0


shuffle = {}
mapin = {}
home = 0
jobid = int(sys.argv[1])
tasktype = sys.argv[2]
taskid = int(sys.argv[3])
for i in range(1, (int(sys.argv[4]))+1):
	filename = "hadoop-global-tasktracker-cloud"+str(i)+".log.2010-05-01"
	print filename
	for l in open(filename):
		A = l.split()
		src = 0
		dst = 0
		type = {}
		bytes = 0.0
		id = 0		

		# in task tracker file 
		# clienttrace entry has MAPRED_SHUFFLE bytes info
		if len(A) < 4:
			continue

		if not (A[3]).endswith("clienttrace:"):
			continue

		# get job id
		id = getjobid(A[13])
		# skip lines of other job 
		if id != jobid:
			continue

		# get task id
		id = gettaskid(A[13])
		if id != taskid:
			continue

		# get task type
		type = gettasktype(A[13])
		if type != tasktype:
			continue

		# get src machine id
		src = getmachineid(A[5])

		# get dst machine id
		dst = getmachineid(A[7])

		# get shuffle bytes
		bytes = float((A[9]).strip(","))
		bytes /= (1024*1024)

		# record shuffle bytes
		if not shuffle.has_key(dst):
			shuffle[dst]={}
			shuffle[dst]["bytes"] = 0

		shuffle[dst]["bytes"] += bytes
		home = i

print home

# data node log
#2010-05-01
#16:08:37,640
#INFO
#org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace:
#src:
#/10.0.0.2:40010,
#dest:
#/10.0.0.2:55427,
#bytes:
#264192,
#op:
#HDFS_READ,
#cliID:
#DFSClient_attempt_201004131448_3748_m_000035_0,
#srvID:
#DS-421214565-10.0.0.2-40010-1267431068669,
#blockid:
#blk_6763996568684696453_4035332


for i in range(1, (int(sys.argv[4]))+1):
	filename = "hadoop-global-datanode-cloud"+str(i)+".log.2010-05-01"
	print filename
	for l in open(filename):
		A = l.split()
		src = 0
		dst = 0
		type = {}
		bytes = 0.0
		id = 0		

		# in task tracker file 
		# clienttrace entry has MAPRED_SHUFFLE bytes info
		if len(A) < 4:
			continue

		if not (A[3]).endswith("clienttrace:"):
			continue

		# check this op is HDFS_READ
		op = (A[11]).strip(",")
		if op != "HDFS_READ":
			continue

		# check if cliID contain map task id
		B = (A[13]).split("_")
		if len(B) < 3:
			continue

		# get job id
		id = int(B[3])
		# skip lines of other job 
		if id != jobid:
			continue

		# get task id
		id = int(B[5])
		if id != taskid:
			continue

		# get task type
		type = B[4]
		if type != tasktype:
			continue


		# get src machine id
		src = getmachineid(A[5])

		# get dst machine id
		dst = getmachineid(A[7])

		# get shuffle bytes
		bytes = float((A[9]).strip(","))
		bytes /= (1024*1024)

		# record shuffle bytes
		if not mapin.has_key(src):
			mapin[src]={}
			mapin[src]["bytes"] = 0

		mapin[src]["bytes"] += bytes



# save bytes count 
# Save per-machine MiB counts formatted to 2 decimal places.
# "%.2f" replaces the old string-slice truncation, which produced garbage
# for values str() renders in scientific notation (e.g. 1e-05 -> "1e").

# mapin.dat rows: <src machine> <home machine> <MiB read>
mf = open('mapin.dat', 'w')
for k, v in mapin.items():
	mf.write(str(k)+' '+str(home)+' '+("%.2f" % v["bytes"])+'\n')
mf.close()

# shuffle.dat rows: <home machine> <dst machine> <MiB shuffled>
mf = open('shuffle.dat', 'w')
for k, v in shuffle.items():
	mf.write(str(home)+' '+str(k)+' '+("%.2f" % v["bytes"])+'\n')
mf.close()


