#!/bin/bash
#
# setup hadoop cluster with nodes from the command line
#
# Instructions:
#                first parameter is the hostname allocated as primary name node
#                second parameter is the hostname allocated as job tracker and secondary name node
#                third parameter is the number of data nodes in the cluster (task tracker process will be started on these nodes)
#
# 07/24/2012 - created by Trifon Anguelov
# 08/23/2012 - added better handling of data nodes input. enter number of data nodes instead of hostnames for each data node


# Cluster layout and install paths.
HADOOP_DIR="/usr/local/hadoop"   # hadoop install root on every node
TMP_DIR="/tmp"                   # staging area for pushed config/scripts
NAME_NODE=$1                     # primary name node hostname
JOB_TRACKER=$2                   # job tracker / secondary name node hostname
DATA_NODES_NUM=$3                # number of data nodes (dn1..dnN) to create
DATA_NODE_HOSTNAME="dn"          # hostname prefix for generated data nodes
BLANK=" "                        # separator used when building HOST_STRING


#######################################
# Draw an in-place text progress bar, e.g. [=====>.....]: 42%
# Arguments: $1 - steps completed, $2 - total steps (must be > 0)
# Outputs:   bar + percentage to stdout, terminated with \r (no newline)
#######################################
function showBar {
 # Integer percentage via shell arithmetic. The old bc pipeline
 # ('scale=2; a/b*100') truncated a/b to two decimals first, so small
 # fractions produced ".00"; stripping the '.00' suffix then left barLen
 # EMPTY (an arithmetic syntax error in the loops below), and any result
 # with a non-zero fraction (e.g. "1.04") broke the loops outright.
 # Pure shell arithmetic fixes both and avoids a bc fork per call.
 local barLen=$(( $1 * 100 / $2 ))
 local bar=''
 local fills=''
 local b
 for (( b=0; b<barLen; b++ ))
 do
  bar+="="
 done
 # Pad the remainder with dots so the bar is always 100 chars wide.
 for (( b=barLen; b<100; b++ ))
 do
  fills+="."
 done
 echo -ne '['$bar'>'$fills']: '$barLen'%\r'
}



# Assemble the space-separated cluster host list: name node first,
# then job tracker, then the generated data-node hostnames dn1..dnN.
HOST_STRING="${NAME_NODE}${BLANK}${JOB_TRACKER}"

for (( node=1; node <= ${DATA_NODES_NUM}; node++ ))
do
    HOST_STRING+="${BLANK}${DATA_NODE_HOSTNAME}${node}"
done


# create VMs to host the new hadoop cluster
echo
echo "Creating new VM instances on GCE....."
echo
# One gcutil call creates every node; HOST_STRING is intentionally
# unquoted so each hostname expands to a separate argument.
gcutil addinstance --image=projects/google/images/centos-6-2-v20120621 --machine_type=n1-standard-1 --zone=us-central1-a ${HOST_STRING}
echo
echo "Waiting for new VM instances creation to complete...."
echo

# allow the new VMs creation to complete
# NOTE(review): this is a busy-wait -- the delay comes from the fork cost
# of 9601 showBar/bc invocations, not from any sleep, so the actual wait
# time varies with host speed. Confirm the intended duration.
for (( i=0; i<=9600; i++ ))
do
 showBar $i 9600
done
echo


# edit Hadoop configuration files with name node, job tracker and data node information
# Start from a clean staging copy: with a stale copy left over from a
# previous run, 'cp -R conf /tmp/' would nest the new files at
# /tmp/conf/conf and the sed substitutions below would miss them.
rm -rf ${TMP_DIR}/conf
cp -R conf ${TMP_DIR}/

# Replace the placeholder hostnames in the config templates in place.
find ${TMP_DIR}/conf -name mapred-site.xml -type f -exec sed -i -e "s/job_tracker/${JOB_TRACKER}/g" {} \;
find ${TMP_DIR}/conf -name masters -type f -exec sed -i -e "s/job_tracker/${JOB_TRACKER}/g" {} \;
find ${TMP_DIR}/conf -name core-site.xml -type f -exec sed -i -e "s/name_node/${NAME_NODE}/g" {} \;
# slaves file = every cluster host except the two masters, one per line.
echo ${HOST_STRING} | sed "s/${NAME_NODE}//g" | sed "s/${JOB_TRACKER}//g" | tr -s " " "\012" > ${TMP_DIR}/conf/slaves


echo
echo "Creating new configuration archive....."
tar cf hadoop.tar .bashrc ${TMP_DIR}/conf examples


# Install hadoop on every cluster node: copy the config archive and the
# bootstrap script over, then run the bootstrap on the remote host.
for node in ${HOST_STRING}
do
     gcutil push ${node} hadoop.tar gethadoop.sh ${TMP_DIR}
     gcutil ssh ${node} ${TMP_DIR}/gethadoop.sh
done


# format primary name node
echo
echo "Formatting name node...."
echo
# One-time HDFS format; must happen before the daemons start.
gcutil ssh ${NAME_NODE} "${HADOOP_DIR}/bin/hadoop namenode -format"
echo


# start HDFS (name node daemon plus the data nodes listed in conf/slaves)
gcutil ssh ${NAME_NODE} "${HADOOP_DIR}/bin/start-dfs.sh"
echo

# wait until name node has started
# NOTE(review): busy-wait -- delay depends on the fork cost of 3601
# showBar invocations, not on wall-clock time; confirm intended duration.
for (( i=0; i<=3600; i++ ))
do
 showBar $i 3600
done
echo


# get name node status
# Dump the tail of the name node log so the operator can spot startup errors.
gcutil ssh ${NAME_NODE} "tail -2000 ${HADOOP_DIR}/logs/*-namenode-${NAME_NODE}.log"
echo


# start M/R cluster on job tracker node
gcutil ssh ${JOB_TRACKER} "${HADOOP_DIR}/bin/start-mapred.sh"
echo
# Show the job tracker log tail so startup errors are visible.
# Fix: the log filename was hard-coded as '*-jobtracker-hadoop2.log',
# which only matched when the job tracker host happened to be 'hadoop2';
# derive it from ${JOB_TRACKER}, mirroring the name-node tail above.
gcutil ssh ${JOB_TRACKER} "tail -2000 ${HADOOP_DIR}/logs/*-jobtracker-${JOB_TRACKER}.log"


# wait until job tracker is started
# NOTE(review): busy-wait; the delay comes from the showBar fork cost.
for (( i=0; i<=3600; i++ ))
do
 showBar $i 3600
done
echo


# report the cluster status
gcutil ssh ${NAME_NODE} ${HADOOP_DIR}/bin/hadoop dfsadmin -report
echo
echo "hadoop cluster setup is now complete !"
echo

# Pull field 7 of gcutil's table output (the externally reachable address)
# for the two master nodes so their web UIs can be printed below.
NAME_NODE_URL=$(gcutil listinstances | grep ${NAME_NODE} | cut -d"|" -f7 | tr -d ' ')
JOB_TRACKER_URL=$(gcutil listinstances | grep ${JOB_TRACKER} | cut -d"|" -f7 | tr -d ' ')


echo "name node url: http://${NAME_NODE_URL}:50070/"
echo "job tracker url: http://${JOB_TRACKER_URL}:50030/"
echo
# List the data nodes: everything in HOST_STRING minus the two masters.
echo "data nodes: " $(echo ${HOST_STRING} | sed "s/${NAME_NODE}//g" | sed "s/${JOB_TRACKER}//g" | tr -s " " "\012")


# clean up
rm -rf /tmp/conf

exit
