#!/bin/bash

# Usage (run locally): sh hadoop.sh host1 host2 host3 arg1 arg2 arg3 [--hadoop|--jobs]
# Purpose: run locally to copy the hadoop deployment script "install_hadoop.sh"
# onto the three EC2 instances, then install mrtoolkit, start hadoop and launch jobs.
#   $1..$3 : the three EC2 hosts ($2 appears to be the master -- TODO confirm)
#   $4..$6 : arguments forwarded verbatim to the install/start scripts
#   $7     : optional mode flag (--hadoop or --jobs, see parsing below)

# 1. hadoop on every instance
# 2. mrtoolkit

# SSH private key used by every scp/ssh call below.
readonly KEY_FILE=id_rsa-pstam-keypair

# Parse the optional flags. Only attempted when at least 7 arguments are
# present, because the mode flag lives in $7.
if [ "$#" -gt 6 ] ; then
    if [ "$1" = "-c" ] ; then
        # config mode: nothing implemented yet, bail out
        exit
    fi

    if [ "$7" = "--hadoop" ] ; then
        # install/start hadoop only; skip the final job-launching step
        hadoop_only="1"
        echo "hadoop only"
    fi

    if [ "$7" = "--jobs" ] ; then
        # skip installation and startup; only launch the jobs
        jobs_only="1"
        echo "jobs only"
    fi

    if [ "$1" = "-h" ] ; then
        # BUG FIX: no 'help' function exists anywhere in this file, so the old
        # branch died with "help: command not found"; print a usage line instead.
        echo "usage: sh hadoop.sh host1 host2 host3 arg1 arg2 arg3 [--hadoop|--jobs]"
        exit
    fi
fi
# 1+2. Copy and run the install scripts, unless --jobs was requested.
# BUG FIX: the old guard `[ ! $jobs_only = "1" ]` expanded, with jobs_only
# unset, to the 3-argument test `[ ! = "1" ]`, which POSIX evaluates as the
# string comparison "!" = "1" -> false, so this whole install section was
# silently skipped on a normal run. (The stray `false="1"` variable was unused
# and has been removed.)
if [ "$jobs_only" != "1" ] ; then

    # Copy the key archive and both install scripts to instance 1.
    # (echo fixed to describe the file actually copied: the .tar archive)
    echo "scp -i $KEY_FILE id_rsa-pstam-keypair.tar ubuntu@$1:."
    scp -i "$KEY_FILE" id_rsa-pstam-keypair.tar "ubuntu@$1":.
    echo "cp mrtoolkit $1"
    scp -i "$KEY_FILE" install_mrtoolkit.sh "ubuntu@$1":.
    echo "scp -i $KEY_FILE install_hadoop.sh ubuntu@$1:."
    scp -i "$KEY_FILE" install_hadoop.sh "ubuntu@$1":.

    # Same scripts to instance 2 (the master) and instance 3.
    echo "scp -i $KEY_FILE install_hadoop.sh ubuntu@$2:."
    scp -i "$KEY_FILE" install_hadoop.sh "ubuntu@$2":.
    scp -i "$KEY_FILE" install_mrtoolkit.sh "ubuntu@$2":.

    echo "scp -i $KEY_FILE install_hadoop.sh ubuntu@$3:."
    scp -i "$KEY_FILE" install_hadoop.sh "ubuntu@$3":.
    scp -i "$KEY_FILE" install_mrtoolkit.sh "ubuntu@$3":.

    # Pre-accept the host keys so the later ssh calls do not prompt.
    ssh -o "StrictHostKeyChecking no" -i "$KEY_FILE" "ubuntu@$2" 'echo ok'
    ssh -o "StrictHostKeyChecking no" -i "$KEY_FILE" "ubuntu@$3" 'echo ok'
    echo "ssh -i $KEY_FILE ubuntu@$1 'sudo sh install_hadoop.sh $4 $5 $6'"

    # 1. hadoop
    # parallel deployment: use & (except on the last of the three) and send:
    # ssh -i $KEY_FILE ubuntu@$2 "sudo sh install_hadoop.sh $4 $5 $6 master"
    # ssh -i $KEY_FILE ubuntu@$3 "sudo sh install_hadoop.sh $4 $5 $6 slave"
    # ssh -i $KEY_FILE ubuntu@$1 "sudo sh install_hadoop.sh $4 $5 $6 slave"

    echo "==========================="
    echo "=== install_mrtoolkit.sh"
    echo "==========================="
    # 2. mrtoolkit
    # order matters: the master ($2) goes first
    ssh -i "$KEY_FILE" "ubuntu@$2" "sudo sh install_mrtoolkit.sh $4 $5 $6 master"
    ssh -i "$KEY_FILE" "ubuntu@$3" "sudo sh install_mrtoolkit.sh $4 $5 $6 slave"
    ssh -i "$KEY_FILE" "ubuntu@$1" "sudo sh install_mrtoolkit.sh $4 $5 $6 slave"
fi # end of install section (skipped by --jobs)

# 3. start hadoop (master first, then the slaves), unless --jobs was requested.
# BUG FIX: the old guard `[ ! $jobs_only -eq "1" ]` applied the numeric -eq
# operator to a possibly-empty string, which fails with "integer expression
# expected" whenever jobs_only is unset; use a string comparison instead.
if [ "$jobs_only" != "1" ] ; then

    echo "Copie du script startHadoop sur les instances..."
    START_HADOOP=start_hadoop.sh
    scp -i "$KEY_FILE" "$START_HADOOP" "ubuntu@$1":.
    scp -i "$KEY_FILE" "$START_HADOOP" "ubuntu@$2":.
    scp -i "$KEY_FILE" "$START_HADOOP" "ubuntu@$3":.

    echo "Exec sur master..."
    ssh -i "$KEY_FILE" "ubuntu@$2" "sudo sh $START_HADOOP $4 $5 $6 master"

    echo "==============="
    echo "=== Exec sur slave 2 ..."
    echo "==============="
    ssh -i "$KEY_FILE" "ubuntu@$3" "sudo sh $START_HADOOP $4 $5 $6 slave"
    ssh -i "$KEY_FILE" "ubuntu@$1" "sudo sh $START_HADOOP $4 $5 $6 slave"

fi

# 4. start-mapred.sh and run the rake jobs {sections, ips, top-files,
# my_top-ips, hours, etc.} on the master only.
# BUG FIX: hadoop_only was set by the --hadoop flag but never consulted, so
# the flag had no effect; honour it here so --hadoop really skips the jobs.
if [ "$hadoop_only" != "1" ] ; then
    JOBS=start_jobs.sh

    # NOTE(review): the script is copied to $1 but executed on $2 (the master)
    # below -- looks like this scp should target $2; confirm before changing.
    scp -i "$KEY_FILE" "$JOBS" "ubuntu@$1":.

    echo "rake on master ..."
    ssh -i "$KEY_FILE" "ubuntu@$2" "sudo sh start_jobs.sh $4 $5 $6 master"
    # ssh -i $KEY_FILE ubuntu@$3 "sudo sh start_jobs.sh $4 $5 $6 slave"
    # ssh -i $KEY_FILE ubuntu@$1 "sudo sh start_jobs.sh $4 $5 $6 slave"
fi

# for ip in `cat file`; do
#     $ip
# done