#!/bin/bash
#############################################################################
#  Copyright (C) 2013-2015 Lawrence Livermore National Security, LLC.
#  Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
#  Written by Albert Chu <chu11@llnl.gov>
#  LLNL-CODE-644248
#
#  This file is part of Magpie, scripts for running Hadoop on
#  traditional HPC systems.  For details, see https://github.com/llnl/magpie.
#
#  Magpie is free software; you can redistribute it and/or modify it
#  under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  (at your option) any later version.
#
#  Magpie is distributed in the hope that it will be useful, but
#  WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#  General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with Magpie.  If not, see <http://www.gnu.org/licenses/>.
#############################################################################

# These are functions to be called by magpie-run

source ${MAGPIE_SCRIPTS_HOME}/magpie/exports/magpie-exports-submission-type
source ${MAGPIE_SCRIPTS_HOME}/magpie/exports/magpie-exports-dirs
source ${MAGPIE_SCRIPTS_HOME}/magpie/exports/magpie-exports-user
source ${MAGPIE_SCRIPTS_HOME}/magpie/lib/magpie-lib-defaults
source ${MAGPIE_SCRIPTS_HOME}/magpie/lib/magpie-lib-node-identification
source ${MAGPIE_SCRIPTS_HOME}/magpie/lib/magpie-lib-hadoop-helper
source ${MAGPIE_SCRIPTS_HOME}/magpie/lib/magpie-lib-run
source ${MAGPIE_SCRIPTS_HOME}/magpie/lib/magpie-lib-paths
source ${MAGPIE_SCRIPTS_HOME}/magpie/lib/magpie-lib-helper

# Return 0 if service up, 1 if not
__Magpie_run_check_hadoop_up ()
{
    cd ${HADOOP_HOME}

    # Ensure namenode isn't in safe mode.
    #
    # We do not use "-safemode wait", because we want to inform the
    # user while we wait.
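    #
    # Typical "safemode get" output looks like "Safe mode is OFF"
    # (exact wording may vary across Hadoop versions, hence the
    # case-insensitive grep).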
    ${hadoopcmdprefix}/${dfsadminscript} dfsadmin -safemode get 2>&1 | grep -q -i "off"
    if [ $? -eq 0 ]
    then
        return 0
    fi

    return 1
}

Magpie_run_start_hadoop () {
    if [ "${HADOOP_SETUP}" == "yes" ] && [ "${magpie_run_prior_startup_successful}" == "true" ]
    then
        local hdfs_was_setup=0

        cd ${HADOOP_HOME}

        if [ "${MAGPIE_JOB_TYPE}" != "setuponly" ]
        then
            echo "Starting hadoop"
            local hadoop_should_be_setup=1

            if Magpie_hadoop_filesystem_mode_is_hdfs_type
            then
                if Magpie_hadoop_filesystem_mode_is_hdfs_on_network_type
                then
                    # Sets magpie_networkedhdfspath & magpie_hdfs_stored_version_path
                    Magpie_get_hdfs_stored_version_path
                    # Sets magpie_networkedhdfspath & magpie_hdfs_stored_nodecount_path
                    Magpie_get_hdfs_stored_nodecount_path
                    # Sets magpie_networkedhdfspath & magpie_hdfs_in_use_path
                    Magpie_get_hdfs_in_use_path

                    # achu: Note that all the file checks on
                    # magpie_hdfs_in_use_path,
                    # magpie_hdfs_stored_version_path, and
                    # magpie_hdfs_stored_nodecount_path should
                    # technically be locked checks.  But as flock is
                    # network file system dependent, we choose to do
                    # this unsafely and hope it's good enough.
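                    #
                    # For reference, a locked check would look roughly like
                    # this sketch (hypothetical; only safe if the underlying
                    # file system honors flock, and the lock file name is
                    # illustrative):
                    #
                    #   (
                    #       flock -n 9 || exit 1
                    #       # ... perform the file checks under the lock ...
                    #   ) 9>"${magpie_networkedhdfspath}/magpie.lockfile"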

                    # Older versions of Magpie didn't create this lock file
                    if [ -f ${magpie_hdfs_in_use_path} ]
                    then
                        echo "**** HDFS Issue ****"
                        echo "Magpie HDFS lock file ${magpie_hdfs_in_use_path} found, indicating another"
                        echo "set of Hadoop daemons is running HDFS out of this path."
                        echo "If you wish to use HDFS in multiple Magpie instances in parallel, you can setup HDFS"
                        echo "on another path."
                        echo "If you are sure this is in error, remove this file and re-submit your job."
                        echo "**** HDFS Issue ****"
                        hadoop_should_be_setup=0
                    fi

                    # Older versions of Magpie didn't store the HDFS version
                    if [ -f ${magpie_hdfs_stored_version_path} ]
                    then
                        local hdfsstoredversion=`cat ${magpie_hdfs_stored_version_path}`

                        # 0 is =, 1 is >, 2 is <
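                        # e.g. with HADOOP_VERSION=2.7.1 and a stored version
                        # of 2.6.0 (illustrative values), Magpie_vercomp
                        # returns 1, i.e. HADOOP_VERSION is newer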
                        Magpie_vercomp ${HADOOP_VERSION} ${hdfsstoredversion}
                        local vercomp_result=$?
                        if [ "${vercomp_result}" != "0" ]
                        then
                            if [ "${vercomp_result}" == "1" ]
                            then
                                echo "**** HDFS Issue ****"
                                echo "HDFS version at mount ${magpie_networkedhdfspath} is older than ${HADOOP_VERSION}."
                                if [ "${HADOOP_JOB}" != "upgradehdfs" ]
                                then
                                    echo "With newer Hadoop versions, you can upgrade HDFS via HADOOP_JOB=upgradehdfs"
                                    echo "Or if you wish to use a newer Hadoop version without upgrading HDFS, you can setup HDFS on another path"
                                fi
                                echo "**** HDFS Issue ****"
                            else
                                echo "**** HDFS Issue ****"
                                echo "HDFS version at mount ${magpie_networkedhdfspath} is newer than ${HADOOP_VERSION}."
                                echo "Please use a newer Hadoop version."
                                echo "Or if you wish to use an older Hadoop version, you can setup HDFS on another path"
                                echo "**** HDFS Issue ****"
                            fi

                            # i.e. proceed only when vercomp_result == 1 && HADOOP_JOB == upgradehdfs
                            if [ "${vercomp_result}" != "1" ] || [ "${HADOOP_JOB}" != "upgradehdfs" ]
                            then
                                hadoop_should_be_setup=0
                            fi
                        fi
                    fi

                    # Older versions of Magpie didn't store the node count
                    if [ -f ${magpie_hdfs_stored_nodecount_path} ]
                    then
                        local hdfsstorednodecount=`cat ${magpie_hdfs_stored_nodecount_path}`

                        if [ "${HADOOP_WORKER_COUNT}" -lt "${hdfsstorednodecount}" ]
                        then
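                            # Compute 90% of the stored node count, rounded to
                            # the nearest integer (e.g. 12 * .9 = 10.8, which
                            # printf rounds to 11)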
                            local ninetypercentnodes=`echo "${hdfsstorednodecount} * .9" | bc -l | xargs printf "%1.0f"`
                            echo "**** HDFS Issue ****"
                            echo "HDFS was last built using a larger worker node count of ${hdfsstorednodecount}, compared to this job's ${HADOOP_WORKER_COUNT}"
                            echo "Because of this, it is very likely the HDFS Namenode will not be able to find all HDFS blocks."

                            if [ "${HADOOP_WORKER_COUNT}" -gt "${ninteypercentnodes}" ]
                            then
                                echo "The current worker count of ${HADOOP_WORKER_COUNT} is atleast 90% of ${hdfsstorednodecount}, so HDFS will attempt to be"
                                echo "started.  However, it is not recommended for future runs and there is still a chance HDFS will not start."
                            else
                                echo "If you truly desire to use fewer nodes, setup HDFS on another path or consider going through a decommissioning process to"
                                echo "reduce the number of nodes your HDFS is built on via HADOOP_JOB=decommissionhdfsnodes."
                                hadoop_should_be_setup=0
                            fi
                            echo "**** HDFS Issue ****"
                        fi
                    fi
                fi

                if [ "${MAGPIE_JOB_TYPE}" == "hadoop" ] && [ "${HADOOP_JOB}" == "upgradehdfs" ]
                then
                    local startdfsoptions="-upgrade"
                fi

                if [ "${hadoop_should_be_setup}" == "1" ]
                then
                    # Make variables unspecified for launching
                    Magpie_make_all_local_dirs_unspecified

                    ${hadoopsetupscriptprefix}/start-dfs.sh ${startdfsoptions}
                    hdfs_was_setup=1

                    if Magpie_hadoop_filesystem_mode_is_hdfs_on_network_type
                    then
                        # Sets magpie_networkedhdfspath & magpie_hdfs_in_use_path
                        Magpie_get_hdfs_in_use_path

                        # Because we aren't using flock, this is potentially
                        # racy.  Do one more check to see if the file exists.

                        if [ ! -f ${magpie_hdfs_in_use_path} ]
                        then
                            echo "${MAGPIE_JOB_ID}" > ${magpie_hdfs_in_use_path}
                        fi
                    fi

                    # Make variables specific now within Magpie
                    Magpie_make_all_local_dirs_node_specific
                fi

                if [ "${hdfs_was_setup}" == "1" ] \
                    && Magpie_hadoop_filesystem_mode_is_hdfs_on_network_type
                then
                    # achu: Note that we should really be doing locked
                    # checks/creates here.  But as flock is network
                    # file system dependent, we choose to do this
                    # unsafely and hope it's good enough.

                    if [ "${HADOOP_JOB}" != "upgradehdfs" ]
                    then
                        rm -f ${magpie_hdfs_stored_version_path}
                        echo "${HADOOP_VERSION}" > ${magpie_hdfs_stored_version_path}
                    fi

                    if [ -f ${magpie_hdfs_stored_nodecount_path} ]
                    then
                        local nodecount=`cat ${magpie_hdfs_stored_nodecount_path}`
                        if [ "${nodecount}" -lt "${HADOOP_WORKER_COUNT}" ]
                        then
                            nodecount=${HADOOP_WORKER_COUNT}
                        fi
                    else
                        local nodecount=${HADOOP_WORKER_COUNT}
                    fi
                    rm -f ${magpie_hdfs_stored_nodecount_path}
                    echo "${nodecount}" > ${magpie_hdfs_stored_nodecount_path}
                fi
            fi

            if [ "${hadoop_should_be_setup}" == "1" ] \
                && Magpie_hadoop_setup_type_enables_yarn
            then
                # Make variables unspecified for launching
                Magpie_make_all_local_dirs_unspecified

                ${hadoopsetupscriptprefix}/start-yarn.sh

                # Make variables specific now within Magpie
                Magpie_make_all_local_dirs_node_specific

                # My rough estimate for setup time is 30 seconds per 128 nodes
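                # expr does integer arithmetic, so e.g. 200 workers yields
                # 200 / 128 * 30 = 1 * 30 = 30 seconds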
                local sleepwait=`expr ${HADOOP_WORKER_COUNT} \/ 128 \* 30`
                if [ ${sleepwait} -lt 30 ]
                then
                    sleepwait=30
                fi
                echo "Waiting ${sleepwait} seconds to allow Hadoop daemons to setup"
                sleep ${sleepwait}
                magpie_run_total_sleep_wait=`expr ${magpie_run_total_sleep_wait} + ${sleepwait}`
            fi
        fi

        if [ "${hadoop_should_be_setup}" == "1" ] || [ ${MAGPIE_JOB_TYPE} == "setuponly" ]
        then
            echo "*******************************************************"
            echo "*"
            echo "* Hadoop Information"
            echo "*"
            echo "* You can view your Hadoop status by launching a web browser and pointing to ..."
            echo "*"
            if Magpie_hadoop_setup_type_enables_yarn
            then
                echo "* Yarn Resource Manager: http://${HADOOP_MASTER_NODE}:${default_yarn_resourcemanager_webapp_address}"
                echo "*"
                echo "* Job History Server: http://${HADOOP_MASTER_NODE}:${default_hadoop_jobhistoryserver_webapp_address}"
                echo "*"
            fi
            if Magpie_hadoop_filesystem_mode_is_hdfs_type
            then
                echo "* HDFS Namenode: http://${HADOOP_MASTER_NODE}:${default_hadoop_hdfs_namenode_httpaddress}"
                echo "* HDFS DataNode: http://<DATANODE>:${default_hadoop_hdfs_datanode_httpaddress}"
                echo "*"
            fi
            if Magpie_hadoop_filesystem_mode_is_hdfs_type
            then
                echo "* HDFS can be accessed directly at:"
                echo "*"
                echo "*   hdfs://${HADOOP_MASTER_NODE}:${default_hadoop_hdfs_namenode_address}"
                echo "*"
            fi
            echo "* To access Hadoop directly, you'll want to:"
            echo "*"
            echo "*   ${MAGPIE_REMOTE_CMD:-ssh}${MAGPIE_REMOTE_CMD_OPTS:+" "}${MAGPIE_REMOTE_CMD_OPTS} ${HADOOP_MASTER_NODE}"
            if echo $MAGPIE_SHELL | grep -q csh
            then
                echo "*   setenv JAVA_HOME \"${JAVA_HOME}\""
                echo "*   setenv HADOOP_HOME \"${HADOOP_HOME}\""
                echo "*   setenv HADOOP_CONF_DIR \"${HADOOP_CONF_DIR}\""
                if [ "${MAGPIE_NO_LOCAL_DIR}" == "yes" ]
                then
                    echo "*   setenv HADOOP_CLIENT_OPTS \"-Djava.io.tmpdir=${HADOOP_LOCAL_SCRATCHSPACE_DIR} -XX:-UsePerfData\""
                fi
            else
                echo "*   export JAVA_HOME=\"${JAVA_HOME}\""
                echo "*   export HADOOP_HOME=\"${HADOOP_HOME}\""
                echo "*   export HADOOP_CONF_DIR=\"${HADOOP_CONF_DIR}\""
                if [ "${MAGPIE_NO_LOCAL_DIR}" == "yes" ]
                then
                    echo "*   export HADOOP_CLIENT_OPTS=\"-Djava.io.tmpdir=${HADOOP_LOCAL_SCRATCHSPACE_DIR} -XX:-UsePerfData\""
                fi
            fi
            echo "*"
            echo "* Then you can do as you please.  For example to interact with the Hadoop filesystem:"
            echo "*"
            if echo ${HADOOP_VERSION} | grep -q -E "2\.[0-9]\.[0-9]"
            then
                echo "*   \$HADOOP_HOME/${hadoopcmdprefix}/hdfs dfs ..."
            else
                echo "*   \$HADOOP_HOME/${hadoopcmdprefix}/hadoop fs ..."
            fi
            echo "*"
            echo "* To launch jobs you'll want to:"
            echo "*"
            echo "*   \$HADOOP_HOME/${hadoopcmdprefix}/hadoop jar ..."
            echo "*"
            if [ "${MAGPIE_JOB_TYPE}" == "setuponly" ]
            then
                echo "* To setup, login and set environment variables per the"
                echo "* instructions above, then run:"
                echo "*"
                if Magpie_hadoop_filesystem_mode_is_hdfs_type
                then
                    echo "*   $HADOOP_HOME/${hadoopsetupscriptprefix}/start-dfs.sh"
                fi
                if Magpie_hadoop_setup_type_enables_yarn
                then
                    echo "*   $HADOOP_HOME/${hadoopsetupscriptprefix}/start-yarn.sh"
                    echo "*   $HADOOP_HOME/${hadoopsetupscriptprefix}/mr-jobhistory-daemon.sh start historyserver"
                fi
                echo "*"
                echo "* To end/cleanup your session & kill all daemons, login and set"
                echo "* environment variables per the instructions above, then run:"
                echo "*"
                if Magpie_hadoop_setup_type_enables_yarn
                then
                    echo "*   \$HADOOP_HOME/${hadoopsetupscriptprefix}/stop-yarn.sh"
                fi
                if Magpie_hadoop_filesystem_mode_is_hdfs_type
                then
                    echo "*   \$HADOOP_HOME/${hadoopsetupscriptprefix}/stop-dfs.sh"
                fi
                if Magpie_hadoop_setup_type_enables_yarn
                then
                    echo "*   \$HADOOP_HOME/${hadoopsetupscriptprefix}/mr-jobhistory-daemon.sh stop historyserver"
                fi
                echo "*"
            fi
            if [ "${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}X" != "X" ]
            then
                echo "* If running interactively, sourcing"
                echo "*"
                echo "* ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}"
                echo "*"
                echo "* will set most common environment variables for your job."
                echo "*"
            fi
            echo "*******************************************************"

            if [ "${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}X" != "X" ]
            then
                if echo $MAGPIE_SHELL | grep -q csh
                then
                    echo "setenv HADOOP_HOME \"${HADOOP_HOME}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "setenv HADOOP_CONF_DIR \"${HADOOP_CONF_DIR}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "setenv HADOOP_LOG_DIR \"${HADOOP_LOG_DIR}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "setenv HADOOP_MASTER_NODE \"${HADOOP_MASTER_NODE}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "setenv HADOOP_WORKER_COUNT \"${HADOOP_WORKER_COUNT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "setenv HADOOP_WORKER_CORE_COUNT \"${HADOOP_WORKER_CORE_COUNT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "setenv HADOOP_WORKER_COUNT \"${HADOOP_WORKER_COUNT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "setenv HADOOP_WORKER_CORE_COUNT \"${HADOOP_WORKER_CORE_COUNT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    if Magpie_hadoop_filesystem_mode_is_hdfs_type
                    then
                        echo "setenv HADOOP_NAMENODE \"${HADOOP_NAMENODE}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                        echo "setenv HADOOP_NAMENODE_PORT \"${HADOOP_NAMENODE_PORT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    fi
                    echo "setenv HADOOP_VERSION \"${HADOOP_VERSION}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    if [ "${MAGPIE_NO_LOCAL_DIR}" == "yes" ]
                    then
                        echo "setenv HADOOP_CLIENT_OPTS \"-Djava.io.tmpdir=${HADOOP_LOCAL_SCRATCHSPACE_DIR} -XX:-UsePerfData\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    fi
                else
                    echo "export HADOOP_HOME=\"${HADOOP_HOME}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "export HADOOP_CONF_DIR=\"${HADOOP_CONF_DIR}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "export HADOOP_LOG_DIR=\"${HADOOP_LOG_DIR}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "export HADOOP_MASTER_NODE=\"${HADOOP_MASTER_NODE}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "export HADOOP_WORKER_COUNT=\"${HADOOP_WORKER_COUNT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "export HADOOP_WORKER_CORE_COUNT=\"${HADOOP_WORKER_CORE_COUNT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "export HADOOP_WORKER_COUNT=\"${HADOOP_WORKER_COUNT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    echo "export HADOOP_WORKER_CORE_COUNT=\"${HADOOP_WORKER_CORE_COUNT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    if Magpie_hadoop_filesystem_mode_is_hdfs_type
                    then
                        echo "export HADOOP_NAMENODE=\"${HADOOP_NAMENODE}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                        echo "export HADOOP_NAMENODE_PORT=\"${HADOOP_NAMENODE_PORT}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    fi
                    echo "export HADOOP_VERSION=\"${HADOOP_VERSION}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    if [ "${MAGPIE_NO_LOCAL_DIR}" == "yes" ]
                    then
                        echo "export HADOOP_CLIENT_OPTS=\"-Djava.io.tmpdir=${HADOOP_LOCAL_SCRATCHSPACE_DIR} -XX:-UsePerfData\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
                    fi
                fi
                echo "" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
            fi
        fi

        if [ "${MAGPIE_JOB_TYPE}" != "setuponly" ] \
            && Magpie_hadoop_filesystem_mode_is_hdfs_type
        then
            if [ "${hdfs_was_setup}" == "1" ]
            then
                # Return 0 if service up, 1 if not
                Magpie_check_service_up "Hadoop" "__Magpie_run_check_hadoop_up"

                if [ $? -eq 0 ]
                then
                    magpie_run_hadoop_should_be_torndown=1
                    magpie_run_hadoop_setup_successful=1
                else
                    magpie_run_hadoop_should_be_torndown=1
                    magpie_run_hadoop_setup_successful=0
                    magpie_run_prior_startup_successful=false
                fi
            else
                magpie_run_hadoop_should_be_torndown=0
                magpie_run_hadoop_setup_successful=0
                magpie_run_prior_startup_successful=false
            fi
        else
            magpie_run_hadoop_should_be_torndown=1
            magpie_run_hadoop_setup_successful=1
        fi

        # Set up the job history server after the namenode comes up,
        # as it may need to write/create directories in HDFS
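        #
        # Hadoop 3.x replaced mr-jobhistory-daemon.sh with
        # 'mapred --daemon start|stop historyserver'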
        if [ "${MAGPIE_JOB_TYPE}" != "setuponly" ] && [ "${magpie_run_hadoop_setup_successful}" == "1" ]
        then
            if Magpie_hadoop_setup_type_enables_yarn
            then
                if echo ${HADOOP_VERSION} | grep -q -E "3\.[0-9]\.[0-9]"
                then
                    ${hadoopcmdprefix}/mapred --config "${HADOOP_CONF_DIR}" --daemon start historyserver
                else
                    ${hadoopsetupscriptprefix}/mr-jobhistory-daemon.sh start historyserver
                fi
            fi
        fi

        # A number of applications assume the user home directory has been
        # created.  If it hasn't yet, let's create it for the user.
        if [ "${magpie_run_hadoop_setup_successful}" == "1" ] && [ ${MAGPIE_JOB_TYPE} != "setuponly" ]
        then
            if Magpie_hadoop_filesystem_mode_is_hdfs_type
            then
                ${hadoopcmdprefix}/hadoop fs -ls "/user/${USER}" >& /dev/null
                if [ $? -ne 0 ]
                then
                    echo "User home directory /user/${USER} not found, creating it"
                    ${hadoopcmdprefix}/hadoop fs -mkdir -p "/user/${USER}"
                fi
            fi
        fi
    else
        magpie_run_hadoop_should_be_torndown=0
        magpie_run_hadoop_setup_successful=1
    fi
}

Magpie_run_hadoop () {
    if [ "${HADOOP_JOB}" == "terasort" ]
    then
        echo "*******************************************************"
        echo "* Running Terasort"
        echo "*******************************************************"
        ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-execute script ${MAGPIE_SCRIPTS_HOME}/magpie/job/magpie-job-hadoop-terasort &
        local scriptpid=$!
        Magpie_wait_script_sigusr2_on_job_timeout ${scriptpid}
    elif [ "${HADOOP_JOB}" == "upgradehdfs" ]
    then
        echo "*******************************************************"
        echo "* Entering Hadoop ${HADOOP_JOB} mode"
        echo "*******************************************************"
        ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-execute scriptnokill ${MAGPIE_SCRIPTS_HOME}/magpie/job/magpie-job-hadoop-upgradehdfs &
        local scriptpid=$!
        Magpie_wait_script_sigusr2_on_job_timeout ${scriptpid}

        if [ $? -eq 0 ]
        then
            # Sets magpie_hdfs_stored_version_path
            Magpie_get_hdfs_stored_version_path

            if [ "${magpie_hdfs_stored_version_path}X" != "X" ]
            then
                rm -f ${magpie_hdfs_stored_version_path}
                echo "${HADOOP_VERSION}" > ${magpie_hdfs_stored_version_path}
            fi
        fi
    elif [ "${HADOOP_JOB}" == "decommissionhdfsnodes" ]
    then
        echo "*******************************************************"
        echo "* Entering Hadoop ${HADOOP_JOB} mode"
        echo "*******************************************************"
        ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-execute scriptnokill ${MAGPIE_SCRIPTS_HOME}/magpie/job/magpie-job-hadoop-decommissionhdfsnodes &
        local scriptpid=$!
        Magpie_wait_script_sigusr2_on_job_timeout ${scriptpid}

        if [ $? -eq 0 ]
        then
            # Sets magpie_hdfs_stored_nodecount_path
            Magpie_get_hdfs_stored_nodecount_path

            rm -f ${magpie_hdfs_stored_nodecount_path}
            echo "${HADOOP_DECOMMISSION_HDFS_NODE_SIZE}" > ${magpie_hdfs_stored_nodecount_path}
        fi
    else
        Magpie_output_internal_error "HADOOP_JOB = ${HADOOP_JOB} not handled"
    fi
}

Magpie_run_stop_hadoop () {
    if [ "${HADOOP_SETUP}" == "yes" ] && [ "${magpie_run_hadoop_should_be_torndown}" == "1" ]
    then
        if [ "${MAGPIE_JOB_TYPE}" != "setuponly" ]
        then
            cd ${HADOOP_HOME}

            echo "Stopping hadoop"

            # Make variables unspecified for shutdown
            Magpie_make_all_local_dirs_unspecified

            if Magpie_hadoop_setup_type_enables_yarn
            then
                ${hadoopsetupscriptprefix}/stop-yarn.sh
                if echo ${HADOOP_VERSION} | grep -q -E "3\.[0-9]\.[0-9]"
                then
                    ${hadoopcmdprefix}/mapred --config "${HADOOP_CONF_DIR}" --daemon stop historyserver
                else
                    ${hadoopsetupscriptprefix}/mr-jobhistory-daemon.sh stop historyserver
                fi
            fi

            # Make variables specific now within Magpie
            Magpie_make_all_local_dirs_node_specific

            if Magpie_hadoop_filesystem_mode_is_hdfs_type
            then
                if [ "${magpie_run_hadoop_setup_successful}" == "1" ]
                then
                    # If this is a one-time run, we don't care about
                    # shutting down HDFS cleanly.
                    if [ "${MAGPIE_ONE_TIME_RUN}" != "yes" ]
                    then
                        echo "Saving namespace before shutting down hdfs ..."

                        command="${hadoopcmdprefix}/${dfsadminscript} dfsadmin -safemode enter"
                        echo "Running $command" >&2
                        $command

                        command="${hadoopcmdprefix}/${dfsadminscript} dfsadmin -saveNamespace"
                        echo "Running $command" >&2
                        $command

                        command="${hadoopcmdprefix}/${dfsadminscript} dfsadmin -safemode leave"
                        echo "Running $command" >&2
                        $command
                    fi
                fi

                # Make variables unspecified for shutdown
                Magpie_make_all_local_dirs_unspecified

                ${hadoopsetupscriptprefix}/stop-dfs.sh

                # Make variables specific now within Magpie
                Magpie_make_all_local_dirs_node_specific

                # Sets magpie_networkedhdfspath & magpie_hdfs_in_use_path
                Magpie_get_hdfs_in_use_path

                if [ -f ${magpie_hdfs_in_use_path} ]
                then
                    local magpiejobid=`cat ${magpie_hdfs_in_use_path}`

                    if [ "${MAGPIE_JOB_ID}" == "${magpiejobid}" ]
                    then
                        rm -f ${magpie_hdfs_in_use_path}
                    fi
                fi
            fi
        fi
    fi

    magpie_run_hadoop_teardown_complete=1
}
