#!/usr/bin/env bash
# Hadoop 3.x cluster bootstrap: distributes ssh keys and the hadoop
# tarball, then rewrites the hadoop/KMS configuration files.
# Fix: `#!/bin/env bash` — env is installed at /usr/bin/env on
# mainstream distros; /bin/env usually does not exist.

# Remote login used by ssh-copy-id / scp.
# NOTE(review): plaintext root password checked into the script —
# consider key-only auth or sourcing this from a secrets file.
usr=root
password=root
hadoop_version="3.1.4"
hadoop_home=/data/hadoop
hadoop_config_path=/data/hadoop/hadoop/etc/hadoop
namenode="192.168.1.50"
secondarynamenode="192.168.1.51"
cluster="192.168.1.99 192.168.1.50 192.168.1.51"

# Install 'expect' if missing (needed to script ssh-copy-id prompts).
if ! rpm -q expect &>/dev/null; then
	sudo yum -y install expect
fi

# Install ntpdate and sync the clock once so cluster nodes agree on time.
if ! rpm -q ntpdate &>/dev/null; then
	sudo yum -y install ntpdate
	ntpdate ntp4.aliyun.com
fi

# Generate a passwordless RSA key pair if this host has none yet;
# ssh_copy_id later pushes the public half to every cluster node.
if [ ! -f "${HOME}/.ssh/id_rsa" ]; then
	ssh-keygen -P "" -f "${HOME}/.ssh/id_rsa"
	echo "rsa key is ok"
fi


# Push our public key to every host in the space-separated list $1,
# answering the host-key and password prompts via expect.
# Globals read: usr (remote user), password (remote password), HOME.
# Fixes: the original `wait` was dead code (the brace group was never
# backgrounded); `$?` test replaced by the direct `if` idiom; the
# password pattern now matches "password:" and "Password:".
ssh_copy_id(){
    local ip
    for ip in $1; do
        # Skip hosts that do not answer a single ping within 1 second.
        if ping -c1 -w1 "${ip}" &>/dev/null; then
            echo "${ip} is pong"
            /usr/bin/expect <<EOF
set timeout 10
spawn ssh-copy-id -i ${HOME}/.ssh/id_rsa.pub ${usr}@${ip}
expect {
    "yes/no" { send "yes\r"; exp_continue }
    "assword:" { send "${password}\r" }
}
expect eof
EOF
        fi
    done
}



# Create the hadoop base directory layout (data/ and socket/) if the
# base directory is absent.
# Globals read: hadoop_home.
mk_hadoop_dir(){
    if [ ! -d "${hadoop_home}" ]; then
        echo "make hadoop dirs"
        # mkdir -p creates parents, so one call covers the base dir too.
        mkdir -p "${hadoop_home}/data" "${hadoop_home}/socket"
    fi
}

# Copy the hadoop tarball to each host in the space-separated list $1;
# falls back to the global $cluster list when no argument is given.
# Fix: the original ignored its argument and always iterated $cluster,
# defeating the host list passed down by setup_hadoop_file.
# Globals read: hadoop_home, cluster (fallback).
scp_hadoop_tar_file(){
    local ip
    for ip in ${1:-$cluster}; do
        echo "scp file to ${ip}"
        scp "${hadoop_home}"/hadoop*.tar.gz "root@${ip}:${hadoop_home}"
    done
}

# Create the version-independent "hadoop" symlink pointing at the
# versioned install directory (e.g. /data/hadoop/hadoop-3.1.4), so the
# rest of the script can use a fixed path.
# Globals read: hadoop_home, hadoop_version.
alias_hadoop(){
    local target="${hadoop_home}/hadoop-${hadoop_version}"
    local link="${hadoop_home}/hadoop"
    if [ ! -d "$link" ]; then
        echo "alias hadoop file dir"
        ln -s "$target" "$link"
    fi
}


# Prepare nodes for installation: distribute ssh keys, create the local
# directory layout, and ship the hadoop tarball. $1 is an optional
# space-separated host list; the ssh/scp steps are skipped when empty.
setup_hadoop_file(){
    local hosts="$1"
    if [ -n "$hosts" ]; then
        ssh_copy_id "$hosts"
    fi
    mk_hadoop_dir
    if [ -n "$hosts" ]; then
        scp_hadoop_tar_file "$hosts"
    fi
}



# One-time backup of the stock hadoop config files to *.bak copies.
# Fix: the original test `[ ! -f ".../*.bak" ]` compared against the
# literal file name "*.bak" (globs do not expand inside quotes), so the
# backup ran on every invocation and overwrote the pristine copies.
# compgen -G succeeds iff the glob matches at least one existing file.
# Globals read: hadoop_config_path.
backup_hadoop_etc_file(){
    local f
    if ! compgen -G "${hadoop_config_path}/*.bak" >/dev/null; then
        echo "bak hadoop config file"
        for f in hadoop-env.sh core-site.xml hdfs-site.xml \
                 mapred-site.xml yarn-site.xml kms-site.xml kms-env.sh; do
            cp "${hadoop_config_path}/${f}" "${hadoop_config_path}/${f}.bak"
        done
    fi
}

# Overwrite hadoop-env.sh so every hadoop daemon runs as ${usr} and the
# generated file pins the current JAVA_HOME / OS type. All values are
# expanded at write time, producing literal settings in the output.
# Globals read: usr, hadoop_config_path, JAVA_HOME, HADOOP_OS_TYPE.
rewrite_hadoop_env_sh(){
    echo "hadoop_env_sh"
    local daemon_user_var
    {
        echo "export JAVA_HOME=${JAVA_HOME}"
        echo "export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}"
        # Same variable list and order as before, one export per line.
        for daemon_user_var in HDFS_NAMENODE_USER HDFS_DATANODE_USER \
                HDFS_SECONDARYNAMENODE_USER HDFS_RESOURCEMANAGER_USER \
                HDFS_NODEMANAGER_USER YARN_RESOURCEMANAGER_USER \
                YARN_NODEMANAGER_USER; do
            echo "export ${daemon_user_var}=${usr}"
        done
    } > "${hadoop_config_path}"/hadoop-env.sh
}

# Generate core-site.xml (overwrites any existing file).
# Settings written (shell vars expand at write time):
#   fs.defaultFS                      -> hdfs://${namenode}:8020
#   hadoop.tmp.dir                    -> ${hadoop_home}/data
#   hadoop.http.staticuser.user       -> root (web-UI static user)
#   fs.trash.interval / checkpoint    -> 1-day trash retention
#   hadoop.security.key.provider.path -> KMS endpoint on the namenode
# Globals read: namenode, hadoop_home, hadoop_config_path.
rewrite_core_site(){
    echo "core_site"
    cat << EOF >"${hadoop_config_path}"/core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://${namenode}:8020</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>${hadoop_home}/data</value>
    </property>
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>root</value>
    </property>
    <property>
        <name>fs.trash.interval</name>
        <value>1440</value>
        <description>1440 minutes equals one day</description>
    </property>
    <property>
        <name>fs.trash.checkpoint.interval</name>
        <value>0</value>
        <description>this value set 0 is equals the same of fs.trash.interval field that it will be one day</description>
    </property>
    <property>
        <name>hadoop.security.key.provider.path</name>
        <value>kms://http@${namenode}:16000/kms</value>
        <description>
            The KeyProvider to use when interacting with encryption keys used
            when reading and writing to an encryption zone.
        </description>
    </property>
</configuration>
EOF
}

# Report which native hadoop libraries the installation can load.
# Requires the `hadoop` binary on PATH (see export_hadoop_env).
hadoop_checknative(){
    hadoop checknative
}

# Generate hdfs-site.xml (overwrites any existing file).
# Fix: dfs.namenode.secondary.http-address takes a plain host:port
# value, not a URI — the original wrote "hdfs://host:9868", which is
# not a valid HTTP address for the secondary namenode.
# Globals read: secondarynamenode, namenode, hadoop_home, hadoop_config_path.
rewrite_hdfs_site(){
    echo "hdfs_site"
    cat << EOF >"${hadoop_config_path}"/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>${secondarynamenode}:9868</value>
    </property>
    <property>
        <name>dfs.client.read.shortcircuit</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.domain.socket.path</name>
        <value>${hadoop_home}/socket/dn_socket</value>
    </property>
    <property>
        <name>dfs.hosts.exclude</name>
        <value>${hadoop_config_path}/excludes</value>
    </property>
    <property>
        <name>hadoop.kms.key.provider.uri</name>
        <value>kms://http@${namenode}:16000/kms</value>
    </property>
</configuration>
EOF
}

# Generate mapred-site.xml (overwrites any existing file).
# Settings written: YARN as the MR framework, HADOOP_MAPRED_HOME for the
# AM/map/reduce environments (the \${HADOOP_HOME} escapes keep that
# reference literal so it is resolved by hadoop at job time, not here),
# and the job-history server RPC/web addresses on the namenode host.
# Globals read: namenode, hadoop_config_path.
rewrite_mapred_site(){
    echo "mapred_site"
    cat << EOF >"${hadoop_config_path}"/mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>yarn.app.mapreduce.am.env</name>
        <value>HADOOP_MAPRED_HOME=\${HADOOP_HOME}</value>
    </property>
    <property>
        <name>mapreduce.map.env</name>
        <value>HADOOP_MAPRED_HOME=\${HADOOP_HOME}</value>
    </property>
    <property>
        <name>mapreduce.reduce.env</name>
        <value>HADOOP_MAPRED_HOME=\${HADOOP_HOME}</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>${namenode}:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>${namenode}:19888</value>
    </property>
</configuration>
EOF
}

# Generate yarn-site.xml (overwrites any existing file).
# Settings written: resourcemanager/timeline hosts on the namenode,
# shuffle service, 512MB-2GB scheduler allocation bounds, 7-day log
# aggregation pointed at the job-history web UI, hardware
# auto-detection (-1 memory/vcores), and pmem/vmem checks disabled.
# Globals read: namenode, hadoop_config_path.
rewrite_yarn_site(){
    echo "yarn_site"
    cat << EOF >"${hadoop_config_path}"/yarn-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>${namenode}</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>512</value>
    </property>
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>2048</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-pmem-ratio</name>
        <value>4</value>
    </property>
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.log.server.url</name>
        <value>http://${namenode}:19888/jobhistory/logs</value>
    </property>
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
    <property>
        <name>yarn.timeline-service.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.timeline-service.generic-application-history.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.timeline-service.hostname</name>
        <value>${namenode}</value>
    </property>
    <property>
        <name>yarn.timeline-service.http-cross-origin.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.client.thread-count</name>
        <value>50</value>
        <description>default is 50 , if need ,can turn it bigger</description>
    </property>
    <property>
        <name>yarn.nodemanager.resource.detect-hardware-capabilities</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>-1</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.cpu-vcores</name>
        <value>-1</value>
    </property>
    <property>
        <name>yarn.nodemanager.pmem-check-enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
</configuration>
EOF
}


# Generate kms-site.xml for the KMS server (overwrites any existing file).
# Fixes:
#  - hadoop.kms.key.provider.uri was defined twice with conflicting
#    values; the later kms://... entry overrode the jceks keystore URI
#    that the KMS server itself needs (the kms:// client endpoint is
#    already set in core-site.xml / hdfs-site.xml).
#  - "file@/${HOME}" produced a doubled slash since $HOME is absolute;
#    "file@${HOME}" yields the correct jceks path.
# Globals read: HOME, hadoop_config_path.
rewrite_kms_site(){
    echo "kms_site"
    cat << EOF >"${hadoop_config_path}"/kms-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>hadoop.kms.key.provider.uri</name>
        <value>jceks://file@${HOME}/kms.jks</value>
    </property>
    <property>
        <name>hadoop.security.keystore.java-keystore-provider.password-file</name>
        <value>kms.keystore.password</value>
    </property>
    <property>
        <name>hadoop.kms.authentication.type</name>
        <value>simple</value>
    </property>
</configuration>
EOF
}

# Write the KMS keystore password file referenced by
# hadoop.security.keystore.java-keystore-provider.password-file.
# NOTE(review): weak, hard-coded password checked into the script —
# consider generating it or reading it from a secrets store.
# (The "keysotre" typo in the function name is kept: it is the public
# name callers — e.g. setup() — invoke.)
write_kms_keysotre_pwd(){
    echo 123456 > "${hadoop_config_path}"/kms.keystore.password
}

# Generate kms-env.sh: KMS install/log locations and listener ports.
# Globals read: hadoop_home, hadoop_config_path.
rewrite_kms_envsh(){
    echo "kms_envsh"
    {
        printf 'export KMS_HOME=%s/hadoop\n' "${hadoop_home}"
        printf 'export KMS_LOG=%s/hadoop/logs/kms\n' "${hadoop_home}"
        printf 'export KMS_HTTP_PORT=16000\n'
        printf 'export KMS_ADMIN_PORT=16001\n'
    } > "${hadoop_config_path}"/kms-env.sh
}


# Regenerate the workers file with one cluster IP per line.
# Globals read: cluster (space-separated IPs), hadoop_config_path.
add_workers(){
    echo "add_workers"
    local ip workers_file="${hadoop_config_path}/workers"
    # Truncate first so repeated runs do not accumulate entries.
    : > "$workers_file"
    for ip in $cluster; do
        printf '%s\n' "$ip" >> "$workers_file"
    done
}

profile_path=/etc/profile

# Append HADOOP_HOME / HADOOP_CONF_DIR / PATH exports to ${profile_path},
# but only once (skipped when the file already mentions HADOOP).
# Fixes: useless `cat | grep` replaced with grep -q, and the typo
# "souce" in the user-facing hint corrected to "source".
# Globals read: profile_path, hadoop_home, hadoop_config_path.
export_hadoop_env(){
    if ! grep -q HADOOP "${profile_path}" 2>/dev/null; then
        echo "export hadoop env"
        {
            echo "#HADOOP"
            echo "export HADOOP_HOME=${hadoop_home}/hadoop"
            echo "export HADOOP_CONF_DIR=${hadoop_config_path}"
            # Escaped so PATH is resolved when the profile is sourced.
            echo "export PATH=\${HADOOP_HOME}/bin:\${HADOOP_HOME}/sbin:\$PATH"
        } >> "${profile_path}"
        echo "please source /etc/profile"
    fi
}

# If formatting more than once, first remove everything under
# hadoop.tmp.dir on every machine (translated from the original
# Chinese note); a re-format over stale data leaves datanodes with a
# mismatched cluster ID.
format_hadoop_filesystem(){
    echo "hdfs format"
    # hdfs namenode -format &> /data/hadoop/hadoop-format.log
    # hdfs --daemon start namenode hdfs --daemon stop namenode
    # hdfs --daemon start datanode hdfs --daemon stop datanode
    # hdfs --daemon start secondarynamenode hdfs --daemon stop secondarynamenode 
    # \rm  -r /data/hadoop/data/*
    # Format the namenode; full output is captured in hadoop-format.log.
    hdfs namenode -format &> ${hadoop_home}/hadoop-format.log
}

# Start the MapReduce job-history server and the YARN timeline server
# as background daemons on this node. Requires mapred/yarn on PATH.
start_history_server(){
    mapred --daemon start historyserver
    yarn --daemon start timelineserver
}

# Write the full hadoop + KMS configuration on this node.
# Order matters: the profile exports and the version symlink must exist
# before the config dir is backed up and rewritten.
setup(){
    export_hadoop_env
    alias_hadoop
    backup_hadoop_etc_file
    rewrite_hadoop_env_sh
    rewrite_core_site
    rewrite_hdfs_site
    rewrite_mapred_site
    rewrite_yarn_site
    add_workers
    hadoop_checknative
    rewrite_kms_site
    write_kms_keysotre_pwd
    rewrite_kms_envsh
}

# Print command-line help and exit successfully.
# Fixes: "useage" typo, and the help text previously attributed the
# ssh-copy-id/scp action to -h although getopts maps it to -f.
usage() {
    echo "usage:"
    echo "-f <hosts>  ssh-copy-id and scp the hadoop tar file to <hosts>"
    echo "-s          write hadoop/KMS configuration on this node"
    echo "-h          show this help"
    exit 0
}

# Parse command-line options:
#   -f <hosts>  distribute ssh keys and the hadoop tarball to <hosts>
#   -s          write the hadoop configuration on this node
#   -h / other  print usage and exit
while getopts 'hf:s' OPT; do
    case "$OPT" in
        f)
            setup_hadoop_file "$OPTARG"
            ;;
        s)
            setup
            ;;
        h | \?)
            usage
            ;;
    esac
done


# Start hadoop (per-daemon):
# hdfs --daemon start namenode / hdfs --daemon stop namenode
# hdfs --daemon start datanode / hdfs --daemon stop datanode
# hdfs --daemon start secondarynamenode / hdfs --daemon stop secondarynamenode

# Start yarn:
# ${hadoop_home}/start-yarn.sh 

# Start the MapReduce job-history server:
# mapred --daemon start historyserver
# Start the YARN timeline server:
# yarn --daemon start timelineserver
