#!/bin/bash

source /home/shell/util.sh

# Download the current Hadoop release tarball and unpack it to /opt/hadoop.
# Depends on util.sh: requireJava, download.
function install () {

    requireJava

    # Scrape the current release tarball name from the Apache archive listing,
    # filtering out the site docs, aarch64 builds and the source tarball.
    install_filename=$(curl -sL https://archive.apache.org/dist/hadoop/common/current/ \
        | grep compressed | grep -v site | grep -v aarch64 | grep -v src.tar.gz \
        | perl -pe 's/.*(hadoop.*.tar.gz).*/\1/g')
    # BUG FIX: without this guard an empty name downloads the directory
    # listing itself and tar then fails with a confusing error.
    if [ -z "$install_filename" ]; then
        echo "install: could not determine hadoop tarball name" >&2
        return 1
    fi

    # The BFSU mirror has the same directory layout as the Apache archive.
    download /opt/hadoop.tar.gz "https://mirrors.bfsu.edu.cn/apache/hadoop/common/current/${install_filename}"

    tar xf /opt/hadoop.tar.gz -C /opt/ || return 1
    rm -f /opt/hadoop.tar.gz    # plain file; -r was unnecessary
    mv /opt/hadoop-* /opt/hadoop
}

# Configure a single-node Hadoop cluster: env vars, core/hdfs/yarn/mapred
# site XML, root run-as users for the start/stop scripts, passwordless ssh
# to localhost, and firewall ports.
# Depends on util.sh: firewall-addport and $host_addr.
function config () {

    # Single-node layout: every service binds to the name "master".
    hostname=master

    # NOTE: these two port variables stay global on purpose — run() reads
    # them after config() returns.
    read -p "web管理端口[8088]：" yarn_resourcemanager_webapp_address
    yarn_resourcemanager_webapp_address=${yarn_resourcemanager_webapp_address:-"8088"}
    read -p "Datanode管理端口[9870]：" dfs_namenode_secondary_http_address
    dfs_namenode_secondary_http_address=${dfs_namenode_secondary_http_address:-"9870"}

    # Expose HADOOP_HOME/PATH to future logins and to this shell.
tee /etc/profile.d/hadoop.sh<<EOF
export HADOOP_HOME="/opt/hadoop/"
export PATH="\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin"
EOF
source /etc/profile.d/hadoop.sh

# $host_addr is expected to be set by the sourced util.sh — TODO confirm.
tee /etc/hosts -a<<EOF
$host_addr $hostname
EOF

    sed -ri "s@# export JAVA_HOME=@export JAVA_HOME=/opt/jdk/@g" /opt/hadoop/etc/hadoop/hadoop-env.sh

    # hadoop.tmp.dir
    # fs.defaultFS
    # fs.default.name
    if [ ! -d "/opt/hadoop/hadoopdata" ]; then
        mkdir -p /opt/hadoop/hadoopdata
    fi

    # SECURITY FIX: use an unpredictable temp file instead of the fixed,
    # world-guessable /tmp/hadoop.conf.tmp name.
    local conf_tmp
    conf_tmp=$(mktemp /tmp/hadoop.conf.XXXXXX) || return 1

tee "$conf_tmp"<<EOF
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://$hostname:9000</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/opt/hadoop/hadoopdata</value>
</property>
EOF

    # sed's 'r' command inserts the snippet right after <configuration>.
    sed -ri "/<configuration>/r $conf_tmp" /opt/hadoop/etc/hadoop/core-site.xml

tee "$conf_tmp"<<EOF
    <property>
        <!--数据块的冗余度，默认是3-->
        <!--一般来说，数据块冗余度跟数据节点的个数一致，最大不超过3-->
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>$hostname:$dfs_namenode_secondary_http_address</value>
    </property>
EOF
    # dfs.replication
    sed -ri "/<configuration>/r $conf_tmp" /opt/hadoop/etc/hadoop/hdfs-site.xml

tee "$conf_tmp"<<EOF
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>$hostname</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>$hostname:18040</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>$hostname:18030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>$hostname:18025</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>$hostname:18141</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>$hostname:${yarn_resourcemanager_webapp_address}</value>
    </property>
EOF
    # yarn.nodemanager.aux-services
    sed -ri "/<configuration>/r $conf_tmp" /opt/hadoop/etc/hadoop/yarn-site.xml

tee "$conf_tmp"<<EOF
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
EOF
    # mapreduce.framework.name
    sed -ri "/<configuration>/r $conf_tmp" /opt/hadoop/etc/hadoop/mapred-site.xml

    # Let the dfs start/stop scripts run their daemons as root
    # (inserted right after the scripts' shebang line).
tee "$conf_tmp"<<EOF
HDFS_DATANODE_USER=root
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
EOF
    sed -ri "/\#\!\/usr\/bin\/env/r $conf_tmp" /opt/hadoop/sbin/start-dfs.sh
    sed -ri "/\#\!\/usr\/bin\/env/r $conf_tmp" /opt/hadoop/sbin/stop-dfs.sh

    # NOTE(review): HDFS_DATANODE_SECURE_USER=yarn below looks like a
    # copy-paste from the dfs block — kept byte-identical to preserve
    # behavior; confirm against the Hadoop "run as root" documentation.
tee "$conf_tmp"<<EOF
YARN_RESOURCEMANAGER_USER=root
HDFS_DATANODE_SECURE_USER=yarn
YARN_NODEMANAGER_USER=root
EOF
    sed -ri "/\#\!\/usr\/bin\/env/r $conf_tmp" /opt/hadoop/sbin/start-yarn.sh
    sed -ri "/\#\!\/usr\/bin\/env/r $conf_tmp" /opt/hadoop/sbin/stop-yarn.sh
    rm -f "$conf_tmp"

    # Disable host-key prompts so the passwordless-ssh bootstrap below
    # is fully non-interactive.
    sed -ri "s/#   StrictHostKeyChecking ask/StrictHostKeyChecking no/g" /etc/ssh/ssh_config
    # Unit name differs per distro; one of the two restarts fails harmlessly.
    systemctl restart sshd
    systemctl restart ssh
    if [ ! -d ~/.ssh/ ]; then
        mkdir ~/.ssh/ && chmod 700 ~/.ssh/
    fi
    # BUG FIX: '~' does not expand inside double quotes, so the original
    # [ ! -f "~/.ssh/..." ] tests never matched the real files and the
    # touch/ssh-keygen branches re-ran on every invocation. Use $HOME.
    if [ ! -f "$HOME/.ssh/authorized_keys" ]; then
        touch ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys
    fi
    if [ ! -f "$HOME/.ssh/known_hosts" ]; then
        touch ~/.ssh/known_hosts
    fi
    if [ ! -f "$HOME/.ssh/id_rsa.pub" ]; then
        ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ""
        read -p "请输入本机root密码[123456]：" root_host_passwd
        root_host_passwd=${root_host_passwd:-"123456"}
        # BUG FIX: quote the password so spaces/globs in it survive.
        sshpass -p "${root_host_passwd}" ssh-copy-id -i ~/.ssh/id_rsa.pub -o "StrictHostKeyChecking no" root@localhost
    fi

    firewall-addport "${yarn_resourcemanager_webapp_address}"
    firewall-addport "${dfs_namenode_secondary_http_address}"
}

# Format HDFS, start every daemon, then print the web console URLs.
# Depends on util.sh ($host_addr, printInfo) and on config() having run
# (start-all.sh on PATH, the two port globals set).
function run () {
    hdfs namenode -format
    start-all.sh

    # Same banner top and bottom; hoist it so it is written once.
    local banner="============================hadoop============================"
    printInfo "$banner"
    printInfo "resourcemanager 控制台：http://$host_addr:${yarn_resourcemanager_webapp_address}"
    printInfo "Datanode        控制台：http://$host_addr:${dfs_namenode_secondary_http_address}"
    printInfo "$banner"
}



# Entry point: install the release, write all configuration, then start it.
main () {
    install
    config
    run
}

main "$@"