#!/bin/bash

#获取namenode节点，默认/etc/hosts文件第一个主机名
namenode_host=$(cat /etc/hosts | grep -v "^$"|awk 'NR==3{print $2}')
#获取secondary_host节点，默认/etc/hosts文件第三个主机名
secondary_host=$(cat /etc/hosts | grep -v "#" | awk 'NR==5{print $2}')
#获取resourceMangerhosts]
resourcemanager_host=$(cat /etc/hosts | grep -v "#" | awk 'NR==4{print $2}')

source /etc/profile

#HDFS集群配置 

#修该hadooP_Env
echo "export JAVA_HOME=$JAVA_HOME" >> ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh

#2. Core settings inserted into core-site.xml: temp/data base dir, default
#   filesystem URI (the NameNode), HDFS web UI static user, and root
#   proxy-user permissions.
#   FIX: fs.default.name is deprecated — fs.defaultFS is the supported key.
#   The heredoc is deliberately unquoted so $HADOOP_HOME / $namenode_host expand.
core_text=$(cat << EOF
    <property>
        <name>hadoop.tmp.dir</name>
        <value>${HADOOP_HOME}/data</value>
        <description>Abase for other temporary directories.</description>
    </property>

    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://$namenode_host:9000</value>
    </property>
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>root</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
EOF
)
#3. HDFS settings (inserted into hdfs-site.xml): NameNode / DataNode /
#   checkpoint storage dirs, replication factor of 2, and the
#   SecondaryNameNode HTTP address.
#   The heredoc is deliberately unquoted so $HADOOP_HOME / $secondary_host expand.
#   NOTE(review): values use "file:$HADOOP_HOME/..." — Hadoop docs usually show
#   "file://" URIs; confirm this single-slash form resolves as intended.
hdfs_text=$(cat << EOF
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:$HADOOP_HOME/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:$HADOOP_HOME/dfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.dir</name>
        <value>file:$HADOOP_HOME/dfs/namesecondary</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>$secondary_host:9868</value>
    </property>
EOF
)
#4. MapReduce settings (inserted into mapred-site.xml): run MapReduce on
#   YARN, and host the JobHistory server (RPC 10020 / web UI 19888) on the
#   namenode machine.
mapre_text=$(cat << EOF
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>$namenode_host:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>$namenode_host:19888</value>
    </property>

EOF
)
#5. YARN settings (inserted into yarn-site.xml): shuffle aux-service,
#   ResourceManager hostname, NodeManager memory (2048 MB) with hardware
#   auto-detection, log aggregation with 7-day retention, and the
#   NodeManager environment whitelist.
#   The <!-- --> comments below are part of the generated config text and
#   are intentionally left as-is.
yarn_text=$(cat << EOF
    <property>
        <!--Reducer获取数据的方式【必须配置】-->
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <!--ResourceManager主机名，配置后其他的address就不用配置了，除非需要自定义端口【必须配置】-->
        <name>yarn.resourcemanager.hostname</name>
        <value>$resourcemanager_host</value>
    </property>
    <property>
        <!--NodeManager节点的内存大小，单位为MB【必须配置】-->
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>2048</value>
    </property>
    <property>
        <!--NodeManager节点硬件的自动探测，主要为修正CPU个数，开启后不影响前面内存的配置-->
        <name>yarn.nodemanager.resource.detect-hardware-capabilities</name>
        <value>true</value>
    </property>
    <!-- 日志聚集功能【暂时不需要配置】 -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <!-- 日志保留时间设置7天 【暂时不需要配置】-->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
    <property>
            <name>yarn.nodemanager.env-whitelist</name>
            <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
EOF
)
#6. Regenerate the workers file: one slave hostname per line, taken from the
#   second column of /etc/hosts line 3 onward, skipping commented-out entries.
#   FIX: the original stored a newline-joined string but iterated it with
#   array syntax; build a real array so each host is a distinct element.
mapfile -t hosts < <(awk 'NR >= 3 && $2 ~ /^[^#][^[:space:]]/ { print $2 }' /etc/hosts)

workers_file="${HADOOP_HOME}/etc/hadoop/workers"
if [ -s "$workers_file" ]; then
    echo "File is not empty, clearing contents..."
    : > "$workers_file"
else
    echo "File is empty"
fi
# Append every discovered host on its own line (nothing written when none found).
if [ "${#hosts[@]}" -gt 0 ]; then
    printf '%s\n' "${hosts[@]}" >> "$workers_file"
fi

# Run-as-user declarations injected into the HDFS start/stop scripts so the
# daemons may be launched as root.
# NOTE(review): HADOOP_SECURE_DN_USER is the legacy name (Hadoop 3.x renamed
# it to HDFS_DATANODE_SECURE_USER) — confirm against the target Hadoop version.
HADOOP_HDFS=$(cat << EOF
##Hadoop User
HDFS_DATANODE_USER=root
HADOOP_SECURE_DN_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
EOF
)

# Same idea for the YARN start/stop scripts.
HADOOP_YARN=$(cat << EOF
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
EOF
)

# Insert a generated <property> block just above every closing
# </configuration> tag of a site file, in a single pass.
# FIX: the original ran one `sed -i` per inserted line, rewriting the whole
# file once per line and relying on sed `i\` whitespace handling; a single
# awk pass preserves indentation and rewrites each file exactly once.
#   $1 = text block to insert, $2 = target XML file
insert_before_close() {
    local file=$2
    local tmp="${file}.tmp.$$"
    # Pass the block via the environment so awk never interprets backslashes.
    if BLOCK="$1" awk '/<\/configuration>/ { print ENVIRON["BLOCK"] } { print }' "$file" > "$tmp"; then
        mv -- "$tmp" "$file"
    else
        rm -f -- "$tmp"
        echo "failed to update $file" >&2
        return 1
    fi
}

#core-site.xml
insert_before_close "$core_text" "${HADOOP_HOME}/etc/hadoop/core-site.xml"

#hdfs-site.xml
insert_before_close "$hdfs_text" "${HADOOP_HOME}/etc/hadoop/hdfs-site.xml"

#yarn-site.xml
insert_before_close "$yarn_text" "${HADOOP_HOME}/etc/hadoop/yarn-site.xml"

#mapred-site.xml
insert_before_close "$mapre_text" "${HADOOP_HOME}/etc/hadoop/mapred-site.xml"

# Insert the run-as-user declarations right after the shebang of each
# start/stop script, as one block in a single pass.
# FIX: the original repeated `sed -i '1a line'` per line, which inserts each
# new line directly after line 1 and therefore REVERSES the block's order
# (the "##Hadoop User" header ended up below the variables).
#   $1 = text block to insert, $2 = target script
insert_after_shebang() {
    local file=$2
    local tmp="${file}.tmp.$$"
    if BLOCK="$1" awk 'NR == 1 { print; print ENVIRON["BLOCK"]; next } { print }' "$file" > "$tmp"; then
        mv -- "$tmp" "$file"
    else
        rm -f -- "$tmp"
        echo "failed to update $file" >&2
        return 1
    fi
}

#start-dfs.sh / stop-dfs.sh
insert_after_shebang "$HADOOP_HDFS" "${HADOOP_HOME}/sbin/start-dfs.sh"
insert_after_shebang "$HADOOP_HDFS" "${HADOOP_HOME}/sbin/stop-dfs.sh"

#start-yarn.sh / stop-yarn.sh
insert_after_shebang "$HADOOP_YARN" "${HADOOP_HOME}/sbin/start-yarn.sh"
insert_after_shebang "$HADOOP_YARN" "${HADOOP_HOME}/sbin/stop-yarn.sh"

echo "hadoop配置文件已经加载完成"

