#!/usr/bin/env bash
# Install and configure a Hadoop node (node1), then distribute the
# configuration to node2/node3 over scp.
# Requirements: run as root, JAVA_HOME exported, passwordless ssh to node2/3.
#
# -e: abort on any unhandled command failure.
# -o pipefail: a pipeline fails if any stage fails (not just the last).
set -eo pipefail

# Ask which Hadoop version to install; fall back to 3.3.6 (the version the
# prompt advertises) when the user just presses Enter.
echo -n "install hadoop version(3.3.6): "
read -r version          # -r: do not let backslashes mangle the input
version=${version:-3.3.6}

# Tsinghua mirror URL of the release tarball for the chosen version.
download_path="https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/core/hadoop-${version}/hadoop-${version}.tar.gz"
# Tarball file name (e.g. hadoop-3.3.6.tar.gz). Pure parameter expansion:
# no deprecated backticks, no fork to `basename`.
binary_name=${download_path##*/}

package_dir="/srv/package/"      # download destination (trailing slash relied on below)
install_dir="/srv/software/"     # unpack destination (trailing slash relied on below)
custom_env_file="/etc/profile.d/custom_env.sh"   # sourced by login shells

# Create the package/install directories, download the tarball, unpack it.
# All expansions quoted (SC2086); set -e aborts if wget or tar fails.
mkdir -p "${package_dir}" "${install_dir}"
wget "${download_path}" -P "${package_dir}"
tar xf "${package_dir}${binary_name}" -C "${install_dir}"

# Register Hadoop environment variables for all future login shells.
# The tarball unpacks to hadoop-<version>, so the install path is fully
# determined -- no need to parse `ls -l` output (fragile, SC2010/SC2012).
software_name="hadoop-${version}"
software_home="${install_dir}${software_name}"

# In the heredoc, unescaped expansions (${software_home}) are resolved now;
# escaped ones (\$HADOOP_HOME, \$(hadoop classpath)) are written literally
# and resolved at login time. The original backticked `hadoop classpath`
# was expanded at write time -- before hadoop is on PATH -- which silently
# produced an empty HADOOP_CLASSPATH. Deferring it (and putting it after
# the PATH export so `hadoop` is resolvable) fixes that.
cat >> "${custom_env_file}" <<-EOF
export HADOOP_HOME=${software_home}
export HADOOP_CONF_DIR=\$HADOOP_HOME/etc/hadoop
export PATH=\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin:\$PATH
export HADOOP_CLASSPATH=\$(hadoop classpath)
EOF
# Load the new variables into this shell for the remaining steps.
# NOTE(review): `hadoop classpath` needs JAVA_HOME exported; the script
# requires that anyway (see the hadoop-env.sh step below).
source "${custom_env_file}"

# Append runtime settings to hadoop-env.sh.
# Fail fast if JAVA_HOME is missing -- otherwise the heredoc would write
# `export JAVA_HOME=` and leave an installation that cannot start.
: "${JAVA_HOME:?JAVA_HOME must be set before installing hadoop}"
cat >> "${software_home}/etc/hadoop/hadoop-env.sh" <<-EOF
export JAVA_HOME=${JAVA_HOME}
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
EOF

# Write core-site.xml (default filesystem, local data dir, proxy-user and
# trash settings). ${software_home} is expanded into the payload; the
# redirect target is quoted (SC2086).
cat > "${software_home}/etc/hadoop/core-site.xml" <<-EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://node1:8020</value>
    </property>
    <!-- hadoop本地数据存储目录 format时自动生成 -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>${software_home}/data</value>
    </property>
    <!-- 在Web UI访问HDFS使用的用户名。-->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>root</value>
    </property>
    <!-- 整合hive，使其可以用root来访问 -->
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
    <!-- 文件系统垃圾桶保存时间 -->
    <property>
        <name>fs.trash.interval</name>
        <value>1440</value>
    </property>
</configuration>
EOF

# Write hdfs-site.xml (SecondaryNameNode host/port). Redirect target
# quoted (SC2086); payload contains no shell expansions.
cat > "${software_home}/etc/hadoop/hdfs-site.xml" <<-EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- 设定SNN运行主机和端口 -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>node2:9868</value>
    </property>
</configuration>
EOF

# Write mapred-site.xml (YARN framework, job-history server, task env).
# The quoted 'EOF' delimiter is intentional: ${HADOOP_HOME} must land in
# the file literally, to be resolved by Hadoop at task launch time.
cat > "${software_home}/etc/hadoop/mapred-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- mr程序默认运行方式。yarn集群模式 local本地模式 -->
    <property>
      <name>mapreduce.framework.name</name>
      <value>yarn</value>
    </property>
    <!-- 历史服务器端地址 -->
    <property>
      <name>mapreduce.jobhistory.address</name>
      <value>node1:10020</value>
    </property>
    <!-- 历史服务器web端地址 -->
    <property>
      <name>mapreduce.jobhistory.webapp.address</name>
      <value>node1:19888</value>
    </property>
    <!-- MR App Master环境变量 -->
    <property>
      <name>yarn.app.mapreduce.am.env</name>
      <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
    <!-- MR MapTask环境变量 -->
    <property>
      <name>mapreduce.map.env</name>
      <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
    <!-- MR ReduceTask环境变量 -->
    <property>
      <name>mapreduce.reduce.env</name>
      <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
    </property>
</configuration>
EOF

# Write yarn-site.xml (ResourceManager host, shuffle service, memory
# checks, log aggregation). Redirect target quoted (SC2086).
cat > "${software_home}/etc/hadoop/yarn-site.xml" <<-EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- yarn集群主角色RM运行机器。-->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>node1</value>
    </property>
    <!-- NodeManager上运行的附属服务。需配置成mapreduce_shuffle,才可运行MR程序。-->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- 是否将对容器实施物理内存限制 -->
    <property>
        <name>yarn.nodemanager.pmem-check-enabled</name>
        <value>false</value>
    </property>
    <!-- 是否将对容器实施虚拟内存限制。 -->
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
    <!-- 开启日志聚集 -->
    <property>
      <name>yarn.log-aggregation-enable</name>
      <value>true</value>
    </property>
    <!-- 设置yarn历史服务器地址 -->
    <property>
        <name>yarn.log.server.url</name>
        <value>http://node1:19888/jobhistory/logs</value>
    </property>
    <!-- 保存的时间7天 -->
    <property>
      <name>yarn.log-aggregation.retain-seconds</name>
      <value>604800</value>
    </property>
</configuration>
EOF

# Write the workers file: hosts that run DataNode/NodeManager daemons.
cat > "${software_home}/etc/hadoop/workers" <<-EOF
node1
node2
node3
EOF

# Distribute the configured installation and the env file to node2/node3.
# NOTE(review): the original comment mentioned editing "myid", which is a
# ZooKeeper concept -- nothing in this script touches a myid file.
scp -r "${software_home}" "root@node2:${install_dir}"
scp -r "${software_home}" "root@node3:${install_dir}"

scp "${custom_env_file}" "node2:${custom_env_file}"
scp "${custom_env_file}" "node3:${custom_env_file}"

# Success message (only reached when every prior step succeeded, via set -e).
echo "Successfully installed hadoop..."