echo "开始运行！"
if [ -e /opt/module/hadoop ]; then
   rm -rf /opt/module/hadoop
   echo "/opt/module/hadoop,已删除"
fi
sudo mkdir -p /opt/module/hadoop
echo "/opt/module/hadoop,创建目录成功！"
hadoop_tar=`find /opt/upload/software -name hadoop-3.4.0*`
if [[ $hadoop_tar == *"hadoop-3.4.0"* ]]; then
    echo "hadoop_tar = $hadoop_tar"
else
    echo "hadoop_tar，值为空,退出安装"
    exit 1
fi
# BUG FIX: the original ran 'tar -xzvf $jdk_tar' — $jdk_tar is never set in
# this script (a leftover from a JDK install script), so nothing was
# extracted. Use the Hadoop tarball located above.
# sudo is required because /opt/module/hadoop was created root-owned.
sudo tar -xzvf "$hadoop_tar" -C /opt/module/hadoop
# Abort if the directory is somehow not enterable instead of continuing
# in the wrong working directory.
cd /opt/module/hadoop || exit 1
# Resolve the extracted installation directory (e.g. .../hadoop-3.4.0).
hadoop_install=$(find /opt/module/hadoop -name 'hadoop-3.4.0*')
if [[ $hadoop_install == *"hadoop-3.4.0"* ]]; then
    echo "hadoop_install = $hadoop_install"
else
    echo "hadoop_install，值为空,退出安装"
    exit 1
fi
echo "修改文件权限,755方便其他用户有权限执行"
sudo chmod -R 755 /opt/module/hadoop
echo "export JAVA_HOME=$JAVA_HOME" >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh
sudo echo "#环境变量" > /etc/profile.d/hadoop_env.sh
sudo echo "export HADOOP_HOME=$hadoop_install" >> /etc/profile.d/hadoop_env.sh
sudo echo "export PATH=\$PATH:\$HADOOP_HOME/bin" >> /etc/profile.d/hadoop_env.sh
sudo echo "export PATH=\$PATH:\$HADOOP_HOME/sbin" >> /etc/profile.d/hadoop_env.sh
#环境变量生效
cat /etc/profile.d/hadoop_env.sh
source /etc/profile
# Recreate the Hadoop temp directory (referenced by hadoop.tmp.dir below).
# BUG FIX: sudo on rm to match the sudo mkdir (root-owned directory).
if [ -e /opt/module/hadoop/tmp ]; then
    sudo rm -rf /opt/module/hadoop/tmp
    echo "/opt/module/hadoop/tmp,已删除"
fi
sudo mkdir -p /opt/module/hadoop/tmp
echo "/opt/module/hadoop/tmp,创建目录成功！"
# Generate core-site.xml. The unquoted EOF delimiter allows variable
# expansion, but this document contains no shell variables, so the XML
# is written verbatim.
cat > "$HADOOP_HOME/etc/hadoop/core-site.xml" << EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- 集群服务名 -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://cms</value>
    </property>
    <!-- Hadoop临时目录 -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/module/hadoop/tmp</value>
    </property>
    <!-- Zookeeper地址 -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>hadoop3:2181,hadoop4:2181,hadoop5:2181</value>
    </property>
</configuration>
EOF
# Recreate the DataNode storage directory (referenced by
# dfs.datanode.data.dir below).
# BUG FIX: sudo on rm to match the sudo mkdir (root-owned directory).
if [ -e /opt/module/hadoop/hdfs/data ]; then
    sudo rm -rf /opt/module/hadoop/hdfs/data
    echo "/opt/module/hadoop/hdfs/data,已删除"
fi
sudo mkdir -p /opt/module/hadoop/hdfs/data
echo "/opt/module/hadoop/hdfs/data,创建目录成功！"
# Generate hdfs-site.xml for an HA nameservice "cms" with NameNodes on
# hadoop1/hadoop2 and a 3-node JournalNode quorum.
cat > "$HADOOP_HOME/etc/hadoop/hdfs-site.xml" << EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- 集群服务名 -->
    <property>
        <name>dfs.nameservices</name>
        <value>cms</value>
    </property>
    <!-- 自动故障转移 -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <!-- 故障切换代理类 -->
    <property>
        <name>dfs.client.failover.proxy.provider.cms</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- hadoop1,NameNode的HTTP地址 -->
    <property>
        <name>dfs.namenode.http-address.cms.hadoop1</name>
        <value>hadoop1:9870</value>
    </property>
    <!-- hadoop1,NameNode的RPC地址 -->
    <property>
        <name>dfs.namenode.rpc-address.cms.hadoop1</name>
        <value>hadoop1:9000</value>
    </property>
    <!-- hadoop1,NameNode的HTTPS地址 -->
    <property>
        <name>dfs.namenode.https-address.cms.hadoop1</name>
        <value>hadoop1:50470</value>
    </property>
    <!-- hadoop2,NameNode的HTTP地址 -->
    <property>
        <name>dfs.namenode.http-address.cms.hadoop2</name>
        <value>hadoop2:9870</value>
    </property>
    <!-- hadoop2,NameNode的RPC地址 -->
    <property>
        <name>dfs.namenode.rpc-address.cms.hadoop2</name>
        <value>hadoop2:9000</value>
    </property>
    <!-- hadoop2,NameNode的HTTPS地址 -->
    <property>
        <name>dfs.namenode.https-address.cms.hadoop2</name>
        <value>hadoop2:50470</value>
    </property>
    <!-- NameNode节点列表 -->
    <property>
        <name>dfs.ha.namenodes.cms</name>
        <value>hadoop1,hadoop2</value>
    </property>
    <!-- JournalNode 集群地址 -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://hadoop1:8485,hadoop2:8485,hadoop3:8485/cms</value>
    </property>
    <!-- 副本数 -->
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <!-- DataNode 存储的位置 -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/opt/module/hadoop/hdfs/data</value>
    </property>
</configuration>
EOF
# Generate mapred-site.xml (MR on YARN, JobHistory Server on hadoop4).
cat > "$HADOOP_HOME/etc/hadoop/mapred-site.xml" << EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- 指定 MapReduce 运行框架 -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- 允许作业历史服务器在重启后恢复未完成的作业记录 -->
    <property>
        <name>mapreduce.jobhistory.recovery.enable</name>
        <value>true</value>
    </property>
    <!-- NOTE(review): 'mapreduce.jobhistory.store.class' 与
         'mapreduce.jobhistory.journal.nodes' 不是标准 Hadoop 配置键，
         若无自定义组件读取，会被静默忽略 —— 请确认 -->
    <property>
        <name>mapreduce.jobhistory.store.class</name>
        <value>org.apache.hadoop.mapreduce.jobhistory.JournalBasedStore</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.journal.nodes</name>
        <value>hadoop1:8485,hadoop2:8485,hadoop3:8485</value>
    </property>
    <!-- 修复: 已删除 'yarn.resourcemanager.address=cms' —— 该键要求
         host:port 形式；启用 RM HA 时各 RM 地址已按 rm-id 配置在
         yarn-site.xml 中，此处的裸服务名会误导客户端 -->
    <!-- JobHistory Server RPC 地址 -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>hadoop4:10020</value>
    </property>
    <!-- Web UI 访问地址 -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hadoop4:19888</value>
    </property>
    <!-- 启用 Map 输出压缩（减少网络传输） -->
    <property>
        <name>mapreduce.map.output.compress</name>
        <value>true</value>
    </property>
    <!-- 指定压缩算法 -->
    <property>
        <name>mapreduce.map.output.compress.codec</name>
        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
    </property>
</configuration>
EOF
# Generate yarn-site.xml (ResourceManager HA: rm1=hadoop1, rm2=hadoop2).
cat > "$HADOOP_HOME/etc/hadoop/yarn-site.xml" << EOF
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- 启用 RM HA 模式 -->
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>
    <!-- YARN集群的唯一标识，用于区分不同集群 -->
    <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>cms</value>
    </property>
    <!-- 配置各 RM 主机地址rm1 -->
    <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>hadoop1</value>
    </property>
    <!-- 注册和发送心跳的 RPC 服务地址rm1 -->
    <property>
        <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
        <value>hadoop1:8031</value>
    </property>
    <!-- 客户端提交作业的 RPC 地址rm1 -->
    <property>
        <name>yarn.resourcemanager.address.rm1</name>
        <value>hadoop1:8032</value>
    </property>
    <!-- Resource Manager 调度器地址rm1 -->
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm1</name>
        <value>hadoop1:8030</value>
    </property>
    <!-- 管理员与 ResourceManager 通信的 RPC 地址rm1 -->
    <property>
        <name>yarn.resourcemanager.admin.address.rm1</name>
        <value>hadoop1:8033</value>
    </property>
    <!-- ResourceManager Web UI 地址rm1 (HTTPS) -->
    <property>
        <name>yarn.resourcemanager.webapp.https.address.rm1</name>
        <value>hadoop1:8090</value>
    </property>
    <!-- ResourceManager Web UI 地址rm1 -->
    <property>
        <name>yarn.resourcemanager.webapp.address.rm1</name>
        <value>hadoop1:8080</value>
    </property>
    <!-- 配置各 RM 主机地址rm2 -->
    <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>hadoop2</value>
    </property>
    <!-- 注册和发送心跳的 RPC 服务地址rm2 -->
    <property>
        <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
        <value>hadoop2:8031</value>
    </property>
    <!-- 客户端提交作业的 RPC 地址rm2 -->
    <property>
        <name>yarn.resourcemanager.address.rm2</name>
        <value>hadoop2:8032</value>
    </property>
    <!-- Resource Manager 调度器地址rm2 -->
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm2</name>
        <value>hadoop2:8030</value>
    </property>
    <!-- 管理员与 ResourceManager 通信的 RPC 地址rm2 -->
    <property>
        <name>yarn.resourcemanager.admin.address.rm2</name>
        <value>hadoop2:8033</value>
    </property>
    <!-- ResourceManager Web UI 地址rm2 (HTTPS) -->
    <property>
        <name>yarn.resourcemanager.webapp.https.address.rm2</name>
        <value>hadoop2:8090</value>
    </property>
    <!-- ResourceManager Web UI 地址rm2 -->
    <property>
        <name>yarn.resourcemanager.webapp.address.rm2</name>
        <value>hadoop2:8080</value>
    </property>
    <!-- RM集群ID列表 (修复: 原值 'rm1rm' 与上面按 rm1/rm2 配置的地址不匹配) -->
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
    </property>
    <!-- ZooKeeper 地址 (RM HA 自动故障转移所需; 与 core-site 的 quorum 一致) -->
    <property>
        <name>hadoop.zk.address</name>
        <value>hadoop3:2181,hadoop4:2181,hadoop5:2181</value>
    </property>
    <!-- 指定 NodeManager 的辅助服务 -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- shuffle服务的实现类 -->
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <!-- NodeManager 节点总可用物理内存 -->
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>1024</value>
    </property>
    <!-- NodeManager 节点总虚拟 CPU 核数 -->
    <property>
        <name>yarn.nodemanager.resource.cpu-vcores</name>
        <value>2</value>
    </property>
</configuration>
EOF
echo "运行结束！"