#!/bin/bash
# Install OpenJDK 8, download Hadoop 3.4.1 and deploy it into
# /usr/local/hadoop, then (below) write config for one of four modes.

# -y: answer the apt prompt automatically so the script does not hang
# when run non-interactively.
apt install -y openjdk-8-jdk
java -version

# Release list: https://hadoop.apache.org/releases.html
wget -nc https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-3.4.1/hadoop-3.4.1.tar.gz
tar -zxvf hadoop-3.4.1.tar.gz
rm -rf /usr/local/hadoop
mkdir -p /usr/local/hadoop
mv hadoop-3.4.1/* /usr/local/hadoop

# Everything below writes into $HADOOP_HOME/etc/hadoop; default it to the
# install prefix above if the caller has not already exported it.
HADOOP_HOME=${HADOOP_HOME:-/usr/local/hadoop}
export HADOOP_HOME

# Ask which deployment flavor to configure; -n 1 accepts a single keypress.
read -p "yarn and juicefs[y] or single node[s] or cluster[c]? or HA[h]?" -n 1 input

# --- YARN + JuiceFS: JuiceFS (backed by Redis metadata) replaces HDFS ---
if [ "$input" = "y" ]; then
    # The JuiceFS Hadoop SDK jar must sit on Hadoop's common classpath.
    wget -nc https://github.com/juicedata/juicefs/releases/download/v1.2.2/juicefs-hadoop-1.2.2.jar
    mv juicefs-hadoop-1.2.2.jar "${HADOOP_HOME}/share/hadoop/common/lib/"

    # Append JAVA_HOME so the Hadoop daemons can locate the JVM.
    # (The previous `sed -i "export JAVA_HOME=..." file` was not a valid
    # sed script and could never have edited hadoop-env.sh.)
    # NOTE(review): openjdk-8 is installed at the top of this script but
    # this branch points at java-17 -- confirm which JDK is intended.
    echo "export JAVA_HOME=/usr/lib/jvm/java-17-openjdk-amd64" >> "$HADOOP_HOME/etc/hadoop/hadoop-env.sh"

    # core-site.xml: register the JuiceFS filesystem implementation, its
    # Redis metadata engine and the local cache settings.
    tee "$HADOOP_HOME/etc/hadoop/core-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
    <name>fs.jfs.impl</name>
    <value>io.juicefs.JuiceFileSystem</value>
  </property>
  <property>
    <name>fs.AbstractFileSystem.jfs.impl</name>
    <value>io.juicefs.JuiceFS</value>
  </property>
  <property>
    <name>juicefs.meta</name>
    <value>redis://:redis_mWGDTt@172.19.109.227:6379/10</value>
  </property>
  <property>
    <name>juicefs.cache-dir</name>
    <value>/data*/jfs</value>
  </property>
  <property>
    <name>juicefs.cache-size</name>
    <value>1024</value>
  </property>
  <property>
    <name>juicefs.access-log</name>
    <value>/tmp/juicefs.access.log</value>
  </property>
</configuration>
EOF

    # mapred-site.xml: run MapReduce jobs on YARN.
    tee "$HADOOP_HOME/etc/hadoop/mapred-site.xml" <<-'EOF'
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>
</configuration>
EOF

    # yarn-site.xml: shuffle service plus env vars containers may inherit.
    tee "$HADOOP_HOME/etc/hadoop/yarn-site.xml" <<-'EOF'
<?xml version="1.0"?>

<configuration>

	<!-- Site specific YARN configuration properties -->
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
	<property>
		<name>yarn.nodemanager.env-whitelist</name>
		<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
	</property>
</configuration>
EOF

    # Single worker node for this flavor.
    tee "$HADOOP_HOME/etc/hadoop/workers" <<-'EOF'
bigdata
EOF

fi


# --- Single-node (pseudo-distributed) HDFS + YARN ---
if [ "$input" = "s" ]; then
    # Append JAVA_HOME so the Hadoop daemons can locate the JVM.
    # (The previous `sed -i "export JAVA_HOME=..." file` was not a valid
    # sed script and could never have edited hadoop-env.sh.)
    echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64" >> "$HADOOP_HOME/etc/hadoop/hadoop-env.sh"

    # core-site.xml: default filesystem, data dir and web UI static user.
    tee "$HADOOP_HOME/etc/hadoop/core-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
    <!-- 读取和写入数据的默认路径 -->
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://bigdata:9000</value>
	</property>
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/data/hadoop</value>
	</property>
	<!--./share/doc/hadoop/hadoop-project-dist/hadoop-common/core-default.xml-->
	<property>
		<name>hadoop.http.staticuser.user</name>
		<value>hadoop</value>
	</property>
</configuration>
EOF

    # hdfs-site.xml: single replica (one node), permissions disabled.
    tee "$HADOOP_HOME/etc/hadoop/hdfs-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
	<property>
		<name>dfs.replication</name>
		<value>1</value>
	</property>
	<!--./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml-->
	<property>
		<name>dfs.permissions.enabled</name>
		<value>false</value>
	</property>
</configuration>
EOF

    # mapred-site.xml: run MapReduce jobs on YARN.
    tee "$HADOOP_HOME/etc/hadoop/mapred-site.xml" <<-'EOF'
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>
</configuration>
EOF

    # yarn-site.xml: shuffle service plus env vars containers may inherit.
    tee "$HADOOP_HOME/etc/hadoop/yarn-site.xml" <<-'EOF'
<?xml version="1.0"?>

<configuration>

	<!-- Site specific YARN configuration properties -->
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
	<property>
		<name>yarn.nodemanager.env-whitelist</name>
		<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
	</property>
</configuration>
EOF

    # Single worker node for this flavor.
    tee "$HADOOP_HOME/etc/hadoop/workers" <<-'EOF'
bigdata
EOF

fi

# --- Small cluster: NameNode/SecondaryNN/RM on bigdata01, workers 02/03 ---
if [ "$input" = "c" ]; then
    # Append JAVA_HOME so the Hadoop daemons can locate the JVM.
    # (The previous `sed -i "export JAVA_HOME=..." file` was not a valid
    # sed script and could never have edited hadoop-env.sh.)
    echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64" >> "$HADOOP_HOME/etc/hadoop/hadoop-env.sh"

    # core-site.xml: default filesystem, data dir and web UI static user.
    tee "$HADOOP_HOME/etc/hadoop/core-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
    <!-- 读取和写入数据的默认路径 -->
	<property>
		<name>fs.defaultFS</name>
		<value>hdfs://bigdata01:9000</value>
	</property>
	<property>
		<name>hadoop.tmp.dir</name>
		<value>/data/hadoop</value>
	</property>
	<!--./share/doc/hadoop/hadoop-project-dist/hadoop-common/core-default.xml-->
	<property>
		<name>hadoop.http.staticuser.user</name>
		<value>hadoop</value>
	</property>
</configuration>
EOF

    # hdfs-site.xml: two replicas (two workers), secondary NN on bigdata01.
    tee "$HADOOP_HOME/etc/hadoop/hdfs-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
	<property>
		<name>dfs.replication</name>
		<value>2</value>
	</property>
	<property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>bigdata01:50090</value>
    </property>
	<!--./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml-->
	<property>
		<name>dfs.permissions.enabled</name>
		<value>false</value>
	</property>
</configuration>
EOF

    # mapred-site.xml: run MapReduce jobs on YARN.
    tee "$HADOOP_HOME/etc/hadoop/mapred-site.xml" <<-'EOF'
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
	<property>
		<name>mapreduce.framework.name</name>
		<value>yarn</value>
	</property>
</configuration>
EOF

    # yarn-site.xml: shuffle service, env whitelist and the RM host.
    tee "$HADOOP_HOME/etc/hadoop/yarn-site.xml" <<-'EOF'
<?xml version="1.0"?>

<configuration>

	<!-- Site specific YARN configuration properties -->
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
	</property>
	<property>
		<name>yarn.nodemanager.env-whitelist</name>
		<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
	</property>
	<property>
        <name>yarn.resourcemanager.hostname</name>
        <value>bigdata01</value>
    </property>
</configuration>
EOF

    # Worker (DataNode/NodeManager) hosts.
    tee "$HADOOP_HOME/etc/hadoop/workers" <<-'EOF'
bigdata02
bigdata03
EOF

fi

# --- High-availability cluster: 3 NameNodes + 3 ResourceManagers,
#     JournalNode quorum and ZooKeeper-based automatic failover ---
if [ "$input" = "h" ]; then
    # Append JAVA_HOME so the Hadoop daemons can locate the JVM.
    # (The previous `sed -i "export JAVA_HOME=..." file` was not a valid
    # sed script and could never have edited hadoop-env.sh.)
    echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64" >> "$HADOOP_HOME/etc/hadoop/hadoop-env.sh"

    # core-site.xml: logical nameservice as default FS, proxy user and the
    # ZooKeeper quorum used by the ZKFC daemons.
    tee "$HADOOP_HOME/etc/hadoop/core-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->
<configuration>
    <!-- Namenode高可用配置-自定义集群名称,且不用指定端口号 -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://mycluster</value>
    </property>
    <!-- 指定 hadoop 数据的存储目录 -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/data/hadoop</value>
    </property>
    <!-- 配置 HDFS 网页登录使用的静态用户为 bigdata -->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>bigdata</value>
    </property>
    <!--置超级代理-->
    <property>
        <name>hadoop.proxyuser.bigdata.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.bigdata.groups</name>
        <value>*</value>
    </property>
    <!-- 配置ZKFC进程连接zookeeper的地址 -->
	<property>
        <name>ha.zookeeper.quorum</name>
        <value>bigdata11:2181,bigdata22:2181,bigdata33:2181</value>
    </property>
</configuration>
EOF

    # hdfs-site.xml: nameservice with three NameNodes, QJM shared edits,
    # SSH fencing and automatic failover.
    tee "$HADOOP_HOME/etc/hadoop/hdfs-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
	<property>
	  <name>dfs.nameservice.id</name>
	  <value>mycluster</value> <!-- Substitute with your nameservice id -->
	</property>
	<property>
	  <name>dfs.ha.namenode.id</name>
	  <value>nn1</value> <!-- Substitute with your local namenode id -->
	</property>
	<!--./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml-->
	<property>
		<name>dfs.permissions.enabled</name>
		<value>false</value>
	</property>
  <!-- namenode服务逻辑id -->
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>
  <!-- namenode服务mycluster下3个节点 -->
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2,nn3</value>
  </property>
  <!-- 节点通讯地址 -->
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>bigdata11:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>bigdata22:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn3</name>
    <value>bigdata33:8020</value>
  </property>
  <!-- web ui地址 -->
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>bigdata11:9870</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>bigdata22:9870</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn3</name>
    <value>bigdata33:9870</value>
  </property>
  <!-- journalnode edits读取写入地址 -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://bigdata11:8485;bigdata22:8485;bigdata33:8485/mycluster</value>
  </property>
  <!--  the Java class that HDFS clients use to contact the Active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <!-- a list of scripts or Java classes which will be used to fence the Active NameNode during a
  failover -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence
shell(/bin/true)</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_ed25519</value>
  </property>
  <property>
    <name>dfs.ha.nn.not-become-active-in-safemode</name>
    <value>true</value>
  </property>
  <!-- 故障情况自动切换 -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <!-- Namenode 数据存储目录-->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>${hadoop.tmp.dir}/name</value>
  </property>

  <!-- Datanode 数据存储目录-->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>${hadoop.tmp.dir}/data</value>
  </property>

  <!-- journalnode 数据存储目录-->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>${hadoop.tmp.dir}/jn</value>
  </property>
</configuration>
EOF

    # yarn-site.xml: RM HA across rm1/rm2/rm3 with ZooKeeper state store,
    # recovery and log aggregation.
    tee "$HADOOP_HOME/etc/hadoop/yarn-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- 指定 MR 走 shuffle -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- 指定 ResourceManager 的地址 单节点 -->
    <!--
<property>
<name>yarn.resourcemanager.hostname</name>
<value>centos2</value>
</property>
-->
    <!-- 环境变量的继承 -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>
            JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- hadoop classpath输出以下路径 -->
    <property>
        <name>yarn.application.classpath</name>
        <value>
/usr/local/hadoop/etc/hadoop:/usr/local/hadoop/share/hadoop/common/lib/*:/usr/local/hadoop/share/hadoop/common/*:/usr/local/hadoop/share/hadoop/hdfs:/usr/local/hadoop/share/hadoop/hdfs/lib/*:/usr/local/hadoop/share/hadoop/hdfs/*:/usr/local/hadoop/share/hadoop/mapreduce/lib/*:/usr/local/hadoop/share/hadoop/mapreduce/*:/usr/local/hadoop/share/hadoop/yarn:/usr/local/hadoop/share/hadoop/yarn/lib/*:/usr/local/hadoop/share/hadoop/yarn/*</value>
    </property>
    <!-- 开启resourcemanager HA-->
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>
    <!-- 自定义一个resourcemanager的逻辑集群id-->
    <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>yarn-cluster</value>
    </property>
    <!-- 指定resourcemanager集群的逻辑节点名称列表-->
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2,rm3</value>
    </property>
    <!-- rm1的节点信息-->
    <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>bigdata11</value>
    </property>
    <!-- yarn web页面地址  -->
    <property>
        <name>yarn.resourcemanager.webapp.address.rm1</name>
        <value>bigdata11:8088</value>
    </property>
    <!-- rm1 对客户端暴露的地址，客户端通过该地址向RM提交任务等 -->
    <property>
        <name>yarn.resourcemanager.address.rm1</name>
        <value>bigdata11:8032</value>
    </property>
    <!-- rm1 与 applicationMaster的通信地址  -->
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm1</name>
        <value>bigdata11:8030</value>
    </property>
    <!-- rm1 与 nm的通信地址  -->
    <property>
        <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
        <value>bigdata11:8031</value>
    </property>

    <!-- rm2的节点信息-->
    <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>bigdata22</value>
    </property>
    <!-- yarn web页面地址  -->
    <property>
        <name>yarn.resourcemanager.webapp.address.rm2</name>
        <value>bigdata22:8088</value>
    </property>
    <!-- rm2 对客户端暴露的地址，客户端通过该地址向RM提交任务等 -->
    <property>
        <name>yarn.resourcemanager.address.rm2</name>
        <value>bigdata22:8032</value>
    </property>
    <!-- rm2 与 applicationMaster的通信地址  -->
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm2</name>
        <value>bigdata22:8030</value>
    </property>
    <!-- rm2 与 nm的通信地址  -->
    <property>
        <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
        <value>bigdata22:8031</value>
    </property>

    <!-- rm3的节点信息-->
    <property>
        <name>yarn.resourcemanager.hostname.rm3</name>
        <value>bigdata33</value>
    </property>
    <!-- yarn web页面地址  -->
    <property>
        <name>yarn.resourcemanager.webapp.address.rm3</name>
        <value>bigdata33:8088</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address.rm3</name>
        <value>bigdata33:8032</value>
    </property>
    <!-- rm3 与 applicationMaster的通信地址  -->
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm3</name>
        <value>bigdata33:8030</value>
    </property>
    <!-- rm3 与 nm的通信地址  -->
    <property>
        <name>yarn.resourcemanager.resource-tracker.address.rm3</name>
        <value>bigdata33:8031</value>
    </property>

    <!-- 配置zookeeper信息  -->
    <property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>bigdata11:2181,bigdata22:2181,bigdata33:2181</value>
    </property>

    <!-- 启动自动恢复 -->
    <property>
        <name>yarn.resourcemanager.recovery.enabled</name>
        <value>true</value>
    </property>

    <!-- 配置将recourcemanager的状态信息存储在zookeeper中 -->
    <property>
        <name>yarn.resourcemanager.store.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
    </property>
    <!-- 开启日志聚集功能 -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <!-- 设置日志聚集服务器地址 -->
    <property>
        <name>yarn.log.server.url</name>
        <value>http://bigdata11:19888/jobhistory/logs</value>
    </property>
    <!-- 设置日志保留时间为 7 天 -->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
</configuration>
EOF

    # mapred-site.xml: MapReduce on YARN plus the JobHistory server.
    tee "$HADOOP_HOME/etc/hadoop/mapred-site.xml" <<-'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- 指定 MapReduce 程序运行在 Yarn 上 -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- 历史服务器端地址 -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>bigdata11:10020</value>
    </property>
    <!-- 历史服务器 web 端地址 -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>bigdata11:19888</value>
    </property>
</configuration>
EOF

    # Worker (DataNode/NodeManager) hosts.
    tee "$HADOOP_HOME/etc/hadoop/workers" <<-'EOF'
bigdata11
bigdata22
bigdata33
EOF

fi

#hdfs namenode -format