#!/usr/bin/env bash

# Single-node (node1) Hadoop 3.3.6 provisioning script.
# Requires root; expects the Hadoop and JDK tarballs under /export.
set -euo pipefail  # exit on error, on unset variables, and on pipeline failures

# ========================
# 1. Base paths and variables
# ========================
export_dir="/export"
hadoop_tar="$export_dir/hadoop-3.3.6.tar.gz"
# NOTE(review): the double ".tar.gz.gz" extension looks like a typo —
# confirm the actual file name on disk before running.
jdk_tar="$export_dir/openjdk-8u40-b25-linux-x64-10_feb_2015.tar.gz.gz"
hadoop_home="$export_dir/server/hadoop"
jdk_home="$export_dir/server/jdk"
# IP prefix shared by every cluster node (node1=.131, node2=.132, node3=.133)
base_ip="192.168.95"

# Sanity check: both archives must be present before anything is touched.
for archive in "$hadoop_tar" "$jdk_tar"; do
    if [ ! -f "$archive" ]; then
        echo "ERROR: $archive not found!"
        exit 1
    fi
done

# ========================
# 2. Create install dir and unpack archives
# ========================
# Recreate a clean server directory. ${export_dir:?} aborts instead of
# expanding to "/server" if the variable were ever empty/unset — guards
# the destructive rm. (rm -rf is a no-op on a missing path, so the old
# "if exists" check was redundant.)
rm -rf -- "${export_dir:?}/server"
mkdir -p "$export_dir/server"

# Unpack and normalize the version-suffixed directory names.
tar -zxf "$hadoop_tar" -C "$export_dir/server/"
mv "$export_dir/server/hadoop-3.3.6" "$hadoop_home"
tar -zxf "$jdk_tar" -C "$export_dir/server/"
mv "$export_dir/server/java-se-8u40-ri" "$jdk_home"

# ========================
# 3. Create HDFS data directories (NameNode / DataNode)
# ========================
rm -rf -- /data
mkdir -p /data/nn /data/dn

# ========================
# 4. Ownership
# ========================
# FIX: the original script chown'ed to hadoop:hadoop *before* the hadoop
# user was created (section 9), which fails on a fresh machine under
# set -e. Create the account here if missing; section 9's guarded block
# still sets the password on first run.
if ! id hadoop &>/dev/null; then
    useradd -m hadoop
fi
chown -R hadoop:hadoop "$export_dir/server"
chown -R hadoop:hadoop /data

# ========================
# 5. Environment variables for login shells
# ========================
# Unescaped $jdk_home/$hadoop_home expand now; escaped \$VAR references
# are written literally and resolved when the profile is sourced.
# FIX: also export PATH — without $HADOOP_HOME/bin on PATH, the later
# "su - hadoop -c 'hdfs namenode -format'" cannot find the hdfs binary.
cat > /etc/profile.d/hadoop.sh <<EOF
export JAVA_HOME=$jdk_home
export HADOOP_HOME=$hadoop_home
export HADOOP_CONF_DIR=\$HADOOP_HOME/etc/hadoop
export HADOOP_LOG_DIR=\$HADOOP_HOME/logs
export PATH=\$PATH:\$JAVA_HOME/bin:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin
EOF

# ========================
# 6. Hostname
# ========================
hostnamectl set-hostname node1

# ========================
# 7. Static IP on ens33
# ========================
# NOTE(review): ifcfg-style network-scripts assume a RHEL/CentOS 7-era
# system and the interface name "ens33" (typical of VMware guests) —
# confirm both on the target machine.
cat > /etc/sysconfig/network-scripts/ifcfg-ens33 <<EOF
TYPE=Ethernet
BOOTPROTO=static
NAME=ens33
DEVICE=ens33
ONBOOT=yes
IPADDR=${base_ip}.131
NETMASK=255.255.255.0
GATEWAY=${base_ip}.2
DNS1=8.8.8.8
EOF

# FIX: network.service no longer exists on EL8+ (replaced by
# NetworkManager); fall back so the script does not die under set -e.
systemctl restart network || systemctl restart NetworkManager

# ========================
# 8. /etc/hosts — static name resolution for all cluster nodes
# ========================
{
    printf '%s\n' "127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4"
    printf '%s\n' "::1         localhost localhost.localdomain localhost6 localhost6.localdomain6"
    printf '%s\n' "${base_ip}.131 node1"
    printf '%s\n' "${base_ip}.132 node2"
    printf '%s\n' "${base_ip}.133 node3"
} > /etc/hosts

# ========================
# 9. hadoop service account
# ========================
# First run only: create the account and set its password. An existing
# account (and its password) is left untouched.
id hadoop &>/dev/null || {
    useradd -m hadoop
    echo "hadoop:123456" | chpasswd
}

# ========================
# 10. Passwordless SSH for hadoop (this host only)
# ========================
# FIX: made re-runnable — ssh-keygen prompts/fails when the key file
# already exists, and repeated runs used to append duplicate pubkeys.
# Also pin the permissions sshd insists on (700 ~/.ssh, 600 authorized_keys).
su - hadoop -c '
    mkdir -p ~/.ssh && chmod 700 ~/.ssh
    [ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
    touch ~/.ssh/authorized_keys
    grep -qxF "$(cat ~/.ssh/id_rsa.pub)" ~/.ssh/authorized_keys \
        || cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
    chmod 600 ~/.ssh/authorized_keys
'

# ========================
# 11. Passwordless SSH across all nodes
# ========================
# Manual step: on every machine, run ssh-copy-id against all nodes.

# ========================
# 12. Firewall
# ========================
# Manual step: configure the firewall (Hadoop RPC/web ports must be
# reachable between nodes).

# ========================
# 13. SELinux
# ========================
# Manual step: configure SELinux.

# ========================
# 14. Hadoop configuration
# ========================
# Directory holding the *-site.xml / workers / hadoop-env.sh files
# written by the sections below.
hadoop_conf="$hadoop_home/etc/hadoop"

# core-site.xml — default filesystem (NameNode RPC on node1:8020) and
# 128 KiB I/O buffer size. Heredoc content is literal; nothing expands.
cat > "$hadoop_conf/core-site.xml" <<EOF
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://node1:8020</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
</configuration>
EOF

# hdfs-site.xml — NameNode/DataNode storage paths (/data/nn, /data/dn),
# 256 MiB block size, 100 NameNode handler threads.
# NOTE(review): dfs.namenode.hosts conventionally points to an *include
# file* listing permitted DataNodes, not a comma-separated host list —
# verify this value against the Hadoop 3.3 cluster-setup docs.
cat > "$hadoop_conf/hdfs-site.xml" <<EOF
<configuration>
  <property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>700</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/data/nn</value>
  </property>
  <property>
    <name>dfs.namenode.hosts</name>
    <value>node1,node2,node3</value>
  </property>
  <property>
    <name>dfs.blocksize</name>
    <value>268435456</value>
  </property>
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/data/dn</value>
  </property>
</configuration>
EOF

# workers — one DataNode host per line.
# FIX: replaced non-portable "echo -e" (escape handling differs between
# shells/builtins) with printf.
printf '%s\n' node1 node2 node3 > "$hadoop_conf/workers"

# hadoop-env.sh — JVM and Hadoop locations for the daemons.
# NOTE(review): this *overwrites* the distribution's stock hadoop-env.sh;
# confirm none of its default settings are needed before relying on this.
cat > "$hadoop_conf/hadoop-env.sh" <<EOF
export JAVA_HOME=$jdk_home
export HADOOP_HOME=$hadoop_home
export HADOOP_CONF_DIR=\$HADOOP_HOME/etc/hadoop
export HADOOP_LOG_DIR=\$HADOOP_HOME/logs
EOF

# ========================
# 15. Format the NameNode (node1 only)
# ========================
# FIX: invoke hdfs by absolute path — the profile written earlier in the
# original script never added $HADOOP_HOME/bin to PATH, so a bare "hdfs"
# under "su - hadoop" was not found. -nonInteractive prevents a re-run
# from hanging on the "Re-format filesystem?" prompt.
su - hadoop -c "$hadoop_home/bin/hdfs namenode -format -nonInteractive"

# ========================
# 16. root password
# ========================
# NOTE(review): a hardcoded root password in a script is a security risk
# (readable on disk, leaks via VCS) — prefer reading it from the
# environment or prompting.
echo "root:sanda" | chpasswd

# ========================
# Completion notice
# ========================
echo "✅ Hadoop node1 初始化完成！"
echo "👉 请手动将 /export/server 目录同步到 node2、node3，并在它们上执行："
echo "   - 修改主机名（node2 / node3）"
echo "   - 修改 IP 地址"
echo "   - 创建 /data/nn /data/dn 并授权给 hadoop"
echo "   - 配置 SSH 免密互信（在每台机器上 ssh-copy-id 所有节点）"