#!/bin/bash
#
# Deploy a Hadoop HA cluster: distribute the Hadoop tarball, install the
# HA configuration files, set up environment variables, and start
# HDFS/YARN with automatic failover.
# Expects ./common.sh to define (at least): hadoop_package_path, servers,
# zookeeper_servers, NameNode_servers, DataNode_servers,
# JournalNode_servers, master_server, software_directory, autoshell_path,
# new_user, new_user_password, root_password.

source ./common.sh

# Seconds to pause between cluster start-up steps (0 = no pause).
sleep_time=0

# Installation root for the HA cluster on every node.
ha_directory="/opt/ha"

# Derive the Hadoop version directory name from the tarball name, e.g.
# /opt/software/hadoop-3.1.3.tar.gz -> hadoop-3.1.3. basename's suffix
# operand strips the literal ".tar.gz"; the original awk -F'.tar.gz'
# treated the dots as regex wildcards and could split at the wrong spot.
hadoop_version=$(basename "${hadoop_package_path}" .tar.gz)

# Environment variable lines appended to /etc/profile.d/my_env.sh.
# The PATH dollars are escaped so they expand on the target host at
# login time, not here.
env_variables=(
  "#HADOOP_HOME"
  "export HADOOP_HOME=${ha_directory}/${hadoop_version}"
  "export PATH=\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin"
)

# Verify that ZooKeeper is running on every ensemble host before doing
# anything else: HA failover (ZKFC) depends on a healthy ensemble.
for server in "${zookeeper_servers[@]}"; do

  echo "开始检查Zookeeper状态 ${server}"

  # The remote pipeline succeeds only when a zookeeper process (other
  # than our own grep) exists. Checking the exit status directly
  # replaces the original's "6666666" magic-sentinel comparison.
  # NOTE(review): an ssh/auth failure is indistinguishable from
  # "ZooKeeper not running" here, same as in the original.
  if ! sshpass -p "${new_user_password}" ssh -o StrictHostKeyChecking=no "${new_user}@${server}" "ps -ef | grep zookeeper | grep -v grep"; then
    echo "Zookeeper未在服务器${server}上启动。请启动Zookeeper并重新运行此脚本。"
    exit 1
  fi
done

# Push the Hadoop tarball from the master host to every cluster node.
# The rsync itself is launched on the master via ssh.
for server in "${servers[@]}"; do
  push_cmd="rsync -av ${hadoop_package_path}  ${new_user}@${server}:${software_directory}"
  sshpass -p "${new_user_password}" \
    ssh -o StrictHostKeyChecking=no "${new_user}@${master_server}" "${push_cmd}"
done

# Create the HA install directory on every node and hand its ownership
# to the deploy user. mkdir and chown run under a single sudo -S
# invocation so the piped root password covers both commands — in the
# original the second bare `sudo chown` had no password source and no
# tty to prompt on in a non-interactive ssh session.
for server in "${servers[@]}"; do
  sshpass -p "${new_user_password}" ssh -o StrictHostKeyChecking=no "${new_user}@${server}" "echo \"${root_password}\" | sudo -S sh -c 'mkdir -p ${ha_directory} && chown ${new_user}:${new_user} ${ha_directory}'"
done

# Remove any previous Hadoop install under the HA directory so the new
# tarball unpacks cleanly. The root password is piped to sudo -S because
# a fresh non-interactive ssh session has no sudo credential cache and
# no tty to prompt on (the original used a bare `sudo rm`).
for server in "${servers[@]}"; do
  sshpass -p "${new_user_password}" ssh -o StrictHostKeyChecking=no "${new_user}@${server}" "echo \"${root_password}\" | sudo -S rm -rf ${ha_directory}/hadoop*"
done

# Unpack the Hadoop tarball into the HA directory on each node.
for server in "${servers[@]}"; do

  echo "开始解压 ${server}"

  sshpass -p "${new_user_password}" \
    ssh -o StrictHostKeyChecking=no "${new_user}@${server}" \
    "tar -zxf ${hadoop_package_path} -C ${ha_directory}/"
done

# Overwrite the stock Hadoop configuration on every node with the HA
# configuration kept on the master. Looping over the file names replaces
# the original's four near-identical scp invocations.
ha_conf_files=(core-site.xml hdfs-site.xml yarn-site.xml workers)
for server in "${servers[@]}"; do

  echo "开始复制配置文件 ${server}"

  for conf_file in "${ha_conf_files[@]}"; do
    # We are already logged in on ${server}, so the destination is a
    # plain local path; the original's remote-to-remote scp back to the
    # same host opened a second, unauthenticated ssh hop.
    sshpass -p "${new_user_password}" ssh -o StrictHostKeyChecking=no "${new_user}@${server}" "scp ${new_user}@${master_server}:${autoshell_path}/hadoop-ha_conf/${conf_file} ${ha_directory}/${hadoop_version}/etc/hadoop/"
  done
done

# Ensure /etc/profile.d/my_env.sh exists on every node (touch is a no-op
# when it already does). The root password is piped to sudo -S — the
# original's bare `sudo touch` had no password source and no tty to
# prompt on in a non-interactive ssh session.
for server in "${servers[@]}"; do
  echo "开始判断my_env.sh文件是否存在"
  sshpass -p "${new_user_password}" ssh -o StrictHostKeyChecking=no "${new_user}@${server}" "echo \"${root_password}\" | sudo -S touch /etc/profile.d/my_env.sh"
  echo "判断my_env.sh文件是否存在完成"
done

# Strip any existing HADOOP_HOME lines from my_env.sh so the entries
# appended below are not duplicated on re-runs. Root password piped to
# sudo -S (the original's bare `sudo sed` could not prompt for one in a
# non-interactive ssh session).
for server in "${servers[@]}"; do
  sshpass -p "${new_user_password}" ssh -o StrictHostKeyChecking=no "${new_user}@${server}" "echo \"${root_password}\" | sudo -S sed -i '/HADOOP_HOME/d' /etc/profile.d/my_env.sh"
done

# Append the HADOOP_HOME environment entries to my_env.sh on every node.
# All lines are joined locally and written in a single ssh connection
# per host, instead of the original's one connection per line.
for server in "${servers[@]}"; do
  env_block=$(printf '%s\n' "${env_variables[@]}")
  # Remote single quotes keep $PATH/$HADOOP_HOME literal in the file,
  # exactly as the original per-line echo did.
  sshpass -p "${new_user_password}" ssh -o StrictHostKeyChecking=no "${new_user}@${server}" "echo '${env_block}' | sudo tee -a /etc/profile.d/my_env.sh"
done

for server in "${servers[@]}"; do
  # Refresh environment variables.
  # NOTE(review): this sources /etc/profile only inside this throwaway
  # ssh session, which exits immediately — it cannot affect later ssh
  # sessions or the node itself. The hdfs/yarn commands below presumably
  # resolve through some other PATH mechanism; verify on the cluster.
  sshpass -p "${new_user_password}" ssh -o StrictHostKeyChecking=no "${new_user}@${server}" "source /etc/profile"
done

# Bring up a JournalNode on every journal host; the NameNode format
# step below needs the journal quorum reachable.
for jn_host in "${JournalNode_servers[@]}"; do
  sshpass -p "${new_user_password}" \
    ssh -o StrictHostKeyChecking=no "${new_user}@${jn_host}" \
    "hdfs --daemon start journalnode"
done

# Pause so the JournalNodes can come up (sleep_time seconds; 0 by
# default at the top of the script).
sleep ${sleep_time}

# Format HDFS on the first NameNode only; the remaining NameNodes copy
# its metadata via bootstrapStandby further below.
first_namenode="${NameNode_servers[0]}"
sshpass -p "${new_user_password}" \
  ssh -o StrictHostKeyChecking=no "${new_user}@${first_namenode}" \
  "hdfs namenode -format"

sleep ${sleep_time}

# Start the freshly formatted NameNode.
sshpass -p "${new_user_password}" \
  ssh -o StrictHostKeyChecking=no "${new_user}@${first_namenode}" \
  "hdfs --daemon start namenode"

# Pause between steps (sleep_time seconds).
sleep ${sleep_time}

# Copy the first NameNode's metadata onto every remaining NameNode.
for standby in "${NameNode_servers[@]:1}"; do
  sshpass -p "${new_user_password}" \
    ssh -o StrictHostKeyChecking=no "${new_user}@${standby}" \
    "hdfs namenode -bootstrapStandby"
done

sleep ${sleep_time}

# Start the remaining (standby) NameNodes.
for standby in "${NameNode_servers[@]:1}"; do
  sshpass -p "${new_user_password}" \
    ssh -o StrictHostKeyChecking=no "${new_user}@${standby}" \
    "hdfs --daemon start namenode"
done

# Pause between steps (sleep_time seconds).
sleep ${sleep_time}

# Start a DataNode on every data host.
for dn_host in "${DataNode_servers[@]}"; do
  sshpass -p "${new_user_password}" \
    ssh -o StrictHostKeyChecking=no "${new_user}@${dn_host}" \
    "hdfs --daemon start datanode"
done

# Pause between steps (sleep_time seconds).
sleep ${sleep_time}

# Initialize the failover-controller state in ZooKeeper — run exactly
# once, from the first NameNode.
sshpass -p "${new_user_password}" \
  ssh -o StrictHostKeyChecking=no "${new_user}@${first_namenode}" \
  "hdfs zkfc -formatZK"

sleep ${sleep_time}

# Start a ZKFC process alongside every NameNode.
for nn_host in "${NameNode_servers[@]}"; do
  sshpass -p "${new_user_password}" \
    ssh -o StrictHostKeyChecking=no "${new_user}@${nn_host}" \
    "hdfs --daemon start zkfc"
done

# Pause between steps (sleep_time seconds).
sleep ${sleep_time}

# Launch YARN (ResourceManagers and NodeManagers) from the first
# NameNode host.
sshpass -p "${new_user_password}" \
  ssh -o StrictHostKeyChecking=no "${new_user}@${first_namenode}" \
  "start-yarn.sh"

sleep ${sleep_time}

# Print the HA state of ResourceManager "rm1" as a quick sanity check.
# NOTE(review): "rm1" must match an rm-id configured in yarn-site.xml.
sshpass -p "${new_user_password}" \
  ssh -o StrictHostKeyChecking=no "${new_user}@${first_namenode}" \
  "yarn rmadmin -getServiceState rm1"

echo "Hadoop HA集群部署完成。"
