#!/bin/bash
# Bootstrap the Hadoop cluster install environment:
# show the current node list (hadoop.info) and ask the operator to confirm.
set -e

# Load cluster configuration (provides HADOOP_HOME, etc.).
# NOTE(review): this path is relative to the caller's cwd, so the script
# must be launched from its own directory — TODO confirm or make absolute.
. ../config/my-config.sh

##################### Part 1: initialise the install environment ########################
echo '查看当前集群信息'
if [ -f 'hadoop.info' ]; then
	# hadoop.info is a data file (one "role host ip" record per line);
	# it only needs to be readable — no execute bit required.
	cat hadoop.info
else
	# Create an empty node list so the later read-loops have a file to consume.
	touch hadoop.info
fi

# Abort unless the operator explicitly answers y/Y.
# -r keeps backslashes in the reply literal.
read -r -p "是否继续(Y/N)？：" isY
if [ "${isY}" != "y" ] && [ "${isY}" != "Y" ]; then
	exit 1
fi

# Regenerate the cluster configuration files from scratch.
# 'hosts' begins with the standard (commented-out) localhost entries.
printf '%s\n' \
	'#127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4' \
	'#::1         localhost localhost.localdomain localhost6 localhost6.localdomain6' \
	> hosts

# Start with fresh, empty key and worker lists.
rm -f authorized_keys slaves
touch authorized_keys slaves

# tmp.sh accumulates per-node privilege commands; seed it with strict mode.
rm -f tmp.sh
printf '%s\n' 'set -e' > tmp.sh

echo "循环写入文件"
# hadoop.info format: one node per line, whitespace-separated fields:
#   field 1 = role ("name" for the NameNode, anything else is a worker)
#   field 2 = hostname,  field 3 = IP address
# Read the fields directly with `read` instead of eval-ing awk output:
# eval would execute arbitrary text from hadoop.info as shell code.
# The trailing `_` absorbs any extra fields (matching the old awk behaviour).
while read -r role host ip _; do
	# Skip blank or incomplete records.
	[ -n "$ip" ] || continue
	# Fetch this node's hadoop-user public key.
	# stdin is redirected so scp cannot swallow the loop's remaining input.
	scp "root@$ip:/home/hadoop/.ssh/id_dsa.pub" "./id_dsa.pub.$host" < /dev/null
	cat "id_dsa.pub.$host" >> authorized_keys
	# Append the node to the shared hosts file.
	echo "$ip    $host" >> hosts
	# Every non-NameNode host is a worker; list it in slaves.
	if [ "$role" != "name" ]; then
		echo "$host" >> slaves
	fi
	# Queue per-node ownership/permission fixes in the temp script.
	# chown 'user:group' replaces the deprecated, non-portable 'user.group' form.
	echo "ssh root@$ip 'chown -R hadoop:hadoop /home/hadoop/.ssh/authorized_keys;chmod 700 /home/hadoop/.ssh;chmod 700 /home/hadoop/.ssh/*'" >> tmp.sh
	echo "ssh root@$ip 'chown -R hadoop:hadoop ${HADOOP_HOME}/etc/hadoop/slaves'" >> tmp.sh
done < hadoop.info

echo "分发文件到各个节点"
# Push the generated files to every node in hadoop.info.
# Fields per line: role, hostname, IP — read directly; no eval (it would
# execute file content as shell code and break on short lines).
while read -r role host ip _; do
	# Skip blank or incomplete records.
	[ -n "$ip" ] || continue
	echo "$host"
	# Redirect stdin so scp cannot consume the remaining hadoop.info lines.
	scp authorized_keys "root@${ip}:/home/hadoop/.ssh/" < /dev/null
	scp hosts "root@${ip}:/etc/" < /dev/null
	scp slaves "root@${ip}:${HADOOP_HOME}/etc/hadoop/" < /dev/null
done < hadoop.info

# Run the privilege-fix script assembled during the collection loop.
echo "执行临时脚本"
chmod a+x tmp.sh
./tmp.sh