#!/bin/bash
# Cluster bootstrap: configures passwordless SSH, Hadoop, ZooKeeper and Spark
# across node01 (the local host) plus the worker nodes listed below.
# NOTE(review): appears intended to run on node01 as user 'hadoop' — confirm.

# Worker nodes; node01 itself is handled implicitly throughout the script.
nodes=("node02" "node03")

# Passwordless-SSH setup.
# NOTE(review): the password is hardcoded — move it into an env var or a
# secrets file before using this outside a throwaway lab environment.
#if [ ! -s /home/hadoop/.ssh/id_rsa ]; then
password="123456"

# Generate the local key pair / authorized_keys via the helper script.
sh /home/hadoop/bin/authBatchSSH.sh
for node in "${nodes[@]}"; do
    # Run the same key-setup helper on each remote node, answering the
    # interactive host-key and password prompts through expect.
    expect -c "
    spawn ssh $node sh /home/hadoop/bin/authBatchSSH.sh
        expect {
        \"*yes/no*\" {send \"yes\r\"; exp_continue}
        \"*password*\" {send \"$password\r\"; exp_continue}
        \"*Password*\" {send \"$password\r\";}
        }
    "
done

for node in "${nodes[@]}"; do
    # Push the merged authorized_keys so every node trusts every other node.
    expect -c "
    spawn scp /home/hadoop/.ssh/authorized_keys $node:/home/hadoop/.ssh/authorized_keys
        expect {
        \"*yes/no*\" {send \"yes\r\"; exp_continue}
        \"*password*\" {send \"$password\r\"; exp_continue}
        \"*Password*\" {send \"$password\r\";}
        }
    "
done
#fi


basePath="/kkb/install/"
# Hadoop base setup: create local data dirs, pin JAVA_HOME, declare the
# worker list, then distribute the configured tree to the other nodes.
hadoopPath=$basePath/"hadoop-3.1.4"
# Storage directories used by the namenode/datanode/secondary-namenode.
for d in tempDatas namenodeDatas datanodeDatas dfs/nn/edits dfs/snn/name dfs/nn/snn/edits; do
    mkdir -p "$hadoopPath/hadoopDatas/$d"
done
# hadoop-env.sh needs an explicit JAVA_HOME for non-login remote startup.
echo "export  JAVA_HOME=$basePath/jdk1.8.0_141" >> "$hadoopPath/etc/hadoop/hadoop-env.sh"

# Worker list: node01 (this host) plus every configured worker node.
echo "node01" > "$hadoopPath/etc/hadoop/workers"
for node in "${nodes[@]}"; do
    echo "$node" >> "$hadoopPath/etc/hadoop/workers"
done
## Distribute the Hadoop tree. Trailing slash on the source is required:
## without it rsync would nest the tree as .../hadoop-3.1.4/hadoop-3.1.4
## on the remote side instead of mirroring it in place.
for node in "${nodes[@]}"; do
    rsync -a "$hadoopPath/" "$node:$hadoopPath/"
done


# ZooKeeper base setup.
zkPath=$basePath/"apache-zookeeper-3.6.2-bin/"
mkdir -p "$zkPath/zkdatas"
cp -f "$zkPath/conf/zoo_sample.cfg" "$zkPath/conf/zoo.cfg"

# Point dataDir at our storage dir and enable snapshot auto-purge.
sed -i "s#dataDir=/tmp/zookeeper#dataDir=$zkPath/zkdatas#g" "$zkPath/conf/zoo.cfg"
{
    echo "autopurge.snapRetainCount=3"
    echo "autopurge.purgeInterval=1"
} >> "$zkPath/conf/zoo.cfg"

# Write ALL server.N entries first, then distribute. (Previously the rsync
# happened inside the same loop, so node02 received a zoo.cfg that was still
# missing the server.3 line — an incomplete ensemble definition.)
for (( i = 1; i <= 3; i++ )); do
    echo "server.$i=node0$i:2888:3888" >> "$zkPath/conf/zoo.cfg"
done
for (( i = 1; i <= 3; i++ )); do
    if (( i == 1 )); then
        # Local node: just record its ensemble id.
        echo "$i" > "$zkPath/zkdatas/myid"
    else
        # Remote node: push the tree ($zkPath ends in '/', so contents are
        # mirrored in place), then write its id over ssh.
        rsync -a "$zkPath" "node0$i:$zkPath"
        ssh "node0$i" "echo $i > $zkPath/zkdatas/myid"
    fi
done

# Spark base setup.
sparkPath=$basePath/"spark-3.0.0-bin-hadoop3.2"
cp "$sparkPath/conf/spark-env.sh.template" "$sparkPath/conf/spark-env.sh"
# Environment wiring so Spark finds the JDK and Hadoop/YARN configuration.
{
    echo "export JAVA_HOME=$basePath/jdk1.8.0_141"
    echo "export HADOOP_HOME=$hadoopPath"
    echo "export HADOOP_CONF_DIR=$hadoopPath/etc/hadoop"
    echo "export SPARK_CONF_DIR=$sparkPath/conf"
    echo "export YARN_CONF_DIR=$hadoopPath/etc/hadoop"
} >> "$sparkPath/conf/spark-env.sh"

# Worker (slaves) list: node01 plus every configured worker node.
# (The original copied slaves.template first, but the '>' below truncates the
# file immediately, so the copy was a no-op and has been dropped.)
echo "node01" > "$sparkPath/conf/slaves"
for node in "${nodes[@]}"; do
    echo "$node" >> "$sparkPath/conf/slaves"
done

cp "$sparkPath/conf/spark-defaults.conf.template" "$sparkPath/conf/spark-defaults.conf"
# Event logging to HDFS so the history server can replay finished jobs.
{
    echo "spark.eventLog.enabled  true"
    echo "spark.eventLog.dir      hdfs://node01:8020/spark_log"
    echo "spark.eventLog.compress true"
} >> "$sparkPath/conf/spark-defaults.conf"
## Distribute the Spark tree. Trailing slash on the source is required:
## without it rsync would nest the tree one level deeper on the remote side.
for node in "${nodes[@]}"; do
    rsync -a "$sparkPath/" "$node:$sparkPath/"
done



# Initialize HDFS.
# NOTE(review): formatting on every run wipes namenode metadata — guard this
# with a first-run check if the script may be re-executed.
hdfs namenode -format

## Start Hadoop and verify the daemons on every node.
./bin/hadoop.sh start
./bin/xcall.sh jps

## Start ZooKeeper on every node and check quorum status.
./bin/xcall.sh zkServer.sh start
./bin/xcall.sh zkServer.sh status

## Start Spark (standalone daemons + history server).
# Abort if the cd fails — otherwise the sbin/ scripts below would be
# resolved relative to the wrong directory.
cd "$sparkPath" || exit 1
mkdir -p /tmp/spark-events
hdfs dfs -mkdir -p /spark_log
sbin/start-all.sh
sbin/start-history-server.sh


# Print a usage cheat-sheet for operating the freshly deployed cluster.
# Quoted heredoc: the text is emitted verbatim, with no shell expansion.
cat <<'USAGE'

=== 环境部署完成，使用以下命令访问相关内容  ===
sh ./bin/node1Conf.sh # 初始化环境 
./bin/hadoop.sh start # 启动hadoop
./bin/xcall.sh jps

===   zk  ===
./bin/xcall.sh zkServer.sh start  # 启动zk

=== spark ===
hdfs  dfs -mkdir -p /spark_log  
cd /kkb/install/spark-3.0.0-bin-hadoop3.2  
sbin/start-all.sh   
sbin/start-history-server.sh

USAGE