#!/bin/bash
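# Startup script for a Hadoop 2.6.0-cdh5.14.0 HA cluster (node01..node03):
# ZooKeeper -> JournalNodes -> NameNode HA -> DataNodes -> ZKFC -> YARN HA -> JobHistoryServer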
#Preparation: clean up leftover processes on all three machines and start ZooKeeper on all three machines
ssh node01 "/export/servers/onekey/zookeeper/startzk.sh"
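
# Optional sanity check (a sketch, not in the original flow): each ZooKeeper instance
# runs as a QuorumPeerMain JVM, so jps (bundled with the JDK) should list one per node.
for zk in node01 node02 node03
do
	ssh $zk "jps | grep QuorumPeerMain"
done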

#Step 2: run the following on all three machines to start the JournalNode, which manages our shared edit-log metadata
for n1 in node01 node02 node03
do
	# cleanup: remove the stale hsperfdata directory left behind by the impala user
	# (it can confuse JVM monitoring tools such as jps), then start the JournalNode
	ssh $n1 "rm -rf /tmp/hsperfdata_impala; /export/servers/hadoop-2.6.0-cdh5.14.0/sbin/hadoop-daemon.sh start journalnode"
done
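
# Optional sanity check (a sketch): confirm a JournalNode JVM is up on every node
# before initializing the shared edits below.
for n1 in node01 node02 node03
do
	ssh $n1 "jps | grep JournalNode"
done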

#Step 3: initialize the shared edits (JournalNode) storage from node01; note that this -force initialization is normally a one-time, first-setup step
ssh node01 "/export/servers/hadoop-2.6.0-cdh5.14.0/bin/hdfs namenode -initializeSharedEdits -force"

#Step 4: start the NameNodes on node01 and node02
ssh node01 "/export/servers/hadoop-2.6.0-cdh5.14.0/sbin/hadoop-daemon.sh start namenode"
# one-time standby setup: copy the active NameNode's metadata to node02 before starting it
ssh node02 "/export/servers/hadoop-2.6.0-cdh5.14.0/bin/hdfs namenode -bootstrapStandby"
ssh node02 "/export/servers/hadoop-2.6.0-cdh5.14.0/sbin/hadoop-daemon.sh start namenode"
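
# Optional sanity check (a sketch): both NameNode JVMs should now be running.
for nn in node01 node02
do
	ssh $nn "jps | grep NameNode"
done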


#Step 5: from node01, start the DataNode process on every node (hadoop-daemons.sh iterates over the slaves file)
ssh node01 "/export/servers/hadoop-2.6.0-cdh5.14.0/sbin/hadoop-daemons.sh start datanode"
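
# Optional sanity check (a sketch): the dfsadmin report lists the DataNodes that have
# registered with the active NameNode; expect three live nodes.
ssh node01 "/export/servers/hadoop-2.6.0-cdh5.14.0/bin/hdfs dfsadmin -report"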

#Step 6: start the ZKFC process on node01 and node02; the ZKFailoverControllers use
#ZooKeeper to elect which NameNode becomes active
for n3 in node01 node02
do
	ssh $n3 "/export/servers/hadoop-2.6.0-cdh5.14.0/sbin/hadoop-daemon.sh start zkfc"
done
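
# Optional sanity check (a sketch): with the ZKFCs up, one NameNode should report
# 'active' and the other 'standby'. nn1/nn2 are hypothetical service IDs -- substitute
# the dfs.ha.namenodes.<nameservice> values from your hdfs-site.xml.
ssh node01 "/export/servers/hadoop-2.6.0-cdh5.14.0/bin/hdfs haadmin -getServiceState nn1"
ssh node01 "/export/servers/hadoop-2.6.0-cdh5.14.0/bin/hdfs haadmin -getServiceState nn2"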

#Step 7: start the YARN cluster. start-yarn.sh on node02 brings up that ResourceManager
#plus every NodeManager; the standby ResourceManager on node03 is started on its own,
#since running start-yarn.sh a second time would try to start the NodeManagers again
ssh node02 "/export/servers/hadoop-2.6.0-cdh5.14.0/sbin/start-yarn.sh"
ssh node03 "/export/servers/hadoop-2.6.0-cdh5.14.0/sbin/yarn-daemon.sh start resourcemanager"
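
# Optional sanity check (a sketch): list the registered NodeManagers and query the RM
# HA state. rm1 is a hypothetical ID -- substitute the yarn.resourcemanager.ha.rm-ids
# values from your yarn-site.xml.
ssh node02 "/export/servers/hadoop-2.6.0-cdh5.14.0/bin/yarn node -list"
ssh node02 "/export/servers/hadoop-2.6.0-cdh5.14.0/bin/yarn rmadmin -getServiceState rm1"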

#Step 8: start the MapReduce JobHistoryServer on node03
ssh node03 "/export/servers/hadoop-2.6.0-cdh5.14.0/sbin/mr-jobhistory-daemon.sh start historyserver"
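
# Optional sanity check (a sketch): the JobHistoryServer JVM should be running; its
# web UI listens on port 19888 by default.
ssh node03 "jps | grep JobHistoryServer"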


