#!/bin/bash
# Download Hadoop 2.7.4, unpack it into /opt/hadoop, and prepare the local
# directory layout used later for NameNode/DataNode/JournalNode storage.
set -e
mkdir -p /opt/tmp/
mkdir -p /opt/script/


name="hadoop-2.7.4"
# Alternative mirror kept for reference:
#wget "http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/${name}/${name}.tar.gz" -O ${name}.tar.gz
# Use ${name} consistently instead of repeating the literal version string.
wget "http://www-us.apache.org/dist/hadoop/common/${name}/${name}.tar.gz" -O "${name}.tar.gz"
tar zxf "${name}.tar.gz"
mv "${name}" /opt/hadoop
mv "${name}.tar.gz" /opt/tmp/
mkdir -p /opt/hadoop/tmp
mkdir -p /opt/hadoop/hdfs/namenode
mkdir -p /opt/hadoop/hdfs/datanode
# Fixed directory-name typo (was "namesecondarya").
mkdir -p /opt/hadoop/hdfs/namesecondary


# Environment setup: expose the Hadoop binaries on PATH for all login shells.
# Quoted delimiter keeps $HADOOP_HOME / $PATH literal in /etc/profile.
cat >> /etc/profile <<'PROFILE'

export HADOOP_HOME=/opt/hadoop/
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH

PROFILE
#export HADOOP_COMMON_LIB_NATIVE_DIR=\$HADOOP_HOME/lib/native
#export HADOOP_OPTS=\"-Djava.library.path=\$HADOOP_HOME/lib\"






# Paths of the Hadoop configuration files rewritten below.
core=/opt/hadoop/etc/hadoop/core-site.xml
hdfs=/opt/hadoop/etc/hadoop/hdfs-site.xml
map=/opt/hadoop/etc/hadoop/mapred-site.xml
yarn=/opt/hadoop/etc/hadoop/yarn-site.xml
env1=/opt/hadoop/etc/hadoop/hadoop-env.sh
env2=/opt/hadoop/etc/hadoop/yarn-env.sh


# Cluster role assignment (hostnames).
master="hk"             # active NameNode (nn1)
standby="hk2"           # standby NameNode (nn2)
visitor="hk3"           # host whose JournalNode entry carries the /ns suffix
resourcemanager="hk3"   # YARN ResourceManager host
rsa=/root/.ssh/id_rsa   # SSH key used by the sshfence fencing method
# Number of servers listed in config.json ($() + direct file argument
# replaces the former backticks and useless `cat | jq` pipeline).
num=$(jq '.server | length' config.json)

# Keyed by hostname.
# Build the ZooKeeper quorum string (host:2181,host:2181,...) and the
# JournalNode list (host:8485;host:8485...) from config.json.  The entry for
# ${visitor} gets the "/ns" journal-id suffix appended, matching the
# qjournal:// URI assembled later for dfs.namenode.shared.edits.dir.
zk=""
journal=""
# ZooKeeper ensemble
for ((i = 0; i < num; i++)); do
  # jq -r emits the raw hostname, so no stray JSON quotes reach the
  # config files (the later sed quote-stripping becomes a no-op).
  host=$(jq -r ".server[$i].host" config.json)

  # port-2181 logic
  if [[ "$host" == "$visitor" ]]; then
    # EPS host
    zk="${zk}${host}:2181"
    journal="${journal}${host}:8485/ns"
  else
    zk="${zk}${host}:2181"
    journal="${journal}${host}:8485"
  fi

  # Separator after every entry except the last (arithmetic replaces expr).
  if (( num - i != 1 )); then
    zk="${zk},"
    journal="${journal};"
  fi
done




#*********right*********
# core-site.xml: default filesystem, tmp dir, I/O buffer, ZK quorum for HA.
# Drop the closing tag, append properties via heredoc, then re-close it.
sed -i "s/<\/configuration>//g"  $core
cat >> "$core" <<EOF

 <property>
      <name>fs.defaultFS</name>
      <value>hdfs://ns</value>
 </property>
 <property>
      <name>hadoop.tmp.dir</name>
      <value>/opt/hadoop/tmp</value>
 </property>
 <property>
      <name>io.file.buffer.size</name>
      <value>4096</value>
 </property>
 <property>
      <name>ha.zookeeper.quorum</name>
      <value>$(echo "${zk}" | sed 's/"//g')</value>
 </property>

EOF
echo "</configuration>" >> "$core"


#*********right*********
# hdfs-site.xml: HA nameservice "ns" with NameNodes nn1 (${master}) and
# nn2 (${standby}), shared edits over QJM (${journal}, quotes stripped),
# ZKFC automatic failover, and sshfence using ${rsa}.
# The closing </configuration> is removed, properties are appended, then
# the tag is re-added — same pattern as the other *-site.xml blocks.


# Use this variant when binding to 0.0.0.0.
sed -i "s/<\/configuration>//g"  $hdfs
echo -e "
 <property>
       <name>dfs.permissions</name>
       <value>false</value>
 </property>
 <property>
       <name>dfs.nameservices</name>
       <value>ns</value>
 </property>
 <property>
       <name>dfs.ha.namenodes.ns</name>
       <value>nn1,nn2</value>
 </property>
 <!-- nn1的RPC通信地址 -->
  <property>
       <name>dfs.namenode.rpc-address.ns.nn1</name>
       <value>${master}:9000</value>
  </property>

  <!-- nn1的http通信地址 -->
  <property>
        <name>dfs.namenode.http-address.ns.nn1</name>
        <value>${master}:50070</value>
  </property>
 <!-- nn2的RPC通信地址 -->
    <property>
        <name>dfs.namenode.rpc-address.ns.nn2</name>
        <value>${standby}:9000</value>
    </property>
 <!-- nn2的http通信地址 -->
    <property>
        <name>dfs.namenode.http-address.ns.nn2</name>
        <value>${standby}:50070</value>
    </property>
    <property>
         <name>dfs.namenode.shared.edits.dir</name>
         <value>qjournal://`echo ${journal} | sed 's/\"//g'`</value>
    </property>
    <!-- 指定JournalNode在本地磁盘存放数据的位置 -->
    <property>
          <name>dfs.journalnode.edits.dir</name>
          <value>/opt/hadoop/hdfs</value>
    </property>
 <!-- 开启NameNode故障时自动切换 -->
    <property>
          <name>dfs.ha.automatic-failover.enabled</name>
          <value>true</value>
    </property>

 <!-- 配置失败自动切换实现方式 -->
    <property>
            <name>dfs.client.failover.proxy.provider.ns</name>
            <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
  <!-- 配置隔离机制 -->
    <property>
             <name>dfs.ha.fencing.methods</name>
             <value>sshfence</value>
    </property>
 <!-- 使用隔离机制时需要ssh免登陆 -->
    <property>
            <name>dfs.ha.fencing.ssh.private-key-files</name>
            <value>${rsa}</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///opt/hadoop/hdfs/namenode</value>
    </property>
   <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///opt/hadoop/hdfs/datanode</value>
    </property>
">>$hdfs
echo "</configuration>" >> $hdfs



#*********right*********
# mapred-site.xml: run MapReduce on YARN with small (200 MB) task/AM memory.
# The file ships only as a template, so materialize it first.
cp /opt/hadoop/etc/hadoop/mapred-site.xml.template /opt/hadoop/etc/hadoop/mapred-site.xml
sed -i "s/<\/configuration>//g"  $map
# Quoted delimiter: no expansions occur in this content.
cat >> "$map" <<'EOF'

 <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
 </property>
 <property>
  <name>yarn.app.mapreduce.am.resource.mb</name>
  <value>200</value>
  <description>The amount of memory the MR AppMaster needs.</description>
</property>
<property>
  <name>mapreduce.reduce.memory.mb</name>
  <value>200</value>
  <description>The amount of memory the MR AppMaster needs.</description>
</property>
<property>
  <name>mapreduce.map.memory.mb</name>
  <value>200</value>
  <description>The amount of memory the MR AppMaster needs.</description>
</property>

EOF
echo "</configuration>" >> "$map"




#*********right*********
# yarn-site.xml: shuffle aux-service, ResourceManager host, and small
# NodeManager resource limits with vmem checking disabled.
sed -i "s/<\/configuration>//g"  $yarn
# Unquoted delimiter so ${resourcemanager} expands.
cat >> "$yarn" <<EOF

 <!-- 指定nodemanager启动时加载server的方式为shuffle server -->
    <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
     </property>
 <!-- 指定resourcemanager地址 -->
     <property>
            <name>yarn.resourcemanager.hostname</name>
            <value>${resourcemanager}</value>
      </property>
      <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>1024</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>50</value>
  </property>
<property>
  <name>yarn.nodemanager.vmem-check-enabled</name>
  <value>false</value>
  <description>Whether virtual memory limits will be enforced for containers</description>
</property>



EOF
echo "</configuration>" >> "$yarn"



#*********right*********
# slaves file: one worker hostname per line (trailing blank line preserved).
printf 'hk\nhk2\nhk3\n\n' > /opt/hadoop/etc/hadoop/slaves



#*********right*********
# Point JAVA_HOME in hadoop-env.sh and yarn-env.sh at the installed JDK.
# Identical regex as before; the '|' delimiter avoids escaping path slashes.
sed -i 's|\(JAVA_HOME*=\).*|\1/usr/lib/jdk1.8|g' "$env1"
sed -i 's|\(JAVA_HOME*=\).*|\1/usr/lib/jdk1.8|g' "$env2"





#1.sbin/hadoop-daemons.sh start journalnode

#2.hdfs zkfc -formatZK

#3.hadoop namenode -format
#3.hadoop datanode -format

#4. sbin/hadoop-daemon.sh start namenode
#5.[hadoop@Mast2 hadoop-2.5.2]$ hdfs namenode -bootstrapStandby
#6.[hadoop@Mast2 hadoop-2.5.2]$ sbin/hadoop-daemon.sh start namenode
#6.[hadoop@Mast2 hadoop-2.5.2]$ sbin/hadoop-daemon.sh start datanode

#7.[hadoop@Mast3 hadoop-2.5.2]$ sbin/start-yarn.sh
#8.[hadoop@Mast1 hadoop-2.5.2]$ sbin/hadoop-daemons.sh start zkfc

#http://www.iteye.com/news/30739

#http://bbs.csdn.net/topics/390976365
#hdfs hdfs haadmin -transitionToActive --forcemanual nn1

#


#http://blog.csdn.net/u010967382/article/details/20380387

#helloworld
#http://jingyan.baidu.com/article/ce09321b7a2e052bff858fd9.html
#
#setting yarn.nodemanager.vmem-check-enabled to false in yarn-site.xml






