#!/bin/bash
set -e
## Check input arguments.
## BUG FIX: the script uses three positional parameters ($1 service type,
## $2 machine name, $3 machine IP) throughout, but the original only
## required one (`-lt 1`), letting later steps run with empty $2/$3.
if [ $# -lt 3 ]; then
    echo "$0 错误：请传入参数，(1、服务类型:name/node;2、机器名：name00/node01;3、机器IP)！"
	echo "例如：$0 name name00 127.0.0.1"
	exit 2
fi

##################### Part 1: prepare the installation environment ########################
echo '查看已安装节点信息'
if [ -f 'hadoop.info' ]; then
	cat hadoop.info
fi
echo '准备安装机器信息'
echo "$1    $2    $3"
read -p "是否继续(Y/N):" isY
if [ "${isY}" != "y" ] && [ "${isY}" != "Y" ]; then
   exit 1
fi
## BUG FIX: `sed -i` on a missing file exits non-zero and, under `set -e`,
## aborted the whole script on a first run; only de-duplicate the entry
## for this machine when the file already exists.
if [ -f 'hadoop.info' ]; then
	sed -i "/$2/d" hadoop.info
fi
echo "$1    $2    $3" >> hadoop.info
# Configure passwordless SSH login on the master (as root).
# BUG FIX: the service type (name/node) is $1, not $2 — $2 is the machine
# name (e.g. name00/node01), so the original `"$2" == "name"` comparison
# could never match and no key was ever generated or distributed.
# Remember the current path so we can return after working in ~/.ssh.
INITIALDIR=$(pwd)
cd ~/.ssh
if [ "$1" == "name" ]; then
	# Master: generate a key pair once and authorize it for itself.
	if [ ! -f 'id_dsa.pub' ]; then
		ssh-keygen -t dsa -P '' -f id_dsa
		cat id_dsa.pub >> authorized_keys
	fi
elif [ "$1" == "node" ]; then
	# Worker: push the master's authorized_keys to the node ($2 is its
	# hostname).
	scp authorized_keys "root@${2}:~/.ssh"
fi
# Switch back to the installation directory.
cd "$INITIALDIR"
# Relax security restrictions.
## Stop iptables. `|| true` so an already-stopped service (non-zero exit)
## does not kill the script under `set -e`.
service iptables stop || true
chkconfig iptables off
## Disable SELinux. `setenforce 0` fails when SELinux is already
## permissive/disabled — tolerate that; the sed makes it persistent.
setenforce 0 || true
sed -i "s@^SELINUX=enforcing@SELINUX=disabled@g" /etc/sysconfig/selinux

# Time synchronization
## Master-side NTP setup. Stop ntpd first; tolerate it not running so a
## non-zero exit does not abort the script under `set -e`.
service ntpd stop || true
## Replace any existing entry for this IP, then register it as the NTP
## server with a local fallback stratum.
sed -i "/$3/d" /etc/ntp.conf
echo "server $3" >> /etc/ntp.conf
echo "fudge $3 stratum 10" >> /etc/ntp.conf
## Restart ntpd:
service ntpd restart

# Update /etc/hosts with this machine's IP/name mapping (dedupe first).
sed -i "/$2/d" /etc/hosts
echo "$3    $2" >> /etc/hosts

## Set the hostname persistently.
## BUG FIX: the original deleted the HOSTNAME line from
## /etc/sysconfig/network but then appended "HOSTNAME=$2" to /etc/hosts —
## write it to the network file, where it belongs.
sed -i "/HOSTNAME/d" /etc/sysconfig/network
echo "HOSTNAME=$2" >> /etc/sysconfig/network
hostname "$2"

# Create the hadoop group and user.
# Guard both calls: on a re-run `groupadd`/`useradd` exit non-zero when
# the group/user already exists, which would abort under `set -e`.
getent group hadoop >/dev/null || groupadd hadoop
id -u hadoop >/dev/null 2>&1 || useradd hadoop -g hadoop
echo '123456' | passwd --stdin hadoop

# Create the hadoop installation directories (src/tools/logs).
for d in src tools logs; do
	mkdir -p "/home/hadoop/${d}"
done
chown -R hadoop.hadoop /home/hadoop/*
# Data-node storage paths — must be on a volume with enough free space
# (very important).
#chmod -R 777 /home/data/hadoop
for d in hdfs temp name data; do
	mkdir -p "/home/data/hadoop/${d}"
done
chown -R hadoop.hadoop /home/data/hadoop

# Set up passwordless SSH for the hadoop user.
sudo -u hadoop mkdir -p /home/hadoop/.ssh
cd /home/hadoop/.ssh
# BUG FIX: on a re-run ssh-keygen prompts/fails when the key already
# exists, aborting the script under `set -e` — only generate it once.
if [ ! -f id_dsa.pub ]; then
	sudo -u hadoop ssh-keygen -t dsa -P '' -f id_dsa
fi
# NOTE: in the original, `sudo -u hadoop cat ... >> authorized_keys` ran
# the redirection as the invoking shell (root), leaving authorized_keys
# root-owned. Append as root, then fix ownership and the strict
# permissions sshd requires.
cat id_dsa.pub >> authorized_keys
chown hadoop:hadoop authorized_keys
chmod 600 authorized_keys


#######################	2. Install hadoop 	#######################
## Load the shared configuration (defines URL_FILE, HADOOP_FILE,
## HADOOP_VERSION, HADOOP_HOME, JAVA_HOME).
. ../config/my-config.sh
## Derive the hadoop download URL from the base file URL.
HADOOP_URL=$URL_FILE/hadoop
mkdir -p /home/hadoop/src

## Download the tarball only when it is not already present.
if [ ! -f "$HADOOP_FILE" ]; then
    wget "$HADOOP_URL/$HADOOP_FILE"
fi
## Remove any previous install, then unpack.
## BUG FIX: `${HADOOP_HOME:?}` aborts when the variable is unset/empty
## instead of letting the original unquoted `rm -rf $HADOOP_HOME` run with
## no path argument (or word-split into unintended paths).
rm -rf -- "${HADOOP_HOME:?}"
tar -zxvf "$HADOOP_FILE" -C /home/hadoop/src

## Create hadoop data/log directories.
## src/tools were already created in part 1; `mkdir -p` is idempotent.
mkdir -p /home/hadoop/src
mkdir -p /home/hadoop/tools
chown -R hadoop.hadoop /home/hadoop/*

## Data-node storage paths — must be on a volume with enough free space.
mkdir -p /home/data/hadoop/hdfs
mkdir -p /home/data/hadoop/temp
mkdir -p /home/data/hadoop/name
mkdir -p /home/data/hadoop/data
mkdir -p /home/logs/hadoop

## Grant write access and hand ownership to the hadoop user.
## The original repeated `chown -R` for name/data/* subtrees already
## covered by the recursive chown of /home/data/hadoop; one recursive
## call per tree reaches the same final state.
chmod -R 777 /home/data/hadoop
chown -R hadoop.hadoop /home/data/hadoop
chown -R hadoop.hadoop /home/logs/hadoop


## Back up the stock configuration, then install ours over it.
## Quote all expansions so paths with unexpected characters cannot split.
mkdir -p "/home/backup/$HADOOP_VERSION"
hadoopCfgDir=/home/hadoop/src/$HADOOP_VERSION/etc/hadoop
cp -rf "$hadoopCfgDir"/* "/home/backup/$HADOOP_VERSION"
cp -rf "./$HADOOP_VERSION"/* "$hadoopCfgDir"
## Substitute the real JAVA_HOME for the literal ${JAVA_HOME} placeholder
## in the env scripts (the \$ keeps sed from seeing an empty pattern).
sed -i "s@\${JAVA_HOME}@${JAVA_HOME}@g" "$hadoopCfgDir/hadoop-env.sh"
sed -i "s@\${JAVA_HOME}@${JAVA_HOME}@g" "$hadoopCfgDir/yarn-env.sh"
## Record all slave hostnames.
## NOTE(review): slaves are hard-coded; consider deriving from hadoop.info.
echo "node01" > "$hadoopCfgDir/slaves"
echo "node02" >> "$hadoopCfgDir/slaves"

## Write core-site.xml: default filesystem, I/O buffer, temp dir, and
## proxy-user settings.
## FIX: `fs.default.name` is the deprecated Hadoop 1.x key; this 2.x
## layout (etc/hadoop, yarn-site.xml) uses `fs.defaultFS`.
## The quoted 'END' delimiter makes the heredoc literal, so the XML can
## never be accidentally shell-expanded.
cat > "$hadoopCfgDir/core-site.xml" <<'END'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/data/hadoop/temp</value>
         <description>A base for other temporary directories.</description>
    </property>
    <property>
        <name>hadoop.proxyuser.hduser.hosts</name>
        <value>*</value>
    </property>
        <property>
        <name>hadoop.proxyuser.hduser.groups</name>
        <value>*</value>
    </property>
</configuration>
END

## Write hdfs-site.xml: SecondaryNameNode address, NameNode/DataNode
## storage paths (these must match the directories created above),
## replication factor, and WebHDFS. Unquoted END delimiter is safe here —
## the body contains no $ expansions.
cat > $hadoopCfgDir/hdfs-site.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>master:9001</value>
        <description></description>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/home/data/hadoop/name</value>
        <description></description>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/home/data/hadoop/data</value>
        <description></description>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
        <description>storage copy number</description>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
        <description></description>
    </property>
</configuration>
END

## Write mapred-site.xml: run MapReduce on YARN and expose the job
## history server. The old Hadoop 1.x mapred.job.tracker property is kept
## commented out in the generated file for reference.
cat > $hadoopCfgDir/mapred-site.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
        <description></description>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
        <description></description>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
        <description></description>
    </property>
<!--
    <property>
        <name>mapred.job.tracker</name>
        <value>master:9001</value>
        <description>JobTracker visit path</description>
    </property>
-->
</configuration>
END

## Write yarn-site.xml: shuffle service for MapReduce plus the
## ResourceManager endpoints (scheduler 8030, tracker 8031, RPC 8032,
## admin 8033, web UI 8088), all pointing at host "master".
cat > $hadoopCfgDir/yarn-site.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
        <description></description>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>master:8032</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>master:8030</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>master:8031</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>master:8033</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>master:8088</value>
        <description></description>
    </property>
</configuration>
END