#!/bin/bash
set -e

## Validate arguments: $1 selects the service role (master/slave).
## Abort with a usage hint when it is missing.
if [ $# -lt 1 ]; then
    ## Diagnostics belong on stderr so they are not swallowed by pipelines.
    echo "$0 错误：请传入参数，(1、服务类型:master/slave)！" >&2
    echo "例如：$0 master" >&2
    exit 2
fi

#######################	2. Install Hadoop	#######################
## Load shared settings (expected to define HADOOP_FILE, HADOOP_HOME,
## HADOOP_VERSION, URL_FILE, SYS_PROFILE_DIR, ... — path is relative to
## the caller's working directory; TODO confirm callers always cd here).
. ../config/my-config.sh
## Derive the download URL for the Hadoop tarball.
HADOOP_URL="$URL_FILE/hadoop"
mkdir -p /home/hadoop/src

## Download the tarball only when it is not already present locally
## (wget saves it into the current directory).
if [ ! -f "$HADOOP_FILE" ]; then
    wget "$HADOOP_URL/$HADOOP_FILE"
fi
## Remove any previous installation before unpacking.
## ${HADOOP_HOME:?} aborts instead of running a bare "rm -rf" (or worse)
## when the sourced config failed to define HADOOP_HOME.
rm -rf -- "${HADOOP_HOME:?HADOOP_HOME is not set}"
## tar: -z gunzip, -x extract, -v list entries, -C target directory
tar -zxvf "$HADOOP_FILE" -C /home/hadoop/src
## Publish the Hadoop environment variables as a profile snippet.
HADOOP_PROFILE="$SYS_PROFILE_DIR/hadoop.sh"

## Unquoted here-doc on purpose: $HADOOP_HOME is expanded NOW (baked into
## the snippet), while the escaped \$PATH / \$HADOOP_HOME are resolved
## when the snippet is sourced.  PATH is appended to, not reset, so
## entries contributed by other profile snippets survive.
cat > "$HADOOP_PROFILE" <<END
HADOOP_HOME=$HADOOP_HOME
PATH=\$PATH:\$HADOOP_HOME/bin
export HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_HOME}/lib/native
export HADOOP_OPTS="-Djava.library.path=${HADOOP_HOME}/lib"
export HADOOP_HOME PATH
END

## Make the snippet world-readable (and executable, matching profile.d).
chmod 755 "$HADOOP_PROFILE"
## Reload the login environment — assumes /etc/profile sources the files
## under $SYS_PROFILE_DIR (e.g. /etc/profile.d) — TODO confirm.
source /etc/profile

## Create the Hadoop working directories:
##   /home/hadoop/src   — unpacked distribution / sources
##   /home/hadoop/tools — helper tools
mkdir -p /home/hadoop/src /home/hadoop/tools
chown -R hadoop:hadoop /home/hadoop/*

## DataNode / NameNode storage and logs; the data tree should live on a
## mount with enough free space for HDFS blocks.
mkdir -p \
    /home/data/hadoop/hdfs \
    /home/data/hadoop/temp \
    /home/data/hadoop/name \
    /home/data/hadoop/data \
    /home/logs/hadoop

## Grant write access.
## NOTE(review): 777 is overly permissive — consider 770/750 once the
## set of writers is confirmed.
chmod -R 777 /home/data/hadoop
## One recursive chown per tree covers name/, data/ and every other
## subdirectory; the colon separator is the standard chown syntax
## (the dot form is a deprecated GNU extension).
chown -R hadoop:hadoop /home/data/hadoop
chown -R hadoop:hadoop /home/logs/hadoop


## Back up the stock configuration, then overlay our prepared one.
## Fail fast if JAVA_HOME is unset — otherwise the seds below would
## silently write an empty java path into hadoop-env.sh / yarn-env.sh.
: "${JAVA_HOME:?JAVA_HOME must be set before patching hadoop-env/yarn-env}"
mkdir -p "/home/backup/$HADOOP_VERSION"
hadoopCfgDir="/home/hadoop/src/$HADOOP_VERSION/etc/hadoop"
cp -rf "$hadoopCfgDir"/* "/home/backup/$HADOOP_VERSION"
## Our config tree is shipped next to this script.
cp -rf "./$HADOOP_VERSION"/* "$hadoopCfgDir"
## Replace the literal ${JAVA_HOME} placeholder with the real path;
## '@' is used as the sed delimiter because the path contains '/'.
sed -i "s@\${JAVA_HOME}@${JAVA_HOME}@g" "$hadoopCfgDir/hadoop-env.sh"
sed -i "s@\${JAVA_HOME}@${JAVA_HOME}@g" "$hadoopCfgDir/yarn-env.sh"
## List every slave host name, one per line.
echo "node01" > "$hadoopCfgDir/slaves"
echo "node02" >> "$hadoopCfgDir/slaves"

## Write core-site.xml.  The quoted 'END' delimiter guarantees the XML
## is written verbatim — no shell expansion can corrupt the file.
cat > "$hadoopCfgDir/core-site.xml" <<'END'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <!-- canonical name; fs.default.name is the deprecated alias -->
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/data/hadoop/temp</value>
        <description>A base for other temporary directories.</description>
    </property>
    <property>
        <name>hadoop.proxyuser.hduser.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.hduser.groups</name>
        <value>*</value>
    </property>
</configuration>
END

## Write hdfs-site.xml.  Quoted delimiter => content is literal.
## NOTE(review): dfs.replication=3 but only two hosts are listed in the
## slaves file — blocks would stay under-replicated; confirm the
## intended cluster size.
cat > "$hadoopCfgDir/hdfs-site.xml" <<'END'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>master:9001</value>
        <description></description>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/home/data/hadoop/name</value>
        <description></description>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/home/data/hadoop/data</value>
        <description></description>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
        <description>storage copy number</description>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
        <description></description>
    </property>
</configuration>
END

## Write mapred-site.xml.  Quoted delimiter => content is literal.
## The legacy JobTracker property stays commented out: MRv2 on YARN
## replaces it (mapreduce.framework.name=yarn).
cat > "$hadoopCfgDir/mapred-site.xml" <<'END'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
        <description></description>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
        <description></description>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
        <description></description>
    </property>
<!--
    <property>
        <name>mapred.job.tracker</name>
        <value>master:9001</value>
        <description>JobTracker visit path</description>
    </property>
-->
</configuration>
END

## Write yarn-site.xml.  Quoted delimiter => content is literal.
## All ResourceManager endpoints point at the "master" host; the ports
## are the stock YARN defaults (8030-8033, 8088).
cat > "$hadoopCfgDir/yarn-site.xml" <<'END'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
        <description></description>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>master:8032</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>master:8030</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>master:8031</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>master:8033</value>
        <description></description>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>master:8088</value>
        <description></description>
    </property>
</configuration>
END