### Before running, place the JDK and Hadoop tarballs in the /opt/software directory.
### jtar/htar are the JDK and Hadoop archive file names; jdir/hdir are the directory
### names they extract to. Replace them to match your actual versions.
### Passwordless SSH to the node itself (hadoop1 -> hadoop1) must be configured
### before running this script. Run with bash.

# ---- installer configuration -------------------------------------------
# Archive file names (must already exist in /opt/software) and the directory
# names they unpack to.  Adjust these four values to match the versions in use.
readonly jtar='jdk-8u202-linux-x64.tar.gz'   # JDK tarball
readonly htar='hadoop-3.3.0.tar.gz'          # Hadoop tarball
readonly jdir='jdk1.8.0_202'                 # directory created by extracting $jtar
readonly hdir='hadoop-3.3.0'                 # directory created by extracting $htar

echo "stop-all.sh  and remove all data---"
# Stop any running Hadoop daemons first; this is best-effort — nothing may be
# running on a fresh node, and that is fine.
stop-all.sh
# Wipe previous HDFS data (hadoop.tmp.dir) and old unpacked installs.
# ${var:?} aborts the command if the variable is empty/unset, so a broken
# configuration can never expand to a bare 'rm -rf /opt/software/'.
rm -rf /opt/tmp
rm -rf -- "/opt/software/${jdir:?jdir is not set}"
rm -rf -- "/opt/software/${hdir:?hdir is not set}"


echo "--install jdk and hadoop--"
# Unpack both archives into /opt/software.  Abort early (instead of silently
# continuing) if the directory or either tarball is missing — every later step
# depends on these extractions having succeeded.
cd /opt/software || { echo "cannot cd to /opt/software" >&2; exit 1; }
[ -f "$jtar" ] || { echo "missing $jtar in /opt/software" >&2; exit 1; }
[ -f "$htar" ] || { echo "missing $htar in /opt/software" >&2; exit 1; }
tar zxf "$jtar" || { echo "failed to extract $jtar" >&2; exit 1; }
tar zxf "$htar" || { echo "failed to extract $htar" >&2; exit 1; }

echo "modify  /etc/profile---"

# Delete every line containing 'export' from line 70 to the end of
# /etc/profile, so re-running this script does not accumulate duplicate
# JAVA_HOME/HADOOP_HOME/PATH exports appended below.
# NOTE(review): the hard-coded start line 70 assumes the distro's stock
# /etc/profile is shorter than 70 lines — confirm on the target system.
sed -i '70,$ {/export/d}' /etc/profile

# Append fresh environment exports.  The PATH line is single-quoted on
# purpose: $PATH/$JAVA_HOME/$HADOOP_HOME must be written literally and
# expanded at login time, not at script-run time.
echo   "export  JAVA_HOME=/opt/software/$jdir">>/etc/profile
echo   "export  HADOOP_HOME=/opt/software/$hdir">>/etc/profile
echo   'export  PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin'>>/etc/profile
# Load the new variables into THIS script's environment; the steps below rely
# on $HADOOP_HOME and on hdfs/start-all.sh being in $PATH.
source /etc/profile

echo   "modify hadoop-env.sh,core-site.xml---"
cd  $HADOOP_HOME/etc/hadoop/
# Each sed below appends ('a') a block of <property> elements immediately
# after the opening <configuration> tag of the given config file.
# NOTE(review): these inserts are not idempotent by themselves — they rely on
# the fresh extraction above having reset the config files to their defaults.

# core-site.xml: NameNode RPC endpoint (hdfs://hadoop1:9000), data root
# /opt/tmp, web UI static user root, and root proxy-user wildcards.
sed  -i  "/<configuration>/a <property>\n<name>fs.defaultFS</name>\n<value>hdfs://hadoop1:9000</value>\n</property>\n<property>\n<name>hadoop.tmp.dir</name>\n<value>/opt/tmp</value>\n</property><property><name>hadoop.http.staticuser.user</name><value>root</value></property>\n<property><name>hadoop.proxyuser.root.hosts</name><value>*</value></property><property><name>hadoop.proxyuser.root.groups</name><value>*</value></property>"    core-site.xml

# hdfs-site.xml: single-node setup — replication factor 1, permission
# checking disabled.
sed  -i  "/<configuration>/a <property>\n<name>dfs.replication</name>\n<value>1</value>\n</property>\n<property>\n<name>dfs.permissions</name>\n<value>false</value>\n</property>"    hdfs-site.xml

# mapred-site.xml: run MapReduce on YARN.  ${HADOOP_HOME} is deliberately
# expanded NOW (double quotes) so the absolute install path is baked into the
# HADOOP_MAPRED_HOME values for the AM, map, and reduce environments.
sed  -i  "/<configuration>/a <property>\n<name>mapreduce.framework.name</name>\n<value>yarn</value>\n</property><property><name>yarn.app.mapreduce.am.env</name><value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value></property><property><name>mapreduce.map.env</name><value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value></property><property><name>mapreduce.reduce.env</name><value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value></property>" mapred-site.xml

# yarn-site.xml: ResourceManager host and the shuffle aux-service required
# by MapReduce.
sed  -i  "/<configuration>/a <property>\n<name>yarn.resourcemanager.hostname</name>\n<value>hadoop1</value>\n</property>\n<property>\n<name>yarn.nodemanager.aux-services</name>\n<value>mapreduce_shuffle</value>\n</property>"    yarn-site.xml

# hadoop-env.sh: insert after line 1 the JAVA_HOME and the *_USER=root
# variables Hadoop 3.x requires to let the daemons run as root.
sed  -i   "1a  export  JAVA_HOME=/opt/software/$jdir\nexport  HDFS_NAMENODE_USER=root\nexport  HDFS_DATANODE_USER=root\nexport  HDFS_SECONDARYNAMENODE_USER=root\nexport  YARN_RESOURCEMANAGER_USER=root\nexport  YARN_NODEMANAGER_USER=root"   hadoop-env.sh

# workers: this single node hosts the only DataNode/NodeManager.
echo "hadoop1">workers

echo "format hadoop----"
# Format a fresh NameNode.  /opt/tmp was removed earlier, so no interactive
# re-format prompt is expected.  Abort rather than start a cluster whose
# NameNode failed to format.
hdfs namenode -format || { echo "namenode format failed" >&2; exit 1; }

echo "start  hadoop----"
# Launch HDFS and YARN daemons (requires the passwordless SSH to hadoop1
# mentioned in the header).
start-all.sh
