#!/bin/bash
 
# Uninstall all HDP/Ambari component packages.
# Pass 1: yum remove with scriptlets disabled (pre/post scripts can hang or
# fail on a half-broken cluster). Pass 2: force-erase anything yum left
# behind with rpm --nodeps.
PKG_PATTERN="ambari|hadoop|hdfs|yarn|ranger|hdp|spark|bigtop|smartsense|zookeeper|mapre|sqoop|flume|storm|tez|slider|hcat|pig|hive|phoenix|zeppelin|oozie|hbase|druid|knox|atlas|kafka|livy|superset|datafu"

yum --setopt=tsflags=noscripts remove -y $(rpm -qa | grep -E "$PKG_PATTERN")

for pkg in $(rpm -qa | grep -E "$PKG_PATTERN"); do
    rpm -e --nodeps "$pkg"
done

# Verify nothing matching the component pattern remains installed.
# grep exits non-zero when there is no match, i.e. all packages are gone;
# the match list (if any) is deliberately left on stdout for the operator.
if ! rpm -qa | grep -E "$PKG_PATTERN"; then
    echo "卸载安装包完毕"
fi

# Drop yum caches and metadata for the removed repos.
yum clean all
 
 
# Delete component service users (and their home directories), then groups.
# FIX(review): the original used `userdel $u && rm -rf /home/$u`, so the home
# directory was never cleaned up when the user was already absent — the home
# removal is now unconditional. Duplicate list entries (user `hive`,
# group `hdfs`) removed; effect is identical.
for u in sentry phoenix kudu hive zookeeper infra-solr ams tez spark ambari-qa \
    hdfs yarn mapred hbase hcat slider ranger sqoop impala storm flume sqoop2 \
    yarn-ats livy kafka accumulo activity_analyzer atlas kms knox zeppelin \
    superset oozie logsearch druid; do
    sudo userdel "$u"
    # ${u:?} aborts rather than expanding to "rm -rf /home/" if u were empty.
    rm -rf "/home/${u:?}"
done

for g in hadoop sentry hive spark phoenix hdfs zookeeper yarn mapred hbase \
    slider ranger ambari-qa sqoop impala storm flume sqoop2 livy kms zeppelin \
    knox accumulo druid kafka oozie superset; do
    sudo groupdel "$g"
done

 
# Some components must be uninstalled together with their dependencies.
# Most files under the install paths are removed automatically during package
# uninstall; delete the residual config directories here. After the whole
# cluster is uninstalled, the data directories are deleted further below.
# (To discover residual paths, run the final check in this script via
# ansible across hosts and sort|uniq the results.)
#
# FIX(review): the original ran `rm -rf ambari-infra-solr` and
# `rm -rf ambari-logsearch-portal` with RELATIVE paths, deleting from the
# current working directory. Presumably /etc/ambari-infra-solr and
# /etc/ambari-logsearch-portal were intended (both are config dirs in HDP) —
# changed to absolute /etc paths; confirm against your stack layout.
for conf_dir in \
    ambari-agent ambari-metrics-grafana ambari-metrics-monitor ambari-server \
    ams-hbase hadoop hive hive2 hive-hcatalog hive-webhcat hst pig ranger \
    ranger-admin ranger-kms ranger-tagsync ranger-usersync slider spark2 \
    sqoop storm-slider-client tez tez_hive2 zookeeper \
    ambari-metrics-collector ambari-infra-solr ambari-logsearch-portal; do
    # ${conf_dir:?} guards against ever expanding to a bare "rm -rf /etc/".
    rm -rf "/etc/${conf_dir:?}"
done

# Cluster data root on the system disk.
rm -rf /hadoop
 
# Remove runtime PID/state files and directories under /var/run.
for run_entry in agent_update.pid ambari-agent ambari-metrics-collector \
    ambari-metrics-monitor ams-hbase hadoop hadoop-hdfs hadoop-mapreduce \
    hadoop-yarn hive hive2 hive-hcatalog hst ranger spark spark2 sqoop \
    webhcat zookeeper; do
    rm -rf "/var/run/${run_entry:?}"
done

# Remove component log directories under /var/log.
for log_entry in ambari-metrics-grafana ambari-server ambari-agent \
    ambari-metrics-collector ambari-metrics-monitor hadoop hadoop-hdfs \
    hadoop-mapreduce hadoop-yarn hive hive2 hive-hcatalog hbase hst ranger \
    spark spark2 sqoop webhcat zookeeper kafka knox livy2; do
    rm -rf "/var/log/${log_entry:?}"
done
# Spooler files need a glob, so they stay outside the loop.
rm -rf /var/log/spooler*
 
# Remove component libraries under /usr/lib.
for lib_entry in ambari-agent ambari-metrics-collector \
    ambari-metrics-hadoop-sink ambari-metrics-kafka-sink ams-hbase flume \
    storm bigtop-utils; do
    rm -rf "/usr/lib/${lib_entry:?}"
done

# Remove component state under /var/lib.
for state_entry in ambari-agent ambari-metrics-collector hadoop-hdfs \
    hadoop-mapreduce hadoop-yarn hive hive2 ranger slider smartsense \
    spark2 zookeeper; do
    rm -rf "/var/lib/${state_entry:?}"
done

# HDP stack root, Druid apps directory, Kafka log segments.
rm -rf /usr/hdp
rm -rf /apps/druid
rm -rf /kafka-logs
 
 
# Remove scratch directories under /tmp left by the components.
for tmp_entry in hives ambari-qa sqoop-ambari-qa hadoop-hive hadoop-hcat \
    hadoop-ambari-qa hadoop-hdfs hadoop-unjar hive; do
    rm -rf "/tmp/${tmp_entry:?}"
done

# JVM performance-data directories, one per service account.
for perf_owner in hbase hive oozie zookeeper mapred hdfs hcat ambari-qa \
    ranger ams yarn; do
    rm -rf "/tmp/hsperfdata_${perf_owner}"
done

# Ambari's bundled Python modules.
rm -rf /usr/lib/python2.6/site-packages/ambari*
 
 
# delete data_dir — adjust the disk list and subdirectory names to match
# this cluster's actual storage layout before running.
for disk_num in 1 2 3 4; do
    for data_sub in hadoop var Hlog blog; do
        rm -rf "/data${disk_num}/${data_sub:?}"
    done
done
 
# delete soft links — remove startup files under /usr/bin and /etc/init
# (these are generally symlinks created at install time).
for link_prefix in ambari hadoop hdfs yarn ranger hdp spark bigtop \
    smartsense zookeeper mapre sqoop flume storm tez slider hcat pig; do
    rm -rf /usr/bin/"${link_prefix}"*
    rm -rf /etc/init/"${link_prefix}"*
done
 
 
# Check whether any component install paths remain for packages still on
# the system: list each leftover package's files, truncate to the first
# three path components, and keep only component-related paths.
#
# BUG FIX(review): the original tested `[ $? -eq 0 ]` after the pipeline,
# but $? there is the exit status of the final `uniq`, which is ALWAYS 0 —
# so "数据删除完毕" printed unconditionally. The intent (per the original
# comment) was "zero lines of output"; capture the output and test it for
# emptiness instead.
RESIDUE_PATTERN="ambari|hadoop|hdfs|yarn|ranger|hdp|spark|bigtop|smartsense|zookeeper|mapre|sqoop|flume|storm|tez|slider|hcat|pig|hive|phoenix|zeppelin|oozie|hbase|druid|knox|atlas|kafka|livy|superset|datafu"

leftover_paths=$(
    for pkg in $(rpm -qa | grep -E "$RESIDUE_PATTERN"); do
        rpm -ql "$pkg" \
            | awk 'BEGIN{FS="/"} {OFS="/";print "",$2,$3,$4}' \
            | uniq \
            | grep -E "$RESIDUE_PATTERN"
    done | uniq
)

# Show the residue (if any) to the operator, as the original pipeline did.
printf '%s\n' "$leftover_paths"

# No output means no install-path residue: packages, programs and config
# files are essentially all cleaned up.
if [ -z "$leftover_paths" ]; then
    echo "数据删除完毕"
fi
