#!/bin/bash

# Print a success/info message: black text on a green background.
info() {
  local msg="$1"
  echo -e "\033[42;30m${msg}\033[0m"
}

# Print a section/progress banner: black text on a blue background.
warn() {
  local msg="$1"
  echo -e "\033[44;30m${msg}\033[0m"
}

# Print an error message: black text on a red background.
error() {
  local msg="$1"
  echo -e "\033[41;30m${msg}\033[0m"
}

# Verify that a required command is available on PATH.
# Prints a success message if found; otherwise prints an error and exits 11.
# BUG FIX: the original condition was inverted — `[[ -z $(command -v $1) ]]`
# is true when the command is MISSING, yet that branch reported success,
# while a present command caused the script to abort.
command_exists() {
  if command -v -- "$1" >/dev/null 2>&1; then
    info "$1已正确安装"
  else
    error "请先安装$1"
    exit 11
  fi
}

# Verify that a filesystem path exists; exits 12 if it does not.
path_exists() {
  local target="$1"
  # Guard clause: bail out early when the path is absent.
  if [[ ! -e "$target" ]]; then
    error "${target}路径不存在"
    exit 12
  fi
  info "${target}路径检查通过"
}

# ============================================ Start ============================================
# Abort on the first failing command for the rest of the script.
set -e

# Install utility packages # dialog axel lsof telnet

# Add the Cloudera CDH5 yum repository and import its GPG signing key
# so subsequent yum installs of CDH packages are trusted.
warn "添加 cloudera repos"
wget http://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/cloudera-cdh5.repo
mv cloudera-cdh5.repo /etc/yum.repos.d/cloudera-cdh5.repo
rpm --import http://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera

# Install hadoop (disabled — left for reference)
# clean
# warn "安装 hadoop"
# yum install -y \
#   hadoop-hdfs-namenode \
#   hadoop-hdfs-secondarynamenode \
#   hadoop-hdfs-datanode \
#   hadoop-mapreduce \
#   hadoop-mapreduce-historyserver \
#   hadoop-yarn-resourcemanager \
#   hadoop-yarn-nodemanager \
#   hadoop-yarn-proxyserver \
#   hadoop-client \
#   hadoop-conf-pseudo

# yum clean all -y

# Install Impala (disabled — left for reference)
# yum install -y impala impala-server impala-state-store impala-catalog impala-shell

# Moving Security file to perform action as hdfs user /etc/security/limits.d/hdfs.conf
# warn "移动安全文件"
# path_exists /etc/security/limits.d/hdfs.conf
# path_exists /etc/security/limits.d/mapreduce.conf
# path_exists /etc/security/limits.d/yarn.conf
# mv /etc/security/limits.d/hdfs.conf ~/
# mv /etc/security/limits.d/mapreduce.conf ~/
# mv /etc/security/limits.d/yarn.conf ~/

# Format the HDFS NameNode as the hdfs user.
# NOTE(review): destructive on an existing HDFS filesystem — only safe on a
# fresh image; presumably this script runs exactly once per image build.
warn "格式化 NameNode"
su - hdfs -c 'hdfs namenode -format'

# Start the HDFS daemons (namenode, datanode).
warn "Start HDFS"
command_exists hdfs
# BUG FIX: the daemon script was invoked as `hdfs /usr/lib/.../hadoop-daemon.sh
# start $x`, which makes the hdfs CLI treat the script path as a subcommand and
# fail. Run the daemon script as the hdfs user instead, matching the
# `su - hdfs -c` pattern used throughout the rest of this script.
# bash -c 'for x in `cd /etc/init.d ; ls hadoop-hdfs-*` ; do service $x start ; done'
for x in namenode datanode; do
  su - hdfs -c "/usr/lib/hadoop/sbin/hadoop-daemon.sh start $x"
done

# Create the directory tree Hadoop services expect, using the
# CDH-provided init script.
warn "创建Hadoop流程所需的目录"
path_exists /usr/lib/hadoop/libexec/init-hdfs.sh
/usr/lib/hadoop/libexec/init-hdfs.sh

# Sanity check: recursively list the HDFS root as the hdfs user.
warn "验证HDFS文件结构"
su - hdfs -c 'hdfs dfs -ls -R /'

# Start the YARN daemons (resourcemanager, nodemanager).
warn "启动 Yarn"
# BUG FIX: this previously checked `command_exists service`, but `service` is
# never invoked below — check for the `yarn` command we actually use.
command_exists yarn
# No subshell needed; run the loop directly and quote the daemon name.
for x in resourcemanager nodemanager; do
  yarn --daemon start "$x"
done
# /usr/lib/hadoop-mapred/sbin/mapred-history-daemon.sh start $x; done
# service hadoop-yarn-resourcemanager start
# service hadoop-yarn-nodemanager start
# service hadoop-mapreduce-historyserver start

# Create per-service HDFS directories and open up their permissions.
warn "创建用户目录"

# Make / and /user world-writable; /user/hadoop is owned by the hadoop user.
su - hdfs -c 'hdfs dfs -chmod a+w /'
su - hdfs -c 'hdfs dfs -mkdir -p /user/hadoop'
su - hdfs -c 'hdfs dfs -chmod a+w /user'
su - hdfs -c 'hdfs dfs -chown hadoop /user/hadoop'

# Hive scratch and warehouse directories (run as the current user, not hdfs).
hdfs dfs -chmod g+w /tmp
hdfs dfs -mkdir -p /user/hive/warehouse
hdfs dfs -chmod g+w /user/hive/warehouse

#Satish: Changing warehouse permissions
# NOTE(review): world-writable warehouse — acceptable only for a sandbox image.
hdfs dfs -chmod -R a+w /user/hive/warehouse
hdfs dfs -chmod -R a+w /user/hive/warehouse/*

# Adding Hbase dir
su - hdfs -c 'hdfs dfs -mkdir /hbase'
su - hdfs -c 'hdfs dfs -chown hbase /hbase'
su - hdfs -c 'hdfs dfs -chmod a+w /hbase'

# warn "⑬ 将安全文件移回其位置"
# mv ~/hdfs.conf /etc/security/limits.d/
# mv ~/mapreduce.conf /etc/security/limits.d/
# mv ~/yarn.conf /etc/security/limits.d/

# Cloudera component installation (yum line disabled — kept for reference).
warn "安装Cloudera组件"
#Satish: Added zookeeper
# yum install -y zookeeper zookeeper-server hive hbase hbase-thrift hbase-master spark-core spark-master spark-worker spark-history-server spark-python hue hue-server pig oozie oozie-client

# Initialize the Oozie database schema.
warn "启动Oozie数据库"
oozie-setup db create -run

#Create HUE Secret Key
# NOTE(review): the secret key is hardcoded in this script — fine for a demo
# image, but it should be generated per deployment in production.
warn "⑯ 创建HUE秘密密钥"
path_exists /etc/hue/conf/hue.ini
sed -i 's/secret_key=/secret_key=_S@s+D=h;B,s$C%k#H!dMjPmEsSaJR/g' /etc/hue/conf/hue.ini

# Run the pre-staged oh-my-zsh installer.
warn "开始安装 oh-my-zsh"
path_exists /tmp/omzsh-install.sh
/tmp/omzsh-install.sh
