#!/usr/bin/env bash

# This script sets up the complete k8s+calico configuration across multiple nodes, with one or more master(s)
#
# FileName     : deploy-k8s.sh
# Path         : ~/bin/
# Author       : ymliu
# Create Date  : 2021-05-31 17:10
# WorkFlow     : To clear installation information & setup k8s with calico.
#                Installs each service on every host & starts the service up.
#
# History      : 2021-05-31 copy from old install-k8s-service.sh to be modified

# ------ function declaration ---------------------------------------

# show all variables
# Dump every global the deployment depends on so the operator can review
# them before any destructive step runs.  Associative maps are rendered
# as "{key:..., value:...}" pairs via the inner helper showmap.
function show_vars()
{
  # showmap <map-name>: print the named associative array by indirection.
  # NOTE: eval-based indirection is kept on purpose -- namerefs (local -n)
  # need bash >= 4.3, and EL7 ships bash 4.2.
  function showmap()
  {
    local -r mapvar=$1
    local -i count=0
    for key in $(eval echo \${!${mapvar}[@]}); do
      if [ ${count} -gt 0 ]; then
        echo -n ", "
      fi
      echo -n "{key:${key}, value:$(eval echo \${${mapvar}[${key}]})}"

      count=$(( count + 1 ))
    done
  }

  echo "-------------------------------------------------------------"
  echo "Show declare environment variables"
  echo "-------------------------------------------------------------"

  echo "USER=${USER}"
  echo "GROUP=${GROUP}"
  echo ""
  echo "MASTERS=($(showmap MASTERS))"
  echo "WORKERS=($(showmap WORKERS))"
  echo "NODES=($(showmap NODES))"
  echo "EXEC_NODE=${EXEC_NODE}"
  echo ""
  echo "DEFAULT_VIP=${DEFAULT_VIP}"
  echo "MASTER_COUNT=${MASTER_COUNT}"
  echo "APISERVER_IP=${APISERVER_IP}"
  echo "APISERVER_PORT=${APISERVER_PORT}"
  echo ""
  echo "APP_DIR=${APP_DIR}"
  echo "SSL_DIR=${SSL_DIR}"
  echo "RPM_DIR=${RPM_DIR}"
  echo "SHELL_DIR=${SHELL_DIR}"
  echo "TEMPLATE_DIR=${TEMPLATE_DIR}"
  echo "SRC_TMPDIR=${SRC_TMPDIR}"
  echo "TMP_DIR=${TMP_DIR}"
  echo ""
  echo "CLUSTER_IP_SEGMENT=${CLUSTER_IP_SEGMENT}.0"
  echo "POD_IP_SEGMENT=${POD_IP_SEGMENT}.0"
  echo ""
  echo "SERVICE_DIR=${SERVICE_DIR}"
  echo "K8S_BASE_DIR=${K8S_BASE_DIR}"
  echo "K8S_BIN_DIR=${K8S_BIN_DIR}"
  echo "K8S_CONF_DIR=${K8S_CONF_DIR}"
  echo "K8S_YAML_DIR=${K8S_YAML_DIR}"
  echo "K8S_TOKEN_DIR=${K8S_TOKEN_DIR}"
  echo ""
  echo "KUBECTL=${KUBECTL}"
  echo "ETCD_VER=${ETCD_VER}"
  echo "DOCKER_HUB=${DOCKER_HUB}"
  echo "NETWORK_CARD=${NETWORK_CARD}"
  echo ""
  echo "ETCD_CLUSTER=${ETCD_CLUSTER}"
  echo "ETCD_ENDPOINTS=${ETCD_ENDPOINTS}"
  echo ""
  echo "MAP_DEPLOY=($(showmap MAP_DEPLOY))"
  # BUG FIX: this line previously printed the label "MAP_DEPLOY=" while
  # dumping the MAP_UNDEPLOY map, hiding the undeploy selection from review.
  echo "MAP_UNDEPLOY=($(showmap MAP_UNDEPLOY))"

  echo "------ Ending of show environment variables -----------------"
  echo ""
}

# combine masters & workers all to nodes
# Merge the MASTERS and WORKERS maps into the global NODES map
# (hostname -> ip).  Workers overwrite masters on a duplicate hostname.
function build_nodes()
{
  local node
  for node in "${!MASTERS[@]}"; do
    NODES["${node}"]="${MASTERS[${node}]}"
  done

  for node in "${!WORKERS[@]}"; do
    NODES["${node}"]="${WORKERS[${node}]}"
  done
}

# build apiserver ip, vip for multi-master, master ip for single master
# Echoes the address clients should use to reach the apiserver:
# the sole master's IP when MASTER_COUNT==1, otherwise the shared VIP.
function build_apiserver_ip()
{
  local node
  if (( MASTER_COUNT == 1 )); then
    # emit the single master's address and stop after the first entry
    for node in "${!MASTERS[@]}"; do
      echo "${MASTERS[${node}]}"
      return
    done
  else
    echo "${DEFAULT_VIP}"
  fi
}

# port 6443 for single master, 8443 for multi-master on vip
# Echoes the apiserver client port: 6443 straight to a lone master,
# 8443 when haproxy on the VIP fronts several masters.
function build_apiserver_port()
{
  if (( MASTER_COUNT == 1 )); then
    echo "6443"
    return
  fi
  echo "8443"
}

# combine etcd initialCluster string
# Echoes the etcd --initial-cluster value built from MASTERS:
#   host1=https://ip1:2380,host2=https://ip2:2380...
function build_etcd_initialCluster()
{
  local -r peer_port="2380"
  local result=""
  local sep=""
  local node

  for node in "${!MASTERS[@]}"; do
    result="${result}${sep}${node}=https://${MASTERS[${node}]}:${peer_port}"
    sep=","   # every entry after the first is comma-separated
  done

  echo "${result}"
}

# combine etcd endpoints string
# Echoes the etcd client endpoint list built from MASTERS:
#   https://ip1:2379,https://ip2:2379...
function build_etcd_endpoints()
{
  local -r client_port="2379"
  local result=""
  local sep=""
  local node

  for node in "${!MASTERS[@]}"; do
    result="${result}${sep}https://${MASTERS[${node}]}:${client_port}"
    sep=","   # every entry after the first is comma-separated
  done

  echo "${result}"
}

# valid exec node, current node must be one of the masters
# All kubectl/cfssl commands run locally, so the short hostname of the
# current machine must appear among the MASTERS keys; otherwise print a
# notice and abort the whole script with status 1.
function valid_execnode()
{
  local -r current=$(hostname -s)
  local node

  for node in "${!MASTERS[@]}"; do
    if [[ "${node}" == "${current}" ]]; then
      return   # running on a master: nothing to do
    fi
  done

  echo "NOTICE IMPORTANT: The current node MUST be one of master nodes."
  echo "                  All k8s cli running on current node."
  exit 1
}

# process shell scripts' arguments, decide to run deploy / undeploy
# Fills the global maps MAP_DEPLOY / MAP_UNDEPLOY from the command line:
#   -a/--all      mark every component for deployment
#   -c/--clean    mark every component for undeployment
#   -i/--deploy   mark only the components listed after the flag
#   -u/--undeploy mark only the components listed after the flag
# Only the first command seen is honored.  An unknown word, or a component
# given without a preceding -i/-u, falls through to showUsage (exit 1).
function parse_shell_args()
{
  # init_deploy <flag>: preset MAP_DEPLOY[c]=<flag> for every component.
  function init_deploy()
  {
    local -r flag=$1
    for c in ${COMPONENTS[@]}; do
      MAP_DEPLOY[${c}]="${flag}"
    done
  }
  # init_undeploy <flag>: preset MAP_UNDEPLOY[c]=<flag> for every component.
  function init_undeploy()
  {
    local -r flag=$1
    for c in ${COMPONENTS[@]}; do
      MAP_UNDEPLOY[${c}]="${flag}"
    done
  }
  # showUsage: print the help text and terminate the script.
  function showUsage()
  {
    echo "Usage: Deploy or undeploy kubernetes components and addons, all or specified."
    echo ""
    echo "       deploy-k8s.sh <command> [components...]"
    echo ""
    echo "       command, only one of below:"
    echo "       -a,  --all      : Deploy all components besides additional components."
    echo "       -c,  --clean    : Undeploy (clean) all components, including packages, files, addons..."
    echo "       -i,  --deploy   : Deploy components specified follow, must specify one or more."
    echo "       -u,  --undeploy : Undeploy components specified follow, must specify one or more."
    echo ""
    echo -n "       components defined in sequence:"
    for c in ${COMPONENTS[@]}; do
      echo -n " ${c}"
    done
    echo ""
    echo ""
    echo -n "       Which storage deployed is defined by the variable DEFAULT_STORAGE. Here is the ${DEFAULT_STORAGE}. You can specify among of them:"
    for c in ${STORAGE_COMPONENTS[@]}; do
      echo -n " ${c}"
    done
    echo ""

    exit 1
  }

  local -r OPT_ARGS=$(getopt -o :aciu --long all,clean,deploy,undeploy -- "$@")
  eval set -- ${OPT_ARGS}

  local FLAG="ERROR" # CONTINUE | BREAK | ERROR
  local CMD="UNSET" # UNSET | ALL | DEPLOY | UNDEPLOY | CLEAN
  # start from "nothing selected"; this also seeds the maps with every
  # component name so membership can be tested with -n below
  init_deploy false
  init_undeploy false
  while [ -n "$1" ]; do
    case "$1" in
      -a|--all)
        if [ ${CMD} == "UNSET" ]; then
          CMD="ALL"
          FLAG="CONTINUE"
          init_deploy true
        fi
        ;;
      -c|--clean)
        if [ ${CMD} == "UNSET" ] ; then
          CMD="CLEAN"
          FLAG="CONTINUE"
          init_undeploy true
        fi
        ;;
      -i|--deploy)
        if [ ${CMD} == "UNSET" ]; then
          CMD="DEPLOY"
          FLAG="CONTINUE"
        fi
        ;;
      -u|--undeploy)
        if [ ${CMD} == "UNSET" ]; then
          CMD="UNDEPLOY"
          FLAG="CONTINUE"
        fi
        ;;
      --)
        # getopt's end-of-options marker: nothing to do, keep scanning
        ;;
      *)
        # bare word: must be a known component following -i / -u
        if [[ -n "${MAP_DEPLOY[$1]}" && "${CMD}" == "DEPLOY" ]]; then
          MAP_DEPLOY[$1]="true"
          FLAG="CONTINUE"
        elif [[ -n "${MAP_UNDEPLOY[$1]}" && "${CMD}" == "UNDEPLOY" ]]; then
          MAP_UNDEPLOY[$1]="true"
          FLAG="CONTINUE"
        else
          FLAG="ERROR"
        fi
    esac

    # FLAG == "continue"
    if [ ${FLAG} == "ERROR" ]; then
      break
    else
      shift
    fi
  done

  if [ ${FLAG} == "ERROR" ]; then
    showUsage
  fi
}

# deploy mode: initial directories declare alias, only run once
# Seeds ~/.bashrc with the MASTERS/NODES host declarations plus the
# masterExec/nodeExec ssh helper aliases, then stages the edited file.
# Gate: runs only when MAP_DEPLOY["init"] is true.
function deploy_init()
{
  [ "${MAP_DEPLOY["init"]}" != "true" ] && return

  echo "-------------------------------------------------------------"
  echo "Initializing directories, declares, alias, etc..."
  echo "-------------------------------------------------------------"

  # modify local .bashrc
  echo -n "Modify .bashrc to add declares, alias, etc..."
  cp -f ${HOME}/.bashrc ${SRC_TMPDIR}/
  local SED=""
  # append the MASTERS host list right after the SYSTEMD_PAGER line
  SED="/^# export SYSTEMD_PAGER/adeclare -a MASTERS=(${!MASTERS[@]})"
  sed -i "${SED}" ${SRC_TMPDIR}/.bashrc
  # ...and the full NODES list right after the MASTERS declaration
  SED="/^declare -a MASTERS/adeclare -a NODES=(${!NODES[@]})"
  sed -i "${SED}" ${SRC_TMPDIR}/.bashrc
  # masterExec/nodeExec aliases: run an arbitrary command over ssh on
  # every master / every node (the \${...} escapes keep the expansion
  # deferred until the alias itself runs)
  sed -i "/^alias logs=.*$/aalias masterExec='_f() { for host in \${MASTERS[@]}; do echo \"Executing in host: \${host}\"; ssh \${USER}@\${host} \"\$@\"; echo ''; done; }; _f'" ${SRC_TMPDIR}/.bashrc
  sed -i "/^alias masterExec=.*$/aalias nodeExec='_f() { for host in \${NODES[@]}; do echo \"Executing in host: \${host}\"; ssh \${USER}@\${host} \"\$@\"; echo ''; done; }; _f'" ${SRC_TMPDIR}/.bashrc
  echo "ok"
  echo ""

  # copy .bashrc to all nodes & mkdir
  # NOTE(review): the loop body executes locally on each iteration;
  # presumably TMP_DIR is a per-host staging mount (or the script is run
  # once per host) -- confirm before relying on multi-node distribution.
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Initializing directories, declare, alias, etc..."
    cp -Rf ${SRC_TMPDIR}/.bashrc ${TMP_DIR}/ >/dev/null 2>&1
    mv -f ${TMP_DIR}/.bashrc ${HOME}/.bashrc
    echo "ok"
  done
  echo ""

  echo "------ Ending of initializing -------------------------------"
  echo ""
}

# deploy mode: create CA
# Installs the cfssl toolchain, generates the self-signed cluster CA
# cert/key pair from the templates, and stages ca*.pem into SSL_DIR for
# every node.  Gate: MAP_DEPLOY["ca"].
function deploy_ca()
{
  [ "${MAP_DEPLOY["ca"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying CA files..."
  echo "-------------------------------------------------------------"

  # copy cfssl packages
  echo -n "Running on localhost: Copying cfssl packages..."
  local -r SRC_DIR="${BASE_SRC_DIR}/cfssl"
  local -ra CFSSL_FILES=("cfssl" "cfssl-certinfo" "cfssljson")
  mkdir -p ${SSL_DIR}
  # binaries ship as <name>_linux-amd64; install them under their bare names
  for file in ${CFSSL_FILES[@]}; do
    cp -f "${SRC_DIR}/${file}_linux-amd64" "${SSL_DIR}/${file}"
  done
  echo "ok"

  # creating ca
  echo -n "Building CA cert & key files..."
  cp -f ${TEMPLATE_DIR}/ca/ca-{config,csr}.json ${SRC_TMPDIR}
  chown root:root ${SRC_TMPDIR}/ca-{config,csr}.json
  pushd ${SRC_TMPDIR} >/dev/null 2>&1
  # self-sign the CA; cfssljson splits the bundle into ca.pem / ca-key.pem
  (${SSL_DIR}/cfssl gencert -initca ca-csr.json | ${SSL_DIR}/cfssljson -bare ca) #>/dev/null 2>&1
  popd >/dev/null 2>&1
  cp -f ${SRC_TMPDIR}/ca-config.json ${SSL_DIR}
  echo "ok"
  echo ""

  # dispatch ca cert & key files
  # key is made readable only for staging; tightened back to 600 per node
  chmod +r ${SRC_TMPDIR}/ca-key.pem
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Copying ca files..."
    cp -Rf ${SRC_TMPDIR}/ca*.pem ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${SSL_DIR}
    chown root:root ${TMP_DIR}/ca*.pem
    chmod 600 ${TMP_DIR}/ca-key.pem
    cp -f ${TMP_DIR}/ca*.pem ${SSL_DIR}
    echo "ok"
  done
  echo ""

  echo "------ Ending of CA deployment-------------------------------"
  echo ""
}

# Deploy the etcd cluster: sign etcd certs against the cluster CA, render
# the systemd unit + conf from templates, unpack the etcd tarball on every
# master and start the service.  Gate: MAP_DEPLOY["etcd"].
function deploy_etcd()
{
  [ "${MAP_DEPLOY["etcd"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying etcd service..."
  echo "-------------------------------------------------------------"

  # etcd.sh
  echo -n "Building etcd.sh ..."
  cp -f ${TEMPLATE_DIR}/conf/etcd.sh ${SRC_TMPDIR}
  sed -i "s#\${ETCD_ENDPOINTS}#\"${ETCD_ENDPOINTS}\"#g" ${SRC_TMPDIR}/etcd.sh
  echo "ok"

  # etcd cert & key: insert one SAN entry per master before the csr
  # placeholders, then delete the placeholder lines themselves
  echo -n "Building etcd cert & key files..."
  cp -f ${TEMPLATE_DIR}/etcd/etcd-csr.json ${SRC_TMPDIR}
  for host in ${!MASTERS[@]}; do
    sed -i "/\"\${ip list}\"/i \ \ \ \ \ \ \ \ \"${MASTERS[${host}]}\" ," ${SRC_TMPDIR}/etcd-csr.json
    sed -i "/\"\${hostname list}\"/i \ \ \ \ \ \ \ \ \"${host}\" ," ${SRC_TMPDIR}/etcd-csr.json
  done
  sed -i "/\"\${ip list}\"/d;/\"\${hostname list}\"/d" ${SRC_TMPDIR}/etcd-csr.json
  chown root:root ${SRC_TMPDIR}/etcd-csr.json

  pushd ${SRC_TMPDIR} >/dev/null 2>&1
  (${SSL_DIR}/cfssl gencert -ca=${SSL_DIR}/ca.pem -ca-key=${SSL_DIR}/ca-key.pem -config=${SSL_DIR}/ca-config.json -profile=kubernetes etcd-csr.json | ${SSL_DIR}/cfssljson -bare etcd) >/dev/null 2>&1
  popd >/dev/null 2>&1
  # readable only for staging; tightened back to 600 per node below
  chmod +r ${SRC_TMPDIR}/etcd-key.pem
  echo "ok"
  echo ""

  # dispatch files upon to all nodes
  # ATTENTION: ALL NODES
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Copying etcd profile, cert & key files..."
    cp -Rf ${SRC_TMPDIR}/etcd.sh ${SRC_TMPDIR}/etcd*.pem ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${APP_DIR}/etcd
    chmod 700 ${APP_DIR}/etcd
    chown root:root ${TMP_DIR}/etcd.sh ${TMP_DIR}/etcd*.pem
    chmod 600 ${TMP_DIR}/etcd-key.pem
    cp -f ${TMP_DIR}/etcd.sh /etc/profile.d/
    cp -f ${TMP_DIR}/etcd*.pem ${SSL_DIR}
    echo "ok"
  done
  echo ""

  # etcd service
  echo -n "Building etcd service & conf files..."
  cp -f ${TEMPLATE_DIR}/etcd/etcd.service ${SRC_TMPDIR}
  sed -i "s#\${APP_DIR}#${APP_DIR}#g" ${SRC_TMPDIR}/etcd.service
  sed -i "s#\${K8S_BIN_DIR}#${K8S_BIN_DIR}#g" ${SRC_TMPDIR}/etcd.service

  cp -f ${TEMPLATE_DIR}/etcd/etcd.conf ${SRC_TMPDIR}
  sed -i "s#\${APP_DIR}#${APP_DIR}#g" ${SRC_TMPDIR}/etcd.conf
  sed -i "s#\${ETCD_CLUSTER}#${ETCD_CLUSTER}#g" ${SRC_TMPDIR}/etcd.conf
  sed -i "s#\${SSL_DIR}#${SSL_DIR}#g" ${SRC_TMPDIR}/etcd.conf
  echo "ok"
  echo ""

  # install etcd package & dispatch files
  local -r ETCD_SRC_DIR="${BASE_SRC_DIR}/k8s"
  local -r ETCD_DEST_DIR="etcd-v${ETCD_VER}-linux-amd64"
  local -r ETCD_PKG="${ETCD_DEST_DIR}.tar.gz"
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Copying etcd service & conf files, installing packages..."
    cp -Rf ${SRC_TMPDIR}/etcd.{service,conf} ${TMP_DIR} >/dev/null 2>&1
    tar -xzf ${ETCD_SRC_DIR}/${ETCD_PKG} -C /opt/
    chown root:root -R /opt/${ETCD_DEST_DIR}
    ln -sf /opt/${ETCD_DEST_DIR} /opt/etcd
    # per-host substitutions: etcd member name and its listen address
    sed -i "s#\${hostname}#${host}#g" ${TMP_DIR}/etcd.conf
    sed -i "s#\${ip}#${MASTERS[${host}]}#g" ${TMP_DIR}/etcd.conf
    chown root:root ${TMP_DIR}/etcd.{service,conf}
    cp -f ${TMP_DIR}/etcd.service ${SERVICE_DIR}
    cp -f ${TMP_DIR}/etcd.conf /opt/etcd/
    echo "ok"
  done
  echo ""

  # start etcd service
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Starting etcd service..."
    systemctl daemon-reload
    # BUG FIX: was "systemctl enabled etcd" -- "enabled" is not a systemctl
    # verb (only "is-enabled" exists, and it merely queries state);
    # "enable" is required to register the unit for boot.
    systemctl enable etcd >/dev/null 2>&1
    # started in the background: a member blocks until a quorum of peers is up
    nohup systemctl start etcd >/dev/null 2>&1 &
    echo "ok"
  done
  echo ""
  sleep 2s # wait 2 seconds for etcd cluster be ready

  echo "------ Ending of etcd deployment-----------------------------"
  echo ""
}

# Deploy the HA front end for a multi-master cluster: render haproxy.cfg
# with one backend server per master, install the haproxy rpm set on each
# master and (re)start the service.  Gates: MAP_DEPLOY["ha"] and at least
# one master defined.
function deploy_ha()
{
  [ "${MAP_DEPLOY["ha"]}" != "true" ] && return
  [ ${MASTER_COUNT} -eq 0 ] && return

  echo "-------------------------------------------------------------"
  echo "Deploying HA services (haproxy & keepalived)..."
  echo "-------------------------------------------------------------"

  # building haproxy.cfg
  echo -n "Building haproxy.cfg..."
  cp -f ${TEMPLATE_DIR}/haproxy/haproxy.cfg ${SRC_TMPDIR}
  sed -i "s#\${APP_DIR}#${APP_DIR}\/haproxy#g" ${SRC_TMPDIR}/haproxy.cfg
  sed -i "s#\${APISERVER_PORT}#${APISERVER_PORT}#g" ${SRC_TMPDIR}/haproxy.cfg
  # one backend line per master, inserted after the placeholder, which
  # is then removed
  for host in ${!MASTERS[@]}; do
    sed -i "/\${server list}$/a \ \ \ \ server  ${host}  ${MASTERS[${host}]}:6443  check  inter  2000  fall 2  rise 2 weight 1" ${SRC_TMPDIR}/haproxy.cfg
  done
  sed -i "/\${server list}$/d" ${SRC_TMPDIR}/haproxy.cfg
  echo "ok"
  echo ""

  # dispatch & building keepalived.conf
  # NOTE(review): keepalived.conf is staged to TMP_DIR below, but no
  # keepalived package install, conf placement, or service start appears
  # in this function -- confirm whether keepalived is handled elsewhere.
  local -i num=0
  local -i priority=0
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Install haproxy & keepalived, copy & modify conf files..."
    cp -Rf ${SRC_TMPDIR}/haproxy.cfg ${TEMPLATE_DIR}/keepalived/keepalived.conf ${TMP_DIR}
    # install packages
    yum install -y ${RPM_DIR}/haproxy-1.5.18-9.el7_9.1.x86_64.rpm \
      ${RPM_DIR}/psmisc-22.20-17.el7.x86_64.rpm \
      ${RPM_DIR}/lm_sensors-libs-3.4.0-8.20160601gitf9185e5.el7.x86_64.rpm \
      ${RPM_DIR}/net-snmp-agent-libs-5.7.2-49.el7_9.2.x86_64.rpm \
      ${RPM_DIR}/net-snmp-libs-5.7.2-49.el7_9.2.x86_64.rpm
    mkdir -p ${APP_DIR}/haproxy
    chown -R haproxy:haproxy ${APP_DIR}/haproxy
    # copy files to dest
    chown root:root ${TMP_DIR}/haproxy.cfg
    cp -f ${TMP_DIR}/haproxy.cfg /etc/haproxy/
    echo "ok"
  done
  echo ""

  # restart start service
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Starting haproxy & keepalived service..."
    systemctl daemon-reload
    systemctl enable haproxy  >/dev/null 2>&1
    systemctl restart haproxy
    echo "ok"
  done
  echo ""

  echo "------ Ending of HA deployment ------------------------------"
  sleep 2s
  echo ""
}

# deploy mode: k8s packages with autoCompletion
# Aggregate step: install the k8s server binaries on every node, then
# configure kubectl bash completion.  Both sub-steps gate themselves on
# MAP_DEPLOY["k8s"], so this wrapper is a no-op when "k8s" is not selected.
function deploy_k8s()
{
  deploy_k8s_packages
  deploy_autoCompletion
}

# Install the kubernetes server binaries on every node: render the profile
# and sysctl templates, unpack the server tarball under /opt, symlink the
# binaries into K8S_BIN_DIR, disable swap and apply sysctl settings.
# Gate: MAP_DEPLOY["k8s"].
function deploy_k8s_packages()
{
  [ "${MAP_DEPLOY["k8s"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying k8s packages..."
  echo "-------------------------------------------------------------"

  # building kubernetes.sh, kubernetes.conf...
  echo -n "Building kubernetes.sh, kubernetes.conf ..."
  cp -f ${TEMPLATE_DIR}/conf/kubernetes.{sh,conf} ${SRC_TMPDIR}
  sed -i "s#\${K8S_BASE_DIR}#${K8S_BASE_DIR}#g" ${SRC_TMPDIR}/kubernetes.sh
  echo "ok"
  echo ""

  # install packages & dispatch files...
  local -r K8S_SRC_DIR="${BASE_SRC_DIR}/k8s"
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Installing k8s packages, initialing environments..."
    cp -Rf ${SRC_TMPDIR}/kubernetes.{sh,conf} ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${K8S_BASE_DIR} ${K8S_BIN_DIR} ${K8S_CONF_DIR} ${K8S_YAML_DIR} ${K8S_TOKEN_DIR}
    tar -xzf ${K8S_SRC_DIR}/kubernetes-server-linux-amd64.tar.gz -C /opt/
    ln -sf /opt/kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubelet,kubectl,kubeadm,kube-proxy,apiextensions-apiserver,mounter} ${K8S_BIN_DIR}
    chown root:root ${TMP_DIR}/kubernetes.{sh,conf}
    cp -f ${TMP_DIR}/kubernetes.sh /etc/profile.d/
    cp -f ${TMP_DIR}/kubernetes.conf /etc/sysctl.d/
    # comment out swap mounts in fstab: kubelet requires swap to be off
    sed -i '/^\/dev.*swap / s/^\(.*\)$/#\1/g' /etc/fstab
    sysctl -p /etc/sysctl.d/kubernetes.conf >/dev/null 2>&1
    echo "ok"
  done
  echo ""

  echo "------ Ending of k8s packages deployment --------------------"
  echo ""
}

# Install the bash-completion rpm and hook kubectl completion into
# ~/.bashrc.  Gate: MAP_DEPLOY["k8s"] (same gate as the package install).
function deploy_autoCompletion()
{
  [ "${MAP_DEPLOY["k8s"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying autoCompletion..."
  echo "-------------------------------------------------------------"

  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Config autoCompletion...."
    yum install -y ${RPM_DIR}/bash-completion-2.1-8.el7.noarch.rpm >/dev/null 2>&1
    # BUG FIX: guard the append -- the per-node loop used to append the
    # same two lines to the local ~/.bashrc on every iteration, so the
    # completion hook was duplicated once per node (and per re-run).
    if ! grep -q "source <(kubectl completion bash)" ${HOME}/.bashrc; then
      echo "# kubectl auto-complete" >> ${HOME}/.bashrc
      echo "source <(kubectl completion bash)" >> ${HOME}/.bashrc
    fi
    echo "ok"
  done
  echo ""

  echo "------ Ending of autoCompletion deployment ------------------"
  echo ""
}

# deploy mode: kube-apiserver
# Full apiserver rollout: sign the kubernetes/admin/metrics-server certs
# against the cluster CA, generate the bootstrap token, render the systemd
# service + conf templates, dispatch everything to the masters, start the
# service, then build the default admin kubeconfig for every node and
# grant the apiserver access to the kubelet API.
# Gate: MAP_DEPLOY["apiserver"].
function deploy_apiserver()
{
  [ "${MAP_DEPLOY["apiserver"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kube-apiServer service..."
  echo "-------------------------------------------------------------"

  pushd ${SRC_TMPDIR} >/dev/null 2>&1
  # build kubernetes cert & key: every node IP becomes a SAN entry,
  # inserted before the placeholder which is then removed
  echo -n "Building kubernetes cert & key files..."
  cp -f ${TEMPLATE_DIR}/apiserver/kubernetes-csr.json ${SRC_TMPDIR}
  for host in ${!NODES[@]}; do
    sed -i "/\${host ips}/i \ \ \ \ \ \ \"${NODES[${host}]}\"," ${SRC_TMPDIR}/kubernetes-csr.json
  done
  sed -i "/\${host ips}/d" ${SRC_TMPDIR}/kubernetes-csr.json
  sed -i "s#\${cluster ip}#${CLUSTER_IP_SEGMENT}.1#g" ${SRC_TMPDIR}/kubernetes-csr.json
  # if master is cluster, certs must include vip, remove template otherwise.
  if [ ${MASTER_COUNT} -gt 0 ]; then
    sed -i "s#\${host vip}#${APISERVER_IP}#g" ${SRC_TMPDIR}/kubernetes-csr.json
  else
    sed -i "/\${host vip}/d" ${SRC_TMPDIR}/kubernetes-csr.json
  fi
  (${SSL_DIR}/cfssl gencert -ca=${SSL_DIR}/ca.pem -ca-key=${SSL_DIR}/ca-key.pem -config=${SSL_DIR}/ca-config.json -profile=kubernetes kubernetes-csr.json | ${SSL_DIR}/cfssljson -bare kubernetes) >/dev/null 2>&1
  echo "ok"

  # build admin cert & key (client cert for the cluster-admin kubeconfig)
  echo -n "Building admin cert & key files..."
  cp -f ${TEMPLATE_DIR}/apiserver/admin-csr.json ${SRC_TMPDIR}
  (${SSL_DIR}/cfssl gencert -ca=${SSL_DIR}/ca.pem -ca-key=${SSL_DIR}/ca-key.pem -config=${SSL_DIR}/ca-config.json -profile=kubernetes admin-csr.json | ${SSL_DIR}/cfssljson -bare admin) >/dev/null 2>&1
  echo "ok"

  # build metrics-server cert & key
  echo -n "Building metrics-server cert & key files..."
  cp -f ${TEMPLATE_DIR}/apiserver/metrics-server-csr.json ${SRC_TMPDIR}
  (${SSL_DIR}/cfssl gencert -ca=${SSL_DIR}/ca.pem -ca-key=${SSL_DIR}/ca-key.pem -config=${SSL_DIR}/ca-config.json -profile=kubernetes metrics-server-csr.json | ${SSL_DIR}/cfssljson -bare metrics-server) >/dev/null 2>&1
  echo "ok"

  # build token: random 32-hex-char bootstrap token for kubelet TLS bootstrap
  echo -n "Building bootstrap-token.csv..."
  cp -f ${TEMPLATE_DIR}/apiserver/bootstrap-token.csv ${SRC_TMPDIR}
  local -r TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
  sed -i "s#\${TOKEN}#${TOKEN}#g" ${SRC_TMPDIR}/bootstrap-token.csv
  echo "ok"

  # build basic-auth.csv, copy
  # build audit-policy.yaml, copy
  echo "Building basic-auth.csv, audit-policy-min.yaml, same as template."

  # build kube-apiserver service & conf file
  # BUG FIX: message previously read "srvice"
  echo -n "Building kube-apiserver service & conf files..."
  cp -f ${TEMPLATE_DIR}/apiserver/kube-apiserver.{service,conf} ${SRC_TMPDIR}
  sed -i "s#\${K8S_CONF_DIR}#${K8S_CONF_DIR}#g" ${SRC_TMPDIR}/kube-apiserver.service
  sed -i "s#\${K8S_BIN_DIR}#${K8S_BIN_DIR}#g" ${SRC_TMPDIR}/kube-apiserver.service
  sed -i "s#\${APISERVER_COUNT}#${MASTER_COUNT}#g" ${SRC_TMPDIR}/kube-apiserver.service
  sed -i "s#\${ETCD_ENDPOINTS}#${ETCD_ENDPOINTS}#g" ${SRC_TMPDIR}/kube-apiserver.conf
  sed -i "s#\${CLUSTER_IP_SEGMENT}#${CLUSTER_IP_SEGMENT}#g" ${SRC_TMPDIR}/kube-apiserver.conf
  sed -i "s#\${SSL_DIR}#${SSL_DIR}#g" ${SRC_TMPDIR}/kube-apiserver.conf
  sed -i "s#\${APP_DIR}#${APP_DIR}#g" ${SRC_TMPDIR}/kube-apiserver.conf
  sed -i "s#\${K8S_YAML_DIR}#${K8S_YAML_DIR}#g" ${SRC_TMPDIR}/kube-apiserver.conf
  sed -i "s#\${K8S_TOKEN_DIR}#${K8S_TOKEN_DIR}#g" ${SRC_TMPDIR}/kube-apiserver.conf
  echo "ok"

  # keys readable only for staging; tightened back to 600 per master below
  chmod +r kubernetes-key.pem admin-key.pem metrics-server-key.pem
  popd >/dev/null 2>&1
  echo ""

  # dispatch files upon, only master nodes
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Copy kube-apiserver keys & certs, service & conf files.."
    cp -Rf ${SRC_TMPDIR}/{kubernetes,admin,metrics-server}*.pem \
      ${SRC_TMPDIR}/bootstrap-token.csv ${TEMPLATE_DIR}/apiserver/basic-auth.csv \
      ${TEMPLATE_DIR}/apiserver/audit-policy-min.yaml \
      ${SRC_TMPDIR}/kube-apiserver.{service,conf} \
      ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${APP_DIR}/k8s/apiserver
    # per-host substitution: advertise/bind address for this master
    sed -i "s#\${hostip}#${MASTERS[${host}]}#g" ${TMP_DIR}/kube-apiserver.conf
    chown root:root ${TMP_DIR}/{kubernetes,admin,metrics-server}*.pem \
      ${TMP_DIR}/{bootstrap-token,basic-auth}.csv ${TMP_DIR}/audit-policy-min.yaml \
      ${TMP_DIR}/kube-apiserver.{service,conf}
    cp -f ${TMP_DIR}/{kubernetes,admin,metrics-server}*.pem ${SSL_DIR}
    chmod 600 ${SSL_DIR}/{kubernetes,admin,metrics-server}-key.pem
    cp -f ${TMP_DIR}/{bootstrap-token,basic-auth}.csv ${K8S_TOKEN_DIR}
    cp -f ${TMP_DIR}/audit-policy-min.yaml ${K8S_YAML_DIR}
    cp -f ${TMP_DIR}/kube-apiserver.service ${SERVICE_DIR}
    cp -f ${TMP_DIR}/kube-apiserver.conf ${K8S_CONF_DIR}
    echo "ok"
  done
  echo ""

  # start service
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Starting kube-apiserver service..."
    systemctl daemon-reload
    systemctl enable kube-apiserver >/dev/null 2>&1
    systemctl restart kube-apiserver >/dev/null 2>&1
    echo "ok"
  done
  echo ""

  # build default kubeconfig, grant roles
  echo -n "Building kubectl default kubeconfig file..."
  local -r KUBECONFIG=${SRC_TMPDIR}/config
  ${KUBECTL} config set-cluster kubernetes --certificate-authority=${SSL_DIR}/ca.pem --embed-certs=true --server=https://${APISERVER_IP}:${APISERVER_PORT} --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-credentials admin --client-certificate=${SSL_DIR}/admin.pem --client-key=${SSL_DIR}/admin-key.pem --embed-certs=true --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config use-context kubernetes --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  chown ${USER}:${GROUP} ${KUBECONFIG}
  echo "ok"
  echo ""

  # install the admin kubeconfig for both USER and root on every node
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Copying kubectl kubeconfig file..."
    cp -Rf ${KUBECONFIG} ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${HOME}/.kube
    cp -f ${TMP_DIR}/config ${HOME}/.kube/
    mkdir -p /root/.kube
    cp -f ${TMP_DIR}/config /root/.kube/
    chown root:root /root/.kube/config
    echo "ok"
  done
  echo ""

  echo -n "Running on localhost: Grant to visit kubectl api..."
  # localhost is one of master nodes
  ${KUBECTL} create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user=kubernetes
  echo ""

  echo "------ Ending of kube-apiServer deployment ------------------"
  echo ""
}

# deploy mode: kube-controller-manager
# Signs the controller-manager client cert, builds its kubeconfig against
# the apiserver VIP, renders the systemd service & conf templates,
# dispatches everything to the master nodes and (re)starts the service.
# Gate: MAP_DEPLOY["controller"].
function deploy_controller()
{
  [ "${MAP_DEPLOY["controller"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kube-controller-manager service..."
  echo "-------------------------------------------------------------"

  # build kube-controller-manager cert & key files
  echo -n "Building kube-controller-manager cert & key files..."
  pushd ${SRC_TMPDIR} >/dev/null 2>&1
  cp -f ${TEMPLATE_DIR}/controller/kube-controller-manager-csr.json ${SRC_TMPDIR}
  (${SSL_DIR}/cfssl gencert -ca=${SSL_DIR}/ca.pem -ca-key=${SSL_DIR}/ca-key.pem -config=${SSL_DIR}/ca-config.json -profile=kubernetes kube-controller-manager-csr.json | ${SSL_DIR}/cfssljson -bare kube-controller-manager) >/dev/null 2>&1
  # readable only for staging; tightened back to 600 per master below
  chmod +r ${SRC_TMPDIR}/kube-controller-manager-key.pem
  popd >/dev/null 2>&1
  echo "ok"

  # build kube-controller-manager kubeconfig
  echo -n "Building kube-controller-manager kubeconfig..."
  local -r KUBECONFIG=${SRC_TMPDIR}/kube-controller-manager.kubeconfig
  ${KUBECTL} config set-cluster kubernetes --certificate-authority=${SSL_DIR}/ca.pem --embed-certs=true --server=https://${APISERVER_IP}:${APISERVER_PORT} --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-credentials system:kube-controller-manager --client-certificate=${SRC_TMPDIR}/kube-controller-manager.pem --client-key=${SRC_TMPDIR}/kube-controller-manager-key.pem --embed-certs=true --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config use-context system:kube-controller-manager --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  chmod +r ${KUBECONFIG}
  echo "ok"

  # build kube-controller-manager service & conf files
  echo -n "Building kube-controller-manager service & conf files..."
  cp -f ${TEMPLATE_DIR}/controller/kube-controller-manager.{service,conf} ${SRC_TMPDIR}
  sed -i "s#\${K8S_CONF_DIR}#${K8S_CONF_DIR}#g" ${SRC_TMPDIR}/kube-controller-manager.service
  sed -i "s#\${K8S_BIN_DIR}#${K8S_BIN_DIR}#g" ${SRC_TMPDIR}/kube-controller-manager.service
  sed -i "s#\${KUBECONFIG}#${K8S_CONF_DIR}/kube-controller-manager.kubeconfig#g" ${SRC_TMPDIR}/kube-controller-manager.conf
  sed -i "s#\${APISERVER_IP}#${APISERVER_IP}#g" ${SRC_TMPDIR}/kube-controller-manager.conf
  sed -i "s#\${APISERVER_PORT}#${APISERVER_PORT}#g" ${SRC_TMPDIR}/kube-controller-manager.conf
  sed -i "s#\${SSL_DIR}#${SSL_DIR}#g" ${SRC_TMPDIR}/kube-controller-manager.conf
  sed -i "s#\${CLUSTER_IP_SEGMENT}#${CLUSTER_IP_SEGMENT}#g" ${SRC_TMPDIR}/kube-controller-manager.conf
  sed -i "s#\${POD_IP_SEGMENT}#${POD_IP_SEGMENT}#g" ${SRC_TMPDIR}/kube-controller-manager.conf
  sed -i "s#\${APP_DIR}#${APP_DIR}#g" ${SRC_TMPDIR}/kube-controller-manager.conf
  echo "ok"
  echo ""

  # dispatch files upon
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[$host]}): Copy kube-controller-manager cert & key, kubeconfig, service & conf files..."
    cp -Rf ${SRC_TMPDIR}/kube-controller-manager*.pem ${SRC_TMPDIR}/kube-controller-manager.{kubeconfig,service,conf} ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${APP_DIR}/k8s/controller
    # per-host substitution: this master's address in the unit file
    sed -i "s#\${hostip}#${MASTERS[$host]}#g" ${TMP_DIR}/kube-controller-manager.service
    chown root:root ${TMP_DIR}/kube-controller-manager*.pem ${TMP_DIR}/kube-controller-manager.{kubeconfig,service,conf}
    chmod 600 ${TMP_DIR}/kube-controller-manager-key.pem ${TMP_DIR}/kube-controller-manager.kubeconfig
    cp -f ${TMP_DIR}/kube-controller-manager*.pem ${SSL_DIR}
    cp -f ${TMP_DIR}/kube-controller-manager.{kubeconfig,conf} ${K8S_CONF_DIR}
    cp -f ${TMP_DIR}/kube-controller-manager.service ${SERVICE_DIR}
    echo "ok"
  done
  echo ""

  # start service
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Starting kube-controller-manager service..."
    systemctl daemon-reload
    systemctl enable kube-controller-manager >/dev/null 2>&1
    systemctl restart kube-controller-manager
    echo "ok"
  done
  echo ""

  echo "------ Ending of kube-controller-manager deployment ---------"
  echo ""
}

# deploy mode: kube-scheduler
# Signs the scheduler client cert (with every master IP as a SAN), builds
# its kubeconfig, renders the systemd service & conf templates, dispatches
# to the master nodes and (re)starts the service.
# Gate: MAP_DEPLOY["scheduler"].
function deploy_scheduler()
{
  [ "${MAP_DEPLOY["scheduler"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kube-scheduler service..."
  echo "-------------------------------------------------------------"

  # build kube-scheduler cert & key files
  echo -n "Building kube-scheduler cert & key files..."
  cp -f ${TEMPLATE_DIR}/scheduler/kube-scheduler-csr.json ${SRC_TMPDIR}
  # insert one SAN entry per master before the placeholder, then drop it
  for host in ${!MASTERS[@]}; do
    sed -i "/\${master ip list}/i \ \ \ \ \"${MASTERS[${host}]}\"," ${SRC_TMPDIR}/kube-scheduler-csr.json
  done
  sed -i "/\${master ip list}/d" ${SRC_TMPDIR}/kube-scheduler-csr.json

  pushd ${SRC_TMPDIR} >/dev/null 2>&1
  (${SSL_DIR}/cfssl gencert -ca=${SSL_DIR}/ca.pem -ca-key=${SSL_DIR}/ca-key.pem -config=${SSL_DIR}/ca-config.json -profile=kubernetes kube-scheduler-csr.json | ${SSL_DIR}/cfssljson -bare kube-scheduler) >/dev/null 2>&1
  # readable only for staging; tightened back to 600 per master below
  chmod +r kube-scheduler-key.pem
  popd >/dev/null 2>&1
  echo "ok"

  # build kube-scheduler kubeconfig
  echo -n "Building kube-scheduler kubeconfig..."
  local -r KUBECONFIG=${SRC_TMPDIR}/kube-scheduler.kubeconfig
  ${KUBECTL} config set-cluster kubernetes --certificate-authority=${SSL_DIR}/ca.pem --embed-certs=true --server=https://${APISERVER_IP}:${APISERVER_PORT} --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-credentials system:kube-scheduler --client-certificate=${SRC_TMPDIR}/kube-scheduler.pem --client-key=${SRC_TMPDIR}/kube-scheduler-key.pem --embed-certs=true --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config use-context system:kube-scheduler --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  chmod +r ${KUBECONFIG}
  echo "ok"

  # build kube-scheduler service & conf files
  echo -n "Building kube-scheduler service & conf files..."
  cp -f ${TEMPLATE_DIR}/scheduler/kube-scheduler.{service,conf} ${SRC_TMPDIR}
  sed -i "s#\${K8S_CONF_DIR}#${K8S_CONF_DIR}#g" ${SRC_TMPDIR}/kube-scheduler.{service,conf}
  sed -i "s#\${K8S_BIN_DIR}#${K8S_BIN_DIR}#g" ${SRC_TMPDIR}/kube-scheduler.service
  sed -i "s#\${APP_DIR}#${APP_DIR}#g" ${SRC_TMPDIR}/kube-scheduler.conf
  echo "ok"
  echo ""

  # dispatch files upon to masters
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Copying kube-scheduler cert & key, kubeconfig, service & conf files..."
    cp -Rf ${SRC_TMPDIR}/kube-scheduler*.pem ${SRC_TMPDIR}/kube-scheduler.{kubeconfig,service,conf} ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${APP_DIR}/k8s/scheduler
      sed -i "s#\${hostip}#${MASTERS[${host}]}#g" ${TMP_DIR}/kube-scheduler.service
      chown root:root ${TMP_DIR}/kube-scheduler*.pem ${TMP_DIR}/kube-scheduler.{kubeconfig,service,conf}
      chmod 600 ${TMP_DIR}/kube-scheduler-key.pem ${TMP_DIR}/kube-scheduler.kubeconfig
      cp -f ${TMP_DIR}/kube-scheduler*.pem ${SSL_DIR}
      cp -f ${TMP_DIR}/kube-scheduler.{kubeconfig,conf} ${K8S_CONF_DIR}
      cp -f ${TMP_DIR}/kube-scheduler.service ${SERVICE_DIR}
    echo "ok"
  done
  echo ""

  # start service
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Starting kube-scheduler service..."
    systemctl daemon-reload
    systemctl enable kube-scheduler >/dev/null 2>&1
    systemctl restart kube-scheduler
    echo "ok"
  done
  echo ""

  echo "------ Ending of kube-scheduler deployment ------------------"
  echo ""
}

# deploy mode: kubelet
function deploy_kubelet()
{
  # Skip unless the "kubelet" deploy mode was requested.
  [ "${MAP_DEPLOY["kubelet"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubelet service..."
  echo "-------------------------------------------------------------"

  # build bootstrap.kubeconfig, used by kubelet for TLS bootstrapping
  echo -n "Building bootstrap.kubeconfig..."
  local -r KUBE_APISERVER=https://${APISERVER_IP}:${APISERVER_PORT}
  local -r CAFILE=${SSL_DIR}/ca.pem
  # bootstrap-token.csv: the first comma-separated field is the token itself
  local -r TOKEN=$(cut -d ',' -f 1 ${K8S_TOKEN_DIR}/bootstrap-token.csv)
  local KUBECONFIG=${SRC_TMPDIR}/bootstrap.kubeconfig
  ${KUBECTL} config set-cluster kubernetes --certificate-authority=${CAFILE} --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-credentials kubelet-bootstrap --token=${TOKEN} --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config use-context default --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  chmod +r ${KUBECONFIG}
  echo "ok"

  # building kubelet.kubeconfig (same cluster entry, token-authenticated "kubelet" user)
  echo -n "Building kubelet.kubeconfig..."
  KUBECONFIG=${SRC_TMPDIR}/kubelet.kubeconfig
  ${KUBECTL} config set-cluster kubernetes --certificate-authority=${CAFILE} --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-credentials kubelet --token=${TOKEN} --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-context default --cluster=kubernetes --user=kubelet --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config use-context default --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  chmod +r ${KUBECONFIG}
  echo "ok"

  # grant the bootstrap user permission to request node certificates
  # (fix: suppress kubectl output and close the progress line with "ok",
  # matching every other step in this script)
  echo -n "Creating clusterrole & clusterrolebinding kubelet-bootstrap..."
  ${KUBECTL} create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap >/dev/null 2>&1
  echo "ok"

  # build kubelet service, conf & yaml files from templates
  echo -n "Building kubelet service, conf & yaml files..."
  cp -f ${TEMPLATE_DIR}/kubelet/kubelet.{service,conf,yaml} ${SRC_TMPDIR}
  sed -i "s#\${APP_DIR}#${APP_DIR}#g" ${SRC_TMPDIR}/kubelet.service
  sed -i "s#\${K8S_CONF_DIR}#${K8S_CONF_DIR}#g" ${SRC_TMPDIR}/kubelet.service
  sed -i "s#\${K8S_BIN_DIR}#${K8S_BIN_DIR}#g" ${SRC_TMPDIR}/kubelet.service
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/kubelet.conf
  sed -i "s#\${K8S_CONF_DIR}#${K8S_CONF_DIR}#g" ${SRC_TMPDIR}/kubelet.conf
  sed -i "s#\${K8S_YAML_DIR}#${K8S_YAML_DIR}#g" ${SRC_TMPDIR}/kubelet.conf
  sed -i "s#\${SSL_DIR}#${SSL_DIR}#g" ${SRC_TMPDIR}/kubelet.conf
  sed -i "s#\${APP_DIR}#${APP_DIR}#g" ${SRC_TMPDIR}/kubelet.conf
  sed -i "s#\${CLUSTER_IP_SEGMENT}#${CLUSTER_IP_SEGMENT}#g" ${SRC_TMPDIR}/kubelet.yaml
  sed -i "s#\${SSL_DIR}#${SSL_DIR}#g" ${SRC_TMPDIR}/kubelet.yaml
  echo "ok"
  echo ""

  # dispatch kubeconfigs, service, conf, yaml files; install ipvs/conntrack packages
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Copy kubelet files, install packages..."
    cp -Rf ${SRC_TMPDIR}/{bootstrap,kubelet}.kubeconfig ${SRC_TMPDIR}/kubelet.{service,conf,yaml} ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${APP_DIR}/k8s/kubelet
    yum install -y ${RPM_DIR}/ipvsadm-1.27-8.el7.x86_64.rpm \
      ${RPM_DIR}/bridge-utils-1.5-9.el7.x86_64.rpm \
      ${RPM_DIR}/conntrack-tools-1.4.4-7.el7.x86_64.rpm \
      ${RPM_DIR}/libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm \
      ${RPM_DIR}/libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm \
      ${RPM_DIR}/libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm >/dev/null 2>&1
    # per-host substitutions: node name into conf, node ip into yaml
    sed -i "s#\${hostname}#${host}#g" ${TMP_DIR}/kubelet.conf
    sed -i "s#\${hostip}#${NODES[${host}]}#g" ${TMP_DIR}/kubelet.yaml
    chown root:root ${TMP_DIR}/{bootstrap,kubelet}.kubeconfig ${TMP_DIR}/kubelet.{service,conf,yaml}
    chmod 600 ${TMP_DIR}/{bootstrap,kubelet}.kubeconfig
    cp -f ${TMP_DIR}/{bootstrap,kubelet}.kubeconfig ${K8S_CONF_DIR}
    cp -f ${TMP_DIR}/kubelet.service ${SERVICE_DIR}
    cp -f ${TMP_DIR}/kubelet.conf ${K8S_CONF_DIR}
    cp -f ${TMP_DIR}/kubelet.yaml ${K8S_YAML_DIR}
    echo "ok"
  done
  echo ""

  # start service
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Starting kubelet service..."
    systemctl daemon-reload
    systemctl enable kubelet >/dev/null 2>&1
    systemctl restart kubelet
    echo "ok"
  done
  echo ""

  # approve pending node CSRs once kubelet has had time to register
  # (-r: do not invoke "certificate approve" at all when nothing is Pending)
  echo "Viewing csr and approving TLS requests after 10 seconds when kubelet.service is ready..."
  sleep 10s
  ${KUBECTL} get csr | grep 'Pending' | awk '{print $1}' | xargs -r ${KUBECTL} certificate approve

  echo "------ Ending of kubelet deployment -------------------------"
  echo ""
}

# deploy mode: kube-proxy
function deploy_proxy()
{
  # Skip unless the "proxy" deploy mode was requested.
  [ "${MAP_DEPLOY["proxy"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kube-proxy service..."
  echo "-------------------------------------------------------------"

  # issue the kube-proxy client certificate from the cluster CA
  echo -n "Building kube-proxy cert & key files..."
  cp -f ${TEMPLATE_DIR}/proxy/kube-proxy-csr.json ${SRC_TMPDIR}
  pushd ${SRC_TMPDIR} >/dev/null 2>&1
  (${SSL_DIR}/cfssl gencert -ca=${SSL_DIR}/ca.pem -ca-key=${SSL_DIR}/ca-key.pem -config=${SSL_DIR}/ca-config.json -profile=kubernetes kube-proxy-csr.json | ${SSL_DIR}/cfssljson -bare kube-proxy) >/dev/null 2>&1
  chmod +r kube-proxy-key.pem
  popd >/dev/null 2>&1
  echo "ok"

  # build kube-proxy.kubeconfig with the client cert/key embedded
  echo -n "Building kube-proxy.kubeconfig..."
  local -r KUBECONFIG=${SRC_TMPDIR}/kube-proxy.kubeconfig
  ${KUBECTL} config set-cluster kubernetes --certificate-authority=${SSL_DIR}/ca.pem --embed-certs=true --server=https://${APISERVER_IP}:${APISERVER_PORT} --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-credentials kube-proxy --client-certificate=${SRC_TMPDIR}/kube-proxy.pem --client-key=${SRC_TMPDIR}/kube-proxy-key.pem --embed-certs=true --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  ${KUBECTL} config use-context default --kubeconfig=${KUBECONFIG} >/dev/null 2>&1
  chmod +r ${KUBECONFIG}
  echo "ok"

  # render kube-proxy service, conf and yaml templates
  echo -n "Building kube-proxy service & conf files..."
  cp -f ${TEMPLATE_DIR}/proxy/kube-proxy.{service,conf,yaml} ${SRC_TMPDIR}
  sed -i "s#\${K8S_CONF_DIR}#${K8S_CONF_DIR}#g" ${SRC_TMPDIR}/kube-proxy.{service,conf,yaml}
  sed -i "s#\${K8S_BIN_DIR}#${K8S_BIN_DIR}#g" ${SRC_TMPDIR}/kube-proxy.service
  sed -i "s#\${APP_DIR}#${APP_DIR}#g" ${SRC_TMPDIR}/kube-proxy.{service,conf}
  sed -i "s#\${K8S_YAML_DIR}#${K8S_YAML_DIR}#g" ${SRC_TMPDIR}/kube-proxy.conf
  sed -i "s#\${POD_IP_SEGMENT}#${POD_IP_SEGMENT}#g" ${SRC_TMPDIR}/kube-proxy.{conf,yaml}
  echo "ok"
  echo ""

  # dispatch files upon to all nodes
  for host in ${!NODES[@]}; do
    echo -n "Running on host ${host}(${NODES[${host}]}): Copying kube-proxy cert & key, kubeconfig, service, conf, yaml files..."
    cp -Rf ${SRC_TMPDIR}/kube-proxy*.pem ${SRC_TMPDIR}/kube-proxy.{kubeconfig,service,conf,yaml} ${TMP_DIR} >/dev/null 2>&1
    mkdir -p ${APP_DIR}/k8s/proxy
    # per-host substitutions: node ip into conf/yaml, node name into yaml
    sed -i "s#\${hostip}#${NODES[${host}]}#g" ${TMP_DIR}/kube-proxy.{conf,yaml}
    sed -i "s#\${hostname}#${host}#g" ${TMP_DIR}/kube-proxy.yaml
    chown root:root ${TMP_DIR}/kube-proxy*.pem ${TMP_DIR}/kube-proxy.{kubeconfig,service,conf,yaml}
    chmod 600 ${TMP_DIR}/kube-proxy-key.pem ${TMP_DIR}/kube-proxy.kubeconfig
    cp -f ${TMP_DIR}/kube-proxy*.pem ${SSL_DIR}
    cp -f ${TMP_DIR}/kube-proxy.service ${SERVICE_DIR}
    cp -f ${TMP_DIR}/kube-proxy.{kubeconfig,conf} ${K8S_CONF_DIR}
    cp -f ${TMP_DIR}/kube-proxy.yaml ${K8S_YAML_DIR}
    echo "ok"
  done
  echo ""

  # start service
  for host in ${!NODES[@]}; do
    echo -n "Running on host ${host}(${NODES[${host}]}): Starting kube-proxy service..."
    systemctl daemon-reload
    systemctl enable kube-proxy >/dev/null 2>&1
    systemctl restart kube-proxy
    echo "ok"
  done
  echo ""

  # (fix: messages previously said "kubelet-proxy" for the kube-proxy deployment)
  echo "------ Ending of kube-proxy deployment ----------------------"
  echo ""
}

# deploy mode: addons calico
function deploy_calico()
{
  # Skip unless the "calico" deploy mode was requested.
  [ "${MAP_DEPLOY["calico"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - calico (cni-plugin)"
  echo "-------------------------------------------------------------"

  # Render templates: substitute etcd endpoints and directory paths into the
  # profile script, CNI network conf and calicoctl config.
  echo -n "Building calico shell, conf, profile, cfg files..."
  cp -f ${TEMPLATE_DIR}/addons/calico/{calico.sh,10-calico.conf,calicoctl.cfg} ${SRC_TMPDIR}
  sed -i "s#\${ETCD_ENDPOINTS}#${ETCD_ENDPOINTS}#g" ${SRC_TMPDIR}/{10-calico.conf,calicoctl.cfg}
  sed -i "s#\${SSL_DIR}#${SSL_DIR}#g" ${SRC_TMPDIR}/{10-calico.conf,calicoctl.cfg}
  sed -i "s#\${K8S_CONF_DIR}#${K8S_CONF_DIR}#g" ${SRC_TMPDIR}/10-calico.conf
  echo "ok"

  # dispatch files to all nodes, install packages
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Installing cni packages, copying calico.sh, conf, cfg files..."
    cp -Rf ${SRC_TMPDIR}/{calico.sh,10-calico.conf,calicoctl.cfg} ${TMP_DIR} >/dev/null 2>&1
    mkdir -p /opt/cni/bin /opt/calico/{conf,yaml,bin} /etc/cni/net.d /etc/calico /var/lib/calico
    # The cni-plugins tarball is extracted into /opt/cni, then its contents are
    # linked into /opt/cni/bin below.
    tar -xzf ${BASE_SRC_DIR}/calico/cni-plugins-linux-amd64-v1.0.1.tgz -C /opt/cni/
    # NOTE(review): this glob also links /opt/cni/bin into itself (bin -> bin);
    # presumably harmless, but verify the intended layout.
    ln -sf /opt/cni/* /opt/cni/bin/
    chmod +x /opt/cni/bin/*
    cp -f ${BASE_SRC_DIR}/calico/calico-amd64 /opt/calico/bin/calico
    cp -f ${BASE_SRC_DIR}/calico/calico-ipam-amd64 /opt/calico/bin/calico-ipam
    cp -f ${BASE_SRC_DIR}/calico/calicoctl-linux-amd64 /opt/calico/bin/calicoctl
    ln -sf /opt/calico/bin/calico-ipam /opt/calico/bin/calico /opt/cni/bin/
    chown root:root ${TMP_DIR}/{calico.sh,10-calico.conf,calicoctl.cfg}
    # 10-calico.conf is installed as a .conflist CNI configuration
    cp -f ${TMP_DIR}/calico.sh /etc/profile.d/
    cp -f ${TMP_DIR}/10-calico.conf /etc/cni/net.d/10-calico.conflist
    cp -f ${TMP_DIR}/calicoctl.cfg /etc/calico/
    echo "ok"
  done
  echo ""

  # Build calico.yaml to deploy on local: embed the etcd CA/cert/key as
  # single-line base64 (-w 0) for the manifest's secret fields.
  echo -n "Building calico deploy file..."
  cp -f ${TEMPLATE_DIR}/addons/calico/calico.yaml ${SRC_TMPDIR}
  local -r ETCD_CA=$(base64 -w 0 ${SSL_DIR}/ca.pem)
  local -r ETCD_KEY=$(base64 -w 0 ${SSL_DIR}/etcd-key.pem)
  local -r ETCD_CERT=$(base64 -w 0 ${SSL_DIR}/etcd.pem)
  sed -i "s#\${ETCD_KEY}#${ETCD_KEY}#g" ${SRC_TMPDIR}/calico.yaml
  sed -i "s#\${ETCD_CERT}#${ETCD_CERT}#g" ${SRC_TMPDIR}/calico.yaml
  sed -i "s#\${ETCD_CA}#${ETCD_CA}#g" ${SRC_TMPDIR}/calico.yaml
  sed -i "s#\${ETCD_ENDPOINTS}#${ETCD_ENDPOINTS}#g" ${SRC_TMPDIR}/calico.yaml
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/calico.yaml
  sed -i "s#\${NETWORK_CARD}#${NETWORK_CARD}#g" ${SRC_TMPDIR}/calico.yaml
  sed -i "s#\${POD_IP_SEGMENT}#${POD_IP_SEGMENT}#g" ${SRC_TMPDIR}/calico.yaml
  echo "ok"
  echo ""

  # deploy calico to kubernetes cluster running on local
  # NOTE(review): NS is declared but the commands below use the literal
  # "kube-system" namespace instead.
  local -r NS="kube-system"
  echo "Deploying calico deamonset, it will take a long time, please wait patiently..."
  ${KUBECTL} apply -f ${SRC_TMPDIR}/calico.yaml
  ${KUBECTL} -n kube-system rollout status daemonsets calico-node
  ${KUBECTL} -n kube-system rollout status deploy calico-kube-controllers
  echo ""

  # view calico deamonset
  local -r CALICOCTL=/opt/calico/bin/calicoctl
  echo "Viewing daemonsets & pods, view calico nodes..."
  echo "-------------------------------------------------------------"
  ${KUBECTL} -n kube-system get daemonsets,pods -o wide
  echo "-------------------------------------------------------------"
  ${CALICOCTL} get node -o wide

  # Recreate the default ippool from our template (delete whatever pool the
  # daemonset created, then apply the rendered one).
  echo "Running on localhost: Recreate ippool to fix..."
  cp -f ${TEMPLATE_DIR}/addons/calico/calico-ippool.yaml ${SRC_TMPDIR}
  sed -i "s#\${POD_IP_SEGMENT}#${POD_IP_SEGMENT}#g" ${SRC_TMPDIR}/calico-ippool.yaml
  # skip the header row; assumes a single existing pool - TODO confirm
  local -r IPPOOL_NAME=$(${CALICOCTL} get ippool | awk 'NR>1{print $1}')
  ${CALICOCTL} delete ippool ${IPPOOL_NAME} >/dev/null 2>&1
  ${CALICOCTL} apply -f ${SRC_TMPDIR}/calico-ippool.yaml >/dev/null 2>&1
  ${CALICOCTL} get ippool -o wide
  echo ""

  # View all nodes' status and peers info.
  # NOTE(review): "calicoctl node status" runs locally; the per-host loop only
  # changes the echoed label, it does not execute remotely.
  for host in ${!NODES[@]}; do
    echo "Running on ${host}(${NODES[${host}]}): View calico node status..."
    echo "--------------------------------------"
    ${CALICOCTL} node status
  done

  echo "------ Ending of calico deployment --------------------------"
  echo ""
}

# deploy mode: addons coredns
function deploy_coredns()
{
  # Skip unless the "coredns" deploy mode was requested.
  [ "${MAP_DEPLOY["coredns"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - coreDNS..."
  echo "-------------------------------------------------------------"

  # build files
  echo -n "Building coreDNS deployment files..."
  # OUTTER_DNS: presumably the upstream resolver forwarded to - confirm;
  # INNER_DNS: cluster DNS service ip (.2 of the service CIDR)
  local -r OUTTER_DNS="172.18.0.4"
  local -r INNER_DNS="${CLUSTER_IP_SEGMENT}.2"
  cp -Rf ${TEMPLATE_DIR}/addons/coredns ${SRC_TMPDIR}
  sed -i "s#\${OUTTER_DNS}#${OUTTER_DNS}#g" ${SRC_TMPDIR}/coredns/01-coredns-config.yaml
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/coredns/21-coredns-deploy.yaml
  sed -i "s#\${INNER_DNS}#${INNER_DNS}#g" ${SRC_TMPDIR}/coredns/22-coredns-service.yaml
  echo "ok"
  echo "OUTTER_DNS=${OUTTER_DNS}"
  echo "INNER_DNS=${INNER_DNS}"

  # deploy
  local -r NS="kube-system"
  echo "Deploying coreDNS in a few seconds..."
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/coredns/
  ${KUBECTL} -n ${NS} rollout status deploy coredns
  echo ""

  # view (fix: reuse ${NS} instead of repeating the literal namespace)
  echo "Viewing coreDNS service, pods..."
  echo "--------------------------------------"
  ${KUBECTL} get -n ${NS} -o wide pods,svc
  echo ""

  echo "------ Ending of coreDNS deployment -------------------------"
  echo ""
}

# deploy mode: addons metrics-server
function deploy_metrics()
{
  # Skip unless the "metrics" deploy mode was requested.
  [ "${MAP_DEPLOY["metrics"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - metrics-server..."
  echo "-------------------------------------------------------------"

  # substitute the private registry into every manifest
  echo -n "Building metrics-server yaml files..."
  cp -Rf ${TEMPLATE_DIR}/addons/metrics-server ${SRC_TMPDIR}
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/metrics-server/*.yaml
  echo "ok"

  echo "Deploying metrics-server in a few seconds..."
  local -r NS="kube-system"
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/metrics-server
  ${KUBECTL} -n ${NS} rollout status deploy metrics-server
  echo ""

  # view (fix: reuse ${NS} instead of repeating the literal namespace)
  echo "Viewing metrics-server service,pods..."
  echo "--------------------------------------"
  ${KUBECTL} get -n ${NS} pods,services -o wide
  echo ""

  # wait one scrape interval so "kubectl top" has data to show
  echo "Viewing metrics after 1 MINUTE for metrics collecting..."
  echo "--------------------------------------"
  sleep 65s
  ${KUBECTL} top nodes --use-protocol-buffers
  ${KUBECTL} top pods -A --use-protocol-buffers

  echo "------ Ending of metrics-server deployment -------------------"
  echo ""
}

# deploy mode: addons ingress
function deploy_ingress()
{
  # Skip unless the "ingress" deploy mode was requested.
  [ "${MAP_DEPLOY["ingress"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - ingress..."
  echo "-------------------------------------------------------------"

  # substitute the private registry into the manifests
  echo -n "Building ingress deploy yaml files..."
  cp -Rf ${TEMPLATE_DIR}/addons/ingress ${SRC_TMPDIR}
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/ingress/*.yaml
  echo "ok"
  echo ""

  echo "Deploying ingress in a few seconds..."
  # namespace name comes from the template's metadata
  # (fix: awk reads the file directly, no need to pipe through cat)
  local -r NS=$(awk '/\ \ name: /{print $2}' ${SRC_TMPDIR}/ingress/00-ingress-namespace.yaml)
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/ingress
  ${KUBECTL} -n ${NS} rollout status deploy ingress-nginx-controller
  echo ""

  echo "Viewing ingress services,pods..."
  echo "--------------------------------------"
  # NOTE(review): assumes the namespace template declares "ingress-nginx"
  ${KUBECTL} get -n ingress-nginx -o wide pods,services

  echo "------ Ending of ingress deployment -------------------------"
  echo ""
}

# deploy mode: addons kubernetes-dashboard
function deploy_dashboard()
{
  # Skip unless the "dashboard" deploy mode was requested.
  [ "${MAP_DEPLOY["dashboard"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - kubernetes-dashboard..."
  echo "-------------------------------------------------------------"

  # issue the dashboard's serving certificate from the cluster CA
  echo -n "Building kubernetes-dashboard cert & key files..."
  cp -Rf ${TEMPLATE_DIR}/addons/dashboard ${SRC_TMPDIR}
  mv ${SRC_TMPDIR}/dashboard/kubernetes-dashboard-csr.json ${SRC_TMPDIR}
  pushd ${SRC_TMPDIR} >/dev/null 2>&1
  (${SSL_DIR}/cfssl gencert -ca=${SSL_DIR}/ca.pem -ca-key=${SSL_DIR}/ca-key.pem -config=${SSL_DIR}/ca-config.json -profile=kubernetes kubernetes-dashboard-csr.json | ${SSL_DIR}/cfssljson -bare kubernetes-dashboard) >/dev/null 2>&1
  # single-line base64 (-w 0) for embedding into the tls secret manifest
  local -r CERT=$(base64 -w 0 kubernetes-dashboard.pem)
  local -r KEY=$(base64 -w 0 kubernetes-dashboard-key.pem)
  popd >/dev/null 2>&1
  echo "ok"

  echo -n "Building kubernetes-dashboard deployment yaml files..."
  sed -i "s#\${DASHBOARD_CERT}#${CERT}#g" ${SRC_TMPDIR}/dashboard/05-dashboard-secret-certs.yaml
  sed -i "s#\${DASHBOARD_KEY}#${KEY}#g" ${SRC_TMPDIR}/dashboard/05-dashboard-secret-certs.yaml
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/dashboard/{11-metrics-scraper-deploy,21-dashboard-deploy}.yaml
  echo "ok"
  echo ""

  echo "Deploying kubernetes-dashboard in a few seconds..."
  # namespace name comes from the template's metadata
  # (fix: awk reads the file directly, no need to pipe through cat)
  local -r NS=$(awk '/\ \ name: /{print $2}' ${SRC_TMPDIR}/dashboard/00-dashboard-namespace.yaml)
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/dashboard/
  ${KUBECTL} -n ${NS} rollout status deploy kubernetes-dashboard
  echo ""

  echo "Viewing kubernetes-dashboard deploy status, view pods,service,ingress..."
  echo "--------------------------------------"
  # NOTE(review): assumes the namespace template declares "kubernetes-dashboard"
  ${KUBECTL} get -n kubernetes-dashboard -o wide pods,services,ingresses

  echo "------ Ending of kubernetes-dashboard deployment ------------"
  echo ""
}

# deploy mode: addons prometheus
# Note: only prometheus without grafana
function deploy_prometheus()
{
  # Skip unless the "prometheus" deploy mode was requested.
  [ "${MAP_DEPLOY["prometheus"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - prometheus..."
  echo "-------------------------------------------------------------"

  # substitute the private registry into every deploy/daemonset manifest
  echo -n "Preparing prometheus deployment yaml files..."
  cp -Rf ${TEMPLATE_DIR}/addons/prometheus ${SRC_TMPDIR}
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/prometheus/*/*-{deploy,daemonset}.yaml
  echo "ok"
  echo ""

  echo "deploying namespace..."
  ${KUBECTL} apply -f ${SRC_TMPDIR}/prometheus/00-namespace.yaml
  echo ""

  echo "deploying prometheus components in sequence..."
  # namespace name comes from the template's metadata
  # (fix: awk reads the file directly, no need to pipe through cat)
  local -r NS=$(awk '/\ \ name: /{print $2}' ${SRC_TMPDIR}/prometheus/00-namespace.yaml)
  # 10-operator
  echo "deploying operator..."
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/prometheus/10-operator
  ${KUBECTL} -n ${NS} rollout status deploy prometheus-operator
  echo ""
  # 20-kube-state-metrics
  echo "deploying kube-state-metrics..."
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/prometheus/20-kube-state-metrics
  ${KUBECTL} -n ${NS} rollout status deploy kube-state-metrics
  echo ""
  # 30-node-exporter
  echo "deploy node-exporter..."
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/prometheus/30-node-exporter
  ${KUBECTL} -n ${NS} rollout status daemonsets node-exporter
  echo ""
  # 40-alertmanager (brief sleep before polling the statefulset)
  echo "deploying alertmanager..."
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/prometheus/40-alertmanager
  sleep 2s
  ${KUBECTL} -n ${NS} rollout status statefulsets alertmanager-main
  echo ""
  # 50-adapter
  echo "deploying adapter..."
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/prometheus/50-adapter
  sleep 2s
  ${KUBECTL} -n ${NS} rollout status deploy prometheus-adapter
  echo ""
  # 60-prometheus
  echo "deploying prometheus..."
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/prometheus/60-prometheus
  sleep 2s
  ${KUBECTL} -n ${NS} rollout status statefulsets prometheus-k8s
  echo ""
  # 70-blackbox-exporter
  echo "deploying blackbox-exporter..."
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/prometheus/70-blackbox-exporter
  ${KUBECTL} -n ${NS} rollout status deploy blackbox-exporter
  echo ""

  echo "View prometheus components..."
  echo "--------------------------------------"
  # NOTE(review): assumes the namespace template declares "monitoring"
  ${KUBECTL} get -n monitoring daemonsets,statefulsets,pods,services,ingresses -o wide

  echo "------ Ending of prometheus deployment ------------"
  echo ""
}

# deploy mode: addons storage
## deploys one of rookceph, nfs or local-volume, selected by the DEFAULT_STORAGE environment variable.
function deploy_storage()
{
  # Skip unless the "storage" deploy mode was requested.
  [ "${MAP_DEPLOY["storage"]}" != "true" ] && return
  # Dispatch to deploy_rookceph / deploy_nfs / deploy_lvp based on
  # DEFAULT_STORAGE. A direct "deploy_${DEFAULT_STORAGE}" call replaces the
  # original eval: same behavior, but the variable's content is never
  # re-parsed by the shell (no command-injection surface).
  if ! declare -F "deploy_${DEFAULT_STORAGE}" >/dev/null; then
    echo "deploy_storage: unknown DEFAULT_STORAGE '${DEFAULT_STORAGE}'" >&2
    return 1
  fi
  "deploy_${DEFAULT_STORAGE}"
}

# deploy mode: addons rook-ceph, supplies block & filesystem storage
function deploy_rookceph()
{
  # prometheus must be installed before.
  # Only runs when rook-ceph was chosen as the default storage backend.
  [ "${DEFAULT_STORAGE}" != "rookceph" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - Rook Ceph..."
  echo "-------------------------------------------------------------"

  # OSD_COUNT: number of rook-ceph-osd deployments waited on below.
  # WAIT_SECONDS: "-i" makes bash evaluate 60*15 arithmetically -> 900,
  # i.e. the fixed-sleep loop below waits 15 minutes in 15s steps.
  local -ir OSD_COUNT=6
  local -ir WAIT_PERIOD=15
  local -ir WAIT_SECONDS=60*${WAIT_PERIOD}
  local -i i=0
  local -i wait_times=0

  # Render all rook manifests with the private registry address.
  echo -n "Building rook-ceph yaml..."
  cp -Rf ${TEMPLATE_DIR}/addons/rook ${SRC_TMPDIR}/
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/rook/*.yaml
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/rook/*/*.yaml
  echo "ok"

  # Apply in dependency order: namespace, CRDs, RBAC, PSP, then the operator.
  echo "Deploying rook-ceph, it will take a long time..."
  local -r NS=$(cat ${SRC_TMPDIR}/rook/00-rook-namespace.yaml | awk '/\ \ name: /{print $2}')
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/00-rook-namespace.yaml
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/rook/10-crds
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/rook/20-rbac
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/rook/21-podSecurityPolicy/
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/30-rook-operator.yaml
  ${KUBECTL} -n ${NS} rollout status deploy rook-ceph-operator
  echo ""

  # Monitoring objects for the cluster (prometheus addon must already exist).
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/rook/31-prometheus/
  sleep 2s
  ${KUBECTL} -n ${NS} rollout status statefulsets prometheus-rook
  echo ""

  # Create the ceph cluster, then sleep a fixed WAIT_SECONDS (printing a dot
  # per WAIT_PERIOD) before polling the mon/mgr/osd deployments.
  # NOTE(review): the wait is unconditional - it always burns the full 15
  # minutes even if the cluster comes up earlier.
  echo "creating rook-ceph cluster in a few minutes, please wait patiently..."
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/32-rook-cluster.yaml
  while [ ${wait_times} -lt ${WAIT_SECONDS} ]; do
    sleep "${WAIT_PERIOD}s"
    # arithmetic evaluation via the variable's -i attribute
    wait_times=${wait_times}+${WAIT_PERIOD}
    echo -n "."
  done
  echo ""
  ${KUBECTL} -n ${NS} rollout status deploy rook-ceph-mon-a
  ${KUBECTL} -n ${NS} rollout status deploy rook-ceph-mon-b
  ${KUBECTL} -n ${NS} rollout status deploy rook-ceph-mon-c
  ${KUBECTL} -n ${NS} rollout status deploy rook-ceph-mgr-a
  # wait for each OSD deployment rook-ceph-osd-0 .. rook-ceph-osd-5
  while [ ${i} -lt ${OSD_COUNT} ]; do
    ${KUBECTL} -n ${NS} rollout status deploy rook-ceph-osd-${i}
    i=${i}+1
  done
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/33-rook-ingress.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/34-rook-toolbox.yaml
  echo ""

  # Block pool + cephfs and their storageClasses; the cephfs MDS pair must
  # roll out before the filesystem is usable.
  echo "creating rbd, cephfs & storageClass..."
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/50-rook-ceph-block-pool.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/51-rook-ceph-block-storageClass.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/52-rook-cephfs.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/rook/53-rook-cephfs-storageClass.yaml
  ${KUBECTL} -n ${NS} rollout status deploy rook-ceph-mds-cephfs-a
  ${KUBECTL} -n ${NS} rollout status deploy rook-ceph-mds-cephfs-b
  echo ""

  echo "View rook-ceph deployments, daemonsets, pods , services & ingress, etc..."
  echo "--------------------------------------"
  ${KUBECTL} -n rook-ceph get deploy,daemonsets,pods,service,ingress
  echo ""

  echo "View rook-ceph cluster, rbd, cephfs, etc..."
  echo "--------------------------------------"
  ${KUBECTL} -n rook-ceph get cephcluster,cephblockpools,cephfilesystems,storageclasses
  echo ""

  echo "------ Ending of rook-ceph deployment ------------"
  echo ""
}

# deploy mode: addon local-volume-provisioner
function deploy_lvp()
{
  # Only runs when local-volume-provisioner was chosen as default storage.
  [ "${DEFAULT_STORAGE}" != "lvp" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - LocalVolumeProvisioner..."
  echo "-------------------------------------------------------------"

  local -r PROVISIONER_DIR="${APP_DIR}/provisioner"
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Making directories to mount for pv prepared..."
    # Create 17 directories (vol-00 .. vol-16), each self-bind-mounted and
    # registered in fstab so the provisioner daemonset discovers them as PVs.
    # (fix: the original escaped \${i} and \" literally, so it created a
    # single directory named 'vol-${i}' and malformed fstab entries)
    for i in {00..16}; do
      mkdir -p ${PROVISIONER_DIR}/vol-${i}
      (echo "${PROVISIONER_DIR}/vol-${i}  ${PROVISIONER_DIR}/vol-${i}  xfs  defaults,bind  0 0" | tee -a /etc/fstab) >/dev/null 2>&1
    done
    mount -a
    echo "ok"
  done

  echo "Deploying local-volume-provisioner in a few seconds..."
  # namespace name comes from the template's metadata
  # (fix: awk reads the file directly, no need to pipe through cat)
  local -r NS=$(awk '/\ \ name: /{print $2}' ${TEMPLATE_DIR}/addons/storage/00-namespace.yaml)
  ${KUBECTL} apply -f ${TEMPLATE_DIR}/addons/storage/00-namespace.yaml
  # apply in fixed order: storageClass, rbac, config, daemonset, service
  local -ar FILES=(
    01-local-provisioner-storageClass.yaml
    11-local-provisioner-rbac.yaml
    12-local-provisioner-config.yaml
    13-local-provisioner-daemonset.yaml
    14-local-provisioner-service.yaml
  )
  for file in ${FILES[@]}; do
    ${KUBECTL} apply -f ${TEMPLATE_DIR}/addons/storage/local-storage/${file}
  done
  ${KUBECTL} -n ${NS} rollout status daemonsets local-volume-provisioner
  echo ""

  echo "View LocalVolumeProvisioner components: configMap, pv, deploy, service..."
  ${KUBECTL} -n ${NS} get cm,daemonset,svc
  ${KUBECTL} get sc
  ${KUBECTL} get pv

  echo "------ Ending of LocalVolumeProvisioner deployment ------------"
  echo ""
}

# deploy mode: addons storageClass on NFS
function deploy_nfs()
{
  # Only runs when nfs was chosen as the default storage backend.
  [ "${DEFAULT_STORAGE}" != "nfs" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - nfs StorageClass..."
  echo "-------------------------------------------------------------"

  # render the provisioner deployment with server address, export path, registry
  echo -n "Building StorageClass Provisioner yaml..."
  cp -Rf ${TEMPLATE_DIR}/addons ${SRC_TMPDIR}
  sed -i "s#\${NFS_SERVER}#${NFS_SERVER}#g" ${SRC_TMPDIR}/addons/storage/nfs/02-nfs-deploy.yaml
  # (fix: path previously contained a stray double slash "nfs//02-...")
  sed -i "s#\${NFS_PATH}#${NFS_PATH}#g" ${SRC_TMPDIR}/addons/storage/nfs/02-nfs-deploy.yaml
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/addons/storage/nfs/02-nfs-deploy.yaml
  echo "ok"
  echo "NFS_SERVER=${NFS_SERVER}"
  echo "NFS_PATH=${NFS_PATH}"

  echo "Deploying StorageClass Provisioner in a few seconds..."
  # namespace name comes from the template's metadata
  # (fix: awk reads the file directly, no need to pipe through cat)
  local -r NS=$(awk '/\ \ name: /{print $2}' ${SRC_TMPDIR}/addons/storage/00-namespace.yaml)
  ${KUBECTL} apply -f ${SRC_TMPDIR}/addons/storage/00-namespace.yaml
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/addons/storage/nfs/
  ${KUBECTL} -n ${NS} rollout status deploy nfs-provisioner
  echo ""

  # view
  echo "Viewing storageClass provisioner pod.."
  echo "--------------------------------------"
  ${KUBECTL} -n ${NS} get deploy,pods
  echo ""
  ${KUBECTL} get storageclasses
  echo ""

  echo "------ Ending of StorageClass deployment ---------------------"
  echo ""
}

# deploy mode: addon grafana
function deploy_grafana()
{
  # Skip unless the "grafana" deploy mode was requested.
  [ "${MAP_DEPLOY["grafana"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - Grafana..."
  echo "-------------------------------------------------------------"

  # render the deployment template with the private registry address
  # (fix: messages previously misspelled "Granfa"/"granfa")
  echo -n "Build grafana deployment yaml files..."
  cp -Rf ${TEMPLATE_DIR}/addons/grafana ${SRC_TMPDIR}
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/grafana/11-grafana-deploy.yaml
  echo "ok"

  echo "Deploying grafana in a few seconds..."
  # grafana is installed into the monitoring namespace (see prometheus addon)
  local -r NS="monitoring"
  ${KUBECTL} apply -Rf ${SRC_TMPDIR}/grafana/
  ${KUBECTL} -n ${NS} rollout status deploy grafana
  echo ""

  echo "Viewing grafana components: configMap, pvc, deploy, service, ingress..."
  echo "--------------------------------------"
  ${KUBECTL} -n ${NS} get cm,pvc,deploy,svc,ingress
  echo ""

  echo "------ Ending of grafana deployment ------------"
  echo ""
}

# deploy mode: addon efk (elasticsearch, fluentd, kibana)
function deploy_efk()
{
  # Skip unless the "efk" deploy mode was requested.
  [ "${MAP_DEPLOY["efk"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Deploying kubernetes addons - efk..."
  echo "-------------------------------------------------------------"

  echo -n "Building efk deployment yaml files..."
  # docker data-root path substituted into the fluentd daemonset manifest
  local -r DOCKER_DATA_ROOT=/appdata/docker
  cp -Rf ${TEMPLATE_DIR}/addons/efk ${SRC_TMPDIR}
  sed -i "s#\${DOCKER_HUB}#${DOCKER_HUB}#g" ${SRC_TMPDIR}/efk/*-{deploy,stateful,daemonset}.yaml
  sed -i "s#\${DOCKER_DATA_ROOT}#${DOCKER_DATA_ROOT}#g" ${SRC_TMPDIR}/efk/23-fluentd-daemonset.yaml
  echo "ok"
  echo "DOCKER_DATA_ROOT=${DOCKER_DATA_ROOT}"

  # label a node so the fluentd daemonset schedules onto it, e.g.
  # kubectl label nodes k8s-xxx beta.kubernetes.io/fluentd-ds-ready=true
  # NOTE(review): node name "local" is hard-coded - confirm on multi-node clusters
  ${KUBECTL} label nodes local beta.kubernetes.io/fluentd-ds-ready=true

  echo "Deploying efk - namespace.."
  # namespace name comes from the template's metadata
  # (fix: awk reads the file directly, no need to pipe through cat)
  local -r NS=$(awk '/\ \ name: /{print $2}' ${SRC_TMPDIR}/efk/00-namespace.yaml)
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/00-namespace.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/09-prometheus-rbac.yaml
  echo ""

  echo "Deploying efk - elasticsearch..."
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/11-elasticsearch-configmap.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/12-elasticsearch-rbac.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/13-elasticsearch-stateful.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/14-elasticsearch-service.yaml
  #${KUBECTL} apply -f ${SRC_TMPDIR}/efk/15-elasticsearch-hpa.yaml
  ${KUBECTL} -n ${NS} rollout status statefulsets elasticsearch-logging
  echo ""

  echo "Deploying efk - fluentd-es..."
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/21-fluentd-configmap.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/22-fluentd-rbac.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/23-fluentd-daemonset.yaml
  ${KUBECTL} -n ${NS} rollout status daemonsets fluentd-es-v3.2.0
  echo ""

  echo "Deploying efk - kibana..."
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/31-kibana-deploy.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/32-kibana-service.yaml
  ${KUBECTL} apply -f ${SRC_TMPDIR}/efk/33-kibana-ingress.yaml
  ${KUBECTL} -n ${NS} rollout status deploy kibana-logging
  echo ""

  echo "Viewing efk components..."
  echo "--------------------------------------"
  # NOTE(review): assumes the namespace template declares "logging"
  ${KUBECTL} get -n logging daemonsets,statefulsets,pods,services,ingresses -o wide
  echo ""

  echo "------ Ending of efk deployment ------------"
  echo ""
}

# Undeploy init
function undeploy_init()
{
  # Skip unless the "init" undeploy mode was requested.
  [ "${MAP_UNDEPLOY["init"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Clear initialization directories, declares, alias, etc..."
  echo "-------------------------------------------------------------"

  local -r BASHRC_FILE="${HOME}/.bashrc"
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Clearing initialization..."
    # drop the declarations/aliases the deployment appended to ~/.bashrc
    sed -i "/^declare -a MASTERS.*$/d" ${BASHRC_FILE}
    sed -i "/^declare -a NODES.*$/d" ${BASHRC_FILE}
    sed -i "/^alias masterExec.*$/d" ${BASHRC_FILE}
    sed -i "/^alias nodeExec.*$/d" ${BASHRC_FILE}
    # remove every container and every image
    # (fix: the original awk programs contained backslash-escaped quotes
    # inside single quotes - an awk syntax error, so the cleanup silently
    # did nothing under the 2>&1 redirect)
    docker ps -a | awk 'NR>1{system("docker rm -f " $1)}' >/dev/null 2>&1
    docker images | awk 'NR>1{system("docker rmi " $1 ":" $2)}' >/dev/null 2>&1
    rm -rf /tmp/k8s-setup.*
    echo "ok"
  done

  echo "------ Ending of clear all ------------"
  echo ""
}

# Undeploy CA
function undeploy_ca()
{
  # Skip unless the "ca" undeploy mode was requested.
  [ "${MAP_UNDEPLOY["ca"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying CA..."
  echo "-------------------------------------------------------------"

  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Removing ca files..."
    # ":?" aborts if SSL_DIR is unset/empty, preventing a bare "rm -rf"
    rm -rf "${SSL_DIR:?SSL_DIR must be set}"
    echo "ok"
  done

  echo "------ Ending of undeploy CA ------------"
  echo ""
}

# Undeploy etcd: stop the unit and remove binaries, certs and data on masters.
function undeploy_etcd()
{
  [ "${MAP_UNDEPLOY["etcd"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying etcd service..."
  echo "-------------------------------------------------------------"

  local -r ETCD_DEST_DIR="etcd-v${ETCD_VER}-linux-amd64"

  local master
  for master in "${!MASTERS[@]}"; do
    echo -n "Running on ${master}(${MASTERS[${master}]}): Removing etcd service..."
    # Stop/disable the unit first; errors are ignored if it is not present.
    systemctl stop etcd >/dev/null 2>&1
    systemctl disable etcd >/dev/null 2>&1
    # Symlink, systemd unit, certificates, and profile snippet.
    rm -f /opt/etcd ${SERVICE_DIR}/etcd.service ${SSL_DIR}/etcd*.pem /etc/profile.d/etcd.sh
    # Unpacked release directory and the etcd data directory.
    rm -rf /opt/${ETCD_DEST_DIR} ${APP_DIR}/etcd
    echo "ok"
  done

  echo "------ Ending of undeploy etcd service ------------"
  echo ""
}

# Undeploy ha: remove haproxy + keepalived and their dependency packages on masters.
function undeploy_ha()
{
  [ "${MAP_UNDEPLOY["ha"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying HA services (haproxy & keepalived)..."
  echo "-------------------------------------------------------------"

  # Packages that were installed for the HA stack; removed as one transaction.
  local -ar HA_PKGS=(net-snmp-libs net-snmp-agent-libs lm_sensors-libs psmisc keepalived haproxy)

  local master
  for master in "${!MASTERS[@]}"; do
    echo -n "Running on ${master}(${MASTERS[${master}]}): Removing haproxy & keepalived service..."
    systemctl stop keepalived haproxy >/dev/null 2>&1
    systemctl disable keepalived haproxy >/dev/null 2>&1
    yum remove -y "${HA_PKGS[@]}" >/dev/null 2>&1
    rm -rf /etc/keepalived /etc/haproxy "${APP_DIR}/haproxy"
    echo "ok"
  done

  echo "------ Ending of undeploy HA services ------------"
  echo ""
}

# Undeploy k8s packages: remove bash completion hooks, sysctl/profile snippets
# and the kubernetes installation directories on every node.
function undeploy_k8s()
{
  [ "${MAP_UNDEPLOY["k8s"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying kubernetes packages..."
  echo "-------------------------------------------------------------"

  local -r BASHRC_FILE="${HOME}/.bashrc"

  # undeploy autoCompletion
  local node
  for node in "${!NODES[@]}"; do
    echo -n "Running on ${node}(${NODES[${node}]}): Removing autoCompletion...."
    # NOTE(review): this deletes every ~/.bashrc line that mentions kubectl,
    # not only the completion hook - confirm that is intended.
    sed -i "/kubectl.*$/d" "${BASHRC_FILE}"
    yum remove -y bash-completion >/dev/null 2>&1
    rm -f /etc/sysctl.d/kubernetes.conf /etc/profile.d/kubernetes.sh
    rm -rf "${APP_DIR}/k8s" /opt/k8s /opt/kubernetes
    echo "ok"
  done

  echo "------ Ending of undeploy kubernetes packages ------------"
  echo ""
}

# Undeploy kube-apiserver service: stop the unit and remove its config, unit
# file, tokens and certificates on masters; drop kubeconfig on all nodes.
function undeploy_apiserver()
{
  [ "${MAP_UNDEPLOY["apiserver"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying kube-apiserver service..."
  echo "-------------------------------------------------------------"

  # remove service
  for host in ${!MASTERS[@]}; do
    echo -n "Running on ${host}(${MASTERS[${host}]}): Removing kube-apiserver service..."
    systemctl stop kube-apiserver >/dev/null 2>&1
    systemctl disable kube-apiserver >/dev/null 2>&1
    # BUGFIX: was ${K8S_CONFIG_DIR}, which is never declared - the variable
    # defined by this script is K8S_CONF_DIR, so the conf file was skipped.
    rm -f ${K8S_CONF_DIR}/kube-apiserver.conf \
      ${SERVICE_DIR}/kube-apiserver.service \
      ${K8S_YAML_DIR}/audit-policy-min.yaml \
      ${K8S_TOKEN_DIR}/{bootstrap-token,basic-auth}.csv \
      ${SSL_DIR}/{kubernetes,admin,metrics-server}*.pem
    rm -rf ${APP_DIR}/k8s/apiserver
    echo "ok"
  done

  # remove config file on all nodes
  for host in ${!NODES[@]}; do
    echo -n "Running on ${host}(${NODES[${host}]}): Removing kubectl kubeconfig file..."
    rm -rf ${HOME}/.kube /root/.kube
    echo "ok"
  done

  echo "------ Ending of undeploy kube-apiserver service ------------"
  echo ""
}

# Undeploy kube-controller-manager service on every master.
function undeploy_controller()
{
  [ "${MAP_UNDEPLOY["controller"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying kube-controller-manager service..."
  echo "-------------------------------------------------------------"

  local master
  for master in "${!MASTERS[@]}"; do
    echo -n "Running on ${master}(${MASTERS[${master}]}): Removing kube-controller-manager service..."
    # Stop/disable the unit before removing its files.
    systemctl stop kube-controller-manager >/dev/null 2>&1
    systemctl disable kube-controller-manager >/dev/null 2>&1
    # Unit file, kubeconfig/conf, and certificates.
    rm -f "${SERVICE_DIR}/kube-controller-manager.service" \
      ${K8S_CONF_DIR}/kube-controller-manager.{kubeconfig,conf} \
      ${SSL_DIR}/kube-controller-manager*.pem
    rm -rf "${APP_DIR}/k8s/controller"
    echo "ok"
  done

  echo "------ Ending of undeploy kube-controller-manager service ------------"
  echo ""
}

# Undeploy kube-scheduler service on every master.
function undeploy_scheduler()
{
  [ "${MAP_UNDEPLOY["scheduler"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying kube-scheduler service..."
  echo "-------------------------------------------------------------"

  local master
  for master in "${!MASTERS[@]}"; do
    echo -n "Running on ${master}(${MASTERS[${master}]}): Removing kube-scheduler service..."
    # Stop/disable the unit before removing its files.
    systemctl stop kube-scheduler >/dev/null 2>&1
    systemctl disable kube-scheduler >/dev/null 2>&1
    # Unit file, kubeconfig/conf, and certificates.
    rm -f "${SERVICE_DIR}/kube-scheduler.service" \
      ${K8S_CONF_DIR}/kube-scheduler.{kubeconfig,conf} \
      ${SSL_DIR}/kube-scheduler*.pem
    rm -rf "${APP_DIR}/k8s/scheduler"
    echo "ok"
  done

  echo "------ Ending of undeploy kube-scheduler service ------------"
  echo ""
}

# Undeploy kubelet service and its network helper packages on every node.
function undeploy_kubelet()
{
  [ "${MAP_UNDEPLOY["kubelet"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying kubelet service..."
  echo "-------------------------------------------------------------"

  # Helper packages that deploy installed for kubelet networking.
  local -ar NET_PKGS=(libnetfilter_queue libnetfilter_cttimeout libnetfilter_cthelper conntrack-tools bridge-utils ipvsadm)

  local node
  for node in "${!NODES[@]}"; do
    echo -n "Running on ${node}(${NODES[${node}]}): Removing kubelet service..."
    systemctl stop kubelet >/dev/null 2>&1
    systemctl disable kubelet >/dev/null 2>&1
    # Config yaml, conf/kubeconfig (incl. bootstrap), unit file, certificates.
    rm -f ${K8S_YAML_DIR}/kubelet.yaml \
      ${K8S_CONF_DIR}/kubelet.{conf,kubeconfig} ${K8S_CONF_DIR}/bootstrap.kubeconfig \
      ${SERVICE_DIR}/kubelet.service \
      ${SSL_DIR}/kubelet*.*
    rm -rf "${APP_DIR}/k8s/kubelet"
    yum remove -y "${NET_PKGS[@]}" >/dev/null 2>&1
    echo "ok"
  done

  echo "------ Ending of undeploy kubelet service ------------"
  echo ""
}

# Undeploy kube-proxy service: stop the unit and remove its config, unit file
# and certificates on every node.
function undeploy_proxy()
{
  [ "${MAP_UNDEPLOY["proxy"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying kubelet-proxy service..."
  echo "-------------------------------------------------------------"

  for host in ${!NODES[@]}; do
    # BUGFIX: progress message previously said "Remving".
    echo -n "Running on host ${host}(${NODES[${host}]}): Removing kube-proxy service..."
    systemctl stop kube-proxy >/dev/null 2>&1
    systemctl disable kube-proxy >/dev/null 2>&1
    # Config yaml, kubeconfig/conf, unit file, and certificates.
    rm -f ${K8S_YAML_DIR}/kube-proxy.yaml \
      ${K8S_CONF_DIR}/kube-proxy.{kubeconfig,conf} \
      ${SERVICE_DIR}/kube-proxy.service \
      ${SSL_DIR}/kube-proxy*.pem
    rm -rf ${APP_DIR}/k8s/proxy
    echo "ok"
  done

  echo "------ Ending of undeploy kubelet-proxy service ------------"
  echo ""
}

# Undeploy addons calico: delete the daemonset, then wipe CNI files per node.
function undeploy_calico()
{
  [ "${MAP_UNDEPLOY["calico"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying calico (cni-plugin)..."
  echo "-------------------------------------------------------------"

  echo "removing calico daemonset..."
  ${KUBECTL} delete -f "${TEMPLATE_DIR}/addons/calico/calico.yaml"
  echo ""

  local node
  for node in "${!NODES[@]}"; do
    echo -n "Running on ${node}(${NODES[${node}]}): Removing cni packages, including calico.sh, conf, cfg files..."
    # Config, binaries, state directory, and the profile snippet.
    rm -rf /etc/calico /etc/cni /opt/calico /opt/cni /var/lib/calico
    rm -f /etc/profile.d/calico.sh
    echo "ok"
  done
  echo ""

  echo "------ Ending of undeploy calico (cni-plugin) ------------"
  echo ""
}


# Undeploy addons coreDNS: delete its manifests in reverse lexical order.
function undeploy_coredns()
{
  [ "${MAP_UNDEPLOY["coredns"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying coreDNS..."
  echo "-------------------------------------------------------------"

  # Globs expand already sorted; iterate backwards instead of parsing `ls`
  # output, which breaks on unusual file names.
  local -a manifests=(${TEMPLATE_DIR}/addons/coredns/*.yaml)
  local -i idx
  for (( idx=${#manifests[@]}-1; idx>=0; idx-- )); do
    ${KUBECTL} delete -f "${manifests[${idx}]}"
  done
  echo ""

  echo "------ Ending of undeploy coreDNS ------------"
  echo ""
}

# Undeploy addons metrics-server: delete its manifests in reverse lexical order.
function undeploy_metrics()
{
  [ "${MAP_UNDEPLOY["metrics"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying metrics-server..."
  echo "-------------------------------------------------------------"

  # Globs expand already sorted; iterate backwards instead of parsing `ls`.
  local -a manifests=(${TEMPLATE_DIR}/addons/metrics-server/*.yaml)
  local -i idx
  for (( idx=${#manifests[@]}-1; idx>=0; idx-- )); do
    ${KUBECTL} delete -f "${manifests[${idx}]}"
  done
  echo ""

  echo "------ Ending of undeploy metrics-server ------------"
  echo ""
}

# Undeploy addons ingress: delete its manifests in reverse lexical order.
function undeploy_ingress()
{
  [ "${MAP_UNDEPLOY["ingress"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying ingress..."
  echo "-------------------------------------------------------------"

  # Globs expand already sorted; iterate backwards instead of parsing `ls`.
  local -a manifests=(${TEMPLATE_DIR}/addons/ingress/*.yaml)
  local -i idx
  for (( idx=${#manifests[@]}-1; idx>=0; idx-- )); do
    ${KUBECTL} delete -f "${manifests[${idx}]}"
  done
  echo ""

  echo "------ Ending of undeploy ingress ------------"
  echo ""
}

# Undeploy addons kubernetes-dashboard: delete its manifests in reverse lexical order.
function undeploy_dashboard()
{
  [ "${MAP_UNDEPLOY["dashboard"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying kubernetes-dashboard..."
  echo "-------------------------------------------------------------"

  # Globs expand already sorted; iterate backwards instead of parsing `ls`.
  local -a manifests=(${TEMPLATE_DIR}/addons/dashboard/*.yaml)
  local -i idx
  for (( idx=${#manifests[@]}-1; idx>=0; idx-- )); do
    ${KUBECTL} delete -f "${manifests[${idx}]}"
  done
  echo ""

  echo "------ Ending of undeploy kubernetes-dashboard ------------"
  echo ""
}

# Undeploy addons prometheus, include blackbox, prometheus, adapter, alertmanager,
# node-exporter, etc.  Components are removed in reverse deployment order.
function undeploy_prometheus()
{
  [ "${MAP_UNDEPLOY["prometheus"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying Prometheus..."
  echo "-------------------------------------------------------------"

  local -ra DIRECTORY=(
    70-blackbox-exporter
    60-prometheus
    50-adapter
    40-alertmanager
    30-node-exporter
    20-kube-state-metrics
    10-operator
  )
  # BUGFIX: FILES was declared `local -ar` inside the loop; on the second
  # iteration the assignment to a readonly local fails and the function
  # never got past the first component.  Use a plain local array instead,
  # filled from a glob (no `ls` parsing) and walked in reverse.
  local directory
  local -a manifests
  local -i idx
  for directory in "${DIRECTORY[@]}"; do
    echo "undeploying ${directory} in 5 seconds..."
    manifests=(${TEMPLATE_DIR}/addons/prometheus/${directory}/*.yaml)
    for (( idx=${#manifests[@]}-1; idx>=0; idx-- )); do
      ${KUBECTL} delete -f "${manifests[${idx}]}"
    done
    echo ""
  done

  echo "removing namespace..."
  ${KUBECTL} delete namespace monitoring
  echo ""

  echo "------ Ending of undeploy prometheus ------------"
  echo ""
}

# Undeploy rook-ceph manually.
# Order matters: tools/ingress, storageClasses, filesystems, block pools,
# the cephcluster CR (with destroy-data cleanup policy), prometheus, then the
# operator/rbac/crds/namespace, and finally wipe host data and OSD devices.
function undeploy_rookceph()
{
  [ "${MAP_UNDEPLOY["rookceph"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying Rook-Ceph..."
  echo "-------------------------------------------------------------"

  # To ensure pvc has been deleted first.
  # delete block & file artifacts, cephcluster crd, operator & related resources in sequence, zap data on hosts at last.
  local -r WORK_DIR="${TEMPLATE_DIR}/addons/rook"

  # Deleting ceph tools (only if the deployment exists).
  echo "deleting ceph tools..."
  if ${KUBECTL} -n rook-ceph get deploy rook-ceph-tools >/dev/null 2>&1; then
    ${KUBECTL} -n rook-ceph delete deploy rook-ceph-tools
  fi
  echo ""

  # Deleting ceph ingress (only if it exists).  BUGFIX: message said "deleteing".
  echo "deleting ceph ingress..."
  if ${KUBECTL} -n rook-ceph get ingress rook-ceph-mgr-dashboard >/dev/null 2>&1; then
    ${KUBECTL} -n rook-ceph delete ingress rook-ceph-mgr-dashboard
  fi
  echo ""

  # Deleting every storageClass backed by a rook-ceph provisioner.
  echo "deleting storageClass created on rook-ceph..."
  local -i RET
  RET=$(${KUBECTL} get sc | awk '/rook-ceph\./{print $1}' | wc -l)
  if [ ${RET} -gt 0 ]; then
    ${KUBECTL} get sc | awk '/rook-ceph\./{print $1}' | xargs -I {} ${KUBECTL} delete sc {}
  fi
  echo ""

  # Deleting cephfilesystem (count includes the header, hence -gt 1).
  echo "deleting ceph filesystems..."
  RET=$(${KUBECTL} -n rook-ceph get cephfilesystem | wc -l)
  if [ ${RET} -gt 1 ]; then
    ${KUBECTL} -n rook-ceph get cephfilesystem | awk 'NR>1{print $1}' | xargs -I {} ${KUBECTL} -n rook-ceph delete cephfilesystem {}
  fi
  echo ""

  # Deleting cephblockpool (count includes the header, hence -gt 1).
  echo "deleting ceph block pool..."
  RET=$(${KUBECTL} -n rook-ceph get cephblockpools | wc -l)
  if [ ${RET} -gt 1 ]; then
    ${KUBECTL} -n rook-ceph get cephblockpools | awk 'NR>1{print $1}' | xargs -I {} ${KUBECTL} -n rook-ceph delete cephblockpools {}
  fi
  echo ""

  # Deleting ceph-cluster on crds.
  echo "deleting ceph cluster on crds in 60 seconds..."
  local -r CLUSTER_NAME=$(${KUBECTL} -n rook-ceph get cephcluster | awk 'NR>1{print $1}')
  # BUGFIX: was ${CLUSTERNAME} (undefined variable) - VARLIBROOK was always
  # computed from an empty cluster name.  Column 2 is the dataDirHostPath
  # (typically /var/lib/rook) - TODO confirm against the CRD printer columns.
  local -r VARLIBROOK=$(${KUBECTL} -n rook-ceph get cephcluster ${CLUSTER_NAME} | awk 'NR>1{print $2}')
  # BUGFIX: unquoted `[ -n ${CLUSTER_NAME} ]` collapses to `[ -n ]` when the
  # name is empty, which is always true; quote the expansion.
  if [ -n "${CLUSTER_NAME}" ]; then
    echo "delete ceph cluster ${CLUSTER_NAME}..."
    ${KUBECTL} -n rook-ceph patch cephcluster ${CLUSTER_NAME} --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}'
    ${KUBECTL} -n rook-ceph delete cephcluster ${CLUSTER_NAME}
    # Give the cleanup jobs time to run before tearing down the operator.
    sleep 60s
  fi
  echo ""

  # remove prometheus
  echo "removing prometheus for rook-ceph..."
  ${KUBECTL} delete -Rf ${WORK_DIR}/31-prometheus/
  echo ""

  # delete operator pod, crds, namespace.
  echo "deleting rook-ceph operator, crds..."
  ${KUBECTL} delete -f ${WORK_DIR}/30-rook-operator.yaml
  ${KUBECTL} delete -Rf ${WORK_DIR}/21-podSecurityPolicy/
  ${KUBECTL} delete -Rf ${WORK_DIR}/20-rbac/
  ${KUBECTL} delete -Rf ${WORK_DIR}/10-crds/
  ${KUBECTL} delete ns rook-ceph
  echo ""

  # delete the directory /var/lib/rook and wipe osd drives.
  local -ar OSDS=("/dev/sdb")
  for host in ${!NODES[@]}; do
    echo "Running on host ${host}(${NODES[${host}]}): Wipe rook-ceph directory & osds..."
    # Guard against an empty path before a recursive delete.
    [ -n "${VARLIBROOK}" ] && rm -rf "${VARLIBROOK}"
    # BUGFIX: `of=\${osd}` passed the literal string '${osd}' to dd (leftover
    # ssh escaping), so the device was never zapped; expand the variable.
    for osd in ${OSDS[@]}; do dd if=/dev/zero of=${osd} bs=1M count=100 oflag=direct,dsync; done
    partprobe ${OSDS[@]}
    echo "ok"
    echo ""
  done

  echo "------ Ending of undeploy rook-ceph ------------"
  echo ""
}

# Undeploy LocalVolumeProvisioner: delete all PVCs, remove the provisioner
# components in reverse creation order, then unmount and remove the backing
# directories on the local host.
function undeploy_lvp()
{
  [ "${MAP_UNDEPLOY["lvp"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying LocalVolumeProvisioner..."
  echo "-------------------------------------------------------------"

  echo "Remove all pvc(s) ..."
  # BUGFIX: skip the header row (NR>1); the original also built a delete
  # command from the "NAMESPACE NAME ..." header line.
  ${KUBECTL} get pvc -A | awk 'NR>1{cmd="kubectl -n "$1" delete pvc "$2; system(cmd)}'

  echo "remove components ..."
  local -ar FILES=(
    14-local-provisioner-service.yaml
    13-local-provisioner-daemonset.yaml
    12-local-provisioner-config.yaml
    11-local-provisioner-rbac.yaml
    01-local-provisioner-storageClass.yaml
  )
  local file
  for file in ${FILES[@]}; do
    ${KUBECTL} delete -f ${TEMPLATE_DIR}/addons/storage/local-storage/${file}
  done
  ${KUBECTL} delete -f ${TEMPLATE_DIR}/addons/storage/00-namespace.yaml
  echo ""

  echo -n "Remove directories mounted..."
  for dir in /appdata/provisioner/*; do
    # Skip the literal glob pattern when the directory is empty or missing.
    [ -e "${dir}" ] && umount ${dir}
  done
  rm -Rf /appdata/provisioner >/dev/null 2>&1
  # Drop the fstab entries that kept the bind mounts across reboots.
  sed -i "/^\/appdata\/provisioner.*$/d" /etc/fstab >/dev/null 2>&1
  echo ""

  echo "------ Ending of undeploy LocalVolumeProvisioner ------------"
  echo ""
}

# Undeploy grafana: delete its manifests in reverse lexical order.
function undeploy_grafana()
{
  [ "${MAP_UNDEPLOY["grafana"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  # BUGFIX: message previously said "granfa".
  echo "Undeploying grafana..."
  echo "-------------------------------------------------------------"

  # Globs expand already sorted; iterate backwards instead of parsing `ls`.
  local -a manifests=(${TEMPLATE_DIR}/addons/grafana/*.yaml)
  local -i idx
  for (( idx=${#manifests[@]}-1; idx>=0; idx-- )); do
    ${KUBECTL} delete -f "${manifests[${idx}]}"
  done
  echo ""

  echo "------ Ending of undeploy grafana ------------"
  echo ""
}

# undeploy efk: remove kibana, fluentd, elasticsearch, then namespace-level
# objects, in reverse of the deployment order.
function undeploy_efk()
{
  [ "${MAP_UNDEPLOY["efk"]}" != "true" ] && return
  echo "-------------------------------------------------------------"
  echo "Undeploying efk..."
  echo "-------------------------------------------------------------"

  local -r EFK_DIR="${TEMPLATE_DIR}/addons/efk"
  local manifest

  echo "Undeploying efk - remove kibana..."
  for manifest in 33-kibana-ingress 32-kibana-service 31-kibana-deploy; do
    ${KUBECTL} delete -f "${EFK_DIR}/${manifest}.yaml"
  done
  echo ""

  echo "Undeploying efk - remove fluentd..."
  for manifest in 23-fluentd-daemonset 22-fluentd-rbac 21-fluentd-configmap; do
    ${KUBECTL} delete -f "${EFK_DIR}/${manifest}.yaml"
  done
  echo ""

  echo "Undeploying efk - remove elasticsearch..."
  for manifest in 15-elasticsearch-hpa 14-elasticsearch-service 13-elasticsearch-stateful 12-elasticsearch-rbac 11-elasticsearch-configmap; do
    ${KUBECTL} delete -f "${EFK_DIR}/${manifest}.yaml"
  done
  echo ""

  echo "Undeploying efk - remove namespace..."
  for manifest in 09-prometheus-rbac 00-namespace; do
    ${KUBECTL} delete -f "${EFK_DIR}/${manifest}.yaml"
  done
  echo ""

  echo "------ Ending of undeploy efk ------------"
  echo ""
}

# Remove the local staging directory and the per-node temp directory.
function clear_temporary()
{
  echo "-------------------------------------------------------------"
  echo "Clearing temporary directories & files..."
  echo "-------------------------------------------------------------"

  echo -n "Clearing local source temporary directories & files..."
  rm -rf "${SRC_TMPDIR}" >/dev/null 2>&1
  echo "ok"

  local node
  for node in "${!NODES[@]}"; do
    echo -n "Running on host ${node}(${NODES[${node}]}): Clearing temporary directories & files..."
    rm -rf "${TMP_DIR}" >/dev/null 2>&1
    echo "ok"
  done

  echo "------ Ending of clearing temporary ------------"
  echo ""
}
# ------ function end -----------------------------------------------

# ------ variables declaration --------------------------------------
set -e
# Directory this script lives in; used to locate the template tree below.
declare -r SHELL_DIR="$(cd "$(dirname "${0}")" && pwd)"

# Host maps: name -> IP.  WORKERS is empty in this setup; NODES is filled
# by build_nodes (defined earlier in this file) from MASTERS + WORKERS.
declare -A MASTERS
MASTERS[ghxt-app]="172.18.3.5"
declare -A WORKERS
declare -A NODES
build_nodes # combine masters and workers into nodes

# apiserver endpoint; helper functions are defined earlier in this file.
declare -r DEFAULT_VIP="172.18.3.5"
declare -ri MASTER_COUNT=${#MASTERS[@]}  # to check master cluster or single
declare -r APISERVER_IP=$(build_apiserver_ip)
declare -r APISERVER_PORT=$(build_apiserver_port)

# Installation layout on the hosts.
declare -r BASE_SRC_DIR="/app/data"
declare -r APP_DIR="/app"
declare -r SSL_DIR="/app/ssl"
declare -r RPM_DIR="${BASE_SRC_DIR}/rpm"
declare -r TEMPLATE_DIR="${SHELL_DIR}/template"
# mktemp -u only generates names (dry run); the directories are created in main.
declare -r SRC_TMPDIR="$(mktemp -du /tmp/k8s-setup.XXXXX)"
declare -r TMP_DIR="$(mktemp -du /tmp/k8s-setup.XXXXX)"

# First three octets of the service (cluster) and pod network ranges.
declare -r CLUSTER_IP_SEGMENT="100.0.0"
declare -r POD_IP_SEGMENT="200.0.0"

declare -r NFS_SERVER="172.18.3.5"
declare -r NFS_PATH="/app/nfs"

# systemd unit directory and kubernetes file layout used by (un)deploy functions.
declare -r SERVICE_DIR="/etc/systemd/system"
declare -r K8S_BASE_DIR="/app/k8s"
declare -r K8S_BIN_DIR="${K8S_BASE_DIR}/bin"
declare -r K8S_CONF_DIR="${K8S_BASE_DIR}/conf"
declare -r K8S_YAML_DIR="${K8S_BASE_DIR}/yaml"
declare -r K8S_TOKEN_DIR="${K8S_BASE_DIR}/token"

declare -r KUBECTL="${K8S_BIN_DIR}/kubectl"
declare -r ETCD_VER="3.5.2"
declare -r DOCKER_HUB="docker-hub:5000"

# Modify it with your card.
declare -r NETWORK_CARD="bond0"

# etcd cluster/endpoint strings; helpers are defined earlier in this file.
declare -r ETCD_CLUSTER=$(build_etcd_initialCluster)
declare -r ETCD_ENDPOINTS=$(build_etcd_endpoints)

# component flags
# Deploy walks COMPONENTS in order; undeploy walks it in reverse (see main).
declare -ar COMPONENTS=(init ca etcd ha k8s apiserver controller scheduler kubelet proxy calico coredns metrics ingress dashboard storage prometheus grafana efk)
declare -ar STORAGE_COMPONENTS=(rookceph nfs lvp)
declare -r DEFAULT_STORAGE="nfs"
# component name -> "true" when selected; filled by parse_shell_args.
declare -A MAP_DEPLOY
MAP_DEPLOY["init"]="true"
declare -A MAP_UNDEPLOY

# echo color
declare -r COLOR_RED="\033[31m"
declare -r COLOR_GREEN="\033[32m"
declare -r COLOR_NORMAL="\033[0m"

# temporary declare
declare -r GROUP=$(id -gn)

# ------ variables end ----------------------------------------------

# ------  main ---------------------------------------
# 1. check options, set install component flags
# BUGFIX: quote "$@" so arguments containing spaces reach the parser intact.
parse_shell_args "$@"
valid_execnode
show_vars

# mkdir temporary directory
# NOTE(review): the loop body runs locally once per NODES entry; presumably
# these commands were once wrapped in ssh - confirm.
echo -n "Making temporary directories..."
for host in ${!NODES[@]}; do
  rm -rf /tmp/k8s-setup*
  mkdir -p ${TMP_DIR}
done

# prepare local temporary directory
mkdir -p ${SRC_TMPDIR}
echo "ok"
echo ""

# run undeploy first, because maybe redeploy needed.
# Undeploys run with -e off so a missing resource does not abort the script.
set +e
declare comp
for (( i=${#COMPONENTS[@]}-1; i>=0; i-- )); do
  comp=${COMPONENTS[$i]}
  if [ "${MAP_UNDEPLOY[${comp}]}" == "true" ]; then
    # Direct invocation; eval is unnecessary (and risky) for a computed name.
    "undeploy_${comp}"
  fi
done
set -e
# run deploy in declaration order
for c in ${COMPONENTS[@]} ; do
  if [ "${MAP_DEPLOY[$c]}" == "true" ]; then
    "deploy_$c"
  fi
done

# clear temporary directory & files.
clear_temporary

# To show dashboard token if dashboard deployed this time.
if [ "${MAP_DEPLOY["dashboard"]}" = "true" ]; then
  echo ""
  echo "To view kubernetes-dashboard web page, visit: https://dashboard.local.ghxt.com"
  echo "To get token, run command below:"
  echo " kubectl -n kubernetes-dashboard describe secret \$(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print \$1}')"
  echo ""
  ${KUBECTL} -n kubernetes-dashboard describe secret $(${KUBECTL} -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
fi
# To show rook-ceph password if rook-ceph deployed this time.
if [ "${MAP_DEPLOY["rookceph"]}" = "true" ]; then
  echo ""
  echo "View rook-ceph dashboard password."
  echo "To get token, run command below:"
  echo " kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath=\"{['data']['password']}\" | base64 --decode && echo"
  echo -n " rook-ceph password: "
  ${KUBECTL} -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo
fi

echo ""
echo "--------------------------------------"
echo "Deploy end."