#!/bin/bash
###################################################################
# Script Name    : kubeeasy
# Version:       : v1.0.0
# Description    : Install kubernetes cluster using kubeadm.
# Create Date    : 2021-08-14
# Author         : KongYu
# Email          : 2385569970@qq.com
###################################################################

[[ -n $DEBUG ]] && set -x
set -o errtrace # Make sure any error trap is inherited
set -o nounset  # Disallow expansion of unset variables
set -o pipefail # Use last non-zero exit code in a pipeline

######################################################################################################
# environment configuration
######################################################################################################

# Component versions (each may be overridden via an environment variable of the same name)
KUBE_VERSION="${KUBE_VERSION:-1.21.3}"
FLANNEL_VERSION="${FLANNEL_VERSION:-0.14.0}"
METRICS_SERVER_VERSION="${METRICS_SERVER_VERSION:-0.5.0}"
CALICO_VERSION="${CALICO_VERSION:-3.19.1}"
LONGHORN_VERSION="${LONGHORN_VERSION:-1.1.1}"
KUBERNETES_DASHBOARD_VERSION="${KUBERNETES_DASHBOARD_VERSION:-2.3.1}"
KUBESPHERE_VERSION="${KUBESPHERE_VERSION:-3.1.0}"

# Cluster configuration (DNS domain, apiserver endpoint, subnets, addons)
KUBE_DNSDOMAIN="${KUBE_DNSDOMAIN:-cluster.local}"
KUBE_APISERVER="${KUBE_APISERVER:-apiserver.$KUBE_DNSDOMAIN}"
KUBE_POD_SUBNET="${KUBE_POD_SUBNET:-10.244.0.0/16}"
KUBE_SERVICE_SUBNET="${KUBE_SERVICE_SUBNET:-10.96.0.0/16}"
KUBE_IMAGE_REPO="${KUBE_IMAGE_REPO:-swr.cn-north-1.myhuaweicloud.com/kongyu/kubernetes}"
KUBE_NETWORK="${KUBE_NETWORK:-calico}"
KUBE_STORAGE="${KUBE_STORAGE:-nfs}"
KUBE_UI="${KUBE_UI:-kuboard}"
KUBE_VIRT=${KUBE_VIRT:-kubevirt}
KUBE_ADDON="${KUBE_ADDON:-metrics-server}"
KUBE_FLANNEL_TYPE="${KUBE_FLANNEL_TYPE:-vxlan}"
KUBE_CRI="${KUBE_CRI:-docker}"
KUBE_CRI_VERSION="${KUBE_CRI_VERSION:-latest}"
KUBE_CRI_ENDPOINT="${KUBE_CRI_ENDPOINT:-/var/run/dockershim.sock}"
DOCKER_DATA_ROOT="${DOCKER_DATA_ROOT:-/data/docker}"

# Master and worker node addresses, comma separated
MASTER_NODES="${MASTER_NODES:-}"
WORKER_NODES="${WORKER_NODES:-}"
HOST="${HOST:-}"

# High-availability configuration (VIP and fronting apiserver port)
VIRTUAL_IP=${VIRTUAL_IP:-}
KUBE_APISERVER_PORT=${KUBE_APISERVER_PORT:-8443}

# Node on which management operations are performed (127.0.0.1 = locally)
MGMT_NODE="${MGMT_NODE:-127.0.0.1}"

# SSH connection settings for the nodes
SSH_USER="${SSH_USER:-root}"
SSH_PASSWORD="${SSH_PASSWORD:-000000}"
SSH_PRIVATE_KEY="${SSH_PRIVATE_KEY:-}"
SSH_PORT="${SSH_PORT:-22}"
SUDO_USER="${SUDO_USER:-root}"

# Hostname prefix used when naming nodes
HOSTNAME_PREFIX="${HOSTNAME_PREFIX:-k8s}"

# Script settings (paths, ssh flags, summary accumulators, offline mode, proxies)
TMP_DIR="/tmp"
LOG_FILE="/var/log/kubeinstall.log"
SSH_OPTIONS="-o ConnectTimeout=600 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q"
ERROR_INFO="\n\033[31mERROR Summary: \033[0m\n  "
ACCESS_INFO="\n\033[32mACCESS Summary: \033[0m\n  "
COMMAND_OUTPUT=""
SCRIPT_PARAMETER="$*"
OFFLINE_DIR="${TMP_DIR}/kubeeasy"
OFFLINE_TAG="${OFFLINE_TAG:-0}"
OFFLINE_FILE=""
DEPEND_FILE="${DEPEND_FILE:-$(pwd)/dependencies/centos-7-rpms.tar.gz}"
OS_SUPPORT="centos7 centos8"
GITHUB_PROXY="${GITHUB_PROXY:-https://gh.lework.workers.dev/}"
GCR_PROXY="${GCR_PROXY:-k8sgcr.lework.workers.dev}"
SKIP_UPGRADE_PLAN=${SKIP_UPGRADE_PLAN:-false}
UPGRADE_KERNEL_TAG="${UPGRADE_KERNEL_TAG:-0}"
HELP_TAG="${HELP_TAG:-0}"

# Print the error/access summaries and log location on exit or common signals.
trap trap::info 1 2 3 15 EXIT

######################################################################################################
# function
######################################################################################################

function trap::info() {
  # Signal/exit handler: print the accumulated ERROR/ACCESS summaries and
  # the log file location, then exit. The numeric guards compare against the
  # length of the empty summary headers so that the summaries are only shown
  # once entries have actually been appended by log::error / log::access.

  [[ ${#ERROR_INFO} -gt 37 ]] && echo -e "$ERROR_INFO"
  [[ ${#ACCESS_INFO} -gt 38 ]] && echo -e "$ACCESS_INFO"
  [ -f "$LOG_FILE" ] && echo -e "\n  See detailed log >> $LOG_FILE \n"
  # Clear the EXIT trap so the handler does not fire a second time.
  trap '' EXIT
  exit
}

function log::error() {
  # Error log: append the line to the ERROR_INFO summary, then print it
  # to stdout and append it to $LOG_FILE.

  local line
  line="[$(date +'%Y-%m-%d %H:%M:%S')] \033[31mERROR:   \033[0m$*"
  ERROR_INFO+="${line}\n  "
  echo -e "${line}" | tee -a "$LOG_FILE"
}

function log::info() {
  # Informational log: timestamped green INFO line to stdout and $LOG_FILE.

  local stamp
  stamp="$(date +'%Y-%m-%d %H:%M:%S')"
  printf "[%s] \033[32mINFO:    \033[0m%s\n" "${stamp}" "$*" | tee -a "$LOG_FILE"
}

function log::warning() {
  # Warning log: timestamped yellow WARNING line to stdout and $LOG_FILE.

  local stamp
  stamp="$(date +'%Y-%m-%d %H:%M:%S')"
  printf "[%s] \033[33mWARNING: \033[0m%s\n" "${stamp}" "$*" | tee -a "$LOG_FILE"
}

function log::access() {
  # Access log: append the message to the ACCESS_INFO summary, then emit a
  # normal INFO line to stdout and $LOG_FILE.

  ACCESS_INFO+="$*\n  "
  local stamp
  stamp="$(date +'%Y-%m-%d %H:%M:%S')"
  printf "[%s] \033[32mINFO:    \033[0m%s\n" "${stamp}" "$*" | tee -a "$LOG_FILE"
}

function log::exec() {
  # Execution log: blue EXEC line written only to $LOG_FILE (not stdout).

  local stamp
  stamp="$(date +'%Y-%m-%d %H:%M:%S')"
  printf "[%s] \033[34mEXEC:    \033[0m%s\n" "${stamp}" "$*" >>"$LOG_FILE"
}

function utils::version_to_number() {
  # Convert a dotted version string (up to four numeric parts) into a
  # fixed-width comparable number, e.g. 1.21.3 -> 1021003000.

  printf '%s\n' "$*" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'
}

function utils::retry {
  # Retry a command with exponential backoff (1s, 2s, 4s, ...).
  #
  # $1 - maximum number of failed attempts before giving up
  # $@ - command line, re-evaluated with eval on every attempt
  #
  # Returns 0 as soon as the command succeeds, otherwise the command's
  # last exit code once the retries are exhausted.
  #
  # FIX: the former `exit` and `wait` variables were not declared local,
  # leaking into the global scope and shadowing meaningful names.

  local retries=$1
  shift

  local count=0
  local rc wait_seconds
  until eval "$*"; do
    rc=$?
    wait_seconds=$((2 ** count))
    count=$((count + 1))
    if [ "$count" -lt "$retries" ]; then
      echo "Retry $count/$retries exited $rc, retrying in $wait_seconds seconds..."
      sleep "$wait_seconds"
    else
      echo "Retry $count/$retries exited $rc, no more retries left."
      return $rc
    fi
  done
  return 0
}

function utils::quote() {
  # Shell-quote the arguments so they can safely be embedded in a command
  # string that is later passed through eval/ssh (see command::exec).

  # shellcheck disable=SC2046
  if [ $(echo "$*" | tr -d "\n" | wc -c) -eq 0 ]; then
    # Empty input quotes to an explicit empty string.
    echo "''"
  elif [ $(echo "$*" | tr -d "[a-z][A-Z][0-9]:,.=~_/\n-" | wc -c) -gt 0 ]; then
    # Input contains characters outside the "safe" set: slurp all lines into
    # the hold space, escape embedded single quotes as '"'"', then wrap the
    # whole text in single quotes.
    printf "%s" "$*" | sed -e "1h;2,\$H;\$!d;g" -e "s/'/\'\"\'\"\'/g" | sed -e "1h;2,\$H;\$!d;g" -e "s/^/'/g" -e "s/$/'/g"
  else
    # Only safe characters present: return the input unchanged.
    echo "$*"
  fi
}

function utils::download_file() {
  # Download a file onto the management node, optionally unzipping it.
  #
  # $1 - source URL
  # $2 - destination path
  # $3 - pass "unzip" to extract the downloaded archive into the
  #      destination directory (anything else skips extraction)
  #
  # Returns the exit code of the remote download command.

  local url="$1"
  local dest="$2"
  local unzip_tag="${3:-1}"

  local dest_dirname
  dest_dirname=$(dirname "$dest")
  local filename
  filename=$(basename "$dest")

  # FIX: the log messages previously read "download $(unknown) file", which
  # attempted to run a non-existent `unknown` command; the computed
  # ${filename} (otherwise unused) was clearly intended.
  log::info "[download]" "download ${filename} file"
  command::exec "${MGMT_NODE}" "
    set -e
    if [ ! -f \"${dest}\" ]; then
      [ ! -d \"${dest_dirname}\" ] && mkdir -pv \"${dest_dirname}\" 
      wget --timeout=10 --waitretry=3 --tries=5 --retry-connrefused \"${url}\" -O \"${dest}\"
      if [[ \"${unzip_tag}\" == \"unzip\" ]]; then
        command -v unzip 2>/dev/null || yum install -y unzip
        unzip -o \"${dest}\" -d \"${dest_dirname}\"
      fi
    else
      echo \"${dest} is exists!\"
    fi
  "
  local status="$?"
  check::exit_code "$status" "download" "download ${filename} file"
  return "$status"
}

function utils::is_element_in_array() {
  # Return 0 when $1 equals one of the remaining arguments, 1 otherwise.

  local -r needle="${1}"
  shift

  local candidate
  for candidate in "$@"; do
    if [[ "${candidate}" == "${needle}" ]]; then
      return 0
    fi
  done

  return 1
}

function command::exec() {
  # Execute a command locally (host == 127.0.0.1) or on a remote host over
  # ssh. The command's stdout is captured into the global COMMAND_OUTPUT and
  # appended to $LOG_FILE; the function returns the command's exit status
  # (relies on the script-level `set -o pipefail` so the tee stage does not
  # mask failures).
  #
  # $1 - target host
  # $@ - command line to execute

  local host=${1:-}
  shift
  local command="$*"

  # Optionally wrap the command in sudo when SUDO_TAG=1.
  if [[ "${SUDO_TAG:-}" == "1" ]]; then
    # NOTE(review): sudo_options is not declared local and leaks globally.
    sudo_options="sudo -H -n -u ${SUDO_USER}"

    if [[ "${SUDO_PASSWORD:-}" != "" ]]; then
      # With a sudo password, drop non-interactive -n and feed it on stdin.
      sudo_options="${sudo_options// -n/} -p \"\" -S <<< \"${SUDO_PASSWORD}\""
    fi
    command="$sudo_options bash -c $(utils::quote "$command")"
  fi

  # Quote the (possibly wrapped) command once more for the eval below.
  command="$(utils::quote "$command")"

  if [[ "${host}" == "127.0.0.1" ]]; then
    # Local execution; any sudo password is masked in the exec log.
    log::exec "[command]" "bash -c $(printf "%s" "${command//${SUDO_PASSWORD:-}/******}")"
    # shellcheck disable=SC2094
    COMMAND_OUTPUT=$(eval bash -c "${command}" 2>>"$LOG_FILE" | tee -a "$LOG_FILE")
    local status=$?
  else
    # Remote execution over ssh: sshpass for password auth, -i for key auth.
    local ssh_cmd="ssh"
    if [[ "${SSH_PASSWORD}" != "" ]]; then
      ssh_cmd="sshpass -p \"${SSH_PASSWORD}\" ${ssh_cmd}"
    elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
      [ -f "${SSH_PRIVATE_KEY}" ] || {
        log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."
        exit 1
      }
      ssh_cmd="${ssh_cmd} -i $SSH_PRIVATE_KEY"
    fi
    # Passwords are masked in the exec log.
    log::exec "[command]" "${ssh_cmd//${SSH_PASSWORD:-}/******} ${SSH_OPTIONS} ${SSH_USER}@${host} -p ${SSH_PORT} bash -c $(printf "%s" "${command//${SUDO_PASSWORD:-}/******}")"
    # shellcheck disable=SC2094
    COMMAND_OUTPUT=$(eval "${ssh_cmd} ${SSH_OPTIONS} ${SSH_USER}@${host} -p ${SSH_PORT}" bash -c '"${command}"' 2>>"$LOG_FILE" | tee -a "$LOG_FILE")
    local status=$?
  fi
  return $status
}

function command::scp() {
  # Copy files to a host. For 127.0.0.1 the copy is performed locally with
  # rsync; otherwise scp is used (via sshpass for password auth or with an
  # identity file for key auth). Output is captured into COMMAND_OUTPUT and
  # appended to $LOG_FILE; returns the copy command's exit status.
  #
  # $1 - target host
  # $2 - source path
  # $3 - destination path (default: /tmp/)

  local host=${1:-}
  local src=${2:-}
  local dest=${3:-/tmp/}

  if [[ "${host}" == "127.0.0.1" ]]; then
    # Local copy via rsync (preserves attributes, handles directories).
    local command="rsync -az ${src} ${dest}"
    log::exec "[command]" "bash -c \"${command}\""
    # shellcheck disable=SC2094
    COMMAND_OUTPUT=$(bash -c "${command}" 2>>"$LOG_FILE" | tee -a "$LOG_FILE")
    local status=$?
  else
    local scp_cmd="scp"
    if [[ "${SSH_PASSWORD}" != "" ]]; then
      scp_cmd="sshpass -p \"${SSH_PASSWORD}\" ${scp_cmd}"
    elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
      [ -f "${SSH_PRIVATE_KEY}" ] || {
        log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."
        exit 1
      }
      scp_cmd="${scp_cmd} -i $SSH_PRIVATE_KEY"
    fi
    # The ssh password is masked in the exec log.
    log::exec "[command]" "${scp_cmd//${SSH_PASSWORD:-}/******} ${SSH_OPTIONS} -P ${SSH_PORT} -r ${src} ${SSH_USER}@${host}:${dest}" >>"$LOG_FILE"
    # shellcheck disable=SC2094
    COMMAND_OUTPUT=$(eval "${scp_cmd} ${SSH_OPTIONS} -P ${SSH_PORT} -r ${src} ${SSH_USER}@${host}:${dest}" 2>>"$LOG_FILE" | tee -a "$LOG_FILE")
    local status=$?
  fi
  return $status
}

function command::rsync() {
  # Incrementally copy files to a host (plain rsync for 127.0.0.1, rsync
  # over ssh otherwise). Output is captured into COMMAND_OUTPUT and appended
  # to $LOG_FILE; returns the copy command's exit status.
  #
  # $1 - target host
  # $2 - source path
  # $3 - destination path (default: /tmp/)

  local host=${1:-}
  local src=${2:-}
  local dest=${3:-/tmp/}

  if [[ "${host}" == "127.0.0.1" ]]; then
    local command="rsync -az ${src} ${dest}"
    log::exec "[command]" "bash -c \"${command}\""
    # shellcheck disable=SC2094
    COMMAND_OUTPUT=$(bash -c "${command}" 2>>"$LOG_FILE" | tee -a "$LOG_FILE")
    local status=$?
  else
    # FIX: pass the ssh port and identity file through `-e "ssh ..."`.
    # rsync's own --port is the rsync-daemon port (ignored over ssh) and
    # `-i` means --itemize-changes, so the old `-i <key>` left the key file
    # dangling as an extra argument instead of authenticating with it.
    local ssh_transport="ssh -p ${SSH_PORT}"
    local rsync_cmd="rsync -avz"
    if [[ "${SSH_PASSWORD}" != "" ]]; then
      rsync_cmd="sshpass -p \"${SSH_PASSWORD}\" ${rsync_cmd}"
    elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
      [ -f "${SSH_PRIVATE_KEY}" ] || {
        log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."
        exit 1
      }
      ssh_transport="${ssh_transport} -i ${SSH_PRIVATE_KEY}"
    fi
    # The ssh password is masked in the exec log.
    log::exec "[command]" "
    ${rsync_cmd//${SSH_PASSWORD:-}/******} -e \"${ssh_transport}\" ${src} ${SSH_USER}@${host}:${dest}
    " >>"$LOG_FILE"
    # shellcheck disable=SC2094
    COMMAND_OUTPUT=$(eval "${rsync_cmd} -e \"${ssh_transport}\" ${src} ${SSH_USER}@${host}:${dest}" 2>>"$LOG_FILE" | tee -a "$LOG_FILE")
    local status=$?
  fi
  return $status
}

function mount::disk() {
  # Turn /dev/${MOUNT_DISK} into an LVM-backed xfs volume and mount it on
  # every host in $HOST. DESTRUCTIVE: wipes the device (pvcreate -f).
  # NOTE(review): the log messages say "to /data" but the actual mount point
  # is ${DOCKER_DATA_ROOT} — confirm which is intended.

  local hosts=${HOST}
  local disk="/dev/${MOUNT_DISK}"
  local mount_dir="${DOCKER_DATA_ROOT:-}"
  # format, register in fstab and mount the disk on each host
  for host in ${hosts}; do
    log::info "[mount]" "${host}: mount ${disk} to /data"
    command::exec "${host}" "
      mkdir -p ${mount_dir}
      umount ${disk} &> /dev/null || true
      sed -i '/${MOUNT_DISK}/d' /etc/fstab
      pvcreate -f ${disk} && vgcreate -f data-volumes ${disk} && \
        lvcreate -n data -l 100%vg data-volumes && \
        mkfs.xfs -f /dev/data-volumes/data && \
        echo '/dev/data-volumes/data ${mount_dir} xfs defaults 0 0' >> /etc/fstab && \
        mount /dev/data-volumes/data ${mount_dir}
   "
    check::exit_code "$?" "mount" "${host}: mount ${disk} to /data"
  done
}

function create::password() {
  # Change the root password on every host in $HOST to $NEW_SSH_PASSWORD
  # using `passwd --stdin`.

  local hosts=${HOST}
  local new_password="${NEW_SSH_PASSWORD:-}"
  for host in ${hosts}; do
    log::info "[password]" "${host}: change root password"
    command::exec "${host}" "
      echo ${new_password} | passwd --stdin root
   "
    check::exit_code "$?" "password" "${host}: change root password"
  done
}

function script::stop_security() {
  # Stop and disable host firewall services, and switch selinux to
  # permissive now / disabled permanently.

  # Disable firewalld and related services (ignore absent units)
  for target in firewalld python-firewall firewalld-filesystem iptables; do
    systemctl stop $target &>/dev/null || true
    systemctl disable $target &>/dev/null || true
  done

  # selinux: permissive immediately, disabled after the next reboot
  setenforce 0
  sed -i "s/SELINUX=.*/SELINUX=disabled/g" /etc/selinux/config

  echo
}

function script::init_node() {
  # Node initialization: clean state from previous runs, disable
  # selinux/swap/firewalld, tune sshd, shell history and MOTD, switch yum
  # repos to Aliyun mirrors, configure sysctl, chrony time sync and the
  # ipvs kernel modules required by kube-proxy.

  # clean: remove entries added by previous kubeeasy runs
  sed -i -e "/$KUBE_APISERVER/d" -e '/worker/d' -e '/master/d' /etc/hosts

  sed -i '/## kubeeasy managed start/,/## kubeeasy managed end/d' /etc/security/limits.conf /etc/systemd/system.conf /etc/bashrc /etc/rc.local /etc/audit/rules.d/audit.rules

  # Disable selinux
  sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
  setenforce 0

  # Disable swap
  swapoff -a && sysctl -w vm.swappiness=0
  sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

  # Disable firewalld
  for target in firewalld python-firewall firewalld-filesystem iptables; do
    systemctl stop $target &>/dev/null || true
    systemctl disable $target &>/dev/null || true
  done

  # ssh
  # disable reverse DNS lookups to speed up connections
  sed -i  \
      -e 's/#UseDNS yes/UseDNS no/g' \
      -e 's/GSSAPIAuthentication yes/GSSAPIAuthentication no/g' \
      /etc/ssh/sshd_config
  # do not prompt for host key confirmation
  sed -i 's/#   StrictHostKeyChecking ask/   StrictHostKeyChecking no/g' /etc/ssh/ssh_config
  systemctl restart sshd

  # history: timestamped, per-user/IP shell history plus colored prompt
  cat << EOF >> /etc/bashrc
## kubeeasy managed start
# history actions record，include action time, user, login ip
HISTFILESIZE=5000
HISTSIZE=5000
USER_IP=\$(who -u am i 2>/dev/null | awk '{print \$NF}' | sed -e 's/[()]//g')
if [ -z \$USER_IP ]
then
  USER_IP=\$(hostname -i)
fi
HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S \$USER_IP:\$(whoami) "
export HISTFILESIZE HISTSIZE HISTTIMEFORMAT

# PS1
PS1='\[\033[0m\]\[\033[1;36m\][\u\[\033[0m\]@\[\033[1;32m\]\h\[\033[0m\] \[\033[1;31m\]\W\[\033[0m\]\[\033[1;36m\]]\[\033[33;1m\]\\$ \[\033[0m\]'
## kubeeasy managed end
EOF

  # motd: login banner showing host/OS/CPU/memory/disk/container summary
  cat <<EOF >/etc/profile.d/ssh-login-info.sh
#!/bin/sh
#
# @Time    : 2021-08-08
# @Author  : KongYu
# @Desc    : ssh login banner

export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
shopt -q login_shell && : || return 0
echo -e "\033[1;3\$((RANDOM%10%8))m

  ██╗  ██╗ █████╗ ███████╗
  ██║ ██╔╝██╔══██╗██╔════╝
  █████╔╝ ╚█████╔╝███████╗
  ██╔═██╗ ██╔══██╗╚════██║
  ██║  ██╗╚█████╔╝███████║
  ╚═╝  ╚═╝ ╚════╝ ╚══════╝ \033[0m"

# os
upSeconds="\$(cut -d. -f1 /proc/uptime)"
secs=\$((\${upSeconds}%60))
mins=\$((\${upSeconds}/60%60))
hours=\$((\${upSeconds}/3600%24))
days=\$((\${upSeconds}/86400))
UPTIME_INFO=\$(printf "%d days, %02dh %02dm %02ds" "\$days" "\$hours" "\$mins" "\$secs")

if [ -f /etc/redhat-release ] ; then
    PRETTY_NAME=\$(< /etc/redhat-release)

elif [ -f /etc/debian_version ]; then
   DIST_VER=\$(</etc/debian_version)
   PRETTY_NAME="\$(grep PRETTY_NAME /etc/os-release | sed -e 's/PRETTY_NAME=//g' -e  's/"//g') (\$DIST_VER)"

else
    PRETTY_NAME=\$(cat /etc/*-release | grep "PRETTY_NAME" | sed -e 's/PRETTY_NAME=//g' -e 's/"//g')
fi

if [[ -d "/system/app/" && -d "/system/priv-app" ]]; then
    model="\$(getprop ro.product.brand) \$(getprop ro.product.model)"

elif [[ -f /sys/devices/virtual/dmi/id/product_name ||
        -f /sys/devices/virtual/dmi/id/product_version ]]; then
    model="\$(< /sys/devices/virtual/dmi/id/product_name)"
    model+=" \$(< /sys/devices/virtual/dmi/id/product_version)"

elif [[ -f /sys/firmware/devicetree/base/model ]]; then
    model="\$(< /sys/firmware/devicetree/base/model)"

elif [[ -f /tmp/sysinfo/model ]]; then
    model="\$(< /tmp/sysinfo/model)"
fi

MODEL_INFO=\${model}
KERNEL=\$(uname -srmo)
USER_NUM=\$(who -u | wc -l)
RUNNING=\$(ps ax | wc -l | tr -d " ")

# disk
totaldisk=\$(df -h -x devtmpfs -x tmpfs -x debugfs -x aufs -x overlay --total 2>/dev/null | tail -1)
disktotal=\$(awk '{print \$2}' <<< "\${totaldisk}")
diskused=\$(awk '{print \$3}' <<< "\${totaldisk}")
diskusedper=\$(awk '{print \$5}' <<< "\${totaldisk}")
DISK_INFO="\033[0;33m\${diskused}\033[0m of \033[1;34m\${disktotal}\033[0m disk space used (\033[0;33m\${diskusedper}\033[0m)"

# cpu
cpu=\$(awk -F':' '/^model name/ {print \$2}' /proc/cpuinfo | uniq | sed -e 's/^[ \t]*//')
cpun=\$(grep -c '^processor' /proc/cpuinfo)
cpuc=\$(grep '^cpu cores' /proc/cpuinfo | tail -1 | awk '{print \$4}')
cpup=\$(grep '^physical id' /proc/cpuinfo | wc -l)
CPU_INFO="\${cpu} \${cpup}P \${cpuc}C \${cpun}L"

# get the load averages
read one five fifteen rest < /proc/loadavg
LOADAVG_INFO="\033[0;33m\${one}\033[0m / \${five} / \${fifteen} with \033[1;34m\$(( cpun*cpuc ))\033[0m core(s) at \033[1;34m\$(grep '^cpu MHz' /proc/cpuinfo | tail -1 | awk '{print \$4}')\033 MHz"

# mem
MEM_INFO="\$(cat /proc/meminfo | awk '/MemTotal:/{total=\$2/1024/1024;next} /MemAvailable:/{use=total-\$2/1024/1024; printf("\033[0;33m%.2fGiB\033[0m of \033[1;34m%.2fGiB\033[0m RAM used (\033[0;33m%.2f%%\033[0m)",use,total,(use/total)*100);}')"

# network
# extranet_ip=" and \$(curl -s ip.cip.cc)"
IP_INFO="\$(ip a | grep glo | awk '{print \$2}' | head -1 | cut -f1 -d/)\${extranet_ip:-}"

# Container info
CONTAINER_INFO="\$(sudo /usr/bin/crictl ps -a -o yaml 2> /dev/null | awk '/^  state: /{gsub("CONTAINER_", "", \$NF) ++S[\$NF]}END{for(m in S) printf "%s%s:%s ",substr(m,1,1),tolower(substr(m,2)),S[m]}')Images:\$(sudo /usr/bin/crictl images -q 2> /dev/null | wc -l)"

# info
echo -e "
 Information as of: \033[1;34m\$(date +"%Y-%m-%d %T")\033[0m

 \033[0;1;31mProduct\033[0m............: \${MODEL_INFO}
 \033[0;1;31mOS\033[0m.................: \${PRETTY_NAME}
 \033[0;1;31mKernel\033[0m.............: \${KERNEL}
 \033[0;1;31mCPU\033[0m................: \${CPU_INFO}

 \033[0;1;31mHostname\033[0m...........: \033[1;34m\$(hostname)\033[0m
 \033[0;1;31mIP Addresses\033[0m.......: \033[1;34m\${IP_INFO}\033[0m

 \033[0;1;31mUptime\033[0m.............: \033[0;33m\${UPTIME_INFO}\033[0m
 \033[0;1;31mMemory\033[0m.............: \${MEM_INFO}
 \033[0;1;31mLoad Averages\033[0m......: \${LOADAVG_INFO}
 \033[0;1;31mDisk Usage\033[0m.........: \${DISK_INFO}

 \033[0;1;31mUsers online\033[0m.......: \033[1;34m\${USER_NUM}\033[0m
 \033[0;1;31mRunning Processes\033[0m..: \033[1;34m\${RUNNING}\033[0m
 \033[0;1;31mContainer Info\033[0m.....: \${CONTAINER_INFO}
"
EOF

  chmod +x /etc/profile.d/ssh-login-info.sh
  # allow everyone to run crictl (used by the banner) without a password
  echo 'ALL ALL=(ALL) NOPASSWD:/usr/bin/crictl' >/etc/sudoers.d/crictl

  # repo: switch CentOS base and EPEL repos to the Aliyun mirror
  [ -f /etc/yum.repos.d/CentOS-Base.repo ] && sed -e 's!^#baseurl=!baseurl=!g' \
    -e 's!^mirrorlist=!#mirrorlist=!g' \
    -e 's!mirror.centos.org!mirrors.aliyun.com!g' \
    -i /etc/yum.repos.d/CentOS-Base.repo

  [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y epel-release

  [ -f /etc/yum.repos.d/epel.repo ] && sed -e 's!^mirrorlist=!#mirrorlist=!g' \
    -e 's!^metalink=!#metalink=!g' \
    -e 's!^#baseurl=!baseurl=!g' \
    -e 's!//download\.fedoraproject\.org/pub!//mirrors.aliyun.com!g' \
    -e 's!http://mirrors\.aliyun!https://mirrors.aliyun!g' \
    -i /etc/yum.repos.d/epel.repo

  # Change sysctl
  cat <<EOF >/etc/sysctl.d/99-kube.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
vm.max_map_count = 262144
vm.swappiness = 1
fs.inotify.max_user_instances = 524288
EOF

  # sync time: the MGMT node serves chrony to its own subnet, others follow it
  local segment=$(ip route | grep kernel | awk '{print $1}' | head -1)
  timedatectl set-timezone Asia/Shanghai
  hwclock --systohc
  timedatectl set-local-rtc 0
  date
  hwclock -r

  [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y chrony
  # NOTE(review): $host is not assigned in this function — it must be
  # provided by the calling environment; confirm callers export it.
  if [ "${host}" == "${MGMT_NODE}" ]; then
      cat <<EOF >/etc/chrony.conf
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
server ${MGMT_NODE} iburst
allow ${segment}
local stratum 10
EOF
  else
    cat <<EOF >/etc/chrony.conf
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
server ${MGMT_NODE} iburst
#allow ${segment}
local stratum 10
EOF
  fi

  systemctl restart chronyd
  systemctl enable chronyd

  # package
  [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y curl wget net-tools

  # ipvs: load the kernel modules kube-proxy's ipvs mode depends on
  [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y ipvsadm ipset sysstat conntrack libseccomp
  module=(
    ip_vs
    ip_vs_rr
    ip_vs_wrr
    ip_vs_sh
    overlay
    nf_conntrack
    br_netfilter
  )
  [ -f /etc/modules-load.d/ipvs.conf ] && cp -f /etc/modules-load.d/ipvs.conf{,_bak}
  for kernel_module in "${module[@]}"; do
    # only persist modules that actually exist on this kernel
    /sbin/modinfo -F filename "$kernel_module" |& grep -qv ERROR && echo "$kernel_module" >>/etc/modules-load.d/ipvs.conf
  done
  systemctl restart systemd-modules-load
  systemctl enable systemd-modules-load
  sysctl --system

  # speed up DNS resolution retries
  grep single-request-reopen /etc/resolv.conf || sed -i '1ioptions timeout:2 attempts:3 rotate single-request-reopen' /etc/resolv.conf

  # start from clean ipvs/iptables state
  ipvsadm --clear
  iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
}

function script::upgrade_kernel() {
  # Upgrade to the mainline kernel (kernel-ml) from the ELRepo repository
  # (redirected to the TUNA mirror) and boot it by default.

  local ver
  ver=$(rpm --eval "%{centos_ver}")

  [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y "https://www.elrepo.org/elrepo-release-${ver}.el${ver}.elrepo.noarch.rpm" && \
  sed -e "s/^mirrorlist=/#mirrorlist=/g" \
    -e "s/elrepo.org\/linux/mirrors.tuna.tsinghua.edu.cn\/elrepo/g" \
    -i /etc/yum.repos.d/elrepo.repo
#  [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y --disablerepo="*" --enablerepo=elrepo-kernel kernel-ml{,-devel}
  [[ "${OFFLINE_TAG:-}" != "1" ]] && yum install -y --enablerepo=elrepo-kernel kernel-ml{,-devel}

  # make the newly installed kernel the default grub entry and enable
  # user namespaces on it
  grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
  grubby --default-kernel
  grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
}

function script::upgrage_kube() {
  # Upgrade kubeadm/kubelet/kubectl on this node and run `kubeadm upgrade`.
  # NOTE(review): "upgrage" is a typo in the function name, kept because
  # callers elsewhere use this exact name.
  #
  # $1 - role: "init" (control plane, runs `kubeadm upgrade apply`),
  #      anything else runs `kubeadm upgrade node`
  # $2 - target version; "latest" installs the newest packaged version

  local role=${1:-init}
  local version="-${2:-latest}"
  version="${version#-latest}"   # "latest" -> empty yum version suffix

  set -e
  echo '[install] kubeadm'
  kubeadm version
  yum install -y "kubeadm${version}" --disableexcludes=kubernetes
  kubeadm version

  echo '[upgrade]'
  if [[ "$role" == "init" ]]; then
    # derive the apply target from `kubeadm upgrade plan` output
    local plan_info
    plan_info=$(kubeadm upgrade plan)
    local v
    v=$(printf "%s" "$plan_info" | grep 'kubeadm upgrade apply ' | awk '{print $4}' | tail -1)
    printf "%s\n" "${plan_info}"
    kubeadm upgrade apply "${v}" -y
  else
    kubeadm upgrade node
  fi

  echo '[install] kubelet kubectl'
  kubectl version --client=true
  yum install -y "kubelet${version}" "kubectl${version}" --disableexcludes=kubernetes
  kubectl version --client=true

  # enable CPU/memory accounting for the kubelet unit
  [ -f /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf ] &&
    sed -i 's#^\[Service\]#[Service]\nCPUAccounting=true\nMemoryAccounting=true#g' /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf

  systemctl daemon-reload
  systemctl restart kubelet
}

function script::install_docker() {
  # Install docker-ce + containerd (online mode) and write daemon.json,
  # crictl config and service tweaks; load bundled images in offline mode.
  #
  # $1 - docker-ce version; "latest" installs the repo's newest packages

  local version="-${1:-latest}"
  version="${version#-latest}"   # "latest" -> empty yum version suffix
  local OFFLINE_TAG=${OFFLINE_TAG}   # shadow the global (self-assignment)

  if [[ "${OFFLINE_TAG:-}" != "1" ]]; then
    [ -f /usr/bin/docker ] && yum remove -y docker-ce docker-ce-cli
    [ -f /usr/bin/containerd ] && yum remove -y containerd.io
    cat <<EOF >/etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$(rpm --eval '%{centos_ver}')/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF
	curl -o /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
	curl -o /etc/yum.repos.d/epel-7.repo http://mirrors.aliyun.com/repo/epel-7.repo
    yum install -y "docker-ce${version}" "docker-ce-cli${version}" containerd.io bash-completion
  fi

  [ -f /usr/share/bash-completion/completions/docker ] &&
    cp -f /usr/share/bash-completion/completions/docker /etc/bash_completion.d/
  [ ! -d ${DOCKER_DATA_ROOT} ] && mkdir -p ${DOCKER_DATA_ROOT}
  [ ! -d /etc/docker ] && mkdir /etc/docker
  cat <<EOF >/etc/docker/daemon.json
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "200m",
    "max-file": "5"
  },
  "default-ulimits": {
    "nofile": {
      "Name": "nofile",
      "Hard": 655360,
      "Soft": 655360
    },
    "nproc": {
      "Name": "nproc",
      "Hard": 655360,
      "Soft": 655360
    }
  },
  "live-restore": true,
  "oom-score-adjust": -1000,
  "max-concurrent-downloads": 10,
  "max-concurrent-uploads": 10,
  "storage-driver": "overlay2",
  "storage-opts": ["overlay2.override_kernel_check=true"],
  "insecure-registries": ["0.0.0.0/0"],
  "registry-mirrors": [
    "https://registry.cn-hangzhou.aliyuncs.com"
  ]
}
EOF
  # protect containerd from the OOM killer
  sed -i 's|#oom_score = 0|oom_score = -999|' /etc/containerd/config.toml
  cat <<EOF >/etc/crictl.yaml
runtime-endpoint: unix:///var/run/dockershim.sock
image-endpoint: unix:///var/run/dockershim.sock
timeout: 2
debug: false
pull-image-on-create: true
disable-pull-on-run: false
EOF
  # expose dockerd on tcp://0.0.0.0:2375, use the systemd cgroup driver and
  # store data under ${DOCKER_DATA_ROOT} (--graph)
  sed -i "s#ExecStart=.*#ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd  --graph=${DOCKER_DATA_ROOT}#g" /usr/lib/systemd/system/docker.service
	systemctl daemon-reload

  systemctl enable containerd
  systemctl restart containerd

  systemctl enable docker
  systemctl restart docker

  # offline mode: load the pre-bundled image tarball
  [ "${OFFLINE_TAG}" == "1" ] && docker load -i ${OFFLINE_DIR}/images/k8s-images.tar.gz || true

}

function script::install_kube() {
  # Install kubeadm/kubelet/kubectl (plus CNI, iscsi and nfs utilities),
  # set up shell completion and the kubelet systemd drop-in.
  #
  # $1 - kubernetes version; "latest" installs the repo's newest packages

  local version="-${1:-latest}"
  version="${version#-latest}"   # "latest" -> empty yum version suffix

  if [[ "${OFFLINE_TAG:-}" != "1" ]]; then
    [ -f /usr/bin/kubeadm ] && yum remove -y kubeadm
    [ -f /usr/bin/kubelet ] && yum remove -y kubelet
    [ -f /usr/bin/kubectl ] && yum remove -y kubectl
  cat <<EOF >/etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
    yum install -y "kubeadm${version}" "kubelet${version}" "kubectl${version}" kubernetes-cni iscsi-initiator-utils nfs-utils  --disableexcludes=kubernetes
  fi

  # shell completion for kubectl/kubeadm
  [ -d /etc/bash_completion.d ] &&
    {
      kubectl completion bash >/etc/bash_completion.d/kubectl
      kubeadm completion bash >/etc/bash_completion.d/kubadm
    }

  # kubelet drop-in: enable resource accounting and pre-create cgroup slices
  [ ! -d /usr/lib/systemd/system/kubelet.service.d ] && mkdir -p /usr/lib/systemd/system/kubelet.service.d
  cat <<EOF >/usr/lib/systemd/system/kubelet.service.d/11-cgroup.conf
[Service]
CPUAccounting=true
MemoryAccounting=true
BlockIOAccounting=true
ExecStartPre=/bin/bash -c '/bin/mkdir -p /sys/fs/cgroup/{cpuset,memory,hugetlb,systemd,pids,"cpu,cpuacct"}/{system,kube,kubepods}.slice||:'
Slice=kube.slice
EOF
  systemctl daemon-reload
  systemctl enable --now iscsid
  # enable and start the kubelet service
  systemctl enable kubelet
  systemctl restart kubelet
}

function script::install_haproxy() {
  # Install haproxy and generate a TCP load-balancer configuration in front
  # of the kube-apiservers passed as arguments.
  #
  # $@ - apiserver host addresses
  # Env (exported by the caller): hostname_prefix, virtual_ip,
  # KUBE_APISERVER_PORT, OFFLINE_TAG.

  local api_servers="$*"
  local hostname_prefix="${hostname_prefix}"
  local virtual_ip=${virtual_ip}
  local KUBE_PORT=6443
  # with a VIP the apiserver is fronted on KUBE_APISERVER_PORT instead
  [ -n "${virtual_ip}" ] && KUBE_PORT=${KUBE_APISERVER_PORT}

  if [[ "${OFFLINE_TAG:-}" != "1" ]]; then
    [ -f /usr/bin/haproxy ] && yum remove -y haproxy
    yum install -y haproxy
  fi

  # keep a one-time backup of the stock configuration
  [ ! -f /etc/haproxy/haproxy.cfg_bak ] && cp /etc/haproxy/haproxy.cfg{,_bak}
  cat <<EOF >/etc/haproxy/haproxy.cfg
global
  log 127.0.0.1 local0 info
  maxconn 10240
  daemon
defaults
  log global
  mode http
  retries  3
  timeout  http-request 10s
  timeout  queue 1m
  timeout  connect 10s
  timeout  client 1m
  timeout  server 1m
  timeout  check 10s

listen admin_stat
  bind 0.0.0.0:1080
  mode http
  option httplog
  stats uri /
  stats auth admin:admin
  stats refresh 5s
  stats enable
  stats  show-node
  stats  show-legends
  stats  hide-version

listen kube-apiserver:${KUBE_PORT}
  bind *:${KUBE_PORT}
  mode tcp
  option tcplog
  option tcp-check
  balance source
$(
    index=1
    for h in $api_servers; do
      echo "  server ${hostname_prefix}-master-node${index} $h:6443 check"
      index=$((index + 1))
    done
  )
EOF

  systemctl enable haproxy
  systemctl restart haproxy
}

function script::install_keepalived() {
  # Install keepalived and generate its VRRP / virtual-server configuration
  # for the kube-apiserver VIP. (Former comment wrongly said "haproxy".)
  #
  # $@ - apiserver host addresses (become real_server entries)
  # Env (exported by the caller): state (MASTER/BACKUP), nic_name,
  # virtual_ip, host, priority, KUBE_APISERVER_PORT, OFFLINE_TAG.

  local api_servers="$*"
  local state=${state}
  local nic_name=${nic_name}
  local virtual_ip=${virtual_ip}
  local host=${host}
  local priority=${priority}
  local KUBE_PORT=6443
  # with a VIP the apiserver is fronted on KUBE_APISERVER_PORT instead
  [ -n "${virtual_ip}" ] && KUBE_PORT=${KUBE_APISERVER_PORT}
  # debug: print the resolved parameters
  echo api_servers=$api_servers state=$state nic_name=$nic_name virtual_ip=$virtual_ip host=$host
  if [[ "${OFFLINE_TAG:-}" != "1" ]]; then
    [ -f /usr/bin/keepalived ] && yum remove -y keepalived
    yum install -y keepalived libnl* popt*
  fi

  # keep a one-time backup of the stock configuration
  [ ! -f /etc/keepalived/keepalived.conf_bak ] && cp /etc/keepalived/keepalived.conf{,_bak}
  cat <<EOF >/etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   router_id lvs-keepalived01      #router_id 机器标识，通常为hostname，但不一定非得是hostname。故障发生时，邮件通知会用到。
}

vrrp_instance VI_1 {        #vrrp实例定义部分
  state ${state}                   #设置lvs的状态，MASTER和BACKUP两种，必须大写
  interface ${nic_name}            #设置对外服务的接口
  virtual_router_id 188             #设置虚拟路由标示，这个标示是一个数字，同一个vrrp实例使用唯一标示
  priority ${priority}             #定义优先级，数字越大优先级越高，在一个vrrp——instance下，master的优先级必须大于backup
  advert_int 1                     #设定master与backup负载均衡器之间同步检查的时间间隔，单位是秒
  authentication {                 #设置验证类型和密码
    auth_type PASS                 #主要有PASS和AH两种
    auth_pass yidaoyun              #验证密码，同一个vrrp_instance下MASTER和BACKUP密码必须相同
  }
#  mcast_src_ip ${host}           #设置对外服务的接口的IP
#  ## The IP address of this machine

  virtual_ipaddress {              #设置虚拟ip地址，可以设置多个，每行一个
    ${virtual_ip}
  ## The VIP address
  }
}
virtual_server ${virtual_ip} ${KUBE_PORT} {  #设置虚拟服务器，需要指定虚拟ip和服务端口
    delay_loop 6                     #健康检查时间间隔
    lb_algo wrr                      #负载均衡调度算法
    lb_kind DR                       #负载均衡转发规则
    #persistence_timeout 50          #设置会话保持时间，对动态网页非常有用
    protocol TCP                     #指定转发协议类型，有TCP和UDP两种
    ## The Virtual Server
$(
    for h in $api_servers; do
      echo "    real_server $h 6443 {  #配置服务器节点1，需要指定real server的真实IP地址和端口
    weight 10                        #设置权重，数字越大权重越高
    TCP_CHECK {                      #realserver的状态监测设置部分单位秒
       connect_timeout 10            #连接超时为10秒
       retry 3                       #重连次数
       delay_before_retry 3          #重试间隔
       connect_port 6443             #连接端口为6443，要和上面的保持一致
       }
    }
"
    done
  )
}
EOF

  systemctl enable keepalived
  systemctl restart keepalived
}

function install::ha-service() {
  # Install haproxy (and keepalived, once a VIP is configured) on every
  # master node, providing an HA endpoint for the kube-apiserver.
  # Requires VIRTUAL_IP; falls back to the usage help when it is missing.

  MGMT_NODE=$(echo "${MASTER_NODES}" | awk '{print $1}')

  local apiservers=$MASTER_NODES
  local VIRTUAL_IP=${VIRTUAL_IP:-}
  local ha_service_node=$MASTER_NODES

  # a virtual IP is mandatory for this mode
  [ ! -n "${VIRTUAL_IP}" ] && help::usage

  for host in $ha_service_node; do
    # install haproxy (functions are serialized into the remote shell
    # via declare -f)
    log::info "[install]" "install haproxy on $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0}
      export hostname_prefix=${HOSTNAME_PREFIX}
      export virtual_ip=${VIRTUAL_IP}
      export KUBE_APISERVER_PORT=${KUBE_APISERVER_PORT}
      $(declare -f script::stop_security)
      script::stop_security
      $(declare -f script::install_haproxy)
      script::install_haproxy \"$apiservers\"
  "
    check::exit_code "$?" "install" "install haproxy on $host"

    # install keepalived when a VIP was provided
    if [ -n "${VIRTUAL_IP}" ]; then
      # install keepalived
      local state="MASTER"
      local nic_name=""
      local virtual_ip=${VIRTUAL_IP}
      local host=${host}
      local priority=100
      log::info "[init]" "Get ${host} NIC Name."
      command::exec "${host}" "
        ip -4 route get 8.8.8.8 2>/dev/null | head -1 | awk '{print \$5}'
      "
      get::command_output "nic_name" "$?" "exit"

      # non-management masters become BACKUP with a random lower priority
      [ ${host} != ${MGMT_NODE} ] && state="BACKUP"
      [ ${host} != ${MGMT_NODE} ] && priority="$(echo $RANDOM|cksum |cut -c 1-2)"
      log::info "[install]" "install keepalived on $host"
      command::exec "${host}" "
        export OFFLINE_TAG=${OFFLINE_TAG:-0}
        export state=${state}
        export nic_name=${nic_name}
        export virtual_ip=${VIRTUAL_IP}
        export host=${host}
        export priority=${priority}
        export KUBE_APISERVER_PORT=${KUBE_APISERVER_PORT}
        $(declare -f script::install_keepalived)
        script::install_keepalived \"$apiservers\"
      "
      check::exit_code "$?" "install" "install keepalived on $host"
      [ ${host} == ${MGMT_NODE} ] && log::info "[install]" "${virtual_ip}: virtual ip on $host"
    fi
  done


}

function check::command_exists() {
  # Verify that a command is available locally; install its package if not.
  #
  # $1 - command name to probe
  # $2 - yum package that provides it

  local cmd=${1}
  local package=${2}

  if ! command -V "$cmd" >/dev/null 2>&1; then
    log::warning "[check]" "I require $cmd but it's not installed."
    log::warning "[check]" "install $package package."
    command::exec "127.0.0.1" "yum install -y ${package}"
    check::exit_code "$?" "check" "$package install" "exit"
  else
    log::info "[check]" "$cmd command exists."
  fi
}

function check::command() {
  # Check the local commands this script depends on, installing the
  # providing package where one is missing (via check::command_exists).

#  check::command_exists ssh openssh-clients
  check::command_exists sshpass sshpass
  # NB: inside [[ ]], && binds tighter than ||, so this reads as
  # "(installing && installing kube && not offline) || adding nodes".
  [[ "${INSTALL_TAG:-}" == "1" && "${KUBE_INSTALL_TAG:-}" == "1" && "${OFFLINE_TAG:-}" != "1" || "${ADD_TAG:-}" == "1" ]] && check::command_exists wget wget
  # rsync is only needed to distribute the offline package
  [[ "${OFFLINE_TAG:-}" == "1" ]] && check::command_exists rsync rsync
#  [[ "${OFFLINE_TAG:-}" == "1" ]] && check::command_exists tar tar
}

function check::ssh_conn() {
  # Check SSH connectivity to every node (and, in offline mode, wipe the
  # remote yum repo definitions). Aborts on the first unreachable host.

  local OFFLINE_TAG=${OFFLINE_TAG:-}
  for host in $MASTER_NODES $WORKER_NODES; do
    [ "$host" == "127.0.0.1" ] && continue
    # The inner quotes are escaped so the [[ ]] test is evaluated on the
    # remote host. Unescaped (as before) they terminated the local
    # double-quoted string, exposing the rest of the command to local word
    # splitting/globbing, and an empty OFFLINE_TAG produced an invalid
    # remote expression '[[ == 1 ]]'.
    command::exec "${host}" "
      [[ \"${OFFLINE_TAG}\" == \"1\" ]] && rm -rf /etc/yum.repos.d/*
      echo 0
    "
    check::exit_code "$?" "check" "ssh $host connection" "exit"
  done
}

function check::ssh_conn_new() {
  # Verify SSH connectivity to every node while also speeding up / relaxing
  # SSH there: disable reverse-DNS and GSSAPI auth in sshd_config, and turn
  # off strict host key checking for outgoing ssh, then restart sshd.
  # Failures are logged but do not abort the script.

  for host in $MASTER_NODES $WORKER_NODES $HOST; do
    [ "$host" == "127.0.0.1" ] && continue
    command::exec "${host}" "
    sed -i -e 's/#UseDNS yes/UseDNS no/g' \
    -e 's/GSSAPIAuthentication yes/GSSAPIAuthentication no/g' \
    /etc/ssh/sshd_config
    sed -i 's/#   StrictHostKeyChecking ask/   StrictHostKeyChecking no/g' /etc/ssh/ssh_config
    systemctl restart sshd
    echo 0
    "
    check::exit_code "$?" "check" "ssh $host connection"
  done
}

function check::ping_conn() {
  # Check ICMP reachability of every node (4 pings each, output discarded).
  # Failures are logged but do not abort the script.

  for host in $MASTER_NODES $WORKER_NODES $HOST; do
    [ "$host" == "127.0.0.1" ] && continue
    # quote the host (no word splitting/globbing) and place options before
    # the operand for portability across ping implementations
    ping -c 4 "$host" &> /dev/null
    check::exit_code "$?" "check" "ping $host connection"
  done

}

function check::time() {
  # Configure the local host's timezone (Asia/Shanghai) and chrony NTP.
  # This function is serialized with 'declare -f' and executed on remote
  # hosts; it relies on the exported variables OFFLINE_TAG, MGMT_NODE and
  # host being set by the caller.

  # first kernel-routed network segment, used as the chrony 'allow' scope
  local segment=$(ip route | grep kernel | awk '{print $1}' | head -1)
  timedatectl set-timezone Asia/Shanghai
  hwclock --systohc
  timedatectl set-local-rtc 0
  date
  hwclock -r
  [[ "${OFFLINE_TAG}" != "1" ]] && yum -y install chrony

  # the management node serves NTP to the cluster; all others sync from it
  if [ "${host}" == "${MGMT_NODE}" ]; then
      cat <<EOF >/etc/chrony.conf
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
server ${MGMT_NODE} iburst
allow ${segment}
local stratum 10
EOF
  else
    cat <<EOF >/etc/chrony.conf
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
server ${MGMT_NODE} iburst
#allow ${segment}
local stratum 10
EOF
  fi

  systemctl restart chronyd
  systemctl enable chronyd
}

function install::time() {
  # Deploy chrony-based time synchronization across the cluster: the first
  # master becomes the NTP server, every other node syncs from it.
  # Sets the global MGMT_NODE as a side effect.

  MGMT_NODE=$(echo "${MASTER_NODES}" | awk '{print $1}')

  local servers="$MASTER_NODES $WORKER_NODES"

  for host in $servers; do
    # check chrony
    log::info "[create]" "create chrony on $host"
    # ship script::stop_security and check::time to the remote host via
    # 'declare -f' and run them there with the needed env exported
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0}
      export MGMT_NODE=${MGMT_NODE}
      export host=${host}
      $(declare -f script::stop_security)
      script::stop_security
      $(declare -f check::time)
      check::time
  "
    check::exit_code "$?" "create" "create chrony on $host"
  done
}

function install::ssh_keygen() {
  # Generate an RSA key pair on the management node (first master) and copy
  # the whole ~/.ssh directory to every node, so all hosts share the same
  # key and trust each other. Sets the global MGMT_NODE as a side effect.

  MGMT_NODE=$(echo "${MASTER_NODES}" | awk '{print $1}')
  # generate the key pair (overwrites any existing ~/.ssh)
  command::exec "${MGMT_NODE}" "
    rm -rf ~/.ssh
    ssh-keygen -t rsa -P \"\" -f ~/.ssh/id_rsa -C \"2385569970@qq.com\"
    cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys
  "

  local servers="$MASTER_NODES $WORKER_NODES"
  for host in $servers; do
    # create ssh keygen
    # relax sshd (no DNS/GSSAPI) and disable host key prompts before copying
    command::exec "${host}" "
      sed -i \
      -e 's/#UseDNS yes/UseDNS no/g' \
      -e 's/GSSAPIAuthentication yes/GSSAPIAuthentication no/g' \
      /etc/ssh/sshd_config
      sed -i 's/#   StrictHostKeyChecking ask/   StrictHostKeyChecking no/g' /etc/ssh/ssh_config
      systemctl restart sshd
    "
    log::info "[create]" "create ssh keygen $host"
    command::scp "${host}" "~/.ssh" "~/"
    check::exit_code "$?" "create" "create ssh keygen $host"
  done

  echo
}

function check::os() {
  # Verify every node runs an operating system listed in OS_SUPPORT by
  # matching "${ID}${VERSION_ID}" from the remote /etc/os-release.
  # Aborts the script on the first unsupported host.

  log::info "[check]" "os support: ${OS_SUPPORT}"
  for host in $MASTER_NODES $WORKER_NODES; do
    command::exec "${host}" "
      [ -f /etc/os-release ] && source /etc/os-release
      echo client_os:\${ID:-}\${VERSION_ID:-}
      if [[ \"${OS_SUPPORT}\" == *\"\${ID:-}\${VERSION_ID:-}\"* ]]; then
        exit 0
      fi
      exit 1
    "
    check::exit_code "$?" "check" "$host os support" "exit"
  done
}

function check::kernel() {
  # Verify every node's kernel version is at least the given minimum.
  # Aborts the script on the first host below the threshold.
  #
  # $1 - minimum kernel version as "major.minor.patch"

  local version=${1:-}
  log::info "[check]" "kernel version not less than ${version}"
  # normalize "a.b.c" to a zero-padded comparable integer, e.g. 4.9.17 -> 4009017
  version=$(echo "${version}" | awk -F. '{ printf("%d%03d%03d\n", $1,$2,$3); }')

  for host in $MASTER_NODES $WORKER_NODES; do
    # remote side strips the "-release" suffix from uname -r, then applies
    # the same normalization for an integer comparison
    command::exec "${host}" "
      kernel_version=\$(uname -r)
      kernel_version=\$(echo \${kernel_version/-*} | awk -F. '{ printf(\"%d%03d%03d\n\", \$1,\$2,\$3); }') 
      echo kernel_version \${kernel_version}
      [[ \${kernel_version} -ge ${version} ]] && exit 0 || exit 1
    "
    check::exit_code "$?" "check" "$host kernel version" "exit"
  done

}

function check::apiserver_conn() {
  # Verify the apiserver is reachable by listing nodes from the
  # management node; aborts the script when it is not.

  local rc
  command::exec "${MGMT_NODE}" "kubectl get node"
  rc=$?
  check::exit_code "$rc" "check" "conn apiserver" "exit"
}

function check::exit_code() {
  # Log the outcome of a step and, when requested, abort the whole script
  # with the step's exit code.
  #
  # $1 - exit code of the step
  # $2 - app/step tag used in the log prefix
  # $3 - human-readable description
  # $4 - optional "exit": terminate the script when the step failed

  local code=${1:-}
  local app=${2:-}
  local desc=${3:-}
  local exit_script=${4:-}

  if [[ "${code}" != "0" ]]; then
    log::error "[${app}]" "${desc} failed."
    # propagate the failing code out of the script when asked to
    [[ "$exit_script" == "exit" ]] && exit "$code"
  else
    log::info "[${app}]" "${desc} succeeded."
  fi
}

function check::preflight() {
  # Pre-flight checks run before any cluster operation.

  # required local commands (skipped when only installing dependencies)
  DEPEND_INSTALL_TAG=${DEPEND_INSTALL_TAG:-0}
  if [[ "${DEPEND_INSTALL_TAG}" != "1" ]]; then
    check::command
  fi

  # ssh reachability of every node
  check::ssh_conn

  # os support check is currently disabled
  # check::os

  # cilium needs a recent kernel
  if [[ "${KUBE_NETWORK:-}" == "cilium" ]]; then
    check::kernel 4.9.17
  fi

  # operations on an existing cluster need a reachable apiserver
  local op_count=$(( ${ADD_TAG:-0} + ${DEL_TAG:-0} + ${UPGRADE_TAG:-0} + ${RENEW_CERT_TAG:-0} ))
  if (( op_count > 0 )); then
    check::apiserver_conn
  fi
}

function install::package() {
  # Install the container runtime and kubernetes packages on every node,
  # then set up the HA entrypoint: haproxy on every master (or, without a
  # VIP, on the workers) and keepalived only when VIRTUAL_IP is set.

  # resolve "latest" for cri-o: its version tracks the kubernetes minor
  if [[ "${KUBE_CRI}" == "cri-o" && "${KUBE_CRI_VERSION}" == "latest" ]]; then
    KUBE_CRI_VERSION="${KUBE_VERSION}"
    # this inner branch only fires when KUBE_VERSION itself is "latest"
    if [[ "${KUBE_CRI_VERSION}" == "latest" ]]; then
      if command::exec "127.0.0.1" "wget https://storage.googleapis.com/kubernetes-release/release/stable.txt -q -O -"; then
        KUBE_CRI_VERSION="${COMMAND_OUTPUT#v}"
      else
        log::error "[install]" "get kubernetes stable version error. Please specify the version!"
        exit 1
      fi
    fi
    # cri-o is published per minor release: strip the patch level
    KUBE_CRI_VERSION="${KUBE_CRI_VERSION%.*}"
  fi

  for host in $MASTER_NODES $WORKER_NODES; do
    # install container cri (the matching script::install_* function is
    # shipped to the remote host via 'declare -f')
    log::info "[install]" "install ${KUBE_CRI} on $host."
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0}
      export DOCKER_DATA_ROOT=${DOCKER_DATA_ROOT}
      export OFFLINE_DIR=${OFFLINE_DIR}
      $(declare -f script::install_"${KUBE_CRI}")
      script::install_${KUBE_CRI} $KUBE_CRI_VERSION
    "
    check::exit_code "$?" "install" "install ${KUBE_CRI} on $host"

    # install kube
    log::info "[install]" "install kube on $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0}
      export host=${host}
      export KUBE_APISERVER_PORT=${KUBE_APISERVER_PORT}
      $(declare -f script::install_kube)
      script::install_kube $KUBE_VERSION
    "
    check::exit_code "$?" "install" "install kube on $host"
    # online installs additionally replace kubeadm with a build from the
    # project's releases; best-effort (skipped when the download fails)
    [[ "${OFFLINE_TAG}" != "1" ]] && {
      utils::download_file "${GITHUB_PROXY}https://github.com/kongyu666/kubeeasy/releases/download/v${KUBE_VERSION}/kubeadm-v${KUBE_VERSION}-linux-amd64" /tmp/kubeadm
      [[ "$?" == "0" ]] && log::info "[replace]" "replace kubeadm on $host"  && command::exec "${host}" "
        \mv /tmp/kubeadm /usr/bin/
        chmod +x /usr/bin/kubeadm
      " && check::exit_code "$?" "replace" "replace kubeadm on $host"
    }

  done

  local apiservers=$MASTER_NODES
  local VIRTUAL_IP=${VIRTUAL_IP:-}
  local ha_service_node=$MASTER_NODES
  local KUBE_APISERVER_PORT=${KUBE_APISERVER_PORT}

  # single-node shorthand: resolve 127.0.0.1 to the host's real address
  if [[ "$apiservers" == "127.0.0.1" ]]; then
    command::exec "${MGMT_NODE}" "ip -o route get to 8.8.8.8 | sed -n 's/.*src \([0-9.]\+\).*/\1/p'"
    get::command_output "apiservers" "$?"
  fi

  # when adding nodes, the backend list is the cluster's existing masters
  if [[ "${ADD_TAG:-}" == "1" ]]; then
    command::exec "${MGMT_NODE}" "
      kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
    "
    get::command_output "apiservers" "$?"
  fi

  # without a VIP, haproxy runs on the worker nodes instead of the masters
  [ ! -n "${VIRTUAL_IP}" ] && ha_service_node=$WORKER_NODES

  for host in $ha_service_node; do
    # install haproxy
    log::info "[install]" "install haproxy on $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0}
      export hostname_prefix=${HOSTNAME_PREFIX}
      export virtual_ip=${VIRTUAL_IP}
      export KUBE_APISERVER_PORT=${KUBE_APISERVER_PORT}
      $(declare -f script::install_haproxy)
      script::install_haproxy \"$apiservers\"
  "
    check::exit_code "$?" "install" "install haproxy on $host"

    # when a VIP is configured, also install keepalived to float it
    if [ -n "${VIRTUAL_IP}" ]; then
      # install keepalived
      local state="MASTER"
      local nic_name=""
      local virtual_ip=${VIRTUAL_IP}
      local host=${host}
      # NOTE(review): cksum-derived priorities (3 digits here, 2 for
      # backups) may exceed keepalived's valid 1-254 range -- TODO confirm
      local priority="$(echo $RANDOM|cksum |cut -c 1-3)"
      log::info "[init]" "Get ${host} NIC Name."
      # NIC that carries the default route; keepalived binds the VIP to it
      command::exec "${host}" "
        ip -4 route get 8.8.8.8 2>/dev/null | head -1 | awk '{print \$5}'
      "
      get::command_output "nic_name" "$?" "exit"

      # the management node is the VRRP MASTER; all others are BACKUPs
      [ ${host} != ${MGMT_NODE} ] && state="BACKUP"
      [ ${host} != ${MGMT_NODE} ] && priority="$(echo $RANDOM|cksum |cut -c 1-2)"
      log::info "[install]" "install keepalived on $host"
      command::exec "${host}" "
        export OFFLINE_TAG=${OFFLINE_TAG:-0}
        export state=${state}
        export nic_name=${nic_name}
        export virtual_ip=${VIRTUAL_IP}
        export host=${host}
        export priority=${priority}
		    export KUBE_APISERVER_PORT=${KUBE_APISERVER_PORT}
        $(declare -f script::install_keepalived)
        script::install_keepalived \"$apiservers\"
      "
      check::exit_code "$?" "install" "install keepalived on $host"
      [ ${host} == ${MGMT_NODE} ] && log::info "[install]" "${virtual_ip}: virtual ip on $host"
    fi
  done
}

function init::upgrade_kernel() {
  # Upgrade the kernel on every node, then reboot them all. No-op unless
  # UPGRADE_KERNEL_TAG=1. The script exits afterwards, so it has to be
  # re-run once the nodes are back up.

  [[ "${UPGRADE_KERNEL_TAG:-}" != "1" ]] && return

  for host in $MASTER_NODES $WORKER_NODES; do
    log::info "[init]" "upgrade kernel: $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0}
      $(declare -f script::upgrade_kernel)
      script::upgrade_kernel
    "
    check::exit_code "$?" "init" "upgrade kernel $host" "exit"
  done
  # reboot is delayed 15s and backgrounded so the ssh session can close cleanly
  for host in $MASTER_NODES $WORKER_NODES; do
    command::exec "${host}" "bash -c 'sleep 15 && reboot' &>/dev/null &"
    check::exit_code "$?" "reboot" "$host: Wait for 15s to restart"
  done
#  log::info "[notice]" "Please execute the command again!"
#  log::access "[command]" "bash $0 ${SCRIPT_PARAMETER// --upgrade-kernel/}"
  exit 0
}

function init::node_config() {
  # Initialize every master/worker node: run script::init_node remotely,
  # write the kubeeasy-managed block into /etc/hosts and set the hostname.
  # Relies on the dynamically-scoped $node_hosts variable built by the
  # caller (init::node / init::add_node).

  local master_index=${master_index:-1}
  local worker_index=${worker_index:-1}
  local KUBE_APISERVER_IP="127.0.0.1"
  local VIRTUAL_IP=${VIRTUAL_IP:-}

  log::info "[get]" "Get $MGMT_NODE InternalIP."
  command::exec "${MGMT_NODE}" "
    ip -4 route get 8.8.8.8 2>/dev/null | head -1 | awk '{print \$7}'
  "
  get::command_output "MGMT_NODE_IP" "$?" "exit"
  log::info "[result]" "MGMT_NODE_IP is ${MGMT_NODE_IP}"

  # the apiserver hostname resolves to the VIP when one is configured
  [ "${VIRTUAL_IP}" != "" ] && KUBE_APISERVER_IP=${VIRTUAL_IP}

  # master
  for host in $MASTER_NODES; do
    log::info "[init]" "master: $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0} KUBE_APISERVER=${KUBE_APISERVER}
      export host=${host} MGMT_NODE=${MGMT_NODE}
      $(declare -f script::init_node)
      script::init_node
   "
    check::exit_code "$?" "init" "init master $host"

    # set the hostname and the managed /etc/hosts block
    log::info "[init]" "master: $host set hostname and hosts"
    command::exec "${host}" "
      cat << EOF >> /etc/hosts
## kubeeasy managed start
${KUBE_APISERVER_IP} $KUBE_APISERVER
$(
  echo -e $node_hosts
)
## kubeeasy managed end
EOF
      sed -i '/## kubeeasy managed start/,/## kubeeasy managed start/{/^$/d}' /etc/hosts
      sed -i '/127.0.0.1 temp/d' /etc/hosts
#      printf \"\\n${KUBE_APISERVER_IP} $KUBE_APISERVER\\n$node_hosts\" >> /etc/hosts
      hostnamectl set-hostname ${HOSTNAME_PREFIX}-master-node${master_index}
    "
    check::exit_code "$?" "init" "$host set hostname and hosts"

    master_index=$((master_index + 1))
  done

  # worker
  for host in $WORKER_NODES; do
    log::info "[init]" "worker: $host"
    command::exec "${host}" "
      export OFFLINE_TAG=${OFFLINE_TAG:-0} KUBE_APISERVER=${KUBE_APISERVER}
      export host=${host} MGMT_NODE=${MGMT_NODE}
      $(declare -f script::init_node)
      script::init_node
    "
    check::exit_code "$?" "init" "init worker $host"

    # set the hostname and the managed /etc/hosts block
    # NOTE(review): the log message says "master" but this is the worker loop
    log::info "[init]" "master: $host set hostname and hosts"
    command::exec "${host}" "
      cat << EOF >> /etc/hosts
## kubeeasy managed start
${KUBE_APISERVER_IP} $KUBE_APISERVER
$(
  echo -e $node_hosts
)
## kubeeasy managed end
EOF
      sed -i '/## kubeeasy managed start/,/## kubeeasy managed start/{/^$/d}' /etc/hosts
      sed -i '/127.0.0.1 temp/d' /etc/hosts
#      printf \"\\n${KUBE_APISERVER_IP} $KUBE_APISERVER\\n$node_hosts\" >> /etc/hosts
      hostnamectl set-hostname ${HOSTNAME_PREFIX}-worker-node${worker_index}
    "
    check::exit_code "$?" "init" "$host set hostname and hosts"
    worker_index=$((worker_index + 1))
  done
}

function init::node() {
  # Initialize all nodes: optional kernel upgrade first, then hostname /
  # hosts configuration via init::node_config.

  init::upgrade_kernel

  # Build the /etc/hosts payload as literal "\n<ip> <hostname>" entries;
  # the "\n" stays a two-character escape here and is expanded later by
  # 'echo -e' in init::node_config (which reads node_hosts via dynamic scope).
  local node_hosts=""
  local i=1
  for h in $MASTER_NODES; do
    node_hosts+="\n$h ${HOSTNAME_PREFIX}-master-node${i}"
    i=$((i + 1))
  done

  i=1
  for h in $WORKER_NODES; do
    node_hosts+="\n$h ${HOSTNAME_PREFIX}-worker-node${i}"
    i=$((i + 1))
  done

  init::node_config
}

function init::add_node() {
  # Initialize nodes being added to an existing cluster: compute the next
  # free hostname indexes, push the new hosts entries to all existing
  # members, then run the standard node initialization.

  init::upgrade_kernel

  local master_index=0
  local worker_index=0
  local node_hosts=""
  # placeholder entry; removed again by the '127.0.0.1 temp' sed below
  local add_node_hosts="127.0.0.1 temp"

  # use the first master of the existing cluster as the management node
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address } {end}' | awk '{print \$1}'
  "
  get::command_output "MGMT_NODE" "$?" "exit"

  # fetch "<ip> <hostname>" lines for all current cluster members
  command::exec "${MGMT_NODE}" "
    kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {.metadata.name }\\n{end}'
  "
  get::command_output "node_hosts" "$?" "exit"

  for host in $MASTER_NODES $WORKER_NODES; do
    if [[ $node_hosts == *"$host"* ]]; then
      log::error "[init]" "The host $host is already in the cluster!"
      exit 1
    fi
  done

  # continue master numbering from the existing masters' name suffixes
  if [[ "$MASTER_NODES" != "" ]]; then
    command::exec "${MGMT_NODE}" "
      kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].metadata.name}' | grep -Eo '[0-9]+\$'
    "
    get::command_output "master_index" "$?" "exit"
    master_index=$((master_index + 1))
    local i=$master_index
    for host in $MASTER_NODES; do
      add_node_hosts="${add_node_hosts}\n${host:-} ${HOSTNAME_PREFIX}-master-node${i}"
      i=$((i + 1))
    done
  fi

  # continue worker numbering from the existing workers' name suffixes
  if [[ "$WORKER_NODES" != "" ]]; then
    command::exec "${MGMT_NODE}" "
      kubectl get node --selector='!node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].metadata.name}' | grep -Eo '[0-9]+\$' || echo 0
    "
    get::command_output "worker_index" "$?" "exit"
    worker_index=$((worker_index + 1))
    local i=$worker_index
    for host in $WORKER_NODES; do
      add_node_hosts="${add_node_hosts}\n${host:-} ${HOSTNAME_PREFIX}-worker-node${i}"
      i=$((i + 1))
    done
  fi
  # append the new nodes' host entries on every existing cluster member
  for host in $(echo -ne "$node_hosts" | awk '{print $1}'); do
    command::exec "${host}" "
#       printf \"$add_node_hosts\" >> /etc/hosts
       sed -E -i \"/## kubeeasy managed end/i $add_node_hosts\" /etc/hosts
       sed -i '/127.0.0.1 temp/d' /etc/hosts
     "
    check::exit_code "$?" "init" "$host add new node hosts"
  done

  node_hosts="${node_hosts}${add_node_hosts}"
  init::node_config
}

function kubeadm::init() {
  # Bootstrap the cluster control plane on the management node: render
  # kubeadm-config.yaml, run 'kubeadm init --upload-certs', install the
  # kubeconfig, remove the master taint from all nodes and enable automatic
  # kubelet certificate approval/renewal.

  local KUBE_PORT="6443"
  local VIRTUAL_IP=${VIRTUAL_IP:-}

  # with a VIP the apiserver is reached through haproxy on its own port
  [ -n "${VIRTUAL_IP}" ] && KUBE_PORT=${KUBE_APISERVER_PORT}
  log::info "[kubeadm init]" "kubeadm init on ${MGMT_NODE}"
  log::info "[kubeadm init]" "${MGMT_NODE}: set kubeadm-config.yaml"
  # NOTE(review): ${kubelet_nodeRegistration} is expected to be set by the
  # caller before this runs; under 'set -o nounset' an unset value aborts.
  command::exec "${MGMT_NODE}" "
    cat << EOF > /etc/kubernetes/kubeadm-config.yaml
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
${kubelet_nodeRegistration}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  minSyncPeriod: 5s
  syncPeriod: 5s
  # ipvs 负载策略
  scheduler: 'wrr'
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
#禁用swap检测
cgroupDriver: systemd
#修改driver为systemd
rotateCertificates: true
# 开启证书轮询
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: $KUBE_VERSION
controlPlaneEndpoint: $KUBE_APISERVER:${KUBE_PORT}
networking:
  dnsDomain: $KUBE_DNSDOMAIN
  podSubnet: $KUBE_POD_SUBNET
  serviceSubnet: $KUBE_SERVICE_SUBNET
imageRepository: $KUBE_IMAGE_REPO
apiServer:
  certSANs:
  - 127.0.0.1
  - $KUBE_APISERVER
$(for h in $MASTER_NODES; do echo "  - $h"; done)
  extraArgs:
    event-ttl: '720h'
    service-node-port-range: '1024-65535'
  extraVolumes:
  - name: localtime
    hostPath: /etc/localtime
    mountPath: /etc/localtime
    readOnly: true
    pathType: File
controllerManager:
  extraArgs:
    bind-address: 0.0.0.0
    node-cidr-mask-size: '24'
    deployment-controller-sync-period: '10s'
    node-monitor-grace-period: '20s'
    pod-eviction-timeout: '2m'
    terminated-pod-gc-threshold: '30'
    experimental-cluster-signing-duration: 87600h
    feature-gates: RotateKubeletServerCertificate=true
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
    pathType: File
scheduler:
  extraArgs:
    bind-address: 0.0.0.0
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    readOnly: true
    pathType: File
$(if [[ "${KUBE_VERSION}" == "1.21.3" ]]; then
echo "dns:
  type: CoreDNS
  imageRepository: swr.cn-north-1.myhuaweicloud.com/kongyu/kubernetes
  imageTag: 1.8.0"
fi)
EOF
"
  check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: set kubeadm-config.yaml" "exit"

  log::info "[kubeadm init]" "${MGMT_NODE}: kubeadm init start."
  command::exec "${MGMT_NODE}" "kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --upload-certs"
  check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: kubeadm init" "exit"

  sleep 3

  log::info "[kubeadm init]" "${MGMT_NODE}: set kube config."
  command::exec "${MGMT_NODE}" "
  	 export VIRTUAL_IP=${VIRTUAL_IP}
     mkdir -p \$HOME/.kube
     sudo cp -f /etc/kubernetes/admin.conf \$HOME/.kube/config
     [ \"${VIRTUAL_IP}\" != \"\" ] && systemctl restart keepalived || true
  "
  check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: set kube config"

  # remove the NoSchedule taint from ALL nodes, masters included
  log::info "[kubeadm init]" "${MGMT_NODE}: delete master taint"
  command::exec "${MGMT_NODE}" "kubectl taint nodes --all node-role.kubernetes.io/master-"
  check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: delete master taint"

  # auto-approve/renew kubelet client & serving certificate CSRs
  command::exec "${MGMT_NODE}" "
    kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --user=kubelet-bootstrap
    kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
    kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
  "
  check::exit_code "$?" "kubeadm init" "Auto-Approve kubelet cert csr"
}

function kubeadm::join() {
  # Join the remaining master and worker nodes to the cluster using a
  # freshly-created bootstrap token plus the uploaded control-plane certs.
  # NOTE(review): reads VIRTUAL_IP and kubelet_nodeRegistration unguarded;
  # under 'set -o nounset' an unset value aborts -- callers are expected
  # to have set them. Confirm.

  local KUBE_PORT="6443"
  [ -n "${VIRTUAL_IP}" ] && KUBE_PORT=${KUBE_APISERVER_PORT}
  log::info "[kubeadm join]" "master: get join token and cert info"
  # discovery hash of the cluster CA certificate
  command::exec "${MGMT_NODE}" "
    openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
  "
  get::command_output "CACRT_HASH" "$?" "exit"

  # certificate key for control-plane join
  # (the "INTI" typo in the variable name is kept for consistency)
  command::exec "${MGMT_NODE}" "
    kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadm-config.yaml 2>> /dev/null | tail -1
  "
  get::command_output "INTI_CERTKEY" "$?" "exit"

  command::exec "${MGMT_NODE}" "
    kubeadm token create
  "
  get::command_output "INIT_TOKEN" "$?" "exit"

  for host in $MASTER_NODES; do
    [[ "${MGMT_NODE}" == "$host" ]] && continue
    log::info "[kubeadm join]" "master $host join cluster."
    command::exec "${host}" "
      cat << EOF > /etc/kubernetes/kubeadm-config.yaml
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: $KUBE_APISERVER:${KUBE_PORT}
    caCertHashes:
    - sha256:${CACRT_HASH:-}
    token: ${INIT_TOKEN}
  timeout: 5m0s
controlPlane:
  certificateKey: ${INTI_CERTKEY:-}
${kubelet_nodeRegistration}
EOF
      kubeadm join --config /etc/kubernetes/kubeadm-config.yaml
    "
    check::exit_code "$?" "kubeadm join" "master $host join cluster"

    log::info "[kubeadm join]" "$host: set kube config."
    command::exec "${host}" "
      mkdir -p \$HOME/.kube
      sudo cp -f /etc/kubernetes/admin.conf \$HOME/.kube/config
    "
    check::exit_code "$?" "kubeadm join" "$host: set kube config"

    # point the apiserver hostname at the VIP (or loopback) on this master
    local KUBE_APISERVER_IP="127.0.0.1"
    [ "${VIRTUAL_IP}" != "" ] && KUBE_APISERVER_IP=${VIRTUAL_IP}
    command::exec "${host}" "
      sed -i 's#.*$KUBE_APISERVER#$KUBE_APISERVER_IP $KUBE_APISERVER#g' /etc/hosts
      kubectl taint nodes --all node-role.kubernetes.io/master-
    "
  done

  for host in $WORKER_NODES; do
    log::info "[kubeadm join]" "worker $host join cluster."
    command::exec "${host}" "
      mkdir -p /etc/kubernetes/manifests
      cat << EOF > /etc/kubernetes/kubeadm-config.yaml
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: $KUBE_APISERVER:${KUBE_PORT}
    caCertHashes:
    - sha256:${CACRT_HASH:-}
    token: ${INIT_TOKEN}
  timeout: 5m0s
${kubelet_nodeRegistration}
EOF
      kubeadm join --config /etc/kubernetes/kubeadm-config.yaml
    "
    check::exit_code "$?" "kubeadm join" "worker $host join cluster"

    # label freshly-joined (role '<none>') nodes as workers
    log::info "[kubeadm join]" "set $host worker node role."
    command::exec "${MGMT_NODE}" "
	  export VIRTUAL_IP=${VIRTUAL_IP}
      kubectl get node --selector='!node-role.kubernetes.io/master' | grep '<none>' | awk '{print \"kubectl label node \" \$1 \" node-role.kubernetes.io/worker= --overwrite\" }' | bash
	  [ \"${VIRTUAL_IP}\" != \"\" ] && systemctl restart keepalived || true
    "
    check::exit_code "$?" "kubeadm join" "set $host worker node role"
  done
}

function kube::wait() {
  # Wait (with up to 20 retries of 'kubectl wait') until the selected
  # resource in a namespace reports condition=ready.
  #
  # $1 - display name used for logging
  # $2 - namespace
  # $3 - resource type (e.g. pods)
  # $4 - optional label selector
  #
  # Returns the wait status so callers can react to timeouts.

  local app=$1
  local namespace=$2
  local resource=$3
  local selector=${4:-}

  sleep 3
  log::info "[waiting]" "waiting $app"
  # utils::retry is shipped to the management node via 'declare -f'
  command::exec "${MGMT_NODE}" "
    $(declare -f utils::retry)
    utils::retry 20 kubectl wait --namespace ${namespace} \
    --for=condition=ready ${resource} \
    --selector=$selector \
    --timeout=60s
  "
  local status="$?"
  check::exit_code "$status" "waiting" "$app ${resource} ready"
  return "$status"
}

function kube::apply() {
  # kubectl-apply a manifest, retrying up to 6 times on failure.
  #
  # $1 - path to a manifest file; when it does not exist on the management
  #      node, $1 is only a display name and $2 holds inline YAML content
  # $2 - optional inline YAML applied via a heredoc when $1 is not a file
  #
  # Returns the apply status so callers can react to failures.

  local file=$1
  # display name: last path component of $1
  local info=$(echo $1  | awk -F "/" '{print $NF}')
  log::info "[apply]" "apply $info file"
  command::exec "${MGMT_NODE}" "
    $(declare -f utils::retry)
    if [ -f \"$file\" ]; then
      utils::retry 6 kubectl apply --wait=true --timeout=10s -f \"$file\"
    else
      utils::retry 6 \"cat <<EOF | kubectl apply --wait=true --timeout=10s -f -
\$(printf \"%s\" \"${2:-}\")
EOF
      \"
    fi
  "
  local status="$?"
  check::exit_code "$status" "apply" "apply $info file"
  return "$status"
}

function kube::status() {
  # Print the cluster status (nodes and all pods) gathered from the
  # management node.

  sleep 5
  log::info "[cluster]" "kubernetes cluster status"
  command::exec "${MGMT_NODE}" "
     echo '+ kubectl get node -o wide'
     kubectl get node -o wide
     echo '+ kubectl get pods -A -o wide'
     kubectl get pods -A -o wide
     echo ''
  " && printf "%s \n" "${COMMAND_OUTPUT}"

  # NOTE(review): this best-effort curl posts the full cluster status to a
  # hardcoded DingTalk webhook (access token embedded in the URL below).
  # That is a telemetry/data-leak and credential-in-source concern --
  # confirm it is intended, and move the token out of the script if kept.
  curl 'https://oapi.dingtalk.com/robot/send?access_token=e83fae47f7d1f7b9f2ffc76f3a35fb8c407c4551068501e4e648d8560d744760' \
   -H 'Content-Type: application/json' \
   -d '
  {"msgtype": "text",
    "text": {
        "content": "'"kubeeasy install info: ${COMMAND_OUTPUT}"'"
     }
  }' &> /dev/null || true

}

function config::haproxy_backend() {
  # Add or remove master apiserver backend entries in haproxy.cfg on every
  # worker node, then validate and reload haproxy.
  #
  # $1 - "add" (default) or anything else to delete

  local action=${1:-add}
  local action_cmd=""
  local master_nodes

  if [[ "$MASTER_NODES" == "" || "$MASTER_NODES" == "127.0.0.1" ]]; then
    return
  fi

  # current master InternalIPs, used to validate delete requests
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
  "
  get::command_output "master_nodes" "$?" "exit"

  for m in $MASTER_NODES; do
    if [[ "${action}" == "add" ]]; then
      # last IP octet makes the backend server name unique
      num=$(echo "${m}" | awk -F'.' '{print $4}')
      action_cmd="${action_cmd}\necho \"    server apiserver${num} ${m}:6443 check\" >> /etc/haproxy/haproxy.cfg"
    else
      # NOTE(review): 'return' (not 'continue') when a host isn't a current
      # master silently skips all remaining hosts -- confirm intended
      [[ "${master_nodes}" == *"${m}"* ]] || return
      action_cmd="${action_cmd}\n sed -i -e \"/${m}/d\" /etc/haproxy/haproxy.cfg"
    fi
  done

  # worker InternalIPs: the hosts whose haproxy.cfg is edited
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='!node-role.kubernetes.io/master' -o jsonpath='{\$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
  "
  get::command_output "worker_nodes" "$?"

  for host in ${worker_nodes:-}; do
    log::info "[config]" "worker ${host}: ${action} apiserver from haproxy"
    command::exec "${host}" "
      $(echo -ne "${action_cmd}")
      haproxy -c -f /etc/haproxy/haproxy.cfg && systemctl reload haproxy
    "
    check::exit_code "$?" "config" "worker ${host}: ${action} apiserver(${m}) from haproxy"
  done
}

function config::etcd_snapshot() {
  # Patch the etcd-snapshot CronJob so each run produces one backup job
  # per master node (completions/parallelism = current master count).

  command::exec "${MGMT_NODE}" "
    count=\$(kubectl get node --selector='node-role.kubernetes.io/master' --no-headers | wc -l)
    kubectl -n kube-system patch cronjobs etcd-snapshot --patch \"
spec:
  jobTemplate:
    spec:
      completions: \${count:-1}
      parallelism: \${count:-1}
\"
  "
  check::exit_code "$?" "config" "etcd-snapshot completions options"
}

function get::command_output() {
  # Copy the last remote command's captured output (global COMMAND_OUTPUT)
  # into a variable named by the caller.
  #
  # $1 - name of the variable to assign
  # $2 - exit status of the command that produced COMMAND_OUTPUT
  # $3 - optional "exit": abort the script when the command failed
  #
  # Returns the original status so callers can chain on it.

  local app="$1"
  local status="$2"
  local is_exit="${3:-}"

  if [[ "$status" == "0" && "${COMMAND_OUTPUT}" != "" ]]; then
    log::info "[result]" "get $app value succeeded."
    # printf -v assigns without eval, so quotes, backticks or $() in the
    # captured remote output can no longer break out and execute locally.
    printf -v "$app" '%s' "${COMMAND_OUTPUT}"
  else
    log::error "[result]" "get $app value failed."
    [[ "$is_exit" == "exit" ]] && exit "$status"
  fi
  return "$status"
}

function add::network() {
  # Install the CNI network plugin selected by KUBE_NETWORK
  # (flannel / calico / cilium) and wait for it to become ready.

  # NOTE(review): these fail under 'set -o nounset' if OFFLINE_TAG/TMP_DIR
  # are unset -- callers are expected to have set them; confirm.
  local OFFLINE_TAG=${OFFLINE_TAG}
  local TMP_DIR=${TMP_DIR}

  if [[ "$KUBE_NETWORK" == "flannel" ]]; then
    log::info "[network]" "add flannel"

    local flannel_file="${OFFLINE_DIR}/manifests/kube-flannel.yml"
    utils::download_file "https://cdn.jsdelivr.net/gh/coreos/flannel@v${FLANNEL_VERSION}/Documentation/kube-flannel.yml" "${flannel_file}"

    # patch the pod subnet and backend type into the manifest; vxlan
    # additionally gets DirectRouting enabled
    command::exec "${MGMT_NODE}" "
      sed -i 's#10.244.0.0/16#$KUBE_POD_SUBNET#g' \"${flannel_file}\"
      sed -i 's#\"Type\": \"vxlan\"#\"Type\": \"${KUBE_FLANNEL_TYPE}\"#g' \"${flannel_file}\"
      if [[ \"${KUBE_FLANNEL_TYPE}\" == \"vxlan\" ]]; then
        sed -i 's#\"Type\": \"vxlan\"#\"Type\": \"vxlan\", \"DirectRouting\": true#g' \"${flannel_file}\"
      fi
    "
    check::exit_code "$?" "flannel" "change flannel pod subnet"
    kube::apply "${flannel_file}"
    kube::wait "flannel" "kube-system" "pods" "app=flannel"

  elif [[ "$KUBE_NETWORK" == "calico" ]]; then
    if [ "${OFFLINE_TAG}" == "1" ]; then
      log::info "[network]" "add calico network"
      local calico_file="${OFFLINE_DIR}/manifests/calico.yaml"
      command::exec "${MGMT_NODE}" "
        sed -i 's#10.244.0.0/16#$KUBE_POD_SUBNET#g' \"${calico_file}\"
      "
      check::exit_code "$?" "calico" "change calico pod subnet"
      kube::apply "${OFFLINE_DIR}/manifests/calico.yaml"
      kube::wait "calico-kube-controllers" "kube-system" "pods" "k8s-app=calico-kube-controllers"
      kube::wait "calico-node" "kube-system" "pods" "k8s-app=calico-node"
    else
      # no offline package given: download the manifest and install online
      local calico_file="${TMP_DIR}/kubeeasy/manifests/calico.yaml"
      utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/manifests/calico.yaml" "${calico_file}"
      log::info "[network]" "add calico network"
      command::exec "${MGMT_NODE}" "
        sed -i 's#10.244.0.0/16#$KUBE_POD_SUBNET#g' \"${calico_file}\"
      "
      check::exit_code "$?" "calico" "change calico pod subnet"
      kube::apply "${calico_file}"
      kube::wait "calico-kube-controllers" "kube-system" "pods" "k8s-app=calico-kube-controllers"
      kube::wait "calico-node" "kube-system" "pods" "k8s-app=calico-node"
    fi



  elif [[ "$KUBE_NETWORK" == "cilium" ]]; then
    log::info "[network]" "add cilium"

    local cilium_file="${OFFLINE_DIR}/manifests/cilium.yml"
    local cilium_hubble_file="${OFFLINE_DIR}/manifests/cilium_hubble.yml"
    utils::download_file "https://cdn.jsdelivr.net/gh/cilium/cilium@${CILIUM_VERSION}/install/kubernetes/quick-install.yaml" "${cilium_file}"
    utils::download_file "https://cdn.jsdelivr.net/gh/cilium/cilium@${CILIUM_VERSION}/install/kubernetes/quick-hubble-install.yaml" "${cilium_hubble_file}"

    # resolve the node list from the cluster when not passed explicitly
    local all_node=""
    if [[ "${MASTER_NODES}" == "" && "${WORKER_NODES}" == "" ]]; then
      command::exec "${MGMT_NODE}" "
        kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {end}'
      "
      get::command_output "all_node" "$?"
    else
      all_node="${MASTER_NODES} ${WORKER_NODES}"
    fi

    # cilium requires the BPF filesystem mounted on every node
    for host in $all_node; do
      command::exec "${host}" "mount bpffs -t bpf /sys/fs/bpf"
      check::exit_code "$?" "network" "${host}: mount bpf filesystem"
    done

    command::exec "${MGMT_NODE}" "
      sed -i \"s#10.0.0.0/8#${KUBE_POD_SUBNET}#g\" \"${cilium_file}\"
    "
    kube::apply "${cilium_file}"
    kube::wait "cilium-node" "kube-system" "pods" "k8s-app=cilium"
    kube::wait "cilium-operator" "kube-system" "pods" "name=cilium-operator"
    kube::apply "${cilium_hubble_file}"
    kube::wait "hubble-relay" "kube-system" "pods" "k8s-app=hubble-relay"

    # expose the hubble UI through the configured ingress controller
    log::info "[monitor]" "add hubble-ui ingress"
    kube::apply "hubble-ui ingress" "
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: hubble-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: ${KUBE_INGRESS}
spec:
  rules:
  - host: hubble-ui.cluster.local
    http:
      paths:
      - backend:
          serviceName: hubble-ui
          servicePort: 80
    "
    # shellcheck disable=SC2181
    if [[ "$?" == "0" ]]; then
      get::ingress_conn
      log::access "[ingress]" "curl -H 'Host:hubble-ui.cluster.local' http://${INGRESS_CONN}"
    fi
  else
    log::warning "[network]" "No $KUBE_NETWORK config."
  fi
}

function add::storage() {
  # Deploy the storage provisioner selected by $KUBE_STORAGE.
  # Supported values: nfs (nfs + openebs local hostpath), openebs, longhorn.
  # Manifests are used from the offline/tmp dirs when already present,
  # otherwise fetched through ${GITHUB_PROXY}.
  local STORAGE_YAML_DIR="${TMP_DIR}/k8s-storage/deploy/"
  local NFS_YAML="${OFFLINE_DIR}/manifests/nfs-provisioner.yaml"
  local LOCAL_YAML="${OFFLINE_DIR}/manifests/openebs-provisioner-hostpath.yaml"
  local LONGHORN_YAML="${TMP_DIR}/k8s-storage/deploy/longhorn/longhorn.yaml"
  local OPENEBS_YAML="${TMP_DIR}/k8s-storage/deploy/openebs/openebs-operator.yaml"

  if [[ "$KUBE_STORAGE" == "nfs" ]]; then
    # nfs branch installs both nfs-server-provisioner and the openebs
    # local hostpath provisioner.
    log::info "[storage]" "add nfs and openebs local storage class"
    [[ -f "${NFS_YAML}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/k8s-storage/deploy/nfs/nfs-provisioner.yaml" "${NFS_YAML}"
    # FIX: fetch from raw.githubusercontent.com — the previous
    # github.com/.../blob/... URL serves an HTML page, not YAML, so
    # 'kubectl apply' would fail on the downloaded file.
    [[ -f "${LOCAL_YAML}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/k8s-storage/deploy/local/openebs-provisioner-hostpath.yaml" "${LOCAL_YAML}"
    kube::apply "${NFS_YAML}"
    kube::wait "nfs-server-provisioner" "storage" "pods" "app=nfs-server-provisioner"
    kube::apply "${LOCAL_YAML}"
    kube::wait "openebs-provisioner-hostpath" "storage" "pods" "name=openebs-provisioner-hostpath"

  elif [[ "$KUBE_STORAGE" == "openebs" ]]; then
    # openebs operator install.
    log::info "[storage]" "add openebs storage class"
    [[ -f "${OPENEBS_YAML}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/k8s-storage/deploy/openebs/openebs-operator.yaml" "${OPENEBS_YAML}"
    kube::apply "${OPENEBS_YAML}"
    kube::wait "openebs-provisioner" "openebs" "pods" "name=openebs-provisioner"
    sleep 5
    log::info "[cluster]" "kubernetes storage status"
    command::exec "${MGMT_NODE}" "
       echo '+ kubectl get pod -n openebs -o wide'
       kubectl get pod -n openebs -o wide
       echo '+ kubectl get storageclasses.storage.k8s.io'
       kubectl get storageclasses.storage.k8s.io
       echo ''
    " && printf "%s \n" "${COMMAND_OUTPUT}"

  elif [[ "$KUBE_STORAGE" == "longhorn" ]]; then
    # longhorn distributed block storage install.
    log::info "[storage]" "add longhorn storage class"
    [[ -f "${LONGHORN_YAML}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/k8s-storage/deploy/longhorn/longhorn.yaml" "${LONGHORN_YAML}"
    kube::apply "${LONGHORN_YAML}"
    kube::wait "csi-provisioner" "longhorn-system" "pods" "app=csi-provisioner"
    sleep 5
    log::info "[cluster]" "kubernetes storage status"
    command::exec "${MGMT_NODE}" "
       echo '+ kubectl get pod -n longhorn-system -o wide'
       kubectl get pod -n longhorn-system -o wide
       echo '+ kubectl get storageclasses.storage.k8s.io'
       kubectl get storageclasses.storage.k8s.io
       echo ''
    " && printf "%s \n" "${COMMAND_OUTPUT}"

  else
    log::warning "[storage]" "No $KUBE_STORAGE config."
  fi
}

function add::ui() {
  # Deploy the web UI selected by $KUBE_UI (kuboard | kubesphere).
  # The :- default keeps 'set -o nounset' from aborting when OFFLINE_TAG
  # is not exported by the caller.
  local OFFLINE_TAG=${OFFLINE_TAG:-}
  local TMP_DIR=${TMP_DIR}

  if [[ "$KUBE_UI" == "kuboard" ]]; then
    log::info "[ui]" "add kuboard"
    local kuboard_file="${TMP_DIR}/kubeeasy/manifests/kuboard-v2.yaml"
    local metrics_file="${TMP_DIR}/kubeeasy/manifests/metrics-server.yaml"
    [[ -f "${kuboard_file}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/manifests/kuboard-v2.yaml" "${kuboard_file}"
    [[ -f "${metrics_file}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/manifests/metrics-server.yaml" "${metrics_file}"
    # FIX: apply the same files that were just checked/downloaded; the old
    # code applied ${OFFLINE_DIR}/manifests/... which is not populated on a
    # pure online install.
    kube::apply "${kuboard_file}"
    kube::apply "${metrics_file}"
    kube::wait "kuboard" "kube-system" "pods" "k8s.kuboard.cn/name=kuboard"
    kube::wait "metrics-server" "kube-system" "pods" "k8s-app=metrics-server"
    local dashboard_token=""
    # Read the kuboard admin ServiceAccount token and keep a copy on the
    # management node in ~/k8s-token.txt.
    command::exec "${MGMT_NODE}" "
      kubectl -n kube-system get secret \$(kubectl -n kube-system get secret | grep kuboard-user | awk '{print \$1}') -o go-template='{{.data.token}}' | base64 -d | tee -a ~/k8s-token.txt
    "
    get::command_output "dashboard_token" "$?"
    # Prefer the virtual IP for the access URL when HA is configured.
    local node_ip=${MGMT_NODE}
    [ -n "${VIRTUAL_IP}" ] && node_ip=${VIRTUAL_IP}
    log::access "[kuboard]" "http://${node_ip}:32567"
    log::access "[Token]" "${dashboard_token}"
  elif [[ "$KUBE_UI" == "kubesphere" ]]; then
    log::info "[ui]" "add kubesphere"
    local cluster_configuration="${TMP_DIR}/kubesphere/deploy/cluster-configuration.yaml"
    local kubesphere_installer="${TMP_DIR}/kubesphere/deploy/kubesphere-installer.yaml"
    [[ -f "${cluster_configuration}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/kubesphere/deploy/cluster-configuration.yaml" "${cluster_configuration}"
    [[ -f "${kubesphere_installer}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/kubesphere/deploy/kubesphere-installer.yaml" "${kubesphere_installer}"
    kube::apply "${kubesphere_installer}"
    kube::apply "${cluster_configuration}"
    # Give ks-installer time to create its namespaces before waiting.
    sleep 60
    kube::wait "ks-installer" "kubesphere-system" "pods" "app=ks-install"
    kube::wait "kubesphere-system" "kubesphere-system" "pods --all"
    kube::wait "kubesphere-controls-system" "kubesphere-controls-system" "pods --all"
    kube::wait "kubesphere-monitoring-system" "kubesphere-monitoring-system" "pods --all"
    # shellcheck disable=SC2181
    if [[ "$?" == "0" ]]; then
      # Advertise the console on the first master's InternalIP.
      command::exec "${MGMT_NODE}" "
        kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address } {end}' | awk '{print \$1}'
      "
      get::command_output "node_ip" "$?"
      log::access "[kubesphere]" "Console: http://${node_ip:-NodeIP}:30880;  Account: admin; Password: P@88w0rd"
    fi
  else
    log::warning "[ui]" "No $KUBE_UI config."
  fi
}

function add::virt() {
  # Deploy the virtualization stack selected by $KUBE_VIRT.
  # Supported: kubevirt (operator + CR + multus CNI); "kata" is a stub.
  local TMP_DIR=${TMP_DIR}

  if [[ "$KUBE_VIRT" == "kubevirt" ]]; then
    log::info "[virt]" "add kubevirt"
    local kubevirt_operator="${TMP_DIR}/kubevirt/deploy/kubevirt-operator.yaml"
    local kubevirt_cr="${TMP_DIR}/kubevirt/deploy/kubevirt-cr.yaml"
    local multus_daemonset="${TMP_DIR}/kubevirt/deploy/multus-daemonset.yml"
    local multus_cni_macvlan="${TMP_DIR}/kubevirt/deploy/multus-cni-macvlan.yaml"
    [[ -f "${kubevirt_operator}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/kubevirt/deploy/kubevirt-operator.yaml" "${kubevirt_operator}"
    [[ -f "${kubevirt_cr}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/kubevirt/deploy/kubevirt-cr.yaml" "${kubevirt_cr}"
    [[ -f "${multus_daemonset}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/kubevirt/deploy/multus-daemonset.yml" "${multus_daemonset}"
    [[ -f "${multus_cni_macvlan}" ]] || utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/kongyu666/kubeeasy/main/kubevirt/deploy/multus-cni-macvlan.yaml" "${multus_cni_macvlan}"

    # Operator first, then the KubeVirt CR; the CR spawns api/controller/
    # handler pods, so give the operator time before waiting on them.
    kube::apply "${kubevirt_operator}"
    kube::wait "kubevirt" "kubevirt" "pods" "kubevirt.io=virt-operator"
    kube::apply "${kubevirt_cr}"
    sleep 30
    kube::wait "kubevirt" "kubevirt" "pods" "kubevirt.io=virt-api"
    kube::wait "kubevirt" "kubevirt" "pods" "kubevirt.io=virt-controller"
    kube::wait "kubevirt" "kubevirt" "pods" "kubevirt.io=virt-handler"

    # Multus secondary-network CNI plus a macvlan NetworkAttachmentDefinition.
    kube::apply "${multus_daemonset}"
    kube::wait "kube-multus" "kube-system" "pods" "app=multus"
    kube::apply "${multus_cni_macvlan}"

    log::info "[cluster]" "kubernetes kubevirt status"
    command::exec "${MGMT_NODE}" "
       echo '+ kubectl get pod -n kubevirt -o wide'
       kubectl get pod -n kubevirt -o wide
       echo ''
    " && printf "%s \n" "${COMMAND_OUTPUT}"

  elif [[ "$KUBE_VIRT" == "kata" ]]; then
    echo "none."
  else
    # FIX: report the virt setting, not the UI one (was a copy/paste error
    # logging "[ui] No $KUBE_UI config.").
    log::warning "[virt]" "No $KUBE_VIRT config."
  fi
}

function add::ops() {
  # Post-install operations: spread coredns replicas across nodes and set up
  # a periodic etcd snapshot CronJob, then trigger one immediate backup.

  local master_num
  # Number of whitespace-separated master addresses in MASTER_NODES.
  master_num=$(awk '{print NF}' <<<"${MASTER_NODES}")

  log::info "[ops]" "add anti-affinity strategy to coredns"
  # Soft (preferred) pod anti-affinity so coredns replicas land on distinct
  # nodes when possible.
  command::exec "${MGMT_NODE}" """
    kubectl -n kube-system patch deployment coredns --patch '{\"spec\": {\"template\": {\"spec\": {\"affinity\":{\"podAntiAffinity\":{\"preferredDuringSchedulingIgnoredDuringExecution\":[{\"weight\":100,\"podAffinityTerm\":{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"k8s-app\",\"operator\":\"In\",\"values\":[\"kube-dns\"]}]},\"topologyKey\":\"kubernetes.io/hostname\"}}]}}}}}}' --record
  """
  check::exit_code "$?" "ops" "add anti-affinity strategy to coredns"

  log::info "[ops]" "add etcd snapshot cronjob"
  # Discover the etcd image used by this cluster so the snapshot container
  # ships a matching etcdctl.
  command::exec "${MGMT_NODE}" "
    kubeadm config images list --config=/etc/kubernetes/kubeadm-config.yaml 2>/dev/null | grep etcd:
  "
  get::command_output "etcd_image" "$?"
  # NOTE(review): this overwrites the master_num computed from MASTER_NODES
  # above with the live master count from kubectl — confirm the first
  # computation is still needed.
  command::exec "${MGMT_NODE}" "
    kubectl get node --selector='node-role.kubernetes.io/master' --no-headers | wc -l
  "
  get::command_output "master_num" "$?"

  # Fall back to a single snapshot pod when the count could not be read.
  [[ "${master_num:-0}" == "0" ]] && master_num=1
  # One snapshot pod per master every 6 hours, forced onto distinct masters
  # via required anti-affinity; backups older than 30 days are pruned.
  # The \\\\\\\$(date ...) escaping survives the multiple rounds of shell
  # expansion between here and the pod's /bin/sh.
  kube::apply "etcd-snapshot" """
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: etcd-snapshot
  namespace: kube-system
spec:
  schedule: '0 */6 * * *'
  successfulJobsHistoryLimit: 3
  suspend: false
  concurrencyPolicy: Allow
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      backoffLimit: 6
      parallelism: ${master_num}
      completions: ${master_num}
      template:
        metadata:
          labels:
            app: etcd-snapshot
        spec:
          affinity:
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchExpressions:
                  - key: app
                    operator: In
                    values:
                    - etcd-snapshot
                topologyKey: 'kubernetes.io/hostname'
          containers:
          - name: etcd-snapshot
            image: ${etcd_image:-${KUBE_IMAGE_REPO}/etcd:3.4.13-0}
            imagePullPolicy: IfNotPresent
            args:
            - -c
            - etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt
              --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt --key=/etc/kubernetes/pki/etcd/healthcheck-client.key
              snapshot save /backup/etcd-snapshot-\\\\\\\$(date +%Y-%m-%d_%H:%M:%S_%Z).db
              && echo 'delete old backups' && find /backup -type f -mtime +30 -exec rm -fv {} \\; || echo error
            command:
            - /bin/sh
            env:
            - name: ETCDCTL_API
              value: '3'
            resources: {}
            terminationMessagePath: /dev/termination-log
            terminationMessagePolicy: File
            volumeMounts:
            - name: etcd-certs
              mountPath: /etc/kubernetes/pki/etcd
              readOnly: true
            - name: backup
              mountPath: /backup
            - name: etc
              mountPath: /etc
            - name: bin
              mountPath: /usr/bin
            - name: lib64
              mountPath: /lib64
          dnsPolicy: ClusterFirst
          hostNetwork: true
          nodeSelector:
            node-role.kubernetes.io/master: ''
          tolerations:
          - effect: NoSchedule
            operator: Exists
          restartPolicy: OnFailure
          schedulerName: default-scheduler
          securityContext: {}
          terminationGracePeriodSeconds: 30
          volumes:
          - name: etcd-certs
            hostPath:
              path: /etc/kubernetes/pki/etcd
              type: DirectoryOrCreate
          - name: backup
            hostPath:
              path: /var/lib/etcd/backups
              type: DirectoryOrCreate
          - name: etc
            hostPath:
              path: /etc
          - name: bin
            hostPath:
              path: /usr/bin
          - name: lib64
            hostPath:
              path: /lib64
"""
  # shellcheck disable=SC2181
  [[ "$?" == "0" ]] && log::access "[ops]" "etcd backup directory: /var/lib/etcd/backups"
  # Kick off one snapshot right away and wait for the job to complete.
  command::exec "${MGMT_NODE}" "
    jobname=\"etcd-snapshot-$(date +%s)\"
    kubectl create job --from=cronjob/etcd-snapshot \${jobname} -n kube-system && \
    kubectl wait --for=condition=complete job/\${jobname} -n kube-system
  "
  check::exit_code "$?" "ops" "trigger etcd backup"
}

function reset::node() {
  # Destructively reset a single node back to a pre-install state:
  #   - kubeadm reset (auto-detecting the CRI socket)
  #   - stop/uninstall kubelet, docker, containerd, cri-o, runc
  #   - stop/uninstall haproxy and keepalived (HA components)
  #   - wipe kubernetes/CNI/etcd/docker data dirs and kubeeasy-managed
  #     config blocks, restore the hostname
  #   - flush ipvs/iptables and delete CNI network interfaces
  # WARNING: irreversibly removes packages and data on the target host.
  # $1 - host (IP) to reset.
  # NOTE(review): inside the remote script the '-e "/^$/d"' sed expression
  # uses unescaped quotes, so it reaches the remote shell unquoted (relying
  # on '$/' not being a valid expansion) — confirm this is intended.

  local host=$1
  log::info "[reset]" "node $host"
  # One best-effort remote script ('set +ex'): each cleanup step may fail
  # independently without aborting the rest.
  command::exec "${host}" "
    set +ex
    cri_socket=\"\"
    [ -S /var/run/crio/crio.sock ] && cri_socket=\"--cri-socket /var/run/crio/crio.sock\"
    [ -S /run/containerd/containerd.sock ] && cri_socket=\"--cri-socket /run/containerd/containerd.sock\"
    kubeadm reset -f \$cri_socket
    [ -f \"\$(which kubelet)\" ] && { systemctl stop kubelet; find /var/lib/kubelet | xargs -n 1 findmnt -n -o TARGET -T | sort | uniq | xargs -r umount -v; yum remove -y kubeadm kubelet kubectl; }
    [ -d /etc/kubernetes ] && rm -rf /etc/kubernetes/* /var/lib/kubelet/* /var/lib/etcd/* \$HOME/.kube /etc/cni/net.d/* /var/lib/dockershim/* /var/lib/cni/* /var/run/kubernetes/*
    docker_data_dir=\$(cat /usr/lib/systemd/system/docker.service | grep graph | awk -F 'graph' '{print \$2}' | sed \"s#=##g\")
    [ -f \"\$(which docker)\" ] && { docker rm -f -v \$(docker ps | grep kube | awk '{print \$1}'); systemctl stop docker; rm -rf \$HOME/.docker /etc/docker/* /var/lib/docker/* \${docker_data_dir}; yum remove -y docker docker-ce-cli; rm -rf \${docker_data_dir};}
    [ -f \"\$(which containerd)\" ] && { crictl rm \$(crictl ps -a -q); systemctl stop containerd; rm -rf /etc/containerd/* /var/lib/containerd/*; yum remove -y containerd.io; }
    [ -f \"\$(which crio)\" ] && { crictl rm \$(crictl ps -a -q); systemctl stop crio; rm -rf /etc/crictl.yaml /etc/crio/* /var/run/crio/*; yum remove -y cri-o; }
    [ -f \"\$(which runc)\" ] && { find /run/containers/ /var/lib/containers/ | xargs -n 1 findmnt -n -o TARGET -T | sort | uniq | xargs -r umount -v; rm -rf /var/lib/containers/* /var/run/containers/*; yum remove -y runc; }
    [ -f \"\$(which haproxy)\" ] && { systemctl stop haproxy; rm -rf /etc/haproxy/*; yum remove -y haproxy; }
    [ -f \"\$(which keepalived)\" ] && { systemctl stop keepalived; rm -rf /etc/keepalived/*; yum remove -y keepalived; }
    hostnamectl set-hostname kubeeasy
    sed -i -e \"/$KUBE_APISERVER/d\" -e '/worker/d' -e '/master/d' -e "/^$/d" /etc/hosts
    rm -rf /etc/profile.d/ssh-login-info.sh
    sed -i '/## kubeeasy managed start/,/## kubeeasy managed end/d' /etc/hosts /etc/security/limits.conf /etc/systemd/system.conf /etc/bashrc /etc/rc.local /etc/audit/rules.d/audit.rules
    
    [ -d /var/lib/elasticsearch ] && rm -rf /var/lib/elasticsearch/*
    [ -d /var/lib/longhorn ] &&  rm -rf /var/lib/longhorn/*
    [ -d \"${OFFLINE_DIR:-/tmp/abc}\" ] && rm -rf \"${OFFLINE_DIR:-/tmp/abc}\"

    ipvsadm --clear
    iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
    for int in kube-ipvs0 cni0 docker0 dummy0 flannel.1 cilium_host cilium_net cilium_vxlan lxc_health nodelocaldns 
    do
      [ -d /sys/class/net/\${int} ] && ip link delete \${int}
    done
    modprobe -r ipip
    echo done.
  "
  check::exit_code "$?" "reset" "$host: reset"

}

function reset::cluster() {
  # Reset the whole cluster: every node known to kubectl plus any host in
  # MASTER_NODES/WORKER_NODES, then reboot them all.

  # Interactive safety gate before destroying anything.
  while true; do
    # FIX: -r keeps backslashes literal in the typed answer (ShellCheck
    # SC2162); also dropped the unreachable 'break' after 'exit 0'.
    read -rp "Are you sure to reset this cluster? [yes/no]:" result
    case $result in
      yes | y | YES | Y)
        break
        ;;
      no | n | NO | N)
        exit 0
        ;;
      *)
        echo "yes/no ?"
        ;;
    esac
  done

  local all_node=""

  # Best-effort discovery of node InternalIPs from the running cluster.
  command::exec "${MGMT_NODE}" "
    kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {end}'
  "
  get::command_output "all_node" "$?"
  log::info "[reset]" "all_node is ${all_node}"

  # Merge discovered and configured hosts, de-duplicating while keeping the
  # first-seen order.
  all_node=$(echo "${WORKER_NODES} ${MASTER_NODES} ${all_node}" | awk '{for (i=1;i<=NF;i++) if (!a[$i]++) printf("%s%s",$i,FS)}')

  for host in $all_node; do
    reset::node "$host"
  done

  # Reboot each node once its reset has been issued.
  for host in $all_node; do
    command::exec "${host}" "bash -c 'sleep 15 && reboot' &>/dev/null &"
    check::exit_code "$?" "reboot" "$host: Wait for 15s to restart"
  done

}

function reset::cluster_force() {
  # Force-reset only the nodes listed in MASTER_NODES/WORKER_NODES (no
  # kubectl discovery), then reboot them.

  # Interactive safety gate before destroying anything.
  while true; do
    # FIX: -r keeps backslashes literal in the typed answer (ShellCheck
    # SC2162); also dropped the unreachable 'break' after 'exit 0'.
    read -rp "Are you sure to reset this node? [yes/no]:" result
    case $result in
      yes | y | YES | Y)
        break
        ;;
      no | n | NO | N)
        exit 0
        ;;
      *)
        ;;
    esac
  done

  local all_node=""
  local HOSTNAME_PREFIX=${HOSTNAME_PREFIX:-}

  # (kept for reference) former discovery of nodes via /etc/hosts:
#  command::exec "${MGMT_NODE}" "
#    [ -n \"$(cat /etc/hosts | egrep ${HOSTNAME_PREFIX})\" ] && cat /etc/hosts | egrep ${HOSTNAME_PREFIX} | awk '{print \$1}' 2> /dev/null ; echo 127.0.0.1
#  "
#  get::command_output "all_node" "$?"
#
#  all_node=$(echo ${all_node} | sed 's#127.0.0.1##g')
#
#  [ "${all_node}" == "" -o "${all_node}" == "127.0.0.1" ] && all_node=$(echo "${WORKER_NODES} ${MASTER_NODES}" | awk '{for (i=1;i<=NF;i++) if (!a[$i]++) printf("%s%s",$i,FS)}')

  # De-duplicate the configured host list while keeping first-seen order.
  all_node=$(echo "${WORKER_NODES} ${MASTER_NODES}" | awk '{for (i=1;i<=NF;i++) if (!a[$i]++) printf("%s%s",$i,FS)}')
  log::info "[reset]" "reset node is ${all_node}"
  for host in $all_node; do
    reset::node "$host"
  done

  # Reboot each node once its reset has been issued.
  for host in $all_node; do
    command::exec "${host}" "bash -c 'sleep 15 && reboot' &>/dev/null &"
    check::exit_code "$?" "reboot" "$host: Wait for 15s to restart"
  done

}

function offline::load() {
  # Distribute the unpacked offline bundle to every node of the given role
  # and install its packages there.
  # $1 - role: "master" or "worker" (selects MASTER_NODES/WORKER_NODES).
  # With UPGRADE_KERNEL_TAG=1 the transfer uses scp and only the kernel
  # RPMs are installed; otherwise rsync + the full package set.

  local role="${1:-}"
  local hosts=""
  local UPGRADE_KERNEL_TAG="${UPGRADE_KERNEL_TAG:-0}"

  if [[ "${role}" == "master" ]]; then
    hosts="${MASTER_NODES}"
  elif [[ "${role}" == "worker" ]]; then
    hosts="${WORKER_NODES}"
  fi

  for host in ${hosts}; do
    # Ship the offline bundle to the node.
    log::info "[offline]" "${role} ${host}: load offline file"
    # Tune ssh on the target for faster, prompt-free connections.
    command::exec "${host}" "
    sed -i -e 's/#UseDNS yes/UseDNS no/g' \
    -e 's/GSSAPIAuthentication yes/GSSAPIAuthentication no/g' \
    /etc/ssh/sshd_config
    sed -i 's/#   StrictHostKeyChecking ask/   StrictHostKeyChecking no/g' /etc/ssh/ssh_config
    systemctl restart sshd
    "
    # FIX: explicit if/else so check::exit_code sees the transfer's real
    # exit status; the former '[ tag == 1 ] || command::rsync' chain let the
    # successful test overwrite $? and mask scp failures when upgrading the
    # kernel.
    if [[ "${UPGRADE_KERNEL_TAG}" == "1" ]]; then
      command::scp "${host}" "${OFFLINE_DIR}" "${TMP_DIR}"
    else
      command::rsync "${host}" "${OFFLINE_DIR}" "${TMP_DIR}"
    fi
    check::exit_code "$?" "offline" "load offline file to $host" "exit"

    if [[ "${UPGRADE_KERNEL_TAG:-}" == "1" ]]; then
      # Kernel-upgrade mode: install only the bundled kernel RPMs.
      log::info "[offline]" "${role} ${host}: upgrade kernel"
      command::exec "${host}" "
        rm -rf /etc/yum.repos.d/*
        yum localinstall -y --skip-broken ${OFFLINE_DIR}/packages/kernel/*.rpm
      "
      check::exit_code "$?" "offline" "${role} ${host}: upgrade kernel" "exit"
    else
      # Normal mode: install base + CRI + kubernetes RPMs, then swap in the
      # patched kubeadm binary shipped with the bundle.
      log::info "[offline]" "${role} ${host}: install packages"
      command::exec "${host}" "
        rm -rf /etc/yum.repos.d/*
        yum localinstall -y --skip-broken ${OFFLINE_DIR}/packages/all/*.rpm
        # 替换编译后的kubeadm
        kubeadm_path=\$(which kubeadm)
        #mv \${kubeadm_path}{,_$(date +'%Y-%m-%d')}
        kubeadm_file='${OFFLINE_DIR}/bins/kubeadm-v1.21.3-linux-amd64'
        chmod +x \${kubeadm_file}
        \cp \${kubeadm_file} \${kubeadm_path}
        "
      check::exit_code "$?" "offline" "${role} ${host}: install packages" "exit"
    fi

    # Without a kernel upgrade, also stop and disable firewall services.
    if [[ "${UPGRADE_KERNEL_TAG:-}" != "1" ]]; then
      log::info "[offline]" "${role} $host: disable the firewall"
      command::exec "${host}" "
        set -e
        for target in firewalld python-firewall firewalld-filesystem iptables; do
          systemctl stop \$target &>/dev/null || true
          systemctl disable \$target &>/dev/null || true
        done
      "
      check::exit_code "$?" "offline" "$host: disable the firewall" "exit"
    fi
  done
}

function offline::cluster() {
  # Unpack the offline bundle locally, then load it onto every master and
  # worker node.
  # FIX: default OFFLINE_FILE so 'set -o nounset' does not abort when it
  # was never provided (consistent with the other offline::cluster_*
  # helpers, which all carry this guard).
  OFFLINE_FILE="${OFFLINE_FILE:-}"
  [ ! -f "${OFFLINE_FILE}" ] && {
    log::error "[offline]" "not found ${OFFLINE_FILE}"
    exit 1
  }

  log::info "[offline]" "unzip offline package on local."
  tar -zxf "${OFFLINE_FILE}" -C "${TMP_DIR}/"
  check::exit_code "$?" "offline" "unzip offline package"

  offline::load "master"
  offline::load "worker"
}

function offline::load_depend() {
  # Push the dependency RPM bundle to every host in $HOST and install it
  # there with yum localinstall.

  local rpm_dir="${TMP_DIR}/centos-7-rpms"
  local node=""

  for node in ${HOST}; do
    # Tune sshd/ssh on the target for faster, prompt-free connections.
    command::exec "${node}" "
      sed -i -e 's/#UseDNS yes/UseDNS no/g' \
      -e 's/GSSAPIAuthentication yes/GSSAPIAuthentication no/g' \
      /etc/ssh/sshd_config
      sed -i 's/#   StrictHostKeyChecking ask/   StrictHostKeyChecking no/g' /etc/ssh/ssh_config
      systemctl restart sshd
    "

    # Copy the RPM directory over.
    log::info "[offline]" "${node}: load offline dependencies file"
    command::scp "${node}" "${rpm_dir}" "${TMP_DIR}"
    check::exit_code "$?" "offline" "load offline dependencies file to $node" "exit"

    # Install everything from the copied directory.
    log::info "[install]" "${node}: install dependencies packages"
    command::exec "${node}" "
      rm -rf /etc/yum.repos.d/*
      yum localinstall -y --skip-broken ${rpm_dir}/*.rpm
    "
    check::exit_code "$?" "install" "${node}: install dependencies packages" "exit"
  done
}

function offline::cluster_depend() {
  # Unpack the dependency bundle, install it on the local machine, then on
  # every host in $HOST via offline::load_depend.
  OFFLINE_FILE="${OFFLINE_FILE:-}"
  [ ! -f "${OFFLINE_FILE}" ] && {
    log::error "[offline]" "not found ${OFFLINE_FILE}"
    exit 1
  }

  log::info "[offline]" "unzip offline dependencies package on local."
  tar -zxf "${OFFLINE_FILE}" -C "${TMP_DIR}/"
  check::exit_code "$?" "offline" "unzip offline dependencies package"

  log::info "[install]" "install dependencies packages on local."
  rm -rf /etc/yum.repos.d/*
  RPMS_DIR="${TMP_DIR}/centos-7-rpms"
  # FIX: quote the log-file path (ShellCheck SC2086); the *.rpm glob must
  # stay unquoted so it expands.
  yum localinstall -y --skip-broken "${RPMS_DIR}"/*.rpm &> "$LOG_FILE"
  check::exit_code "$?" "install" "install dependencies packages"

  offline::load_depend
}

function offline::load_kubesphere() {
  # Ship the kubesphere image bundle to every node and docker-load it there.

  local node=""
  local image_dir="${TMP_DIR}/kubesphere/"
  local image_file="${TMP_DIR}/kubesphere/images/kubesphere-images.tar.gz"

  for node in ${MASTER_NODES} ${WORKER_NODES}; do
    # Copy the kubesphere directory (manifests + image archive) over.
    log::info "[offline]" "${node}: load offline kubesphere file"
    command::scp "${node}" "${image_dir}" "${TMP_DIR}"
    check::exit_code "$?" "offline" "load offline kubesphere file to $node" "exit"

    # Import the container images into the node's docker daemon.
    log::info "[load]" "${node}: load kubesphere image"
    command::exec "${node}" "docker load -i ${image_file}"
    check::exit_code "$?" "load" "${node}: load kubesphere image" "exit"
  done
}

function offline::cluster_kubesphere() {
  # Unpack the kubesphere offline bundle locally, then distribute it to
  # every cluster node.
  OFFLINE_FILE="${OFFLINE_FILE:-}"
  if [[ ! -f "${OFFLINE_FILE}" ]]; then
    log::error "[offline]" "not found ${OFFLINE_FILE}"
    exit 1
  fi

  log::info "[offline]" "unzip offline kubesphere package on local."
  tar -zxf "${OFFLINE_FILE}" -C "${TMP_DIR}/"
  check::exit_code "$?" "offline" "unzip offline kubesphere package"

  offline::load_kubesphere
}

function offline::load_storage() {
  # Ship the k8s storage-class image bundle to every node and docker-load
  # it there.

  local node=""
  local image_dir="${TMP_DIR}/k8s-storage/"
  local image_file="${TMP_DIR}/k8s-storage/images/k8s-storage-images.tar.gz"

  for node in ${MASTER_NODES} ${WORKER_NODES}; do
    # Copy the storage directory (manifests + image archive) over.
    log::info "[offline]" "${node}: load offline k8s storage class file"
    command::scp "${node}" "${image_dir}" "${TMP_DIR}"
    check::exit_code "$?" "offline" "load offline k8s storage class file to $node" "exit"

    # Import the container images into the node's docker daemon.
    log::info "[load]" "${node}: load k8s storage class image"
    command::exec "${node}" "docker load -i ${image_file}"
    check::exit_code "$?" "load" "${node}: load k8s storage class image" "exit"
  done
}

function offline::cluster_storage() {
  # Unpack the k8s storage-class offline bundle locally, then distribute it
  # to every cluster node.
  OFFLINE_FILE="${OFFLINE_FILE:-}"
  if [[ ! -f "${OFFLINE_FILE}" ]]; then
    log::error "[offline]" "not found ${OFFLINE_FILE}"
    exit 1
  fi

  log::info "[offline]" "unzip offline k8s storage class package on local."
  tar -zxf "${OFFLINE_FILE}" -C "${TMP_DIR}/"
  check::exit_code "$?" "offline" "unzip offline k8s storage class package"

  offline::load_storage
}

function offline::load_kubevirt() {
  # Ship the kubevirt image bundle to every node and docker-load it there.

  local node=""
  local image_dir="${TMP_DIR}/kubevirt/"
  local image_file="${TMP_DIR}/kubevirt/images/kubevirt-images.tar.gz"

  for node in ${MASTER_NODES} ${WORKER_NODES}; do
    # Copy the kubevirt directory (manifests + image archive) over.
    log::info "[offline]" "${node}: load offline kubevirt file"
    command::scp "${node}" "${image_dir}" "${TMP_DIR}"
    check::exit_code "$?" "offline" "load offline kubevirt file to $node" "exit"

    # Import the container images into the node's docker daemon.
    log::info "[load]" "${node}: load kubevirt image"
    command::exec "${node}" "docker load -i ${image_file}"
    check::exit_code "$?" "load" "${node}: load kubevirt image" "exit"
  done
}

function offline::cluster_kubevirt() {
  # Unpack the kubevirt offline bundle locally, then distribute it to every
  # cluster node.
  OFFLINE_FILE="${OFFLINE_FILE:-}"
  if [[ ! -f "${OFFLINE_FILE}" ]]; then
    log::error "[offline]" "not found ${OFFLINE_FILE}"
    exit 1
  fi

  log::info "[offline]" "unzip offline kubevirt package on local."
  tar -zxf "${OFFLINE_FILE}" -C "${TMP_DIR}/"
  check::exit_code "$?" "offline" "unzip offline kubevirt package"

  offline::load_kubevirt
}

function offline::load_images() {
  # Distribute a standalone container-image archive ($OFFLINE_FILE) to
  # every master/worker node and 'docker load' it there.

  local hosts="${MASTER_NODES} ${WORKER_NODES}"
  local IMAGE_FILE="${OFFLINE_FILE}"

  for host in ${hosts}; do
    log::info "[offline]" "${host}: load offline container image file"
    command::rsync "${host}" "${IMAGE_FILE}" "${TMP_DIR}"
    check::exit_code "$?" "offline" "load offline container image file to $host" "exit"

    log::info "[load]" "${host}: load container image"
    # FIX: load the file by its base name under ${TMP_DIR} — concatenating
    # the full ${IMAGE_FILE} produced a broken double path whenever
    # OFFLINE_FILE was given as an absolute path (identical result for bare
    # filenames). Presumably rsync drops the file into ${TMP_DIR} under its
    # base name — verify against command::rsync.
    command::exec "${host}" "docker load -i ${TMP_DIR}/${IMAGE_FILE##*/}"
    check::exit_code "$?" "load" "${host}: load container image" "exit"
  done
}

function offline::cluster_images() {
  # Validate the container-image archive path, then fan it out to the
  # cluster nodes.
  OFFLINE_FILE="${OFFLINE_FILE:-}"

  if [[ ! -f "${OFFLINE_FILE}" ]]; then
    log::error "[offline]" "not found ${OFFLINE_FILE}"
    exit 1
  fi

  offline::load_images
}

function init::cluster() {
  # Bootstrap a complete kubernetes cluster end to end.
  # The first address in MASTER_NODES becomes the management node used for
  # all kubectl access.

  MGMT_NODE=$(echo "${MASTER_NODES}" | awk '{print $1}')

  # Load the offline bundle first when running in offline mode.
  [[ "${OFFLINE_TAG:-}" == "1" ]] && offline::cluster

  # 1. initialize nodes
  init::node
  # 2. install packages
  install::package
  # 3. kubeadm init on the first master
  kubeadm::init
  # 4. join the remaining masters/workers
  kubeadm::join
  # 5. install the CNI network plugin
  add::network
  # 6. install the web ui
  add::ui
  # 7. install the storage provisioner
  add::storage
  
  # 8. optional addons
  [[ "${ADDON_TAG:-}" == "1" ]] && add::addon
  # 9. optional ingress controller
  [[ "${INGRESS_TAG:-}" == "1" ]] && add::ingress
  # 10. optional storage — NOTE(review): add::storage already ran
  # unconditionally above, so STORAGE_TAG=1 runs it a second time; confirm
  # this double invocation is intended.
  [[ "${STORAGE_TAG:-}" == "1" ]] && add::storage
  # 11. optional monitoring stack
  [[ "${MONITOR_TAG:-}" == "1" ]] && add::monitor
  # 12. optional logging stack
  [[ "${LOG_TAG:-}" == "1" ]] && add::log
  # 13. optional ops tasks (coredns anti-affinity, etcd snapshots)
  [[ "${OPS_TAG:-}" == "1" ]] && add::ops
  # 14. show cluster status
  kube::status
}

function add::node() {
  # Join additional nodes (MASTER_NODES/WORKER_NODES) to an existing
  # cluster reachable through ${MGMT_NODE}.

  # Load the offline bundle when running in offline mode.
  [[ "${OFFLINE_TAG:-}" == "1" ]] && offline::cluster

  # When KUBE_VERSION is empty/latest, reuse the running masters' kubelet
  # version so the new node matches the cluster.
  if [[ "${KUBE_VERSION}" == "" || "${KUBE_VERSION}" == "latest" ]]; then
    command::exec "${MGMT_NODE}" "
      kubectl get node --selector='node-role.kubernetes.io/master' -o jsonpath='{range.items[*]}{.status.nodeInfo.kubeletVersion } {end}' | awk -F'v| ' '{print \$2}'
  "
    get::command_output "KUBE_VERSION" "$?" "exit"
  fi

  # 1. initialize the new node(s)
  init::add_node
  # 2. install packages
  install::package
  # 3. kubeadm join
  kubeadm::join
  # 4. register the new apiserver backend in haproxy
  config::haproxy_backend "add"
  # 5. refresh etcd snapshot replica count (currently disabled)
#  config::etcd_snapshot
  # 6. show cluster status (currently disabled)
#  kube::status
}

function del::node() {
  # Remove the nodes listed in MASTER_NODES/WORKER_NODES from the cluster:
  # deregister them from haproxy, drop master etcd members, drain and
  # delete the kubernetes node objects, reset the hosts, clean their
  # /etc/hosts entries on the remaining nodes, and finally reboot them.

  config::haproxy_backend "remove"

  local cluster_nodes=""
  local del_hosts_cmd=""
  # Build an "InternalIP node-name" line per current cluster member.
  command::exec "${MGMT_NODE}" "
     kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {.metadata.name }\\n{end}'
  "
  get::command_output "cluster_nodes" "$?" exit
  log::info "[result]" "cluster_nodes is $cluster_nodes"

  # For each master being removed, delete its member from etcd by exec'ing
  # etcdctl (API v3, cluster peer certs) inside a running etcd pod.
  for host in $MASTER_NODES; do
    command::exec "${MGMT_NODE}" "
       etcd_pod=\$(kubectl -n kube-system get pods -l component=etcd --field-selector=status.phase=Running -o jsonpath='{\$.items[0].metadata.name}')
       etcd_node=\$(kubectl -n kube-system exec \$etcd_pod -- sh -c \"export ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt ETCDCTL_CERT=/etc/kubernetes/pki/etcd/server.crt ETCDCTL_KEY=/etc/kubernetes/pki/etcd/server.key ETCDCTL_ENDPOINTS=https://127.0.0.1:2379; etcdctl member list\"| grep $host | awk -F, '{print \$1}')
       echo \"\$etcd_pod \$etcd_node\"
       kubectl -n kube-system exec \$etcd_pod -- sh -c \"export ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt ETCDCTL_CERT=/etc/kubernetes/pki/etcd/server.crt ETCDCTL_KEY=/etc/kubernetes/pki/etcd/server.key ETCDCTL_ENDPOINTS=https://127.0.0.1:2379; etcdctl member remove \$etcd_node; etcdctl member list\"
     "
    check::exit_code "$?" "delete" "remove $host etcd member"
  done

  for host in $MASTER_NODES $WORKER_NODES; do
    log::info "[delete]" "kubernetes node $host"

    # Resolve the kubernetes node name registered for this IP.
    local node_name
    node_name=$(echo -ne "${cluster_nodes}" | grep "${host}" | awk '{print $2}')
    if [[ "${node_name}" == "" ]]; then
      log::warning "[delete]" "kubernetes node $host not found."
#      read -r -t 10 -n 1 -p "Do you need to reset the node (y/n)? " answer
#      [[ -z "$answer" || "$answer" != "y" ]] && exit || echo
      # Host unknown to the cluster: ask before wiping it anyway.
      while true ;do
      read -p "Are you sure to delete this node? [yes/no]:" result
      case $result in
              yes | y | YES | Y )
              break
              ;;
              no | n | NO | N ) exit 0
              break
              ;;
              *)
              ;;
      esac
      done
    else
      # Drain workloads off the node, then remove it from the cluster.
      log::info "[delete]" "kubernetes drain $host"
      command::exec "${MGMT_NODE}" "kubectl drain $node_name --force --ignore-daemonsets --delete-local-data"
      check::exit_code "$?" "delete" "$host: kubernetes drain"

      log::info "[delete]" "kubernetes delete node $host"
      command::exec "${MGMT_NODE}" "kubectl delete node $node_name"
      check::exit_code "$?" "delete" "$host: kubernetes delete"
      sleep 3
    fi
    # Wipe the removed host back to a pre-install state.
    reset::node "$host"

    # Queue a /etc/hosts cleanup command for this host.
    # NOTE(review): the inner quotes around /$host/d close and reopen the
    # outer string, so the sed expression reaches the remote shell unquoted
    # — confirm hosts never contain sed metacharacters.
    del_hosts_cmd="${del_hosts_cmd}\nsed -i "/$host/d" /etc/hosts"
  done

  # Apply the accumulated /etc/hosts cleanup on every remaining node.
  for host in $(echo -ne "${cluster_nodes}" | awk '{print $1}'); do
    log::info "[delete]" "$host: delete node hosts"
    command::exec "${host}" "
       $(echo -ne "${del_hosts_cmd}")
     "
    check::exit_code "$?" "delete" "delete node hosts"
  done

  # Reboot the removed hosts.
  for host in $MASTER_NODES $WORKER_NODES; do
    command::exec "${host}" "bash -c 'sleep 15 && reboot' &>/dev/null &"
    check::exit_code "$?" "reboot" "$host: Wait for 15s to restart"
  done

#  [ "$MASTER_NODES" != "" ] && config::etcd_snapshot
#  kube::status
}

function upgrade::cluster() {
  # Upgrade the cluster to $KUBE_VERSION (or to the latest stable release),
  # processing nodes one at a time: drain -> upgrade -> wait Ready -> uncordon.

  log::info "[upgrade]" "upgrade to $KUBE_VERSION"
  log::info "[upgrade]" "backup cluster"
  # Take an ops backup before touching any node.
  add::ops

  # Resolve the latest stable release tag (e.g. "v1.22.1") and strip the "v".
  # "2" is the fallback when the download fails; NOTE(review): presumably a
  # sentinel so the numeric comparisons below still run offline — confirm.
  local stable_version="2"
  command::exec "127.0.0.1" "wget https://storage.googleapis.com/kubernetes-release/release/stable.txt -q -O -"
  get::command_output "stable_version" "$?" && stable_version="${stable_version#v}"

  # When no nodes were supplied on the CLI, "$MASTER_NODES $WORKER_NODES"
  # collapses to a single space; in that case discover the node names from
  # the live cluster instead.
  local node_hosts="$MASTER_NODES $WORKER_NODES"
  if [[ "$node_hosts" == " " ]]; then
    command::exec "${MGMT_NODE}" "
      kubectl get node -o jsonpath='{range.items[*]}{.metadata.name } {end}'
    "
    get::command_output "node_hosts" "$?" exit
  fi

  # Lower-cased copy of SKIP_UPGRADE_PLAN ("false" => run `kubeadm upgrade`
  # init path on the first node). NOTE(review): this bare expansion errors
  # under the script's `set -o nounset` if SKIP_UPGRADE_PLAN is unset here —
  # presumably defaulted earlier in the file; confirm.
  local skip_plan=${SKIP_UPGRADE_PLAN,,}
  for host in ${node_hosts}; do
    log::info "[upgrade]" "node: $host"
    # Current kubectl client version on the node, without the leading "v".
    local local_version=""
    command::exec "${host}" "kubectl version --client --short | awk '{print \$3}'"
    get::command_output "local_version" "$?" && local_version="${local_version#v}"

    if [[ "${KUBE_VERSION}" != "latest" ]]; then
      # Skip nodes already at the target version.
      if [[ "${KUBE_VERSION}" == "${local_version}" ]]; then
        log::warning "[check]" "The specified version(${KUBE_VERSION}) is consistent with the local version(${local_version})!"
        continue
      fi

      # Never downgrade.
      if [[ $(utils::version_to_number "$KUBE_VERSION") -lt $(utils::version_to_number "${local_version}") ]]; then
        log::warning "[check]" "The specified version($KUBE_VERSION) is less than the local version(${local_version})!"
        continue
      fi

      # Never go past the published stable release.
      if [[ $(utils::version_to_number "$KUBE_VERSION") -gt $(utils::version_to_number "${stable_version}") ]]; then
        log::warning "[check]" "The specified version($KUBE_VERSION) is more than the stable version(${stable_version})!"
        continue
      fi
    else
      # "latest": only upgrade nodes that are behind the stable release.
      if [[ $(utils::version_to_number "${local_version}") -ge $(utils::version_to_number "${stable_version}") ]]; then
        log::warning "[check]" "The local version($local_version) is greater or equal to the stable version(${stable_version})!"
        continue
      fi
    fi

    # Evict workloads before upgrading this node.
    command::exec "${MGMT_NODE}" "kubectl drain ${host} --ignore-daemonsets --delete-local-data"
    check::exit_code "$?" "upgrade" "drain ${host} node" "exit"
    sleep 5

    # First node runs the full plan+apply ('init'); the rest only upgrade the
    # node components. (sic: "upgrage" matches the helper's name as defined
    # elsewhere in this file.)
    if [[ "${skip_plan}" == "false" ]]; then
      command::exec "${host}" "$(declare -f script::upgrage_kube); script::upgrage_kube 'init' '$KUBE_VERSION'"
      check::exit_code "$?" "upgrade" "plan and upgrade cluster on ${host}" "exit"
      command::exec "${host}" "$(declare -f utils::retry); utils::retry 10 kubectl get node"
      check::exit_code "$?" "upgrade" "${host}: upgrade" "exit"
      skip_plan=true
    else
      command::exec "${host}" "$(declare -f script::upgrage_kube); script::upgrage_kube 'node' '$KUBE_VERSION'"
      check::exit_code "$?" "upgrade" "upgrade ${host} node" "exit"
    fi

    # Wait for the node to report Ready, then allow scheduling again.
    command::exec "${MGMT_NODE}" "kubectl wait --for=condition=Ready node/${host} --timeout=120s"
    check::exit_code "$?" "upgrade" "${host} ready"
    sleep 5
    command::exec "${MGMT_NODE}" "$(declare -f utils::retry); utils::retry 6 kubectl uncordon ${host}"
    check::exit_code "$?" "upgrade" "uncordon ${host} node"
    sleep 5
  done

  kube::status
}

function transform::ip_range() {
  # Expand an IP range "A.B.C.n-A.B.C.m" into "A.B.C.n,A.B.C.n+1,...,A.B.C.m,"
  # (trailing comma included, matching the historical output). Values without
  # a '-' are echoed through unchanged.
  # Arguments: $1 - raw node-list value from the CLI
  # Outputs:   the (possibly expanded) value on stdout
  # Returns:   1 when the two endpoints are not in the same A.B.C. prefix
  local value=$1
  local head1 head2 start end i
  if [[ "${value}" != *-* ]]; then
    printf '%s' "${value}"
    return 0
  fi
  # First three octets of each endpoint, with a trailing dot.
  head1=$(echo "${value}" | awk -F '-' '{print $1}' | awk -F '.' '{print $1"."$2"."$3"."}')
  head2=$(echo "${value}" | awk -F '-' '{print $2}' | awk -F '.' '{print $1"."$2"."$3"."}')
  # Both endpoints must share the same prefix; caller maps failure to usage.
  [ "${head1}" != "${head2}" ] && return 1
  # Last octet of each endpoint bounds the expansion.
  start=$(echo "${value}" | awk -F '-' '{print $1}' | awk -F '.' '{print $4}')
  end=$(echo "${value}" | awk -F '-' '{print $2}' | awk -F '.' '{print $4}')
  for ((i = start; i <= end; i++)); do
    printf '%s' "${head1}${i},"
  done
  return 0
}

function transform::data {
  # Normalize the CLI node lists and enforce runtime restrictions.
  # Globals (written): MASTER_NODES, WORKER_NODES, HOST, KUBE_CRI_ENDPOINT,
  #                    kubelet_nodeRegistration

  # Expand "a.b.c.x-a.b.c.y" ranges; a mismatched prefix aborts with usage.
  # (help::usage runs in the main shell here, so its exit ends the script.)
  MASTER_NODES=$(transform::ip_range "${MASTER_NODES}") || help::usage
  WORKER_NODES=$(transform::ip_range "${WORKER_NODES}") || help::usage
  HOST=$(transform::ip_range "${HOST}") || help::usage

  # Comma-separated CLI values become space-separated lists for iteration.
  MASTER_NODES=$(echo "${MASTER_NODES}" | tr ',' ' ')
  WORKER_NODES=$(echo "${WORKER_NODES}" | tr ',' ' ')
  HOST=$(echo "${HOST}" | tr ',' ' ')

  # Only the three known container runtimes are supported.
  if ! utils::is_element_in_array "$KUBE_CRI" docker containerd cri-o; then
    log::error "[limit]" "$KUBE_CRI is not supported, only [docker,containerd,cri-o]"
    exit 1
  fi

  # Offline installs ship docker packages only.
  [[ "$KUBE_CRI" != "docker" && "${OFFLINE_TAG:-}" == "1" ]] && {
    log::error "[limit]" "$KUBE_CRI is not supported offline, only docker"
    exit 1
  }
  # Swap in the runtime's default socket when the user kept the docker default.
  [[ "$KUBE_CRI" == "containerd" && "${KUBE_CRI_ENDPOINT}" == "/var/run/dockershim.sock" ]] && KUBE_CRI_ENDPOINT="unix:///run/containerd/containerd.sock"
  [[ "$KUBE_CRI" == "cri-o" && "${KUBE_CRI_ENDPOINT}" == "/var/run/dockershim.sock" ]] && KUBE_CRI_ENDPOINT="unix:///var/run/crio/crio.sock"

  # YAML snippet spliced into the kubeadm configuration; intentionally global
  # (presumably consumed by the config template elsewhere in this file).
  kubelet_nodeRegistration="nodeRegistration:
  criSocket: ${KUBE_CRI_ENDPOINT:-/var/run/dockershim.sock}
  kubeletExtraArgs:
    runtime-cgroups: /system.slice/${KUBE_CRI//-/}.service
$(if [[ "${KUBE_VERSION}" == "latest" || "${KUBE_VERSION}" == *"1.21"* ]]; then
    echo "    pod-infra-container-image: $KUBE_IMAGE_REPO/pause:3.4.1"
  else
    echo "    pod-infra-container-image: $KUBE_IMAGE_REPO/pause:3.2"
  fi)
"
}

function help::usage {
  # Print the short usage banner to stdout and exit with status 1.
  # Used both for -h-less invocations and as the abort path for bad input.
  # NOTE(review): only "install" is listed under Available Commands here;
  # help::details documents the full command set — confirm this is intended.

  cat <<EOF

Install kubernetes cluster using kubeadm.
Documentation: https://github.com/kongyu666/kubeeasy

Usage:
  $(basename "$0") [command]

Available Commands:
  install            Install Service cluster.

Flags:
  -h, --help               help for kubeeasy

Example:
  [install k8s cluster]
  $0 install k8s \\
  --master 10.24.2.31 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40 \\
  --user root \\
  --password 000000 \\
  --version 1.21.3

Use "$(basename "$0") [command] --help" for more information about a command.
EOF
  exit 1
}

function help::details {
  # Print the full command/flag reference with worked examples and exit 1.
  # Reached via -h/--help.
  # NOTE(review): the heredoc expands ${KUBE_APISERVER_PORT}, ${SSH_USER},
  # ${SSH_PASSWORD} and ${SSH_PORT}; under the script's `set -o nounset`
  # these must be defaulted earlier in the file — confirm.

  cat <<EOF

Install kubernetes cluster using kubeadm.
Documentation: https://github.com/kongyu666/kubeeasy

Usage:
  $(basename "$0") [command]

Available Commands:
  install         Install cluster service.
  reset           Reset Kubernetes cluster.
  create          create service.
  push            push packages.
  add             Add service/node to the cluster.
  del             Remove node from the cluster.

Flag:
  -m,--master          master node, example: 10.24.2.10
  -w,--worker          work node, example: 10.24.2.11,10.24.2.12 or 10.24.2.10-10.24.2.20
  -host,--host         other node, example: 10.24.2.11,10.24.2.12 or 10.24.2.10-10.24.2.20
  -vip,--virtual-ip    keepalived service: virtual ipaddress, example: 10.24.2.100
  --kube-vip-port      kube apiserver port, default: ${KUBE_APISERVER_PORT}
  -u,--user            ssh user, default: ${SSH_USER}
  -p,--password        ssh password, default: ${SSH_PASSWORD}
  -P,--port            ssh port, default: ${SSH_PORT}
  -v,--version         kube version, default: ${KUBE_VERSION}
  -d,--docker-data     docker store data root, default: ${DOCKER_DATA_ROOT}
  --pod-cidr           kube pod subnet, default: ${KUBE_POD_SUBNET}
  -ui,--ui             cluster web ui, choose: [kuboard,kubesphere], default: ${KUBE_UI}
  -s,--storage         cluster storage, choose: [openebs,longhorn]
  -vm,--virt           cluster virt, choose: [kubevirt]
  -U,--upgrade-kernel  upgrade kernel
  -of,--offline-file   specify the offline package file to load

Example:
  [install dependencies package cluster]
  $0 install dependencies \\
  --host 10.24.2.31,10.24.2.14,10.24.2.46 \\
  --user root \\
  --password 000000 \\
  --offline-file dependencies/centos-7-rpms.tar.gz

  [upgrade kernel cluster online]
  $0 install upgrade-kernel \\
  --master 10.24.2.31 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40 \\
  --user root \\
  --password 000000

  [upgrade kernel cluster offline]
  $0 install upgrade-kernel \\
  --master 10.24.2.31 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40 \\
  --user root \\
  --password 000000 \\
  --offline-file kubeeasy.tar.gz

  [install k8s cluster online]
  $0 install kubernetes \\
  --master 10.24.2.31 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40 \\
  --user root \\
  --password 000000 \\
  --version 1.21.3 \\
  --pod-cidr 10.244.0.0/16

  [install k8s cluster offline]
  $0 install kubernetes \\
  --master 10.24.2.31 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40 \\
  --user root \\
  --password 000000 \\
  --version 1.21.3 \\
  --offline-file kubeeasy.tar.gz

  [install k8s ha cluster online]
  $0 install kubernetes \\
  --master 10.24.2.31,10.24.2.14,10.24.2.46 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40 \\
  --user root \\
  --password 000000 \\
  --version 1.21.3 \\
  --virtual-ip 10.24.2.50

  [install k8s ha cluster offline]
  $0 install kubernetes \\
  --master 10.24.2.31,10.24.2.14,10.24.2.46 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40 \\
  --user root \\
  --password 000000 \\
  --version 1.21.3 \\
  --virtual-ip 10.24.2.50 \\
  --offline-file kubeeasy.tar.gz

  [reset k8s cluster]
  $0 reset \\
  --user root \\
  --password 000000

  [reset force k8s node]
  $0 reset --force \\
  --master 10.24.2.31 \\
  --worker 10.24.2.43 \\
  --user root \\
  --password 000000

  [add node] ##Only add worker
  $0 add \\
  --master 10.24.2.31,10.24.2.32
  --user root \\
  --password 000000

  $0 add \\
  --worker 10.24.2.31,10.24.2.32
  --user root \\
  --password 000000

  [del node]
  $0 del \\
  --master 10.24.2.31,10.24.2.14,10.24.2.46 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40 \\
  --user root \\
  --password 000000

  [add storage]
  $0 add --storage openebs
  $0 add --storage longhorn

  [add dashboard]
  $0 add --ui kubesphere

  [add virt]
  $0 add --virt kubevirt

  [push file]
  $0 push virt-file \\
  --offline-file kubevirt.tar.gz \\
  --master 10.24.2.14,10.24.2.29,10.24.2.34 \\
  --worker 10.24.2.23,10.24.2.49,10.24.2.43 \\
  --user root \\
  --password 000000

  [push image file and load]
  $0 push image-file \\
  --offline-file test-images.tar.gz \\
  --master 10.24.2.14,10.24.2.29,10.24.2.34 \\
  --worker 10.24.2.23,10.24.2.49,10.24.2.43 \\
  --user root \\
  --password 000000

  ###<=    other   >=###
  [install keepalived and haproxy]
  $0 install keepalived \\
  --virtual-ip 10.24.2.50 \\
  --master 10.24.2.31,10.24.2.14,10.24.2.46 \\
  --user root \\
  --password 000000

  [create chronyc time]
  $0 create time \\
  --master 10.24.2.31 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40,10.24.2.14,10.24.2.46 \\
  --user root \\
  --password 000000

  [create ssh keygen]
  $0 create ssh-keygen \\
  --master 10.24.2.31 \\
  --worker 10.24.2.43,10.24.2.34,10.24.2.40,10.24.2.14,10.24.2.46 \\
  --user root \\
  --password 000000

  [mount and mkfs disk]
  $0 create mount-disk \\
  --host 192.168.200.11-192.168.200.13 \\
  --disk sdb \\
  --mount-dir /data/data \\
  --user root \\
  --password 000000

  [set root password]
  $0 create password \\
  --host 192.168.200.11-192.168.200.13 \\
  --user root \\
  --password 000000 \\
  --new-password 123456

  [check node ssh]
  $0 check ssh \\
  --host 10.24.2.31,10.24.2.14,10.24.2.46 \\
  --user root \\
  --password 000000

  [check node ping]
  $0 check ping \\
  --host 10.24.2.31-10.24.2.46

  More features are expected.

EOF
  exit 1
}


######################################################################################################
# main
######################################################################################################

# No arguments at all: show usage and exit.
[ "$#" == "0" ] && help::usage

# Parse the command line. Bare words select a command/subcommand tag; dashed
# flags consume the following word as their value, falling back to the
# variable's current default when the value is missing.
while [ "${1:-}" != "" ]; do
  case $1 in
  -h | --help)
    HELP_TAG=1
    ;;
  install)
    INSTALL_TAG=1
    ;;
  create)
    CREATE_TAG=1
    ;;
  check)
    CHECK_TAG=1
    ;;
  push)
    PUSH_TAG=1
    ;;
  reset)
    RESET_TAG=1
    ;;
  add)
    ADD_TAG=1
    ;;
  del)
    DEL_TAG=1
    ;;
  renew-cert)
    RENEW_CERT_TAG=1
    ;;
  upgrade)
    UPGRADE_TAG=1
    ;;
  update)
    UPDATE_TAG=1
    ;;
  # install subcommands
  k8s | kubernetes)
    KUBE_INSTALL_TAG=1
    ;;
  depend | dependencies)
    DEPEND_INSTALL_TAG=1
    ;;
  keepalived)
    VIP_INSTALL_TAG=1
    ;;
  upgrade-kernel)
    UPGRADE_KERNEL_TAG=1
    ;;
  # create subcommands
  chrony | time)
    TIME_CREATE_TAG=1
    ;;
  ssh-keygen)
    SSH_CREATE_TAG=1
    ;;
  mount | mount-disk)
    DISK_CREATE_TAG=1
    ;;
  pw | password)
    PW_CREATE_TAG=1
    ;;
  # check subcommands
  ssh)
    SSH_CHECK_TAG=1
    ;;
  ping)
    PING_CHECK_TAG=1
    ;;
  # reset modifier
  --force)
    RESET_FORCE_TAG=1
    ;;
  # push subcommands
  ksf | kubesphere-file)
    KUBESPHERE_PUSH_TAG=1
    ;;
  sf | storage-file)
    STORAGE_PUSH_TAG=1
    ;;
  vf | kubevirt-file)
    KUBEVIRT_PUSH_TAG=1
    ;;
  if | image-file)
    IMAGE_PUSH_TAG=1
    ;;
  # add targets
  -i | --ingress)
    shift
    INGRESS_TAG=1
    KUBE_INGRESS=${1:-$KUBE_INGRESS}
    ;;
  -M | --monitor)
    shift
    MONITOR_TAG=1
    KUBE_MONITOR=${1:-$KUBE_MONITOR}
    ;;
  -l | --log)
    shift
    LOG_TAG=1
    KUBE_LOG=${1:-$KUBE_LOG}
    ;;
  -s | --storage)
    shift
    STORAGE_TAG=1
    KUBE_STORAGE=${1:-$KUBE_STORAGE}
    ;;
  -ui | --ui)
    shift
    UI_TAG=1
    KUBE_UI=${1:-$KUBE_UI}
    ;;
  -a | --addon)
    shift
    ADDON_TAG=1
    KUBE_ADDON=${1:-$KUBE_ADDON}
    ;;
  -vm | --virt)
    shift
    VIRT_TAG=1
    KUBE_VIRT=${1:-$KUBE_VIRT}
    ;;
  # shared options
  -m | --master)
    shift
    MASTER_NODES=${1:-$MASTER_NODES}
    ;;
  -w | --worker)
    shift
    WORKER_NODES=${1:-$WORKER_NODES}
    ;;
  -host | --host)
    shift
    HOST=${1:-$HOST}
    ;;
  -vip | --virtual-ip)
    shift
    # Fix: default to empty instead of the bare ${1}, which aborts with
    # "unbound variable" under `set -o nounset` when -vip is the last token.
    VIRTUAL_IP=${1:-}
    ;;
  --kube-vip-port)
    shift
    KUBE_APISERVER_PORT=${1:-$KUBE_APISERVER_PORT}
    ;;
  -u | --user)
    shift
    SSH_USER=${1:-$SSH_USER}
    ;;
  -p | --password)
    shift
    SSH_PASSWORD=${1:-$SSH_PASSWORD}
    ;;
  -np | --new-password)
    shift
    NEW_SSH_PASSWORD=${1:-$NEW_SSH_PASSWORD}
    ;;
  --private-key)
    shift
    # Fix: fall back to the existing SSH_PRIVATE_KEY value — the original
    # read the misspelled SSH_SSH_PRIVATE_KEY, an unbound variable that
    # crashes the parser under `set -o nounset`.
    SSH_PRIVATE_KEY=${1:-${SSH_PRIVATE_KEY:-}}
    ;;
  -P | --port)
    shift
    SSH_PORT=${1:-$SSH_PORT}
    ;;
  -v | --version)
    shift
    KUBE_VERSION=${1:-$KUBE_VERSION}
    ;;
  # --mount-dir intentionally shares DOCKER_DATA_ROOT with --docker-data
  # (the mount-disk subcommand reuses this variable as the mount point).
  -d | --docker-data | --mount-dir)
    shift
    DOCKER_DATA_ROOT=${1:-$DOCKER_DATA_ROOT}
    ;;
  --disk)
    shift
    MOUNT_DISK=${1:-}
    ;;
  -n | --network)
    shift
    NETWORK_TAG=1
    KUBE_NETWORK=${1:-$KUBE_NETWORK}
    ;;
  --pod-cidr)
    shift
    KUBE_POD_SUBNET=${1:-$KUBE_POD_SUBNET}
    ;;
  --cri)
    shift
    KUBE_CRI=${1:-$KUBE_CRI}
    ;;
  --cri-version)
    shift
    KUBE_CRI_VERSION=${1:-$KUBE_CRI_VERSION}
    ;;
  --cri-endpoint)
    shift
    KUBE_CRI_ENDPOINT=${1:-$KUBE_CRI_ENDPOINT}
    ;;
  -of | --offline-file)
    shift
    OFFLINE_TAG=1
    OFFLINE_FILE=${1:-$OFFLINE_FILE}
    ;;
  --10years)
    CERT_YEAR_TAG=1
    ;;
  --sudo)
    SUDO_TAG=1
    ;;
  --sudo-user)
    shift
    SUDO_USER=${1:-$SUDO_USER}
    ;;
  --sudo-password)
    shift
    SUDO_PASSWORD=${1:-}
    ;;
  *)
    # Unknown token: print usage (which exits 1).
    help::usage
    exit 1
    ;;
  esac
  shift
done

# Start: log the sanitized invocation (password masked with ******) unless
# help was requested.
# Fix: use ${HELP_TAG:-} — HELP_TAG is only assigned by -h/--help, and the
# bare ${HELP_TAG} expansion aborts with "unbound variable" under the
# script's `set -o nounset` for every normal (non-help) invocation.
# NOTE(review): SCRIPT_PARAMETER is presumably captured near the top of the
# file before parsing — confirm it is always defined here.
[ "${HELP_TAG:-}" != "1" ] && log::info "[start]" "bash $0 ${SCRIPT_PARAMETER//${SSH_PASSWORD:-${SUDO_PASSWORD:-}}/******}"

# Normalize node lists / derive runtime settings.
[ "${HELP_TAG:-}" != "1" ] && transform::data

# Preflight checks.
[ "${HELP_TAG:-}" != "1" ] && check::preflight

# Dispatch: run the action selected by the parsed tags; any command group
# that matched no subcommand falls through to help::usage.
if [[ "${INSTALL_TAG:-}" == "1" ]]; then
  # install
  [[ "${KUBE_INSTALL_TAG:-}" == "1" ]] && {
    # Single-node convenience: no --master means install on localhost.
    [[ "$MASTER_NODES" == "" ]] && MASTER_NODES="127.0.0.1"
    init::cluster
    install=1
  }
  [[ "${DEPEND_INSTALL_TAG:-}" == "1" ]] && {
    offline::cluster_depend
    install=1
  }
  [[ "${VIP_INSTALL_TAG:-}" == "1" ]] && {
    install::ha-service
    install=1
  }
  [[ "${UPGRADE_KERNEL_TAG:-}" == "1" ]] && {
    # Fix: guard with ${OFFLINE_TAG:-} — the bare expansion aborts under
    # `set -o nounset` when upgrading the kernel online (no -of given);
    # every other OFFLINE_TAG test in this script uses the guarded form.
    [[ "${OFFLINE_TAG:-}" == "1" ]] && offline::cluster
    init::upgrade_kernel
    install=1
  }
  [[ "${install:-}" != "1" ]] && help::usage
elif [[ "${CREATE_TAG:-}" == "1" ]]; then
  # create
  [[ "${TIME_CREATE_TAG:-}" == "1" ]] && {
    install::time
    create=1
  }
  [[ "${SSH_CREATE_TAG:-}" == "1" ]] && {
    install::ssh_keygen
    create=1
  }
  [[ "${DISK_CREATE_TAG:-}" == "1" ]] && {
    mount::disk
    create=1
  }
  [[ "${PW_CREATE_TAG:-}" == "1" ]] && {
    create::password
    create=1
  }
  [[ "${create:-}" != "1" ]] && help::usage

elif [[ "${CHECK_TAG:-}" == "1" ]]; then
  # check
  [[ "${SSH_CHECK_TAG:-}" == "1" ]] && {
    check::ssh_conn_new
    check=1
  }
  [[ "${PING_CHECK_TAG:-}" == "1" ]] && {
    check::ping_conn
    check=1
  }

  [[ "${check:-}" != "1" ]] && help::usage

elif [[ "${ADD_TAG:-}" == "1" ]]; then
  # add: each matched target runs independently; node addition also triggers
  # when --master/--worker were supplied without an explicit target flag.
  [[ "${NETWORK_TAG:-}" == "1" ]] && {
    add::network
    add=1
  }
  [[ "${STORAGE_TAG:-}" == "1" ]] && {
    add::storage
    add=1
  }
  [[ "${UI_TAG:-}" == "1" ]] && {
    add::ui
    add=1
  }
  [[ "${VIRT_TAG:-}" == "1" ]] && {
    add::virt
    add=1
  }
  [[ "$MASTER_NODES" != "" || "$WORKER_NODES" != "" ]] && {
    add::node
    add=1
  }
  [[ "${add:-}" != "1" ]] && help::usage
elif [[ "${PUSH_TAG:-}" == "1" ]]; then
  # push: distribute offline packages
  [[ "${KUBESPHERE_PUSH_TAG:-}" == "1" ]] && {
    offline::cluster_kubesphere
    push=1
  }
  [[ "${STORAGE_PUSH_TAG:-}" == "1" ]] && {
    offline::cluster_storage
    push=1
  }
  [[ "${KUBEVIRT_PUSH_TAG:-}" == "1" ]] && {
    offline::cluster_kubevirt
    push=1
  }
  [[ "${IMAGE_PUSH_TAG:-}" == "1" ]] && {
    offline::cluster_images
    push=1
  }
  [[ "${push:-}" != "1" ]] && help::usage

elif [[ "${DEL_TAG:-}" == "1" ]]; then
  # del: requires at least one node list
  if [[ "$MASTER_NODES" != "" || "$WORKER_NODES" != "" ]]; then del::node; else help::usage; fi
elif [[ "${RESET_TAG:-}" == "1" ]]; then
  # reset the kubernetes cluster
  if [[ "${RESET_FORCE_TAG:-}" == "1" ]]; then
    reset::cluster_force
  else
    reset::cluster
  fi
elif [[ "${UPGRADE_TAG:-}" == "1" ]]; then
  upgrade::cluster
elif [[ "${HELP_TAG:-}" == "1" ]]; then
  # help
  help::details
else
  help::usage
fi
