#!/usr/bin/env bash
# Based on https://raw.githubusercontent.com/lework/kainstall/v1.4.9/kainstall-centos.sh

[[ -n $DEBUG ]] && set -x
set -o errtrace         # Make sure any error trap is inherited
set -o nounset          # Disallow expansion of unset variables
set -o pipefail         # Use last non-zero exit code in a pipeline

######################################################################################################
# Software versions
######################################################################################################
# Yum repo for the container runtime; official repo: https://download.docker.com/linux/centos/docker-ce.repo
CRI_REPO="http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo"
# e.g. 3:25.0.5 | 3:26.1.3 | list versions with "yum list docker-ce --showduplicates | sort -r"
CRI_VERSION="${CRI_VERSION:-latest}"
# Version list: https://kubernetes.io/zh-cn/releases/patch-releases/
KUBE_VERSION="${KUBE_VERSION:-1.28.13}"
# Version list: https://github.com/helm/helm/releases
HELM_VERSION="${HELM_VERSION:-3.15.4}"
# Version list: https://github.com/flannel-io/flannel/releases
FLANNEL_VERSION="${FLANNEL_VERSION:-0.25.6}"
# Version list: https://github.com/kubernetes/ingress-nginx/releases
INGRESS_VERSION="${INGRESS_VERSION:-4.10.4}"
# Version list: https://artifacthub.io/packages/helm/rancher-stable/rancher | https://github.com/rancher/rancher/releases
RANCHER_VERSION="${RANCHER_VERSION:-2.8.5}"

######################################################################################################
# Cluster configuration
######################################################################################################
# Hostname prefix for k8s nodes
HOSTNAME_PREFIX="k8s"
# Container registry mirror | https://docker.nju.edu.cn/ | https://dockerproxy.com | https://docker.mirrors.ustc.edu.cn
CRI_REGISTRY_MIRRORS="https://registry-1.docker.io"
# Container runtime data root directory
CRI_DATA_ROOT="/var/lib/docker"
# CRI endpoint | /var/run/containerd/containerd.sock | /var/run/dockershim.sock
CRI_ENDPOINT="/var/run/containerd/containerd.sock"
# k8s cluster name
KUBE_CLUSTER_NAME="k8s"
# k8s cluster DNS domain
KUBE_DNS_DOMAIN="cluster.local"
# api-server domain name
KUBE_API_SERVER="apiserver.$KUBE_DNS_DOMAIN"
# Pod subnet (CIDR)
KUBE_POD_SUBNET="10.244.0.0/16"
# Service subnet (CIDR)
KUBE_SERVICE_SUBNET="10.96.0.0/16"
# flannel backend type
KUBE_FLANNEL_TYPE="vxlan"
# Port nginx uses to proxy ingress
NGINX_HTTP_PORT="80"
######################################################################################################
# Registry mirrors used while pulling k8s images
######################################################################################################
# k8s.gcr.io mirror: gcr.nju.edu.cn/google-containers
K8S_GCR_IO_REPO="k8s.gcr.io"
# registry.k8s.io mirror: k8s.mirror.nju.edu.cn
REGISTRY_K8S_IO_REPO="registry.k8s.io"
# quay.io mirror: quay.nju.edu.cn
QUAY_IO_REPO="quay.io"
# docker.io mirror: docker.m.daocloud.io | dockerproxy.com
DOCKER_IO_REPO="docker.io"
# imageRepository for the kubeadm config | registry.aliyuncs.com/google_containers
KUBEADMCFG_REPO="registry.k8s.io"

######################################################################################################
# Script settings
######################################################################################################
# Supported operating systems (space separated)
OS_SUPPORT="rocky9.0 rocky9.1 rocky9.2 rocky9.3 rocky9.4 rocky9.5"
# Run the init step
INIT_TAG="0"
# Master and worker node addresses, comma separated
MASTER_NODES="${MASTER_NODES:-}"
WORKER_NODES="${WORKER_NODES:-}"
# SSH connection settings
SSH_PORT="${SSH_PORT:-22}"
SSH_USER="${SSH_USER:-root}"
SSH_PASSWORD="${SSH_PASSWORD:-}"
SSH_PRIVATE_KEY="${SSH_PRIVATE_KEY:-}"
# sudo username and password
SUDO_TAG="0"
SUDO_USER="${SUDO_USER:-root}"
SUDO_PASSWORD="${SUDO_PASSWORD:-}"
# Upgrade the kernel
UPGRADE_KERNEL_TAG="0"
# Issue certificates valid for 10 years
CERT_YEAR_TAG="1"
# Temporary directory for downloaded files
DOWNLOAD_DIR="/tmp/k8s-download"
# Log file
LOG_FILE="/tmp/k8s-install-rocky9-$(date +"%H%M%S").log"
# Marker file recording completed setup steps
SETUP_STATE_FILE="/tmp/k8s-install-setup.txt"
# SSH options
SSH_OPTIONS="-o ConnectTimeout=600 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
# github | https://mirror.ghproxy.com/
GITHUB_PROXY="${GITHUB_PROXY:-}"
# Script file name (display only)
SCRIPT_FILE_NAME="k8s-install-rocky9.sh"

######################################################################################################
# Transient variables
######################################################################################################
# Node on which management commands are executed
MGMT_NODE="127.0.0.1"
# pause image version
PAUSE_VERSION="3.7"
# Shared kubeadm config fragment
KUBELET_NODE_REGISTRATION=""
# Holds the stdout of the last command run through command::exec
COMMAND_OUTPUT=""
# All command line arguments (kept for display)
SCRIPT_PARAMETER="$*"
# Private IP of the MGMT_NODE host
MGMT_NODE_IP=""
# Skip replacing the OS yum repos
SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
# Accumulated informational (access) messages
ACCESS_INFO="\n\033[32mACCESS Summary: \033[0m\n  "
# Accumulated error messages
ERROR_INFO="\n\033[31mERROR Summary: \033[0m\n  "
# Flag: at least one error occurred during execution
HAS_ERROR="0"

######################################################################################################
# Common functions
######################################################################################################

trap trap::info 1 2 3 15 EXIT

# Signal/EXIT handler: print the accumulated summaries plus a pointer to
# the log file, then terminate without re-triggering the EXIT trap.
function trap::info() {
    # Print a summary only if something was appended beyond the initial
    # empty banner (thresholds match the initial prefix lengths).
    if [[ ${#ERROR_INFO} -gt 37 ]]; then
        echo -e "$ERROR_INFO"
    fi
    if [[ ${#ACCESS_INFO} -gt 38 ]]; then
        echo -e "$ACCESS_INFO"
    fi
    if [ -f "$LOG_FILE" ]; then
        echo -e "\n\n  See detailed log >>> cat $LOG_FILE \n\n"
    fi
    trap '' EXIT
    exit
}

# Error log: flag the run as failed, append the line to the error summary
# and echo it to both stdout and the log file.
function log::error() {
    HAS_ERROR="1"
    local entry
    entry="[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[31mERROR:   \033[0m$*"
    ERROR_INFO="${ERROR_INFO}${entry}\n  "
    echo -e "${entry}" | tee -a "$LOG_FILE"
}

# Info log: timestamped green INFO line to stdout and the log file.
function log::info() {
    local stamp
    stamp=$(date +'%Y-%m-%dT%H:%M:%S.%N%z')
    printf "[%s]: \033[32mINFO:    \033[0m%s\n" "$stamp" "$*" | tee -a "$LOG_FILE"
}

# Warning log: timestamped yellow WARNING line to stdout and the log file.
function log::warning() {
    local stamp
    stamp=$(date +'%Y-%m-%dT%H:%M:%S.%N%z')
    printf "[%s]: \033[33mWARNING: \033[0m%s\n" "$stamp" "$*" | tee -a "$LOG_FILE"
}

# Access log: record the message in the access summary and also emit it
# as a normal INFO line to stdout and the log file.
function log::access() {
    local stamp
    ACCESS_INFO="${ACCESS_INFO}$*\n  "
    stamp=$(date +'%Y-%m-%dT%H:%M:%S.%N%z')
    printf "[%s]: \033[32mINFO:    \033[0m%s\n" "$stamp" "$*" | tee -a "$LOG_FILE"
}

# Exec log: timestamped blue EXEC line appended to the log file only
# (never shown on the terminal).
function log::exec() {
    local stamp
    stamp=$(date +'%Y-%m-%dT%H:%M:%S.%N%z')
    printf "[%s]: \033[34mEXEC:    \033[0m%s\n" "$stamp" "$*" >> "$LOG_FILE"
}

# Convert a dotted version string into a zero-padded comparable integer,
# e.g. "1.28.13" -> 1028013000 (missing components count as 0).
function utils::version_to_number() {
    printf '%s\n' "$*" | awk -F. '{ printf("%d%03d%03d%03d\n", $1, $2, $3, $4) }'
}

# Retry an arbitrary command with exponential backoff.
# $1   - maximum number of attempts
# $2.. - the command, evaluated with eval
# Returns: 0 on success, the command's last exit status after exhausting
#          all retries otherwise.
function utils::retry() {
    local retries=$1
    shift
    # All loop state is local so repeated calls do not leak `count`,
    # `wait` or (worst of all) a global named `exit` into the script.
    local count=0 rc=0 wait=0
    until eval "$*"; do
        rc=$?
        wait=$((2 ** count))
        count=$((count + 1))
        if [ "$count" -lt "$retries" ]; then
            echo "Retry $count/$retries exited $rc, retrying in $wait seconds..."
            sleep "$wait"
        else
            echo "Retry $count/$retries exited $rc, no more retries left."
            return "$rc"
        fi
    done
    return 0
}

# Quote a string so it survives being re-parsed by a child/remote shell.
# Empty input yields a literal '' pair; input containing only characters
# from a known-safe set is returned unchanged; anything else is wrapped
# in single quotes with embedded single quotes escaped.
function utils::quote() {
    # shellcheck disable=SC2046
    if [ $(echo "$*" | tr -d "\n" | wc -c) -eq 0 ]; then
        # Empty (or newline-only) input: emit a literal empty-string token.
        echo "''"
    elif [ $(echo "$*" | tr -d "[a-z][A-Z][0-9]:,.=~_/\n-" | wc -c) -gt 0 ]; then
        # Contains characters outside the safe set. The "1h;2,$H;$!d;g"
        # sed program slurps the whole (possibly multi-line) input into
        # the hold space so the substitutions apply to it as one block:
        # first escape each ' as '"'"', then wrap the whole text in
        # single quotes.
        printf "%s" "$*" | sed -e "1h;2,\$H;\$!d;g" -e "s/'/\'\"\'\"\'/g" | sed -e "1h;2,\$H;\$!d;g" -e "s/^/'/g" -e "s/$/'/g"
    else
        # Only safe characters: no quoting needed.
        echo "$*"
    fi
}

# Download a file onto the management node (optionally unzip it).
# $1 - source URL
# $2 - destination path
# $3 - pass "unzip" to extract the archive into the destination directory
# Returns: exit status of the remote download command (exits on failure).
function utils::download_file() {
    local url="$1"
    local dest="$2"
    local unzip_tag="${3:-1}"
    local dest_dirname; dest_dirname=$(dirname "$dest")
    local filename; filename=$(basename "$dest")
    # Fix: the log/check arguments used to be "$(unknown)", which executed
    # a nonexistent command; log the actual url/filename instead.
    log::info "[download]" "${filename} from ${url}"
    command::exec "${MGMT_NODE}" "
        set -e
        if [ ! -f \"${dest}\" ]; then
            [ ! -d \"${dest_dirname}\" ] && mkdir -pv \"${dest_dirname}\"
            wget --timeout=10 --waitretry=3 --tries=5 --retry-connrefused --no-check-certificate \"${url}\" -O \"${dest}\"
            if [[ \"${unzip_tag}\" == \"unzip\" ]]; then
                command -v unzip 2>/dev/null || yum install -y unzip
                unzip -o \"${dest}\" -d \"${dest_dirname}\"
            fi
        else
            echo \"${dest} is exists!\"
        fi
    "
    local status="$?"
    check::exit_code "$status" "download" "${filename}" "exit"
    return "$status"
}

# Return 0 if the first argument equals any of the remaining arguments,
# 1 otherwise.
function utils::is_element_in_array() {
    local -r needle="${1}"
    local -r haystack=("${@:2}")
    local candidate
    for candidate in "${haystack[@]}"; do
        if [[ "${candidate}" = "${needle}" ]]; then
            return 0
        fi
    done
    return 1
}

# Run a command locally (host == 127.0.0.1) or on a remote host over ssh.
# $1   - target host, $2.. - the command
# Globals read: SUDO_TAG, SUDO_USER, SUDO_PASSWORD, SSH_PASSWORD,
#   SSH_PRIVATE_KEY, SSH_OPTIONS, SSH_USER, SSH_PORT, LOG_FILE
# Globals written: COMMAND_OUTPUT (the command's stdout)
# Returns: exit status of the executed command
function command::exec() {
    local host=${1:-}
    shift
    local command="$*"
    if [[ "${SUDO_TAG:-}" == "1" ]]; then
        # NOTE(review): sudo_options is not declared local, so it leaks
        # into the global scope.
        sudo_options="sudo -H -n -u ${SUDO_USER}"
        if [[ "${SUDO_PASSWORD:-}" != "" ]]; then
            # With a password, drop -n and feed the password on stdin (-S).
            sudo_options="${sudo_options// -n/} -p \"\" -S <<< \"${SUDO_PASSWORD}\""
        fi
        command="$sudo_options bash -c $(utils::quote "$command")"
    fi
    # Quote the command once more so it survives eval / the remote shell.
    command="$(utils::quote "$command")"
    if [[ "${host}" == "127.0.0.1" ]]; then
        # Local execution; any sudo password is masked as zzzzzz in the log.
        log::exec "[command]" "bash -c $(printf "%s" "${command//${SUDO_PASSWORD:-}/zzzzzz}")"
        # shellcheck disable=SC2094
        COMMAND_OUTPUT=$(eval bash -c "${command}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
        local status=$?
    else
        # Remote execution over ssh: sshpass for password auth, -i for
        # private-key auth.
        local ssh_cmd="ssh"
        if [[ "${SSH_PASSWORD}" != "" ]]; then
            ssh_cmd="sshpass -p \"${SSH_PASSWORD}\" ${ssh_cmd}"
        elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
            [ -f "${SSH_PRIVATE_KEY}" ] || { log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."; exit 1; }
            ssh_cmd="${ssh_cmd} -i $SSH_PRIVATE_KEY"
        fi
        # Both the ssh password and the sudo password are masked in the log.
        log::exec "[command]" "${ssh_cmd//${SSH_PASSWORD:-}/zzzzzz} ${SSH_OPTIONS} ${SSH_USER}@${host} -p ${SSH_PORT} bash -c $(printf "%s" "${command//${SUDO_PASSWORD:-}/zzzzzz}")"
        # shellcheck disable=SC2094
        COMMAND_OUTPUT=$(eval "${ssh_cmd} ${SSH_OPTIONS} ${SSH_USER}@${host} -p ${SSH_PORT}" bash -c '"${command}"' 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
        local status=$?
    fi
    return $status
}

# Copy files locally (cp -rf) or to a remote host (scp -r).
# $1 - target host, $2 - source path(s), $3 - destination (default /tmp/)
# Globals read: SSH_PASSWORD, SSH_PRIVATE_KEY, SSH_OPTIONS, SSH_USER,
#   SSH_PORT, LOG_FILE
# Globals written: COMMAND_OUTPUT
# Returns: exit status of the copy
function command::scp() {
    local host=${1:-}
    local src=${2:-}
    local dest=${3:-/tmp/}
    if [[ "${host}" == "127.0.0.1" ]]; then
        # Local copy.
        local command="cp -rf ${src} ${dest}"
        log::exec "[command]" "bash -c \"${command}\""
        # shellcheck disable=SC2094
        COMMAND_OUTPUT=$(bash -c "${command}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
        local status=$?
    else
        # Remote copy over scp: sshpass for password auth, -i for
        # private-key auth.
        local scp_cmd="scp"
        if [[ "${SSH_PASSWORD}" != "" ]]; then
            scp_cmd="sshpass -p \"${SSH_PASSWORD}\" ${scp_cmd}"
        elif [[ "$SSH_PRIVATE_KEY" != "" ]]; then
            [ -f "${SSH_PRIVATE_KEY}" ] || { log::error "[exec]" "ssh private_key:${SSH_PRIVATE_KEY} not found."; exit 1; }
            scp_cmd="${scp_cmd} -i $SSH_PRIVATE_KEY"
        fi
        # NOTE(review): log::exec already appends to $LOG_FILE itself; the
        # trailing >> "$LOG_FILE" here is redundant but harmless.
        log::exec "[command]" "${scp_cmd} ${SSH_OPTIONS} -P ${SSH_PORT} -r ${src} ${SSH_USER}@${host}:${dest}" >> "$LOG_FILE"
        # shellcheck disable=SC2094
        COMMAND_OUTPUT=$(eval "${scp_cmd} ${SSH_OPTIONS} -P ${SSH_PORT} -r ${src} ${SSH_USER}@${host}:${dest}" 2>> "$LOG_FILE" | tee -a "$LOG_FILE")
        local status=$?
    fi
    return $status
}

# Ensure a command is available, installing its package via yum if not.
# $1 - command name, $2 - yum package providing it
function check::command_exists() {
    local cmd=${1}
    local package=${2}
    if ! command -V "$cmd" > /dev/null 2>&1; then
        log::warning "[check]" "I require $cmd but it's not installed."
        log::warning "[check]" "install $package package."
        command::exec "127.0.0.1" "yum install -y ${package}"
        check::exit_code "$?" "check" "$package install" "exit"
        return
    fi
    log::info "[check]" "$cmd command exists."
}

# Verify (and, if missing, install) all client-side commands the script
# needs, as "command:package" pairs.
function check::command() {
    local item
    for item in "ssh:openssh-clients" "sshpass:sshpass" "wget:wget"; do
        check::command_exists "${item%%:*}" "${item#*:}"
    done
}

# Verify ssh connectivity to every configured node (the local node is
# skipped); exits on the first unreachable host.
function check::ssh_conn() {
    for host in $MASTER_NODES $WORKER_NODES; do
        if [ "$host" == "127.0.0.1" ]; then
            continue
        fi
        command::exec "${host}" "echo 0"
        check::exit_code "$?" "check" "ssh $host connection" "exit"
    done
}

# Verify that every node runs one of the supported operating systems.
# The remote snippet sources /etc/os-release and checks whether
# "${ID}${VERSION_ID}" (e.g. rocky9.4) appears in OS_SUPPORT.
function check::os() {
    log::info "[check]" "os support: ${OS_SUPPORT}"
    for host in $MASTER_NODES $WORKER_NODES; do
        command::exec "${host}" "
            [ -f /etc/os-release ] && source /etc/os-release
            echo client_os:\${ID:-}\${VERSION_ID:-}
            if [[ \"${OS_SUPPORT}\" == *\"\${ID:-}\${VERSION_ID:-}\"* ]]; then
                exit 0
            fi
            exit 1
        "
        check::exit_code "$?" "check" "$host os support" "exit"
    done
}

# Verify that every node's kernel version is at least the given minimum.
# $1 - minimum kernel version, e.g. "4.18.0"
function check::kernel() {
    local version=${1:-}
    log::info "[check]" "kernel version not less than ${version}"
    # Normalize "a.b.c" into a zero-padded integer for numeric comparison.
    version=$(echo "${version}" | awk -F. '{ printf("%d%03d%03d\n", $1,$2,$3); }')
    for host in $MASTER_NODES $WORKER_NODES; do
        command::exec "${host}" "
            kernel_version=\$(uname -r)
            kernel_version=\$(echo \${kernel_version/-*} | awk -F. '{ printf(\"%d%03d%03d\n\", \$1,\$2,\$3); }')
            echo kernel_version \${kernel_version}
            [[ \${kernel_version} -ge ${version} ]] && exit 0 || exit 1
        "
        check::exit_code "$?" "check" "$host kernel version" "exit"
    done
}

# Log the outcome of a step based on its exit code; optionally abort the
# whole script on failure.
# $1 - exit code, $2 - subsystem tag, $3 - description,
# $4 - pass "exit" to terminate the script when $1 is non-zero
function check::exit_code() {
    local code=${1:-}
    local app=${2:-}
    local desc=${3:-}
    local exit_script=${4:-}
    if [[ "${code}" != "0" ]]; then
        log::error "[${app}]" "${desc} failed."
        [[ "$exit_script" == "exit" ]] && exit "$code"
        return
    fi
    log::info "[${app}]" "${desc} succeeded."
}

# Store the last command's captured output into a named global variable.
# $1 - name of the variable to assign
# $2 - exit status of the command that produced COMMAND_OUTPUT
# $3 - pass "exit" to terminate the script on failure
# Returns: $2
function get::command_output() {
    local app="$1"
    local status="$2"
    local is_exit="${3:-}"
    if [[ "$status" == "0" && "${COMMAND_OUTPUT}" != "" ]]; then
        log::info "[command]" "get $app value succeeded."
        # printf -v assigns the raw text; the previous eval-based
        # assignment re-parsed COMMAND_OUTPUT through the shell, breaking
        # on quotes and expanding $(...)/$ sequences in command output.
        printf -v "$app" '%s' "${COMMAND_OUTPUT}"
    else
        log::error "[command]" "get $app value failed."
        [[ "$is_exit" == "exit" ]] && exit "$status"
    fi
    return "$status"
}

# Normalize user-supplied data: turn comma-separated node lists into
# space-separated lists and make sure the download directory exists.
function transform::data() {
    # Parameter expansion avoids two subshell/tr invocations.
    MASTER_NODES="${MASTER_NODES//,/ }"
    WORKER_NODES="${WORKER_NODES//,/ }"
    # Quoted so a DOWNLOAD_DIR containing spaces does not word-split.
    mkdir -p "$DOWNLOAD_DIR"
}

######################################################################################################
# k8s操作函数
######################################################################################################

# Build the shared kubeadm nodeRegistration YAML fragment from the
# configured CRI endpoint, pause-image registry and pause version.
function kube::init_kubelet_node_registration() {
    printf -v KUBELET_NODE_REGISTRATION \
        '\nnodeRegistration:\n  criSocket: unix://%s\n  kubeletExtraArgs:\n    runtime-cgroups: /system.slice/docker.service\n    pod-infra-container-image: %s/pause:%s\n' \
        "${CRI_ENDPOINT}" "${REGISTRY_K8S_IO_REPO}" "${PAUSE_VERSION}"
}

# Apply a kubernetes manifest on the management node, with retries.
# $1 - path to a manifest file; if that file does not exist on the target
#      host, $2 is applied as inline manifest content via a heredoc.
function kube::apply() {
    local file=$1
    log::info "[apply]" "$file"
    command::exec "${MGMT_NODE}" "
        $(declare -f utils::retry)
        if [ -f \"$file\" ]; then
            utils::retry 6 kubectl apply --wait=true --timeout=10s --force -f \"$file\"
        else
            utils::retry 6 \"cat <<EOF | kubectl apply --wait=true --timeout=10s --force -f -
\$(printf \"%s\" \"${2:-}\")
EOF\"
        fi
    "
    local status="$?"
    check::exit_code "$status" "apply" "add $file" "exit"
    return "$status"
}

# Wait (with retries) until a resource reports the Ready condition.
# $1 - display name, $2 - namespace, $3 - resource kind/name,
# $4 - label selector (optional)
function kube::wait() {
    local app=$1
    local namespace=$2
    local resource=$3
    local selector=${4:-}
    # Brief pause so freshly created objects exist before `kubectl wait`.
    sleep 3
    log::info "[waiting]" "waiting $app"
    command::exec "${MGMT_NODE}" "
        $(declare -f utils::retry)
        utils::retry 6 kubectl wait --namespace ${namespace} \
            --for=condition=ready ${resource} \
            --selector=$selector \
            --timeout=60s
    "
    local status="$?"
    check::exit_code "$status" "waiting" "$app ${resource} ready"
    return "$status"
}

######################################################################################################
# 安装函数
######################################################################################################

# Preflight checks run before any cluster operation.
function check::preflight() {
    # required client-side commands
    check::command
    # ssh reachability of every node
    check::ssh_conn
    # supported operating system on every node
    check::os
    # api-server connectivity is only needed for mutating operations
    if (( ${ADD_TAG:-0} + ${DEL_TAG:-0} + ${UPGRADE_TAG:-0} + ${RENEW_CERT_TAG:-0} > 0 )); then
        check::apiserver_conn
    fi
}

# Upgrade the node kernel from the ELRepo repository and make the new
# kernel the default boot entry. Runs on the target node itself.
function script::upgrade_kernel() {
    # Import the ELRepo signing key
    rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
    # Install the ELRepo release package
    yum install -y https://www.elrepo.org/elrepo-release-9.el9.elrepo.noarch.rpm
    # Switch the repo to a China-local mirror (USTC)
    sed -i 's/mirrorlist=/#mirrorlist=/g' /etc/yum.repos.d/elrepo.repo
    sed -i 's#elrepo.org/linux#mirrors.ustc.edu.cn/elrepo#g' /etc/yum.repos.d/elrepo.repo
    yum makecache
    # Install the newest kernel (lt: long-term support, more stable;
    # ml: mainline, newer features)
    if ! yum install -y --disablerepo="*" --enablerepo=elrepo-kernel kernel-lt; then
        exit 1;
    fi
    # Boot into the newly installed kernel and enable cgroup options
    grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
    grubby --default-kernel
    grubby --args="cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
}

# Node initialization script (runs on the target node): disables
# selinux/swap/firewalld, switches yum repos to mirrors, configures
# limits, sysctl, shell history, journald, the login banner, time sync,
# ipvs kernel modules and auditd. Heredoc bodies below are written
# verbatim to system config files and are intentionally left untouched.
function script::init_node() {
    # Clean entries managed by earlier runs of this script
    sed -i -e "/$KUBE_API_SERVER/d" -e '/-worker-/d' -e '/-master-/d' /etc/hosts
    sed -i '/## k8s_install managed start/,/## k8s_install managed end/d' /etc/security/limits.conf /etc/systemd/system.conf /etc/bashrc /etc/rc.local /etc/audit/rules.d/audit.rules
    # Disable selinux
    sed -i '/SELINUX/s/enforcing/disabled/' /etc/selinux/config
    setenforce 0
    # Disable swap
    swapoff -a && sysctl -w vm.swappiness=0
    sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
    # Disable firewalld
    for target in firewalld python-firewall firewalld-filesystem iptables; do
        systemctl stop $target &>/dev/null || true
        systemctl disable $target &>/dev/null || true
    done
    # Switch the OS yum repos to a mirror (unless skipped)
    if [[ -f /etc/yum.repos.d/rocky.repo && "${SKIP_SET_OS_REPO,,}" == "false" ]]; then
        sed -e 's|^mirrorlist=|#mirrorlist=|g' \
            -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
            -i.bak \
            /etc/yum.repos.d/[Rr]ocky*.repo
    fi
    [[ "${SKIP_SET_OS_REPO,,}" == "false" ]] && yum install -y epel-release
    if [[ -f /etc/yum.repos.d/epel.repo && "${SKIP_SET_OS_REPO,,}" == "false" ]]; then
        sed -e 's!^mirrorlist=!#mirrorlist=!g' \
            -e 's!^metalink=!#metalink=!g' \
            -e 's!^#baseurl=!baseurl=!g' \
            -e 's!//download.*/pub!//mirrors.aliyun.com!g' \
            -e 's!http://mirrors\.aliyun!https://mirrors.aliyun!g' \
            -i /etc/yum.repos.d/epel.repo
    fi
    yum clean all
    # Update resource limits
    [ ! -f /etc/security/limits.conf_bak ] && cp /etc/security/limits.conf{,_bak}
    cat << EOF >> /etc/security/limits.conf
## k8s_install managed start
root    soft    nofile  655360
root    hard    nofile  655360
root    soft    nproc   655360
root    hard    nproc   655360
root    soft    core    unlimited
root    hard    core    unlimited
*       soft    nofile  655360
*       hard    nofile  655360
*       soft    nproc   655360
*       hard    nproc   655360
*       soft    core    unlimited
*       hard    core    unlimited
## k8s_install managed end
EOF
    [ -f /etc/security/limits.d/20-nproc.conf ] && sed -i 's#4096#655360#g' /etc/security/limits.d/20-nproc.conf
    cat << EOF >> /etc/systemd/system.conf
## k8s_install managed start
DefaultLimitCORE=infinity
DefaultLimitNOFILE=655360
DefaultLimitNPROC=655360
DefaultTasksMax=75%
## k8s_install managed end
EOF
    # Update sysctl (file content below is written verbatim)
    cat << EOF >  /etc/sysctl.d/99-kube.conf
# https://www.kernel.org/doc/Documentation/sysctl/
#############################################################################################
# 调整虚拟内存
#############################################################################################
# Default: 30
# 0 - 任何情况下都不使用swap。
# 1 - 除非内存不足（OOM），否则不使用swap。
vm.swappiness = 0

# 内存分配策略
#0 - 表示内核将检查是否有足够的可用内存供应用进程使用；如果有足够的可用内存，内存申请允许；否则，内存申请失败，并把错误返回给应用进程。
#1 - 表示内核允许分配所有的物理内存，而不管当前的内存状态如何。
#2 - 表示内核允许分配超过所有物理内存和交换空间总和的内存
vm.overcommit_memory = 1

# OOM时处理
# 1关闭，等于0时，表示当内存耗尽时，内核会触发OOM killer杀掉最耗内存的进程。
vm.panic_on_oom = 0

# vm.dirty_background_ratio 用于调整内核如何处理必须刷新到磁盘的脏页。
# Default value is 10.
# 该值是系统内存总量的百分比，在许多情况下将此值设置为5是合适的。
# 此设置不应设置为零。
vm.dirty_background_ratio = 5

# 内核强制同步操作将其刷新到磁盘之前允许的脏页总数
# 也可以通过更改 vm.dirty_ratio 的值（将其增加到默认值30以上（也占系统内存的百分比））来增加
# 推荐 vm.dirty_ratio 的值在60到80之间。
vm.dirty_ratio = 60

# vm.max_map_count 计算当前的内存映射文件数。
# mmap 限制（vm.max_map_count）的最小值是打开文件的ulimit数量（cat /proc/sys/fs/file-max）。
# 每128KB系统内存 map_count应该大约为1。 因此，在32GB系统上，max_map_count为262144。
# Default: 65530
vm.max_map_count = 2097152

#############################################################################################
# 调整文件
#############################################################################################
fs.may_detach_mounts = 1

# 增加文件句柄和inode缓存的大小，并限制核心转储。
fs.file-max = 2097152
fs.nr_open = 2097152
fs.suid_dumpable = 0

# 同时可以拥有的的异步IO请求数目
fs.aio-max-nr = 10000000
fs.aio-nr = 75552

# 文件监控
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 524288
fs.inotify.max_queued_events = 16384

#############################################################################################
# 调整网络设置
#############################################################################################
# 为每个套接字的发送和接收缓冲区分配的默认内存量。
net.core.wmem_default = 25165824
net.core.rmem_default = 25165824

# 为每个套接字的发送和接收缓冲区分配的最大内存量。
net.core.wmem_max = 25165824
net.core.rmem_max = 25165824

# 除了套接字设置外，发送和接收缓冲区的大小
# 必须使用net.ipv4.tcp_wmem和net.ipv4.tcp_rmem参数分别设置TCP套接字。
# 使用三个以空格分隔的整数设置这些整数，分别指定最小，默认和最大大小。
# 最大大小不能大于使用net.core.wmem_max和net.core.rmem_max为所有套接字指定的值。
# 合理的设置是最小4KiB，默认64KiB和最大2MiB缓冲区。
net.ipv4.tcp_wmem = 20480 12582912 25165824
net.ipv4.tcp_rmem = 20480 12582912 25165824

# 增加最大可分配的总缓冲区空间
# 以页为单位（4096字节）进行度量
net.ipv4.tcp_mem = 65536 25165824 262144
net.ipv4.udp_mem = 65536 25165824 262144

# 为每个套接字的发送和接收缓冲区分配的最小内存量。
net.ipv4.udp_wmem_min = 16384
net.ipv4.udp_rmem_min = 16384

# 启用TCP窗口缩放，客户端可以更有效地传输数据，并允许在代理方缓冲该数据。
net.ipv4.tcp_window_scaling = 1

# 提高同时接受连接数。
net.ipv4.tcp_max_syn_backlog = 10240

# 将net.core.netdev_max_backlog的值增加到大于默认值1000
# 可以帮助突发网络流量，特别是在使用数千兆位网络连接速度时，
# 通过允许更多的数据包排队等待内核处理它们。
net.core.netdev_max_backlog = 65536

# 增加选项内存缓冲区的最大数量
net.core.optmem_max = 25165824

# 被动TCP连接的SYNACK次数。
net.ipv4.tcp_synack_retries = 2

# 允许的本地端口范围。
net.ipv4.ip_local_port_range = 2048 65535

# 防止TCP时间等待
# Default: net.ipv4.tcp_rfc1337 = 0
net.ipv4.tcp_rfc1337 = 1

# 减少tcp_fin_timeout连接的时间默认值
net.ipv4.tcp_fin_timeout = 15

# 积压套接字的最大数量。
# Default is 128.
net.core.somaxconn = 32768

# 打开syncookies以进行SYN洪水攻击保护。
net.ipv4.tcp_syncookies = 1

# 避免Smurf攻击
# 发送伪装的ICMP数据包，目的地址设为某个网络的广播地址，源地址设为要攻击的目的主机，
# 使所有收到此ICMP数据包的主机都将对目的主机发出一个回应，使被攻击主机在某一段时间内收到成千上万的数据包
net.ipv4.icmp_echo_ignore_broadcasts = 1

# 为icmp错误消息打开保护
net.ipv4.icmp_ignore_bogus_error_responses = 1

# 启用自动缩放窗口。
# 如果延迟证明合理，这将允许TCP缓冲区超过其通常的最大值64K。
net.ipv4.tcp_window_scaling = 1

# 打开并记录欺骗，源路由和重定向数据包
net.ipv4.conf.all.log_martians = 1
net.ipv4.conf.default.log_martians = 1

# 告诉内核有多少个未附加的TCP套接字维护用户文件句柄。 万一超过这个数字，
# 孤立的连接会立即重置，并显示警告。
# Default: net.ipv4.tcp_max_orphans = 65536
net.ipv4.tcp_max_orphans = 65536

# 不要在关闭连接时缓存指标
net.ipv4.tcp_no_metrics_save = 1

# 启用RFC1323中定义的时间戳记：
# Default: net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_timestamps = 1

# 启用选择确认。
# Default: net.ipv4.tcp_sack = 1
net.ipv4.tcp_sack = 1

# 增加 tcp-time-wait 存储桶池大小，以防止简单的DOS攻击。
# net.ipv4.tcp_tw_recycle 已从Linux 4.12中删除。请改用net.ipv4.tcp_tw_reuse。
net.ipv4.tcp_max_tw_buckets = 14400
net.ipv4.tcp_tw_reuse = 1

# accept_source_route 选项使网络接口接受设置了严格源路由（SSR）或松散源路由（LSR）选项的数据包。
# 以下设置将丢弃设置了SSR或LSR选项的数据包。
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0

# 打开反向路径过滤
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1

# 禁用ICMP重定向接受
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0

# 禁止发送所有IPv4 ICMP重定向数据包。
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0

# 开启IP转发.
net.ipv4.ip_forward = 1

# 禁止IPv6
net.ipv6.conf.lo.disable_ipv6=1
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1

# 要求iptables不对bridge的数据进行处理
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1

# arp缓存
# 存在于 ARP 高速缓存中的最少层数，如果少于这个数，垃圾收集器将不会运行。缺省值是 128
net.ipv4.neigh.default.gc_thresh1=2048
# 保存在 ARP 高速缓存中的最多的记录软限制。垃圾收集器在开始收集前，允许记录数超过这个数字 5 秒。缺省值是 512
net.ipv4.neigh.default.gc_thresh2=4096
# 保存在 ARP 高速缓存中的最多记录的硬限制，一旦高速缓存中的数目高于此，垃圾收集器将马上运行。缺省值是 1024
net.ipv4.neigh.default.gc_thresh3=8192

# 持久连接
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10

# conntrack表
net.nf_conntrack_max=1048576
net.netfilter.nf_conntrack_max=1048576
net.netfilter.nf_conntrack_buckets=262144
net.netfilter.nf_conntrack_tcp_timeout_fin_wait=30
net.netfilter.nf_conntrack_tcp_timeout_time_wait=30
net.netfilter.nf_conntrack_tcp_timeout_close_wait=15
net.netfilter.nf_conntrack_tcp_timeout_established=300

#############################################################################################
# 调整内核参数
#############################################################################################

# 地址空间布局随机化（ASLR）是一种用于操作系统的内存保护过程，可防止缓冲区溢出攻击。
# 这有助于确保与系统上正在运行的进程相关联的内存地址不可预测，
# 因此，与这些流程相关的缺陷或漏洞将更加难以利用。
# Accepted values: 0 = 关闭, 1 = 保守随机化, 2 = 完全随机化
kernel.randomize_va_space = 2

# 调高 PID 数量
kernel.pid_max = 65536
kernel.threads-max=30938

# coredump
kernel.core_pattern=core

# 决定了检测到soft lockup时是否自动panic，缺省值是0
kernel.softlockup_all_cpu_backtrace=1
kernel.softlockup_panic=1
EOF
    # Update shell history settings
    cat << EOF >> /etc/bashrc
## k8s_install managed start
# history actions record，include action time, user, login ip
HISTFILESIZE=5000
HISTSIZE=5000
USER_IP=\$(who -u am i 2>/dev/null | awk '{print \$NF}' | sed -e 's/[()]//g')
if [ -z \$USER_IP ]
then
USER_IP=\$(hostname -i)
fi
HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S \$USER_IP:\$(whoami) "
export HISTFILESIZE HISTSIZE HISTTIMEFORMAT

# PS1
PS1='\[\033[0m\]\[\033[1;36m\][\u\[\033[0m\]@\[\033[1;32m\]\h\[\033[0m\] \[\033[1;31m\]\w\[\033[0m\]\[\033[1;36m\]]\[\033[33;1m\]\\$ \[\033[0m\]'
## k8s_install managed end
EOF
    # Update journald
    mkdir -p /var/log/journal /etc/systemd/journald.conf.d
    cat << EOF > /etc/systemd/journald.conf.d/99-prophet.conf
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间 5G
SystemMaxUse=5G
# 单日志文件最大 200M
SystemMaxFileSize=200M
# 日志保存时间 3 周
MaxRetentionSec=3week
# 不将日志转发到 syslog
ForwardToSyslog=no
EOF
    # Set up the ssh login banner (motd)
    cat << EOF > /etc/profile.d/zz-ssh-login-info.sh
#!/bin/sh
#
# @Time    : 2020-02-04
# @Author  : lework
# @Desc    : ssh login banner

export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:\$PATH
shopt -q login_shell && : || return 0

# os
upSeconds="\$(cut -d. -f1 /proc/uptime)"
secs=\$((\${upSeconds}%60))
mins=\$((\${upSeconds}/60%60))
hours=\$((\${upSeconds}/3600%24))
days=\$((\${upSeconds}/86400))
UPTIME_INFO=\$(printf "%d days, %02dh %02dm %02ds" "\$days" "\$hours" "\$mins" "\$secs")

if [ -f /etc/redhat-release ] ; then
PRETTY_NAME=\$(< /etc/redhat-release)

elif [ -f /etc/debian_version ]; then
DIST_VER=\$(</etc/debian_version)
PRETTY_NAME="\$(grep PRETTY_NAME /etc/os-release | sed -e 's/PRETTY_NAME=//g' -e  's/"//g') (\$DIST_VER)"

else
PRETTY_NAME=\$(cat /etc/*-release | grep "PRETTY_NAME" | sed -e 's/PRETTY_NAME=//g' -e 's/"//g')
fi

if [[ -d "/system/app/" && -d "/system/priv-app" ]]; then
model="\$(getprop ro.product.brand) \$(getprop ro.product.model)"

elif [[ -f /sys/devices/virtual/dmi/id/product_name ||
-f /sys/devices/virtual/dmi/id/product_version ]]; then
model="\$(< /sys/devices/virtual/dmi/id/product_name)"
model+=" \$(< /sys/devices/virtual/dmi/id/product_version)"

elif [[ -f /sys/firmware/devicetree/base/model ]]; then
model="\$(< /sys/firmware/devicetree/base/model)"

elif [[ -f /tmp/sysinfo/model ]]; then
model="\$(< /tmp/sysinfo/model)"
fi

MODEL_INFO=\${model}
KERNEL=\$(uname -srmo)
USER_NUM=\$(who -u | wc -l)
RUNNING=\$(ps ax | wc -l | tr -d " ")

# disk
totaldisk=\$(df -h -x devtmpfs -x tmpfs -x debugfs -x aufs -x overlay --total 2>/dev/null | tail -1)
disktotal=\$(awk '{print \$2}' <<< "\${totaldisk}")
diskused=\$(awk '{print \$3}' <<< "\${totaldisk}")
diskusedper=\$(awk '{print \$5}' <<< "\${totaldisk}")
DISK_INFO="\033[0;33m\${diskused}\033[0m of \033[1;34m\${disktotal}\033[0m disk space used (\033[0;33m\${diskusedper}\033[0m)"

# cpu
cpu=\$(awk -F':' '/^model name/ {print \$2}' /proc/cpuinfo | uniq | sed -e 's/^[ \t]*//')
cpun=\$(grep -c '^processor' /proc/cpuinfo)
cpuc=\$(grep '^cpu cores' /proc/cpuinfo | tail -1 | awk '{print \$4}')
cpup=\$(grep '^physical id' /proc/cpuinfo | wc -l)
CPU_INFO="\${cpu} \${cpup}P \${cpuc}C \${cpun}L"

# get the load averages
read one five fifteen rest < /proc/loadavg
LOADAVG_INFO="\033[0;33m\${one}\033[0m / \${five} / \${fifteen} with \033[1;34m\$(( cpun*cpuc ))\033[0m core(s) at \033[1;34m\$(grep '^cpu MHz' /proc/cpuinfo | tail -1 | awk '{print \$4}')\033 MHz"

# mem
MEM_INFO="\$(cat /proc/meminfo | awk '/MemTotal:/{total=\$2/1024/1024;next} /MemAvailable:/{use=total-\$2/1024/1024; printf("\033[0;33m%.2fGiB\033[0m of \033[1;34m%.2fGiB\033[0m RAM used (\033[0;33m%.2f%%\033[0m)",use,total,(use/total)*100);}')"

# network
# extranet_ip=" and \$(curl -s ip.cip.cc)"
IP_INFO="\$(ip a|grep -E '^[0-9]+: em*|^[0-9]+: eno*|^[0-9]+: enp*|^[0-9]+: ens*|^[0-9]+: eth*|^[0-9]+: wlp*' -A3|grep inet|awk -F ' ' '{print \$2}'|cut -f1 -d/|xargs echo)"

# Container info
CONTAINER_INFO="\$(sudo /usr/bin/crictl ps -a -o yaml 2> /dev/null | awk '/^  state: /{gsub("CONTAINER_", "", \$NF) ++S[\$NF]}END{for(m in S) printf "%s%s:%s ",substr(m,1,1),tolower(substr(m,2)),S[m]}')Images:\$(sudo /usr/bin/crictl images -q 2> /dev/null | wc -l)"

# info
echo -e "
Information as of: \033[1;34m\$(date +"%Y-%m-%d %T")\033[0m

\033[0;1;31mProduct\033[0m............: \${MODEL_INFO}
\033[0;1;31mOS\033[0m.................: \${PRETTY_NAME}
\033[0;1;31mKernel\033[0m.............: \${KERNEL}
\033[0;1;31mCPU\033[0m................: \${CPU_INFO}

\033[0;1;31mHostname\033[0m...........: \033[1;34m\$(hostname)\033[0m
\033[0;1;31mIP Addresses\033[0m.......: \033[1;34m\${IP_INFO}\033[0m

\033[0;1;31mUptime\033[0m.............: \033[0;33m\${UPTIME_INFO}\033[0m
\033[0;1;31mMemory\033[0m.............: \${MEM_INFO}
\033[0;1;31mLoad Averages\033[0m......: \${LOADAVG_INFO}
\033[0;1;31mDisk Usage\033[0m.........: \${DISK_INFO}

\033[0;1;31mUsers online\033[0m.......: \033[1;34m\${USER_NUM}\033[0m
\033[0;1;31mRunning Processes\033[0m..: \033[1;34m\${RUNNING}\033[0m
\033[0;1;31mContainer Info\033[0m.....: \${CONTAINER_INFO}
"
EOF
    chmod +x /etc/profile.d/zz-ssh-login-info.sh
    # Allow crictl via sudo without a password (used by the banner above)
    echo 'ALL ALL=(ALL) NOPASSWD:/usr/bin/crictl' > /etc/sudoers.d/crictl
    # Time synchronization (replace ntp with chrony)
    ntpd --help >/dev/null 2>&1 && yum remove -y ntp
    yum install -y chrony
    [ ! -f /etc/chrony.conf_bak ] && cp /etc/chrony.conf{,_bak}
    cat << EOF > /etc/chrony.conf
server ntp.aliyun.com iburst
server cn.ntp.org.cn iburst
server ntp.shu.edu.cn iburst
server 0.cn.pool.ntp.org iburst
server 1.cn.pool.ntp.org iburst
server 2.cn.pool.ntp.org iburst
server 3.cn.pool.ntp.org iburst

driftfile /var/lib/chrony/drift
makestep 1.0 3
logdir /var/log/chrony
EOF
    timedatectl set-timezone Asia/Shanghai
    chronyd -q -t 1 'server cn.pool.ntp.org iburst maxsamples 1'
    systemctl enable chronyd
    systemctl start chronyd
    chronyc sources -v
    chronyc sourcestats
    hwclock --systohc
    # Install required base packages
    yum install -y curl wget
    # Configure ipvs and required kernel modules
    yum install -y ipvsadm ipset sysstat conntrack libseccomp
    [ -f /etc/modules-load.d/ipvs.conf ] && cp -f /etc/modules-load.d/ipvs.conf{,_bak}
    module=(
        ip_vs
        ip_vs_rr
        ip_vs_wrr
        ip_vs_sh
        overlay
        nf_conntrack
        br_netfilter
    )
    for kernel_module in "${module[@]}"; do
        /sbin/modinfo -F filename "$kernel_module" |& grep -qv ERROR && echo "$kernel_module" >> /etc/modules-load.d/ipvs.conf
    done
    systemctl restart systemd-modules-load
    systemctl enable systemd-modules-load
    sysctl --system
    # Audit rules
    yum install -y audit audit-libs
    cat << EOF >> /etc/audit/rules.d/audit.rules
## k8s_install managed start
# Ignore errors
-i

# SYSCALL
-a always,exit -F arch=b64 -S kill,tkill,tgkill -F a1=9 -F key=trace_kill_9
-a always,exit -F arch=b64 -S kill,tkill,tgkill -F a1=15 -F key=trace_kill_15

# docker
-w /usr/bin/dockerd -k docker
-w /var/lib/docker -k docker
-w /etc/docker -k docker
-w /usr/lib/systemd/system/docker.service -k docker
-w /etc/systemd/system/docker.service -k docker
-w /usr/lib/systemd/system/docker.socket -k docker
-w /etc/default/docker -k docker
-w /etc/sysconfig/docker -k docker
-w /etc/docker/daemon.json -k docker

# containerd
-w /usr/bin/containerd -k containerd
-w /var/lib/containerd -k containerd
-w /usr/lib/systemd/system/containerd.service -k containerd
-w /etc/containerd/config.toml -k containerd

# cri-o
-w /usr/bin/crio -k cri-o
-w /etc/crio -k cri-o

# runc
-w /usr/bin/runc -k runc

# kube
-w /usr/bin/kubeadm -k kubeadm
-w /usr/bin/kubelet -k kubelet
-w /usr/bin/kubectl -k kubectl
-w /var/lib/kubelet -k kubelet
-w /etc/kubernetes -k kubernetes
## k8s_install managed end
EOF
    chmod 600 /etc/audit/rules.d/audit.rules
    sed -i 's#max_log_file =.*#max_log_file = 80#g' /etc/audit/auditd.conf
    if [ -f /usr/libexec/initscripts/legacy-actions/auditd/restart ]; then
        /usr/libexec/initscripts/legacy-actions/auditd/restart
    else
        systemctl stop auditd && systemctl start auditd
    fi
    systemctl enable auditd
    grep single-request-reopen /etc/resolv.conf || sed -i '1ioptions timeout:2 attempts:3 rotate single-request-reopen' /etc/resolv.conf
    ipvsadm --clear
    iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
}

## 安装 containerd
#function script::install_containerd() {
#    local version="-${1:-latest}"
#    version="${version#-latest}"
#    yum install -y yum-utils
#    yum-config-manager --add-repo ${CRI_REPO}
#    [ -f "$(which runc)" ]  && yum remove -y runc
#    [ -f "$(which containerd)" ]  && yum remove -y containerd.io
#    yum install -y containerd.io"${version}" containernetworking bash-completion
#    [ -d /etc/bash_completion.d ] && crictl completion bash > /etc/bash_completion.d/crictl
#    containerd config default > /etc/containerd/config.toml
#    sed -i -e "s#k8s.gcr.io#${K8S_GCR_IO_REPO}#g" \
#        -e "s#registry.k8s.io#${REGISTRY_K8S_IO_REPO}#g" \
#        -e "s#https://registry-1.docker.io#${CRI_REGISTRY_MIRRORS}#g" \
#        -e "s#SystemdCgroup = false#SystemdCgroup = true#g" \
#        -e "s#oom_score = 0#oom_score = -999#" \
#        -e "s#max_container_log_line_size = 16384#max_container_log_line_size = 65535#" \
#        -e "s#max_concurrent_downloads = 3#max_concurrent_downloads = 10#g" /etc/containerd/config.toml
#    grep docker.io /etc/containerd/config.toml ||  sed -i -e "/registry.mirrors]/a\ \ \ \ \ \ \ \ [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"docker.io\"]\n           endpoint = [\"$CRI_REGISTRY_MIRRORS\"]" /etc/containerd/config.toml
#    cat << EOF > /etc/crictl.yaml
#runtime-endpoint: unix:///run/containerd/containerd.sock
#image-endpoint: unix:///run/containerd/containerd.sock
#timeout: 2
#debug: false
#pull-image-on-create: true
#disable-pull-on-run: false
#EOF
#    systemctl restart containerd
#    systemctl enable containerd
#}

# 安装 docker
function script::install_docker() {
    # Install docker-ce and configure both docker and containerd on this node.
    # $1      - docker-ce version (e.g. "3:26.1.3"); "latest"/empty installs the newest.
    # Globals - CRI_REPO, CRI_DATA_ROOT, CRI_REGISTRY_MIRRORS, CRI_ENDPOINT,
    #           K8S_GCR_IO_REPO, REGISTRY_K8S_IO_REPO (exported by the caller
    #           before this function is shipped via `declare -f`).
    local version="-${1:-latest}"
    version="${version#-latest}"   # "latest" maps to "" so yum resolves the newest build
    # install docker
    yum install -y yum-utils
    yum-config-manager --add-repo "${CRI_REPO}"
    # remove any pre-existing runtime packages before (re)installing the pinned version
    command -v docker >/dev/null 2>&1 && yum remove -y docker-ce docker-ce-cli containerd.io
    yum install -y "docker-ce${version}" "docker-ce-cli${version}" containerd.io bash-completion
    # configure docker
    [ -f /usr/share/bash-completion/completions/docker ] && cp -f /usr/share/bash-completion/completions/docker /etc/bash_completion.d/
    # mkdir -p: idempotent and creates missing parents (bare mkdir failed when
    # the directory already existed or CRI_DATA_ROOT was nested); quote the
    # data root in case the path contains spaces
    mkdir -p /etc/docker
    mkdir -p "$CRI_DATA_ROOT"
    cat << EOF > /etc/docker/daemon.json
{
    "data-root": "$CRI_DATA_ROOT",
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "200m",
        "max-file": "3"
    },
    "default-ulimits": {
        "nofile": {
            "Name": "nofile",
            "Hard": 655360,
            "Soft": 655360
        },
        "nproc": {
            "Name": "nproc",
            "Hard": 655360,
            "Soft": 655360
        }
    },
    "live-restore": true,
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 10,
    "registry-mirrors": [
        "$CRI_REGISTRY_MIRRORS"
    ],
    "insecure-registries": [
    ]
}
EOF
    # containerd is already shipped by the docker-ce repo (containerd.io),
    # so no separate containerd install is needed here.
    # configure containerd
    [ -d /etc/bash_completion.d ] && crictl completion bash > /etc/bash_completion.d/crictl
    containerd config default > /etc/containerd/config.toml
    sed -i -e "s#k8s.gcr.io#${K8S_GCR_IO_REPO}#g" \
        -e "s#registry.k8s.io#${REGISTRY_K8S_IO_REPO}#g" \
        -e "s#https://registry-1.docker.io#${CRI_REGISTRY_MIRRORS}#g" \
        -e "s#SystemdCgroup = false#SystemdCgroup = true#g" \
        -e "s#oom_score = 0#oom_score = -999#" \
        -e "s#max_container_log_line_size = 16384#max_container_log_line_size = 65535#" \
        -e "s#max_concurrent_downloads = 3#max_concurrent_downloads = 10#g" /etc/containerd/config.toml
    # make sure docker.io has a mirror entry even if the default config lacks one
    grep docker.io /etc/containerd/config.toml ||  sed -i -e "/registry.mirrors]/a\ \ \ \ \ \ \ \ [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"docker.io\"]\n           endpoint = [\"$CRI_REGISTRY_MIRRORS\"]" /etc/containerd/config.toml
    # newer containerd templates comment oom_score out; cover that spelling too
    sed -i 's|#oom_score = 0|oom_score = -999|' /etc/containerd/config.toml
    cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix://$CRI_ENDPOINT
image-endpoint: unix://$CRI_ENDPOINT
timeout: 2
debug: false
pull-image-on-create: true
disable-pull-on-run: false
EOF
    systemctl enable containerd
    systemctl restart containerd
    systemctl enable docker
    systemctl restart docker
}

# 安装 kube
function script::install_kube() {
    # Install kubeadm/kubelet/kubectl from the community-owned pkgs.k8s.io repos.
    # $1 - kube version (e.g. "1.28.13"); "latest"/empty falls back to a fixed branch.
    local version="-${1:-latest}"
    version="${version#-latest}"
    # Derive the repo branch from the full version ("-1.28.13" -> "1.28").
    # Declared local: previously `repo` leaked into the caller's scope.
    local repo="${version%.*}"
    repo="${repo//-}"
    [ -z "${repo}" ] && repo="1.30"   # "latest": use a fixed stable branch
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v${repo}/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v${repo}/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
    # remove any previous binaries so the pinned version wins
    [ -f /usr/bin/kubeadm ]  && yum remove -y kubeadm
    [ -f /usr/bin/kubelet ]  && yum remove -y kubelet
    [ -f /usr/bin/kubectl ]  && yum remove -y kubectl
    # the repo file excludes kube packages by default; override for this install
    yum install -y "kubeadm${version}" "kubelet${version}" "kubectl${version}" --disableexcludes=kubernetes
    if [ -d /etc/bash_completion.d ]; then
        kubectl completion bash > /etc/bash_completion.d/kubectl
        kubeadm completion bash > /etc/bash_completion.d/kubeadm
    fi
    [ ! -d /usr/lib/systemd/system/kubelet.service.d ] && mkdir -p /usr/lib/systemd/system/kubelet.service.d
    # pre-create the cgroup slices expected by the kubelet config's
    # kubeReservedCgroup/systemReservedCgroup settings before kubelet starts
    cat << EOF > /usr/lib/systemd/system/kubelet.service.d/11-cgroup.conf
[Service]
CPUAccounting=true
MemoryAccounting=true
BlockIOAccounting=true
ExecStartPre=/bin/bash -c '/bin/mkdir -p /sys/fs/cgroup/{cpuset,memory,hugetlb,systemd,pids,"cpu,cpuacct"}/{system,kube,kubepods}.slice||:'
Slice=kube.slice
EOF
    systemctl daemon-reload
    systemctl enable kubelet
    systemctl restart kubelet
}

# 安装 haproxy
function script::install_haproxy() {
    # Install and configure haproxy as a local TCP load balancer in front of
    # the kube-apiservers. Arguments: the apiserver address list ("$*").
    local backend_hosts="$*"
    if [ -f /usr/bin/haproxy ]; then
        yum remove -y haproxy
    fi
    yum install -y haproxy rsyslog
    # keep a one-time backup of the stock configuration
    if [ ! -f /etc/haproxy/haproxy.cfg_bak ]; then
        cp /etc/haproxy/haproxy.cfg{,_bak}
    fi
    cat << EOF > /etc/haproxy/haproxy.cfg
global
log /dev/log    local0
log /dev/log    local1 notice
tune.ssl.default-dh-param 2048

defaults
log global
mode http
option dontlognull
timeout connect 5000ms
timeout client 600000ms
timeout server 600000ms

listen stats
bind :19090
mode http
balance
stats uri /haproxy_stats
stats auth admin:admin123
stats admin if TRUE

frontend kube-apiserver-https
mode tcp
option tcplog
bind :6443
default_backend kube-apiserver-backend

backend kube-apiserver-backend
mode tcp
balance roundrobin
stick-table type ip size 200k expire 30m
stick on src
$(n=0; for srv in $backend_hosts; do n=$((n+1)); echo "    server apiserver${n} ${srv}:6443 check"; done)
EOF
    # route haproxy's syslog output into its own log file
    cat <<EOF > /etc/rsyslog.d/haproxy.conf
local0.* /var/log/haproxy.log
local1.* /var/log/haproxy.log
EOF
    systemctl enable haproxy
    systemctl restart haproxy
    systemctl enable rsyslog
    systemctl restart rsyslog
}

######################################################################################################
# 流程函数
######################################################################################################

# 升级节点内核
function init::upgrade_kernel() {
    # When --upgrade-kernel was requested, upgrade the kernel on every node,
    # schedule reboots, record progress in SETUP_STATE_FILE, and exit asking
    # the operator to re-run the script without the flag.
    [[ "${UPGRADE_KERNEL_TAG:-}" != "1" ]] && return
    # state file path is now quoted (was unquoted: SC2086, broke on spaces)
    if grep -q "init_upgrade_kernel" "$SETUP_STATE_FILE"; then
        log::info "[skip]" "跳过已完成步骤 init::upgrade_kernel"
        return
    fi
    for host in $MASTER_NODES $WORKER_NODES; do
        log::info "[init]" "upgrade kernel: $host"
        command::exec "${host}" "
            $(declare -f script::upgrade_kernel)
            script::upgrade_kernel
        "
        check::exit_code "$?" "init" "upgrade kernel $host" "exit"
    done
    # reboot asynchronously after 15s so the ssh session can return first
    for host in $MASTER_NODES $WORKER_NODES; do
        command::exec "${host}" "bash -c 'sleep 15 && reboot' &>/dev/null &"
        check::exit_code "$?" "init" "$host: Wait for 15s to restart"
    done
    log::info "[notice]" "Please execute the command again!"
    log::access "[command]" "bash $SCRIPT_FILE_NAME ${SCRIPT_PARAMETER// --upgrade-kernel/}"
    echo "init_upgrade_kernel" >> "$SETUP_STATE_FILE"
    exit 0
}

# 初始化节点配置
function init::node_config() {
    # Initialize OS config on all nodes: run script::init_node, set hostname
    # and /etc/hosts entries, and install the apiserver audit policy on the
    # masters. Reads $node_hosts from the caller via bash dynamic scoping.
    local master_index=${master_index:-1}
    local worker_index=${worker_index:-1}
    # discover the management node's internal IP
    if [[ "$MGMT_NODE" == "127.0.0.1" || "$MGMT_NODE_IP" == "" ]]; then
        log::info "[init]" "Get $MGMT_NODE InternalIP."
        command::exec "${MGMT_NODE}" "
            ip -4 route get 8.8.8.8 2>/dev/null | head -1 | awk '{print \$7}'
        "
        get::command_output "MGMT_NODE_IP" "$?" "exit"
        if [[ "$MGMT_NODE" != "$MGMT_NODE_IP" ]]; then
            log::warning "[init]" "ip不相同: $MGMT_NODE(MGMT_NODE) != $MGMT_NODE_IP(MGMT_NODE_IP)"
        fi
    else
        MGMT_NODE_IP=$MGMT_NODE
    fi
    log::info "[init]" "$MGMT_NODE InternalIP: $MGMT_NODE_IP"
    # master nodes
    for host in $MASTER_NODES; do
        log::info "[init]" "master: $host"
        command::exec "${host}" "
            export KUBE_API_SERVER=${KUBE_API_SERVER} SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
            $(declare -f script::init_node)
            script::init_node
        "
        check::exit_code "$?" "init" "init master $host" "exit"
        # hostname and name resolution: masters resolve the API server name
        # to the management node's IP
        command::exec "${host}" "
            printf \"\\n${MGMT_NODE_IP} $KUBE_API_SERVER\\n$node_hosts\" >> /etc/hosts
            hostnamectl set-hostname ${HOSTNAME_PREFIX}-master-node${master_index}
        "
        check::exit_code "$?" "init" "$host set hostname and hostname resolution"
        # apiserver audit policy (mounted by kubeadm::init's extraVolumes)
        log::info "[init]" "$host: set audit-policy file."
        command::exec "${host}" "
            [ ! -d /etc/kubernetes ] && mkdir -p /etc/kubernetes
            cat << EOF > /etc/kubernetes/audit-policy.yaml
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
EOF
        "
        check::exit_code "$?" "init" "$host: set audit-policy file" "exit"
        master_index=$((master_index + 1))
    done
    # worker nodes
    for host in $WORKER_NODES; do
        log::info "[init]" "worker: $host"
        command::exec "${host}" "
            export KUBE_API_SERVER=${KUBE_API_SERVER} SKIP_SET_OS_REPO=${SKIP_SET_OS_REPO:-false}
            $(declare -f script::init_node)
            script::init_node
        "
        check::exit_code "$?" "init" "init worker $host" "exit"
        # hostname and name resolution: workers resolve the API server name
        # to the local haproxy on 127.0.0.1
        command::exec "${host}" "
            printf \"\\n127.0.0.1 $KUBE_API_SERVER\\n$node_hosts\" >> /etc/hosts
            hostnamectl set-hostname ${HOSTNAME_PREFIX}-worker-node${worker_index}
        "
        # previously unchecked; mirror the master loop's verification
        check::exit_code "$?" "init" "$host set hostname and hostname resolution"
        worker_index=$((worker_index + 1))
    done
}

# 初始化节点
function init::node() {
    # Upgrade kernels first (may reboot the nodes and exit the script), then
    # build the /etc/hosts payload consumed by init::node_config through
    # bash dynamic scoping, and run per-node initialization.
    init::upgrade_kernel
    local node_hosts=""
    local idx=1
    for node in $MASTER_NODES; do
        node_hosts+="\n${node} ${HOSTNAME_PREFIX}-master-node${idx}"
        idx=$((idx + 1))
    done
    idx=1
    for node in $WORKER_NODES; do
        node_hosts+="\n${node} ${HOSTNAME_PREFIX}-worker-node${idx}"
        idx=$((idx + 1))
    done
    init::node_config
}

# 安装包
function install::package() {
    # Install the container runtime and kube components on every node, a
    # haproxy load balancer on workers, helm everywhere, and (optionally)
    # a kubeadm client that issues 10-year certificates.
    # install cri & kube on all nodes
    for host in $MASTER_NODES $WORKER_NODES; do
        # install cri (docker-ce + containerd); the function is serialized
        # via declare -f and executed remotely with the exported globals
        log::info "[install]" "install docker on $host."
        command::exec "${host}" "
            export CRI_REPO=$CRI_REPO CRI_DATA_ROOT=$CRI_DATA_ROOT CRI_REGISTRY_MIRRORS=$CRI_REGISTRY_MIRRORS CRI_ENDPOINT=$CRI_ENDPOINT
            export K8S_GCR_IO_REPO=$K8S_GCR_IO_REPO REGISTRY_K8S_IO_REPO=$REGISTRY_K8S_IO_REPO QUAY_IO_REPO=$QUAY_IO_REPO DOCKER_IO_REPO=$DOCKER_IO_REPO
            $(declare -f script::install_docker)
            script::install_docker $CRI_VERSION
        "
        check::exit_code "$?" "install" "install docker on $host"
        # install kubeadm/kubelet/kubectl
        log::info "[install]" "install kube on $host"
        command::exec "${host}" "
            $(declare -f script::install_kube)
            script::install_kube $KUBE_VERSION
        "
        check::exit_code "$?" "install" "install kube on $host"
    done
    # resolve the apiserver address list handed to the worker-side haproxy
    local apiservers=$MASTER_NODES
    if [[ "$apiservers" == "127.0.0.1" ]]; then
        # single local master: discover its routable source IP
        command::exec "${MGMT_NODE}" "ip -o route get to 8.8.8.8 | sed -n 's/.*src \([0-9.]\+\).*/\1/p'"
        get::command_output "apiservers" "$?"
    fi
    # when adding nodes to an existing cluster, read master IPs from the API
    if [[ "${ADD_TAG:-}" == "1" ]]; then
        command::exec "${MGMT_NODE}" "
            kubectl get node --selector='!node-role.kubernetes.io/worker' -o jsonpath='{$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
        "
        get::command_output "apiservers" "$?"
    fi
    # install haproxy on workers as a local apiserver load balancer
    for host in $WORKER_NODES; do
        log::info "[install]" "install haproxy on $host"
        command::exec "${host}" "
            $(declare -f script::install_haproxy)
            script::install_haproxy \"$apiservers\"
        "
        check::exit_code "$?" "install" "install haproxy on $host"
    done
    # install helm: download once locally (MGMT_NODE is temporarily pointed
    # at 127.0.0.1 so the download runs on this machine), then scp to nodes
    local mgmt_node_raw=$MGMT_NODE
    MGMT_NODE="127.0.0.1"
    local certs_file="${DOWNLOAD_DIR}/helm-v$HELM_VERSION-linux-amd64.tar.gz"
    utils::download_file "https://mirrors.huaweicloud.com/helm/v$HELM_VERSION/helm-v$HELM_VERSION-linux-amd64.tar.gz" "${certs_file}"
    MGMT_NODE=$mgmt_node_raw
    for host in $MASTER_NODES $WORKER_NODES; do
        log::info "[install]" "install helm on $host"
        command::scp "${host}" "${certs_file}" "/tmp/helm-linux-amd64.tar.gz"
        check::exit_code "$?" "install" "scp helm to $host" "exit"
        command::exec "${host}" "
            set -e
            if [[ -f /tmp/helm-linux-amd64.tar.gz ]]; then
                cd /tmp
                # 解压
                tar -zxvf helm-linux-amd64.tar.gz
                # 安装
                mv linux-amd64/helm /usr/local/bin/
                # 清理
                rm -rf helm-linux-amd64.tar.gz linux-amd64
                # 验证
                helm version
            else
                echo \"not found /tmp/helm-linux-amd64.tar.gz\"
                exit 1
            fi
        "
        check::exit_code "$?" "install" "install helm on $host"
    done
    # optional: replace kubeadm with a build whose certificates last 10 years
    if [[ "${CERT_YEAR_TAG:-}" == "1" ]]; then
        local version="${KUBE_VERSION}"
        log::info "[install]" "download kubeadm 10 years certs client"
        local mgmt_node_raw=$MGMT_NODE
        MGMT_NODE="127.0.0.1"
        local certs_file="${DOWNLOAD_DIR}/bins/kubeadm-linux-amd64"
        utils::download_file "${GITHUB_PROXY}https://github.com/lework/kubeadm-certs/releases/download/v${version}/kubeadm-linux-amd64" "${certs_file}"
        MGMT_NODE=$mgmt_node_raw
        for host in $MASTER_NODES $WORKER_NODES; do
            log::info "[install]" "scp kubeadm client to $host"
            command::scp "${host}" "${certs_file}" "/tmp/kubeadm-linux-amd64"
            check::exit_code "$?" "install" "scp kubeadm client to $host" "exit"
            command::exec "${host}" "
                set -e
                if [[ -f /tmp/kubeadm-linux-amd64 ]]; then
                    [[ -f /usr/bin/kubeadm && ! -f /usr/bin/kubeadm_src ]] && mv -fv /usr/bin/kubeadm{,_src}
                    mv -fv /tmp/kubeadm-linux-amd64 /usr/bin/kubeadm
                    chmod +x /usr/bin/kubeadm
                else
                    echo \"not found /tmp/kubeadm-linux-amd64\"
                    exit 1
                fi
            "
            check::exit_code "$?" "install" "$host: use kubeadm 10 years certs client"
        done
    fi
}

# 集群初始化
function kubeadm::init() {
    # Render /etc/kubernetes/kubeadmcfg.yaml on the management node, run
    # `kubeadm init`, install the admin kubeconfig, untaint single-master
    # clusters, and set up auto-approval for kubelet certificate CSRs.
    log::info "[kubeadm init]" "kubeadm init on ${MGMT_NODE}"
    log::info "[kubeadm init]" "${MGMT_NODE}: set kubeadmcfg.yaml"
    # discover the pause image tag matching this kubeadm release
    command::exec "${MGMT_NODE}" "
        kubeadm config images list 2>/dev/null | awk -F: '/pause/ {print \$2}'
    "
    get::command_output "PAUSE_VERSION" "$?"
    # fills in ${KUBELET_NODE_REGISTRATION} interpolated into the config below
    kube::init_kubelet_node_registration
    log::info "[kubeadm init]" "PAUSE_VERSION = ${PAUSE_VERSION}"
    # write the combined Init/KubeProxy/Kubelet/Cluster configuration;
    # variables are expanded locally, escaped \$ expressions remotely
    command::exec "${MGMT_NODE}" "
cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
${KUBELET_NODE_REGISTRATION}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  minSyncPeriod: 5s
  syncPeriod: 5s
  # ipvs 负载策略
  scheduler: 'wrr'

---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
maxPods: 200
cgroupDriver: systemd
runtimeRequestTimeout: 5m
# 此配置保证了 kubelet 能在 swap 开启的情况下启动
failSwapOn: false
nodeStatusUpdateFrequency: 5s
rotateCertificates: true
imageGCLowThresholdPercent: 70
imageGCHighThresholdPercent: 80
# 软驱逐阀值
evictionSoft:
  imagefs.available: 15%
  memory.available: 512Mi
  nodefs.available: 15%
  nodefs.inodesFree: 10%
# 达到软阈值之后，持续时间超过多久才进行驱逐
evictionSoftGracePeriod:
  imagefs.available: 3m
  memory.available: 1m
  nodefs.available: 3m
  nodefs.inodesFree: 1m
# 硬驱逐阀值
evictionHard:
  imagefs.available: 10%
  memory.available: 256Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionMaxPodGracePeriod: 30
# 节点资源预留
kubeReserved:
  cpu: 200m\$(if [[ \$(cat /proc/meminfo | awk '/MemTotal/ {print \$2}') -gt 3670016 ]]; then echo -e '\n  memory: 256Mi';fi)
  ephemeral-storage: 1Gi
systemReserved:
  cpu: 300m\$(if [[ \$(cat /proc/meminfo | awk '/MemTotal/ {print \$2}') -gt 3670016 ]]; then echo -e '\n  memory: 512Mi';fi)
  ephemeral-storage: 1Gi
kubeReservedCgroup: /kube.slice
systemReservedCgroup: /system.slice
enforceNodeAllocatable:
  - pods

---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: $KUBE_VERSION
controlPlaneEndpoint: $KUBE_API_SERVER:6443
networking:
  dnsDomain: $KUBE_DNS_DOMAIN
  podSubnet: $KUBE_POD_SUBNET
  serviceSubnet: $KUBE_SERVICE_SUBNET
imageRepository: $KUBEADMCFG_REPO
clusterName: $KUBE_CLUSTER_NAME
apiServer:
  certSANs:
    - 127.0.0.1
    - $KUBE_API_SERVER
$(for h in $MASTER_NODES;do echo "    - $h";done)
  extraArgs:
    event-ttl: '720h'
    service-node-port-range: '30000-50000'
    # 审计日志相关配置
    audit-log-maxage: '20'
    audit-log-maxbackup: '10'
    audit-log-maxsize: '100'
    audit-log-path: /var/log/kube-audit/audit.log
    audit-policy-file: /etc/kubernetes/audit-policy.yaml
  extraVolumes:
    - name: audit-config
      hostPath: /etc/kubernetes/audit-policy.yaml
      mountPath: /etc/kubernetes/audit-policy.yaml
      readOnly: true
      pathType: File
    - name: audit-log
      hostPath: /var/log/kube-audit
      mountPath: /var/log/kube-audit
      pathType: DirectoryOrCreate
    - name: localtime
      hostPath: /usr/share/zoneinfo/Asia/Shanghai
      mountPath: /etc/localtime
      readOnly: true
      pathType: File
controllerManager:
  extraArgs:
    bind-address: 0.0.0.0
    node-cidr-mask-size: '24'
    node-monitor-grace-period: '20s'
    terminated-pod-gc-threshold: '30'
    cluster-signing-duration: 87600h
    feature-gates: RotateKubeletServerCertificate=true
  extraVolumes:
    - hostPath: /usr/share/zoneinfo/Asia/Shanghai
      mountPath: /etc/localtime
      name: localtime
      readOnly: true
      pathType: File
scheduler:
  extraArgs:
    bind-address: 0.0.0.0
  extraVolumes:
    - hostPath: /usr/share/zoneinfo/Asia/Shanghai
      mountPath: /etc/localtime
      name: localtime
      readOnly: true
      pathType: File
EOF
    "
    check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: set kubeadmcfg.yaml" "exit"
    log::info "[kubeadm init]" "${MGMT_NODE}: kubeadm init start."
    command::exec "${MGMT_NODE}" "kubeadm init --config=/etc/kubernetes/kubeadmcfg.yaml --upload-certs"
    check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: kubeadm init" "exit"
    sleep 3
    # install the admin kubeconfig for the root user on the management node
    log::info "[kubeadm init]" "${MGMT_NODE}: set kube config."
    command::exec "${MGMT_NODE}" "
        mkdir -p \$HOME/.kube
        sudo cp -f /etc/kubernetes/admin.conf \$HOME/.kube/config
    "
    check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: set kube config" "exit"
    # single-master cluster: allow workloads on the control plane node
    # (the `||` covers both the old master and new control-plane taint keys)
    if [[ "$(echo "$MASTER_NODES" | wc -w)" == "1" ]]; then
        log::info "[kubeadm init]" "${MGMT_NODE}: delete master taint"
        command::exec "${MGMT_NODE}" "kubectl taint nodes --all node-role.kubernetes.io/master- || kubectl taint nodes --all node-role.kubernetes.io/control-plane-"
        check::exit_code "$?" "kubeadm init" "${MGMT_NODE}: delete master taint"
    fi
    # auto-approve/renew kubelet client and serving certificate CSRs
    command::exec "${MGMT_NODE}" "
        kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --user=kubelet-bootstrap
        kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
        kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
    "
    check::exit_code "$?" "kubeadm init" "Auto-Approve kubelet cert csr" "exit"
}

# 加入集群
function kubeadm::join() {
    # Join the remaining master nodes and all worker nodes to the cluster
    # created by kubeadm::init, then label the workers with the worker role.
    log::info "[kubeadm join]" "master: get join token and cert info"
    # sha256 of the CA public key, used as the discovery caCertHash below
    command::exec "${MGMT_NODE}" "
        openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
    "
    get::command_output "CACRT_HASH" "$?" "exit"
    # re-upload control-plane certs and capture the certificate key.
    # NOTE(review): "INTI_CERTKEY" looks like a typo of INIT_CERTKEY; kept
    # as-is because get::command_output assigns the variable by this name.
    command::exec "${MGMT_NODE}" "
        kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadmcfg.yaml 2>> /dev/null | tail -1
    "
    get::command_output "INTI_CERTKEY" "$?" "exit"
    # create a fresh bootstrap token for the joins
    command::exec "${MGMT_NODE}" "
        kubeadm token create
    "
    get::command_output "INIT_TOKEN" "$?" "exit"
    command::exec "${MGMT_NODE}" "
        kubeadm config images list 2>/dev/null | awk -F: '/pause/ {print \$2}'
    "
    get::command_output "PAUSE_VERSION" "$?"
    kube::init_kubelet_node_registration
    # join the remaining control-plane nodes (skip the node we init'ed on)
    for host in $MASTER_NODES; do
        [[ "${MGMT_NODE}" == "$host" ]] && continue
        log::info "[kubeadm join]" "master $host join cluster."
        command::exec "${host}" "
            cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: $KUBE_API_SERVER:6443
    caCertHashes:
      - sha256:${CACRT_HASH:-}
    token: ${INIT_TOKEN}
  timeout: 5m0s
controlPlane:
  certificateKey: ${INTI_CERTKEY:-}
${KUBELET_NODE_REGISTRATION}
EOF
            kubeadm join --config /etc/kubernetes/kubeadmcfg.yaml
        "
        check::exit_code "$?" "kubeadm join" "master $host join cluster"
        log::info "[kubeadm join]" "$host: set kube config."
        command::exec "${host}" "
            mkdir -p \$HOME/.kube
            sudo cp -f /etc/kubernetes/admin.conf \$HOME/.kube/config
        "
        check::exit_code "$?" "kubeadm join" "$host: set kube config" "exit"
        # after joining, this master reaches the API server via localhost
        command::exec "${host}" "
            sed -i 's#.*$KUBE_API_SERVER#127.0.0.1 $KUBE_API_SERVER#g' /etc/hosts
        "
    done
    # join the worker nodes (API server reached through the local haproxy)
    for host in $WORKER_NODES; do
    log::info "[kubeadm join]" "worker $host join cluster."
    command::exec "${host}" "
        mkdir -p /etc/kubernetes/manifests
        cat << EOF > /etc/kubernetes/kubeadmcfg.yaml
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: $KUBE_API_SERVER:6443
    caCertHashes:
      - sha256:${CACRT_HASH:-}
    token: ${INIT_TOKEN}
  timeout: 5m0s
${KUBELET_NODE_REGISTRATION}
EOF
        kubeadm join --config /etc/kubernetes/kubeadmcfg.yaml
    "
    check::exit_code "$?" "kubeadm join" "worker $host join cluster"

    # label any unlabeled (role "<none>") node as a worker
    log::info "[kubeadm join]" "set $host worker node role."
    command::exec "${MGMT_NODE}" "
        kubectl get node --selector='!node-role.kubernetes.io/master,!node-role.kubernetes.io/control-plane' | grep '<none>' | awk '{print \"kubectl label node \" \$1 \" node-role.kubernetes.io/worker= --overwrite\" }' | bash
    "
    check::exit_code "$?" "kubeadm join" "set $host worker node role"
    done
}

# 添加network组件
function add::network() {
    # Install the flannel CNI: download the manifest, rewrite the pod subnet,
    # image registries, namespace and backend type, then apply and wait.
    log::info "[network]" "add flannel"
    local flannel_file="${DOWNLOAD_DIR}/manifests/kube-flannel.yml"
    utils::download_file "${GITHUB_PROXY}https://raw.githubusercontent.com/flannel-io/flannel/v${FLANNEL_VERSION}/Documentation/kube-flannel.yml" "${flannel_file}"
    # first sed swaps the backend type; when it stays "vxlan" the follow-up
    # sed additionally enables DirectRouting
    command::exec "${MGMT_NODE}" "
        sed -i -e 's#10.244.0.0/16#${KUBE_POD_SUBNET}#g' \
            -e 's#quay.io#${QUAY_IO_REPO}#g' \
            -e 's#docker.io#${DOCKER_IO_REPO}#g' \
            -e 's#namespace: kube-system#namespace: kube-flannel#g' \
            -e 's#\"Type\": \"vxlan\"#\"Type\": \"${KUBE_FLANNEL_TYPE}\"#g' \"${flannel_file}\"
        if [[ \"${KUBE_FLANNEL_TYPE}\" == \"vxlan\" ]]; then
            sed -i 's#\"Type\": \"vxlan\"#\"Type\": \"vxlan\", \"DirectRouting\": true#g' \"${flannel_file}\"
        fi
    "
    check::exit_code "$?" "flannel" "change flannel pod subnet"
    kube::apply "${flannel_file}"
    kube::wait "flannel" "kube-flannel" "pods" "app=flannel"
}

# 添加ingress组件
function add::ingress() {
    # Deploy ingress-nginx via helm (NodePort 30080/30443), then install a
    # host-level nginx on every node proxying ${NGINX_HTTP_PORT} to it.
    # install ingress-nginx
    log::info "[ingress]" "add ingress-nginx"
    command::exec "${MGMT_NODE}" "
        $(declare -f utils::retry)
        helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
        utils::retry 6 helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
            --namespace ingress-nginx --create-namespace \
            --version ${INGRESS_VERSION} \
            --set controller.admissionWebhooks.patch.image.digest= \
            --set controller.admissionWebhooks.enabled=true \
            --set controller.admissionWebhooks.extraEnvs\[0\].name=\"TZ\" \
            --set controller.admissionWebhooks.extraEnvs\[0\].value=\"Asia/Shanghai\" \
            --set controller.kind=DaemonSet \
            --set controller.replicaCount=1 \
            --set controller.minAvailable=1 \
            --set controller.image.digest= \
            --set controller.ingressClassResource.name=nginx \
            --set controller.ingressClassResource.enable=true \
            --set controller.ingressClassResource.default=false \
            --set controller.service.enabled=true \
            --set controller.service.type=NodePort \
            --set controller.service.enableHttp=true \
            --set controller.service.enableHttps=true \
            --set controller.service.nodePorts.http=30080 \
            --set controller.service.nodePorts.https=30443 \
            --set controller.extraEnvs\[0\].name=\"TZ\" \
            --set controller.extraEnvs\[0\].value=\"Asia/Shanghai\" \
            --set defaultBackend.enabled=true \
            --set defaultBackend.name=defaultbackend \
            --set defaultBackend.replicaCount=1 \
            --set defaultBackend.minAvailable=1 \
            --set defaultBackend.extraEnvs\[0\].name=\"TZ\" \
            --set defaultBackend.extraEnvs\[0\].value=\"Asia/Shanghai\" \
            --set rbac.create=true \
            --set serviceAccount.create=true \
            --set podSecurityPolicy.enabled=true
        kubectl get pod -n ingress-nginx -o wide
        kubectl get svc -n ingress-nginx -o wide
    "
    kube::wait "ingress-nginx" "ingress-nginx" "pod" "app.kubernetes.io/name=ingress-nginx"
    # install a host nginx on every node that load-balances to the
    # ingress-nginx NodePort (30080) across all nodes
    for host in $MASTER_NODES $WORKER_NODES; do
        log::info "[nginx]" "install nginx on $host"
        command::exec "${host}" "
            sudo yum -y install nginx
            nginx -v
            sudo systemctl enable nginx
            sudo service nginx start
            cat << EOF > /etc/nginx/conf.d/k8s.ingress.conf
map \\\$http_upgrade \\\$connection_upgrade {
    default upgrade;
    '' close;
}

upstream k8s-ingress {
$(for h in $MASTER_NODES $WORKER_NODES;do echo "    server $h:30080 max_fails=1 fail_timeout=15s;";done)
    keepalive 128;
}

server {
    listen       ${NGINX_HTTP_PORT};

    location / {
        proxy_http_version 1.1;
        proxy_set_header Connection \"\";
        proxy_next_upstream error;
        proxy_set_header X-Real-IP \\\$remote_addr;
        proxy_set_header X-Forwarded-For \\\$proxy_add_x_forwarded_for;
        proxy_set_header Host \\\$http_host;
        proxy_set_header X-Nginx-Proxy true;
        proxy_pass http://k8s-ingress/;
        # websocket 支持
        proxy_set_header Upgrade \\\$http_upgrade;
        proxy_set_header Connection \\\$connection_upgrade;
    }
}
EOF
            sudo nginx -s reload
        "
    done
}

# 安装 rancher
function add::rancher() {
    # Deploy Rancher via helm. The chart's Service template is overwritten
    # with a NodePort variant (31080/31443) before installing, since the
    # chart's ingress is disabled here.
    log::info "[rancher]" "add rancher"
    command::exec "${MGMT_NODE}" "
        $(declare -f utils::retry)
        cd ${DOWNLOAD_DIR}
        helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
        utils::retry 6 helm pull rancher-stable/rancher --version ${RANCHER_VERSION} --untar
        cat << EOF > rancher/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: {{ template \"rancher.fullname\" . }}
labels:
{{ include \"rancher.labels\" . | indent 4 }}
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
      # 使用nodePort端口
      nodePort: 31080
    - port: 443
      targetPort: 444
      protocol: TCP
      name: https-internal
      # 使用nodePort端口
      nodePort: 31443
  selector:
    app: {{ template \"rancher.fullname\" . }}
EOF
        helm upgrade --install rancher ./rancher \
            --namespace cattle-system --create-namespace \
            --set replicas=1 \
            --set extraEnv\[0\].name=\"TZ\" \
            --set extraEnv\[0\].value=\"Asia/Shanghai\" \
            --set ingress.tls.source=secret \
            --set ingress.enabled=false
        kubectl get pod -n cattle-system -o wide
        kubectl get svc -n cattle-system -o wide
    "
    kube::wait "rancher" "cattle-system" "pod" "app=rancher"
    log::info "[rancher]" "获取初始密码 kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{.data.bootstrapPassword|base64decode}}{{ \"\n\" }}'"
    log::info "[rancher]" "重置初始密码 kubectl -n cattle-system exec \$(kubectl -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print \$1 }') -- reset-password"
}

# Day-2 operational setup: coredns anti-affinity plus a periodic etcd
# snapshot CronJob (one Job replica per control-plane node), and a first
# backup run triggered immediately.
function add::ops() {
    local master_num
    # Number of whitespace-separated entries in MASTER_NODES.
    master_num=$(awk '{print NF}' <<< "${MASTER_NODES}")
    log::info "[ops]" "add anti-affinity strategy to coredns"
    # Prefer spreading coredns replicas across distinct hostnames.
    command::exec "${MGMT_NODE}" """
        kubectl -n kube-system patch deployment coredns --patch '{\"spec\": {\"template\": {\"spec\": {\"affinity\":{\"podAntiAffinity\":{\"preferredDuringSchedulingIgnoredDuringExecution\":[{\"weight\":100,\"podAffinityTerm\":{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"k8s-app\",\"operator\":\"In\",\"values\":[\"kube-dns\"]}]},\"topologyKey\":\"kubernetes.io/hostname\"}}]}}}}}}' --record
    """
    check::exit_code "$?" "ops" "add anti-affinity strategy to coredns"
    log::info "[ops]" "add etcd snapshot cronjob"
    # Discover the etcd image the cluster actually uses, to run etcdctl from it.
    command::exec "${MGMT_NODE}" "
        kubeadm config images list --config=/etc/kubernetes/kubeadmcfg.yaml 2>/dev/null | grep etcd:
    "
    get::command_output "etcd_image" "$?"
    # Re-count masters from the live cluster; this overwrites the
    # MASTER_NODES-derived count computed above (that one is only a fallback).
    command::exec "${MGMT_NODE}" "
        kubectl get node --selector='!node-role.kubernetes.io/worker' --no-headers | wc -l
    "
    get::command_output "master_num" "$?"
    # Never schedule zero replicas.
    [[ "${master_num:-0}" == "0" ]] && master_num=1
    # CronJob: every 6h, one snapshot Job per master (anti-affinity keeps
    # them on distinct hosts); host /etc, /usr/bin and /lib64 are mounted
    # so etcdctl's runtime dependencies resolve on the node.
    kube::apply "etcd-snapshot" """
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: etcd-snapshot
  namespace: kube-system
spec:
  schedule: '0 */6 * * *'
  successfulJobsHistoryLimit: 3
  suspend: false
  concurrencyPolicy: Allow
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      backoffLimit: 6
      parallelism: ${master_num}
      completions: ${master_num}
      template:
        metadata:
          labels:
            app: etcd-snapshot
        spec:
          affinity:
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                - labelSelector:
                    matchExpressions:
                      - key: app
                        operator: In
                        values:
                          - etcd-snapshot
                  topologyKey: 'kubernetes.io/hostname'
          containers:
            - name: etcd-snapshot
              image: ${etcd_image:-${REGISTRY_K8S_IO_REPO}/etcd:3.5.13-0}
              imagePullPolicy: IfNotPresent
              args:
                - -c
                - etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt
                  --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt
                  --key=/etc/kubernetes/pki/etcd/healthcheck-client.key snapshot save /backup/etcd-snapshot-\\\\\\\$(date +%Y-%m-%d_%H:%M:%S_%Z).db
                  && echo 'delete old backups' && { find /backup -type f -mtime +30 -exec rm -fv {} \\; || echo error; }
              command:
                - /usr/bin/bash
              env:
                - name: ETCDCTL_API
                  value: '3'
              resources: {}
              terminationMessagePath: /dev/termination-log
              terminationMessagePolicy: File
              volumeMounts:
                - name: etcd-certs
                  mountPath: /etc/kubernetes/pki/etcd
                  readOnly: true
                - name: backup
                  mountPath: /backup
                - name: etc
                  mountPath: /etc
                - name: bin
                  mountPath: /usr/bin
                - name: lib64
                  mountPath: /lib64
          dnsPolicy: ClusterFirst
          hostNetwork: true
          nodeSelector:
            node-role.kubernetes.io/control-plane: ''
          tolerations:
            - effect: NoSchedule
              operator: Exists
          restartPolicy: OnFailure
          schedulerName: default-scheduler
          securityContext: {}
          terminationGracePeriodSeconds: 30
          volumes:
            - name: etcd-certs
              hostPath:
                path: /etc/kubernetes/pki/etcd
                type: DirectoryOrCreate
            - name: backup
              hostPath:
                path: /var/lib/etcd/backups
                type: DirectoryOrCreate
            - name: etc
              hostPath:
                path: /etc
            - name: bin
              hostPath:
                path: /usr/bin
            - name: lib64
              hostPath:
                path: /lib64
    """
    # shellcheck disable=SC2181
    [[ "$?" == "0" ]] && log::access "[ops]" "etcd backup directory: /var/lib/etcd/backups"
    # Trigger a first snapshot right away and wait for it to complete.
    command::exec "${MGMT_NODE}" "
        jobname=\"etcd-snapshot-$(date +%s)\"
        kubectl create job --from=cronjob/etcd-snapshot \${jobname} -n kube-system && \
        kubectl wait --for=condition=complete job/\${jobname} -n kube-system
    "
    check::exit_code "$?" "ops" "trigger etcd backup"
}

# Print a cluster-wide overview (nodes plus all pods in every namespace)
# gathered on the management node.
function kube::status() {
    # Brief pause so recently-applied changes show up in the listing.
    sleep 5
    log::info "[cluster]" "cluster status"
    # Keep the remote command in a named variable for readability; the
    # output is only printed when the remote execution succeeded.
    local overview_cmd="
        echo
        kubectl get node -o wide
        echo
        kubectl get pods -A
    "
    command::exec "${MGMT_NODE}" "${overview_cmd}" && printf "%s" "${COMMAND_OUTPUT}"
}

# Reset a single node back to a pre-install state.
# $1 - host to reset.
# The remote script is deliberately best-effort ('set +ex'): it runs
# kubeadm reset, uninstalls kube*/docker/containerd/cri-o/haproxy
# packages, removes their data directories and yum repos, strips the
# /etc/hosts entries and "Kainstall managed" config blocks this
# installer added, and flushes ipvs/iptables plus CNI-related links.
function reset::node() {
    local host=$1
    log::info "[reset]" "node $host"
    command::exec "${host}" "
        set +ex
        # 重置集群
        cri_socket=\"\"
        [ -S /var/run/crio/crio.sock ] && cri_socket=\"--cri-socket /var/run/crio/crio.sock\"
        [ -S /run/containerd/containerd.sock ] && cri_socket=\"--cri-socket /run/containerd/containerd.sock\"
        kubeadm reset -f \$cri_socket
        # 删除 k8s
        [ -f \"\$(which kubelet)\" ] && { systemctl stop kubelet; find /var/lib/kubelet | xargs -n 1 findmnt -n -o TARGET -T | sort | uniq | xargs -r umount -v; yum remove -y kubeadm kubelet kubectl; }
        [ -d /etc/kubernetes ] && rm -rf /etc/kubernetes/* /var/lib/kubelet/* /var/lib/etcd/* \$HOME/.kube /etc/cni/net.d/* /var/lib/dockershim/* /var/lib/cni/* /var/run/kubernetes/*
        # 删除 docker containerd 等软件包
        [ -f \"\$(which docker)\" ] && { docker rm -f -v \$(docker ps | grep kube | awk '{print \$1}'); systemctl stop docker; rm -rf \$HOME/.docker /etc/docker/* /var/lib/docker/*; yum remove -y docker; }
        [ -f \"\$(which containerd)\" ] && { crictl rm \$(crictl ps -a -q); systemctl stop containerd; rm -rf /etc/containerd/* /var/lib/containerd/*; yum remove -y containerd.io; }
        [ -f \"\$(which crio)\" ] && { crictl rm \$(crictl ps -a -q); systemctl stop crio; rm -rf /etc/crictl.yaml /etc/crio/* /var/run/crio/*; yum remove -y cri-o; }
        [ -f \"\$(which runc)\" ] && { find /run/containers/ /var/lib/containers/ | xargs -n 1 findmnt -n -o TARGET -T | sort | uniq | xargs -r umount -v; rm -rf /var/lib/containers/* /var/run/containers/*; yum remove -y runc; }
        [ -f \"\$(which haproxy)\" ] && { systemctl stop haproxy; rm -rf /etc/haproxy/* /etc/rsyslog.d/haproxy.conf; yum remove -y haproxy; }
        # 还原配置
        sed -i -e \"/$KUBE_API_SERVER/d\" -e '/-worker-/d' -e '/-master-/d' /etc/hosts
        sed -i '/## Kainstall managed start/,/## Kainstall managed end/d' /etc/security/limits.conf /etc/systemd/system.conf /etc/bashrc /etc/rc.local /etc/audit/rules.d/audit.rules
        # 删除数据文件
        # [ -d /var/lib/elasticsearch ] && rm -rf /var/lib/elasticsearch/*
        # [ -d /var/lib/longhorn ] &&  rm -rf /var/lib/longhorn/*
        [ -d \"${DOWNLOAD_DIR:-/tmp/abc}\" ] && rm -rf \"${DOWNLOAD_DIR:-/tmp/abc}\"
        # 删除软件源
        for repo in kubernetes.repo docker-ce.repo devel_kubic_libcontainers_stable.repo; do
            [ -f /etc/yum.repos.d/\${repo} ] && rm -f /etc/yum.repos.d/\${repo}
        done
        # 清理网络配置
        ipvsadm --clear
        iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
        for int in kube-ipvs0 cni0 docker0 dummy0 flannel.1 cilium_host cilium_net cilium_vxlan lxc_health nodelocaldns; do
            [ -d /sys/class/net/\${int} ] && ip link delete \${int}
        done
        modprobe -r ipip
        echo done.
    "
    check::exit_code "$?" "reset" "$host: reset"
}

# Keep the etcd-snapshot CronJob's completions/parallelism in sync with
# the current number of control-plane (non-worker) nodes.
function config::etcd_snapshot() {
    # Counted and patched remotely on the management node; falls back to
    # 1 replica when the node count comes back empty.
    command::exec "${MGMT_NODE}" "
        count=\$(kubectl get node --selector='!node-role.kubernetes.io/worker' --no-headers | wc -l)
        kubectl -n kube-system patch cronjobs etcd-snapshot --patch \"
spec:
  jobTemplate:
    spec:
      completions: \${count:-1}
      parallelism: \${count:-1}
\"
    "
    check::exit_code "$?" "config" "etcd-snapshot completions options"
}

# Add or remove master apiserver backends in haproxy on every worker node.
# $1: "add" (default) appends a backend line per MASTER_NODES entry; any
#     other value deletes the lines matching those master IPs.
# Reads MASTER_NODES (masters being added/removed) and MGMT_NODE.
function config::haproxy_backend() {
    local action=${1:-add}
    local action_cmd=""
    local master_nodes
    # Fix: these helper variables used to leak into the global namespace;
    # declare them local, consistent with master_nodes above.
    local worker_nodes=""
    local num m host
    # A single-node / local cluster has no haproxy to reconfigure.
    if [[ "$MASTER_NODES" == "" || "$MASTER_NODES" == "127.0.0.1" ]]; then
        return
    fi
    # Current control-plane InternalIPs, used to validate removals.
    command::exec "${MGMT_NODE}" "
        kubectl get node --selector='!node-role.kubernetes.io/worker' -o jsonpath='{\$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
    "
    get::command_output "master_nodes" "$?" "exit"
    for m in $MASTER_NODES; do
        if [[ "${action}" == "add" ]]; then
            # Backend name is derived from the IP's last octet.
            # NOTE(review): two masters sharing a last octet would collide.
            num=$(echo "${m}"| awk -F'.' '{print $4}')
            action_cmd="${action_cmd}\necho \"    server apiserver${num} ${m}:6443 check\" >> /etc/haproxy/haproxy.cfg"
        else
            # Bail out of the whole function if this master is unknown to
            # the cluster (nothing to delete).
            [[ "${master_nodes}" == *"${m}"* ]] || return
            action_cmd="${action_cmd}\n sed -i -e \"/${m}/d\" /etc/haproxy/haproxy.cfg"
        fi
    done
    # Apply the accumulated edits on every worker node, then reload
    # haproxy only if its config still validates.
    command::exec "${MGMT_NODE}" "
        kubectl get node --selector='node-role.kubernetes.io/worker' -o jsonpath='{\$.items[*].status.addresses[?(@.type==\"InternalIP\")].address}'
    "
    get::command_output "worker_nodes" "$?"
    for host in ${worker_nodes:-}; do
        log::info "[config]" "worker ${host}: ${action} apiserver from haproxy"
        command::exec "${host}" "
            $(echo -ne "${action_cmd}")
            haproxy -c -f /etc/haproxy/haproxy.cfg && systemctl reload haproxy
        "
        check::exit_code "$?" "config" "worker ${host}: ${action} apiserver(${m}) from haproxy"
    done
}

# Prepare hosts that are being added to an existing cluster: pick fresh
# hostnames that continue the existing node numbering, push hostname
# resolution to all current members, then run the common node config.
function init::add_node() {
    init::upgrade_kernel
    local master_index=0
    local worker_index=0
    local node_hosts=""
    local add_node_hosts=""
    # Re-resolve MGMT_NODE to the first control-plane node's InternalIP.
    command::exec "${MGMT_NODE}" "
        kubectl get node --selector='!node-role.kubernetes.io/worker' -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address } {end}' | awk '{print \$1}'
    "
    get::command_output "MGMT_NODE" "$?" "exit"
    # Fetch the existing cluster nodes as "<ip> <hostname>" pairs.
    command::exec "${MGMT_NODE}" "
        kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {.metadata.name }\\n{end}'
    "
    get::command_output "node_hosts" "$?" "exit"
    # Refuse to add a host that is already a cluster member.
    for host in $MASTER_NODES $WORKER_NODES; do
        if [[ $node_hosts == *"$host"* ]]; then
            log::error "[init]" "The host $host is already in the cluster!"
            exit 1
        fi
    done
    if [[ "$MASTER_NODES" != "" ]]; then
        # Highest existing master index; assumes hostnames end in
        # 'node<N>' (the HOSTNAME_PREFIX-master-node<N> scheme below).
        command::exec "${MGMT_NODE}" "
            kubectl get node --selector='!node-role.kubernetes.io/worker' -o jsonpath='{\$.items[*].metadata.name}' |grep -Eo 'node[0-9]*'|grep -Eo '[0-9]*'|awk -F ' ' 'BEGIN {max = 0} {if (\$0+0 > max+0) max=\$0} END {print max}'
        "
        get::command_output "master_index" "$?" "exit"
        master_index=$(( master_index + 1 ))
        local i=$master_index
        for host in $MASTER_NODES; do
            add_node_hosts="${add_node_hosts}\n${host:-} ${HOSTNAME_PREFIX}-master-node${i}"
            i=$((i + 1))
        done
    fi
    if [[ "$WORKER_NODES" != "" ]]; then
        # Same numbering scheme for workers.
        command::exec "${MGMT_NODE}" "
            kubectl get node --selector='node-role.kubernetes.io/worker' -o jsonpath='{\$.items[*].metadata.name}'| grep -Eo 'node[0-9]*'|grep -Eo '[0-9]*'|awk 'BEGIN {max = 0} {if (\$0+0 > max+0) max=\$0} END {print max}' || echo 0
        "
        get::command_output "worker_index" "$?" "exit"
        worker_index=$(( worker_index + 1 ))
        local i=$worker_index
        for host in $WORKER_NODES; do
            add_node_hosts="${add_node_hosts}\n${host:-} ${HOSTNAME_PREFIX}-worker-node${i}"
            i=$((i + 1))
        done
    fi
    # Push the new nodes' hostname resolution to every existing node.
    for host in $(echo -ne "$node_hosts" | awk '{print $1}'); do
        command::exec "${host}" "
        printf \"$add_node_hosts\" >> /etc/hosts
        "
        check::exit_code "$?" "init" "$host add new node hostname resolution"
    done
    # NOTE(review): node_hosts is local here but presumably read by
    # init::node_config through bash's dynamic scoping — confirm.
    node_hosts="${node_hosts}\n${add_node_hosts}"
    init::node_config
}








# Check api-server connectivity. TODO: not implemented yet
# (logs "未实现" = "not implemented").
function check::api_server_conn() {
    log::warning "[TODO]" "未实现"
}

# Renew cluster certificates. TODO: not implemented yet.
function cert::renew() {
    log::warning "[TODO]" "未实现"
}

# Renew per-node certificates. TODO: not implemented yet.
function cert::renew_node() {
    log::warning "[TODO]" "未实现"
}

# Upgrade kubernetes packages on a node. TODO: not implemented yet.
function script::upgrade_kube() {
    log::warning "[TODO]" "未实现"
}

# Print the ingress connection address. TODO: not implemented yet.
function get::ingress_conn() {
    log::warning "[TODO]" "未实现"
}

######################################################################################################
# 主调用逻辑
######################################################################################################

# Initialize the whole cluster, step by step.
# Progress is checkpointed in SETUP_STATE_FILE so an interrupted run can
# resume: a step whose tag already appears in the file is skipped, and a
# tag is only recorded when the step finished with HAS_ERROR == 0.
# Fixes: $SETUP_STATE_FILE is now quoted everywhere (SC2086), and the
# eight hand-copied step blocks are data-driven, removing the
# copy/paste boilerplate while emitting the exact same log messages.
function init::cluster() {
    # Create the checkpoint file on first run.
    [ ! -f "${SETUP_STATE_FILE}" ] && echo "" > "${SETUP_STATE_FILE}"
    log::info "[setup]" "setup file: ${SETUP_STATE_FILE}"
    # Use the first master as the management node for kubectl/helm calls.
    MGMT_NODE=$(echo "${MASTER_NODES}" | awk '{print $1}')
    # Ordered steps as "<state-tag> <function>" pairs.
    local entry tag fn
    for entry in \
        "init_node init::node" \
        "install_package install::package" \
        "kubeadm_init kubeadm::init" \
        "kubeadm_join kubeadm::join" \
        "add_network add::network" \
        "add_ingress add::ingress" \
        "add_rancher add::rancher" \
        "add_ops add::ops"; do
        tag=${entry%% *}
        fn=${entry##* }
        if grep -q "${tag}" "${SETUP_STATE_FILE}"; then
            log::info "[skip]" "跳过已完成步骤 ${fn}"
        else
            HAS_ERROR="0"
            "${fn}"
            # Only checkpoint the step if it reported no error.
            [ "${HAS_ERROR}" == "0" ] && echo "${tag}" >> "${SETUP_STATE_FILE}"
        fi
    done
    # Finally, show the resulting cluster state.
    kube::status
}

# Re-initialize the cluster: kubeadm-reset every known node, then rerun
# the install steps (without the checkpointing that init::cluster does).
# NOTE(review): declared with a single colon (reinit:cluster), unlike
# the other <ns>::<name> functions; the call site matches, so renaming
# would have to change both places at once.
function reinit:cluster() {
    # Reset the cluster nodes first.
    local all_node=""
    command::exec "${MGMT_NODE}" "
        kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {end}'
    "
    get::command_output "all_node" "$?"
    # Merge CLI-supplied and cluster-reported nodes, de-duplicated while
    # keeping first-seen order.
    all_node=$(echo "${WORKER_NODES} ${MASTER_NODES} ${all_node}" | awk '{for (i=1;i<=NF;i++) if (!a[$i]++) printf("%s%s",$i,FS)}')
    for host in $all_node; do
        log::info "[reset]" "node $host"
        command::exec "${host}" "
            set +ex
            kubeadm reset -f
        "
        check::exit_code "$?" "reset" "$host: reset"
    done
    # Initialize the cluster.
    kubeadm::init
    # Join the remaining nodes.
    kubeadm::join
    # Install the network plugin.
    add::network
    # Install ingress.
    add::ingress
    # Install rancher.
    add::rancher
    # Resize the etcd snapshot job to the current master count.
    config::etcd_snapshot
    # Show cluster status.
    kube::status
}

# Reset every node of the cluster — both the nodes kubectl knows about
# and those passed on the command line. (unverified)
function reset::cluster() {
    # Truncate the checkpoint file so a later 'init' re-runs all steps.
    # Fix: the path is now quoted (SC2086).
    echo "" > "${SETUP_STATE_FILE}"
    local all_node=""
    command::exec "${MGMT_NODE}" "
        kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {end}'
    "
    get::command_output "all_node" "$?"
    # Merge CLI-supplied and cluster-reported nodes, de-duplicated while
    # keeping first-seen order.
    all_node=$(echo "${WORKER_NODES} ${MASTER_NODES} ${all_node}" | awk '{for (i=1;i<=NF;i++) if (!a[$i]++) printf("%s%s",$i,FS)}')
    local host
    for host in $all_node; do
        reset::node "$host"
    done
}

# Remove nodes from the cluster. (unverified)
# For each host in MASTER_NODES/WORKER_NODES: remove its etcd member
# (masters only), drain and delete the k8s node object, reset the host,
# and finally clean its /etc/hosts entry off every remaining node.
function del::node() {
    config::haproxy_backend "remove"
    local cluster_nodes=""
    local del_hosts_cmd=""
    command::exec "${MGMT_NODE}" "
        kubectl get node -o jsonpath='{range.items[*]}{.status.addresses[?(@.type==\"InternalIP\")].address} {.metadata.name }\\n{end}'
    "
    get::command_output "cluster_nodes" "$?" exit
    # Remove each master's etcd member before deleting the node itself.
    for host in $MASTER_NODES; do
        command::exec "${MGMT_NODE}" "
            etcd_pod=\$(kubectl -n kube-system get pods -l component=etcd --field-selector=status.phase=Running -o jsonpath='{\$.items[0].metadata.name}')
            etcd_node=\$(kubectl -n kube-system exec \$etcd_pod -- sh -c \"export ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt ETCDCTL_CERT=/etc/kubernetes/pki/etcd/server.crt ETCDCTL_KEY=/etc/kubernetes/pki/etcd/server.key ETCDCTL_ENDPOINTS=https://127.0.0.1:2379; etcdctl member list\"| grep $host | awk -F, '{print \$1}')
            echo \"\$etcd_pod \$etcd_node\"
            kubectl -n kube-system exec \$etcd_pod -- sh -c \"export ETCDCTL_API=3 ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt ETCDCTL_CERT=/etc/kubernetes/pki/etcd/server.crt ETCDCTL_KEY=/etc/kubernetes/pki/etcd/server.key ETCDCTL_ENDPOINTS=https://127.0.0.1:2379; etcdctl member remove \$etcd_node; etcdctl member list\"
        "
        check::exit_code "$?" "del" "remove $host etcd member"
    done
    for host in $MASTER_NODES $WORKER_NODES; do
        log::info "[del]" "node $host"
        local node_name; node_name=$(echo -ne "${cluster_nodes}" | grep "${host}" | awk '{print $2}')
        if [[ "${node_name}" == "" ]]; then
            log::warning "[del]" "node $host not found."
            # Unknown host: ask (10s timeout) whether to reset it anyway.
            read -r -t 10 -n 1 -p "Do you need to reset the node (y/n)? " answer
            [[ -z "$answer" || "$answer" != "y" ]] && exit || echo
        else
            log::info "[del]" "drain $host"
            # Fix: kubectl removed --delete-local-data in v1.23; the
            # replacement flag is --delete-emptydir-data.
            command::exec "${MGMT_NODE}" "kubectl drain $node_name --force --ignore-daemonsets --delete-emptydir-data"
            check::exit_code "$?" "del" "$host: drain"
            log::info "[del]" "delete node $host"
            command::exec "${MGMT_NODE}" "kubectl delete node $node_name"
            check::exit_code "$?" "del" "$host: delete"
            sleep 3
        fi
        reset::node "$host"
        # Fix: the inner double quotes previously terminated the string,
        # leaving the sed expression unquoted in the generated command;
        # escape them so the remote side runs: sed -i "/<host>/d" /etc/hosts
        del_hosts_cmd="${del_hosts_cmd}\nsed -i \"/$host/d\" /etc/hosts"
    done
    # Drop the deleted hosts' /etc/hosts entries on every remaining node.
    for host in $(echo -ne "${cluster_nodes}" | awk '{print $1}'); do
        log::info "[del]" "$host: remove del node hostname resolution"
        command::exec "${host}" "
            $(echo -ne "${del_hosts_cmd}")
        "
        check::exit_code "$?" "del" "remove del node hostname resolution"
    done
    # Master count changed — resize the etcd snapshot job accordingly.
    [ "$MASTER_NODES" != "" ] && config::etcd_snapshot
    kube::status
}

# Add new master/worker nodes to an existing cluster. (unverified)
function add::node() {
    # When KUBE_VERSION is empty or "latest", adopt the version already
    # running on the cluster's control-plane kubelets.
    case "${KUBE_VERSION}" in
        "" | "latest")
            command::exec "${MGMT_NODE}" "
            kubectl get node --selector='!node-role.kubernetes.io/worker' -o jsonpath='{range.items[*]}{.status.nodeInfo.kubeletVersion } {end}' | awk -F'v| ' '{print \$2}'
        "
            get::command_output "KUBE_VERSION" "$?" "exit"
            ;;
    esac
    init::add_node                   # 1. prepare the new hosts
    install::package                 # 2. install packages on them
    kubeadm::join                    # 3. join them to the cluster
    config::haproxy_backend "add"    # 4. register new apiservers in haproxy
    config::etcd_snapshot            # 5. update etcd snapshot replica count
    kube::status                     # 6. show the resulting cluster state
}

######################################################################################################
# main-入口
######################################################################################################

# Print usage and exit with status 1. The help text itself is
# user-facing runtime output and intentionally stays in Chinese.
function help::usage() {
    cat << EOF
使用kubeadm安装kubernetes集群。

Usage:
  $(basename "$SCRIPT_FILE_NAME") [command]

Available Commands:
  init      初始化Kubernetes集群
  reinit    重新初始化集群
  reset     重置集群(删除k8s)
  add       将节点添加到群集中
  del       从群集中删除节点

Flag:
  -m,--master          master节点(逗号分隔), 默认: ''
  -w,--worker          work节点(逗号分隔), 默认: ''
  -u,--user            ssh用户, 默认: ${SSH_USER}
  -p,--password        ssh密码
     --private-key     ssh私钥
  -P,--port            ssh端口, 默认: ${SSH_PORT}
  -v,--version         kube版本, 默认: ${KUBE_VERSION}
  -U,--upgrade-kernel  升级内核
     --mgmt-node-ip    apiserver.cluster.local直接使用第一个master节点ip
     --10years         证书期限为10年
     --sudo            sudo模式
     --sudo-user       sudo用户
     --sudo-password   sudo用户密码

Example:
  [init cluster]
  $SCRIPT_FILE_NAME init \\
  --master 192.168.77.130,192.168.77.131,192.168.77.132 \\
  --worker 192.168.77.133,192.168.77.134,192.168.77.135 \\
  --user root \\
  --password 123456 \\
  --version 1.20.4

EOF
    exit 1
}

# Tell the user how to follow the log file.
echo -e "\033[36m tail -F $LOG_FILE -n 100 \033[0m"
# Parse command-line arguments. Every flag that takes a value keeps its
# previous value when no argument follows it.
[ "$#" == "0" ] && help::usage
while [ "${1:-}" != "" ]; do
    case $1 in
        # -------------------------------------------------------------- commands
        init  )                 INIT_TAG=1
                                ;;
        reinit )                REINIT_TAG=1
                                ;;
        reset )                 RESET_TAG=1
                                ;;
        add )                   ADD_TAG=1
                                ;;
        del )                   DEL_TAG=1
                                ;;
        # -------------------------------------------------------------- options
        -m | --master )         shift
                                MASTER_NODES=${1:-$MASTER_NODES}
                                ;;
        -w | --worker )         shift
                                WORKER_NODES=${1:-$WORKER_NODES}
                                ;;
        -P | --port )           shift
                                SSH_PORT=${1:-$SSH_PORT}
                                ;;
        -u | --user )           shift
                                SSH_USER=${1:-$SSH_USER}
                                ;;
        -p | --password )       shift
                                SSH_PASSWORD=${1:-$SSH_PASSWORD}
                                ;;
        --private-key )         shift
                                # Bug fix: the fallback used to reference the
                                # misspelled SSH_SSH_PRIVATE_KEY, which under
                                # 'set -o nounset' aborts the script whenever
                                # --private-key is given without a value.
                                SSH_PRIVATE_KEY=${1:-${SSH_PRIVATE_KEY:-}}
                                ;;
        -v | --version )        shift
                                KUBE_VERSION=${1:-$KUBE_VERSION}
                                ;;
        -U | --upgrade-kernel ) UPGRADE_KERNEL_TAG=1
                                ;;
        --mgmt-node-ip )        MGMT_NODE_IP="first_master_ip"
                                ;;
        --10years )             CERT_YEAR_TAG=1
                                ;;
        --sudo )                SUDO_TAG=1
                                ;;
        --sudo-user )           shift
                                SUDO_USER=${1:-$SUDO_USER}
                                ;;
        --sudo-password )       shift
                                SUDO_PASSWORD=${1:-}
                                ;;
        * )                     help::usage
                                exit 1
    esac
    shift
done

# Start: log the full invocation with the ssh/sudo password masked out.
log::info "[start]" "bash $SCRIPT_FILE_NAME ${SCRIPT_PARAMETER//${SSH_PASSWORD:-${SUDO_PASSWORD:-}}/zzzzzz}"
# Normalize the parsed input (defined earlier in this file).
transform::data
# Preflight checks.
check::preflight
# Dispatch on the command tag set by the argument parser above.
if [[ "${INIT_TAG:-}" == "1" ]]; then
    # No masters given: do a single-node install on localhost.
    [[ "$MASTER_NODES" == "" ]] && MASTER_NODES="127.0.0.1"
    init::cluster
elif [[ "${REINIT_TAG:-}" == "1" ]]; then
    # NOTE(review): the callee really is named with a single colon.
    reinit:cluster
elif [[ "${RESET_TAG:-}" == "1" ]]; then
    reset::cluster
elif [[ "${ADD_TAG:-}" == "1" ]]; then
    # 'add' and 'del' require at least one node argument.
    if [[ "$MASTER_NODES" != "" || "$WORKER_NODES" != "" ]]; then
        add::node
    else
        help::usage
    fi
elif [[ "${DEL_TAG:-}" == "1" ]]; then
    if [[ "$MASTER_NODES" != "" || "$WORKER_NODES" != "" ]]; then
        del::node
    else
        help::usage
    fi
else
    help::usage
fi

# Example invocation:
# bash <(curl -s https://gitee.com/LiZhiW/helm-chart/raw/master/01shell/k8s-install-rocky9.sh) [cmd]