#!/bin/bash
# https://github.com/kubernetes/kubernetes/releases/tag/v1.30.5
# https://github.com/containerd/containerd/releases/download/v1.7.22/containerd-1.7.22-linux-amd64.tar.gz
# https://github.com/opencontainers/runc/releases/download/v1.1.14/runc.amd64
# https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-amd64-v1.5.1.tgz
# https://github.com/containerd/nerdctl/releases/download/v1.7.7/nerdctl-1.7.7-linux-amd64.tar.gz
# https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.31.1/crictl-v1.31.1-linux-amd64.tar.gz
# https://github.com/moby/buildkit/releases/download/v0.16.0/buildkit-v0.16.0.linux-amd64.tar.gz
# https://get.helm.sh/helm-v3.16.1-linux-amd64.tar.gz
set -euo pipefail

#****** flag vars **********#
# true or false
# KUBEADM_INSTALL_FLAG: install kubelet/kubeadm/kubectl via apt when true.
# KUBE_INIT_FLAG: run `kubeadm init` on this node when true.
# CONTAINERD_SET_PRIV_REGISTRY_FLAG: write certs.d/hosts.toml mirror configs
#   for the private registry when true.
KUBEADM_INSTALL_FLAG=true 
KUBE_INIT_FLAG=true
CONTAINERD_SET_PRIV_REGISTRY_FLAG=true


#****** package vars ******#
# Versions must match the release URLs listed in the header comments above.
CONTAINERD_VERSION="1.7.22"
CNI_VERSION="1.5.1"
RUNC_VERSION="1.1.14"
CRICTL_VERSION="1.31.1"
NERDCTL_VERSION="1.7.7"
BUILDKIT_VERSION="0.16.0"
HELM_VERSION="3.16.1"
# Local filenames the artifacts are downloaded to (and unpacked from).
CONTAINERD_PACK="containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz"
CNI_PACK="cni-plugins-linux-amd64-v${CNI_VERSION}.tgz"
RUNC_PACK="runc.amd64-v${RUNC_VERSION}"
CRICTL_PACK="crictl-v${CRICTL_VERSION}-linux-amd64.tar.gz"
NERDCTL_PACK="nerdctl-${NERDCTL_VERSION}-linux-amd64.tar.gz"
BUILDKIT_PACK="buildkit-v${BUILDKIT_VERSION}.linux-amd64.tar.gz"
HELM_PACK="helm-v${HELM_VERSION}-linux-amd64.tar.gz"


#****** proxy and priv registry vars and manual set registry ******#
GLOBAL_HTTP_PROXY="http://192.168.50.5:7890"
GLOBAL_HTTPS_PROXY="http://192.168.50.5:7890"
CONTAINERD_NO_PROXY="127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"

CONTAINERD_PRIV_REGISTRY="192.168.50.8"
CONTAINERD_PRIV_REGISTRY_URL="https://192.168.50.8:443"
# default DOCKER_REGISTRY_URL="https://docker.io"
DOCKER_REGISTRY_URL="https://192.168.50.8:443"

# CA certificate for the private registry; written to the current working
# directory below and later copied into containerd's certs.d directory.
PRIV_REGISTRY_CAFILE="cacert.pem"

cat > $PRIV_REGISTRY_CAFILE <<__EOF__
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIUcxAnanCSxmyTYSaMgVTFg7t1ckAwDQYJKoZIhvcNAQEL
BQAwajELMAkGA1UEBhMCQ04xEDAOBgNVBAgMB0JlaWppbmcxEDAOBgNVBAcMB0Jl
aWppbmcxDDAKBgNVBAoMA3hpZTEMMAoGA1UECwwDeGllMRswGQYDVQQDDBJjYS5o
b21lLnhpZXppcWlhbmcwHhcNMjQwODI5MjI0MzA1WhcNMzQwODI3MjI0MzA1WjBq
MQswCQYDVQQGEwJDTjEQMA4GA1UECAwHQmVpamluZzEQMA4GA1UEBwwHQmVpamlu
ZzEMMAoGA1UECgwDeGllMQwwCgYDVQQLDAN4aWUxGzAZBgNVBAMMEmNhLmhvbWUu
eGllemlxaWFuZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPSsltOf
CzN6qoipib3cWYwV9th2pHhwo+OllIyWO9I8SyHYK8ysOr5UplJLx+5mVZMkllJv
rKgoTnjsO+UWTIXDHthC1DXHmFNnD3Fcsd/idBarKuWL4UYANEnXYuZ/t0ywvaO1
ZhG5VG7rgz1RxogoR0H+vYw2Lsh4NhENC/Z56wJxuEx/0Skyfp+NGT9t/1vVAUFr
DBj/uLd88qvNJ9pDi0ArySEsLwIpv+totKkGZrKyuff1G9UjeYoiroezxuUJ+/qs
Jb0Pj9ZaCyMJiLyWPoLammpxXGwYJvlN9h3MTSdwigjvwpkba1YBu4i0sXIJSAyn
hF5DSoj/YJ0QdTUCAwEAAaNjMGEwHQYDVR0OBBYEFO+ttv3FUpyYYWD8g+9n4fL6
GyhRMB8GA1UdIwQYMBaAFO+ttv3FUpyYYWD8g+9n4fL6GyhRMA8GA1UdEwEB/wQF
MAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4IBAQCAbAXXJEkA
LHCNbObiPUXKPQS87wr07K1py93au2UPLzm8Hm4XrOH69b7Y11uSFP7/AmmAwy9a
dEH7B0zi5z1y+pQ5rg9NreXCojYxUaJByPn6SVe/sns1oBGCeH4SFloCiynQ/MqP
WbmKxYIkKk+0W7gB4CcrGisBApY74u4f3YR0EIeEd4veDrwOMkjIOPiV+F9F0+JY
D6UZOj6iizegGu/0jfMNi76iEzJFqTder2jSDrSJ+ViUxdJpEqljp9pQIezqWv97
q54QdvECNrcFgWHvv6YEAv2Sd3W2U6SFUzB4JJiPsfriiFd+QNu0D2hbFDC819Ua
E48OL4jE9DBe
-----END CERTIFICATE-----
__EOF__

# importantly !
######### if CONTAINERD_SET_PRIV_REGISTRY_FLAG=false,change those vars.
#   KUBE_INIT_REPOSITORY="registry.k8s.io"
#   KUBE_INIT_REPOSITORY="registry.aliyuncs.com/google_containers"
KUBE_INIT_REPOSITORY="${CONTAINERD_PRIV_REGISTRY}/k8s"
#   SANDBOX_IMAGE="registry.k8s.io/pause:3.9"
SANDBOX_IMAGE="${CONTAINERD_PRIV_REGISTRY}/k8s/pause:3.9"
# use in install CNI plugins.
#   DOCKER_SUB_PROJECT_REGISTRY="docker.io"
DOCKER_SUB_PROJECT_REGISTRY="docker.io/library"


#****** k8s  vars ******#
KUBE_VERSION="1.30.5"  # need confirm it is highest sub version. like v1.30.5 is v1.30.x highest sub version.
KUBE_CLUSTER_HOSTNAME_ARRAY=("k8s-master001.home.xieziqiang" "k8s-master002.home.xieziqiang" "k8s-master003.home.xieziqiang" "k8s-worker001.home.xieziqiang" "k8s-worker002.home.xieziqiang" "k8s-worker003.home.xieziqiang")
# must match HOSTNAME_ARRAY and IP_ARRAY one by one (same index = same node)
KUBE_CLUSTER_IP_ARRAY=("10.1.1.60" "10.1.1.61" "10.1.1.62" "10.1.1.70" "10.1.1.71" "10.1.1.72")
KUBE_API_IP="10.1.1.60"
KUBE_API_DNS_DOMAIN="k8s-apiserver.home.xieziqiang"
KUBE_API_DNS_DOMAIN_PORT="6443"
# First address reported by `hostname -I` is assumed to be this node's
# cluster-facing IP; it must appear in KUBE_CLUSTER_IP_ARRAY.
KUBE_CURRENT_NODE_IP="$(hostname -I | awk '{print $1}')"

# k8s init vars
KUBE_ETCD_DATA_PATH="/var/lib/etcd"
# token must satisfy "\A([a-z0-9]{6})\.([a-z0-9]{16})\z"
KUBE_TOKEN="xieziq.9emxoyc82lo3ltf6"
KUBE_NET_PLUGIN_TYPE="flannel"   # item: flannel,calico,cilium
KUBE_SERVICE_NET="10.96.0.0/12"
KUBE_PODS_NET="10.128.0.0/9"
KUBE_PODS_SUBNET_LEN="24"
CLUSTER_NAME="xie-kubernetes"
CLUSTER_DNS_DOMAIN="cluster.local"

#****** containerd directory vars ******#
# Install/config/data/state locations for every component. The state paths
# (/run/...) hold the unix sockets the tools talk to.
# containerd vars
CONTAINERD_INSTALL_PATH="/usr/local/containerd"
CONTAINERD_CONFIG_PATH="/usr/local/containerd/conf"
CONTAINERD_DATA_PATH="/data/containerd"
CONTAINERD_STATE_PATH="/run/containerd"       # socket file also in here
# runc vars
RUNC_INSTALL_BIN_PATH="/usr/local/containerd/bin"
# crictl vars
CRICTL_INSTALL_BIN_PATH="/usr/local/bin"      # suggest don't change it
CRICTL_CONF="/etc/crictl.yaml"                # suggest don't change it

# nerdctl vars
NERDCTL_INSTALL_BIN_PATH="/usr/local/bin"     # suggest don't change it

# buildkit vars
BUILDKIT_INSTALL_PATH="/usr/local/buildkit"
BUILDKIT_DATA_PATH="/data/buildkit"
BUILDKIT_STATE_PATH="/run/buildkit"                        # socket file location

# helm vars
HELM_INSTALL_BIN_PATH="/usr/local/bin"

# cni plugins var
CNI_BIN_PATH="/usr/local/containerd/cni/bin"
CNI_CONF_PATH="/usr/local/containerd/cni/net.d"

#****** functions ******#
color() {
    # Print "$1" and a colored status tag at the middle terminal column.
    # $2 selects the tag: success|0 -> OK, failure|1 -> FAILED, else WARNING.
    local message="$1"
    local status="$2"
    local terminal_width
    # `tput cols` fails when there is no tty/TERM (cron, CI, piped output);
    # without a fallback the failing assignment aborts the whole script
    # under `set -e`. Default to a conventional 80-column width.
    terminal_width=$(tput cols 2>/dev/null) || terminal_width=80
    local middle_column=$(( terminal_width / 2))
    local SETCOLOR_SUCCESS="\033[1;32m"  # green
    local SETCOLOR_FAILURE="\033[1;31m"  # red
    local SETCOLOR_WARNING="\033[1;33m"  # yellow
    local SETCOLOR_NORMAL="\033[0m"      # reset color
    # NOTE(review): the \n before the cursor-move escape (\033[<col>G) puts
    # the status tag on the line *after* the message -- confirm intended.
    printf '%s\n\033[%sG' "${message}"  "${middle_column}"
    # Print the status message
    printf "["
    case ${status} in
        success|0)
            echo -en  "${SETCOLOR_SUCCESS}  OK  "
            ;;
        failure|1)
            echo -en  "${SETCOLOR_FAILURE}FAILED"
            ;;
        *)
            echo -en "${SETCOLOR_WARNING}WARNING"
            ;;
    esac
    echo -e "${SETCOLOR_NORMAL}]"
}

judge_true_false () { # $1 is flag value, $2 is function to run when $1 is true
    # Validate that $1 is literally "true" or "false"; invoke "$2" when true.
    # Fixes: the old regex `^true|false$` matched e.g. "truely" or "xfalse"
    # because `|` binds looser than the anchors; and the bare `exit` returned
    # the status of the preceding `color` call (0), so invalid flags did not
    # signal failure.
    if ! [[ $1 =~ ^(true|false)$ ]];then
        color "$1 value is not true or false,check your vars set." 1
        exit 1
    elif [ "$1" = true ];then
        "$2"
    fi
}

create_dir () {
    # Ensure every directory given as an argument exists, reporting for each
    # whether it was freshly created or already present.
    local dir
    for dir in "$@";do
        if [ -d "$dir" ];then
            color "$dir dir is existed." 2
        else
            mkdir -pv "$dir" && color "$dir dir is created" 0
        fi
    done
}

judge_current_user () {
    # Abort unless running as root (uid 0); package installs, chown and
    # systemd operations below all require it.
    # Fix: the bare `exit` inherited the status of the preceding `color`
    # call (0), so a non-root run terminated with success.
    if [ "$(id -u)" = "0" ];then
        color "current user is administrator" 0
    else
        color "current user is not administrator" 1
        exit 1
    fi
}

judge_file_exist () {
    # Verify every path argument exists (any file type, `-e`); exit non-zero
    # on the first missing one.
    # Fix: the bare `exit` inherited the status of the preceding `color`
    # call (0), so a missing file terminated the script with success.
    local path
    for path in "$@";do
        if [ -e "$path" ];then
            color "current dir $path exist." 0
        else
            color "current dir don't exist $path" 1
            exit 1
        fi
    done
}

ip_to_hostname () { # $1 is ip
    # Map a cluster IP to its hostname via the parallel arrays
    # KUBE_CLUSTER_IP_ARRAY / KUBE_CLUSTER_HOSTNAME_ARRAY.
    # Echoes the hostname on a match, "error_ip" if an array entry is not a
    # dotted quad, or "error_ip_nomatch_arr" if $1 is not in the array.
    # Fix: `i` is now local -- previously it clobbered the caller's global
    # loop counter (this function is called from inside `for((i=...))` loops).
    local i
    local result="error_ip_nomatch_arr"
    for((i=0;i<${#KUBE_CLUSTER_IP_ARRAY[@]};i++));do
        if ! [[ ${KUBE_CLUSTER_IP_ARRAY[i]} =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]];then
            result="error_ip"
            break
        fi
        if [ "${KUBE_CLUSTER_IP_ARRAY[i]}" = "$1" ];then
            result="${KUBE_CLUSTER_HOSTNAME_ARRAY[i]}"
            break
        fi
    done
    echo "$result"
}

package_download () {
    # Fetch every release artifact through the HTTPS proxy, skipping any file
    # that is already present in the working directory.
    echo "begin download packages..."
    local entry url pack
    local -a artifacts=(
        "https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz ${CONTAINERD_PACK}"
        "https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.amd64 ${RUNC_PACK}"
        "https://github.com/containernetworking/plugins/releases/download/v${CNI_VERSION}/cni-plugins-linux-amd64-v${CNI_VERSION}.tgz ${CNI_PACK}"
        "https://github.com/containerd/nerdctl/releases/download/v${NERDCTL_VERSION}/nerdctl-${NERDCTL_VERSION}-linux-amd64.tar.gz ${NERDCTL_PACK}"
        "https://github.com/kubernetes-sigs/cri-tools/releases/download/v${CRICTL_VERSION}/crictl-v${CRICTL_VERSION}-linux-amd64.tar.gz ${CRICTL_PACK}"
        "https://github.com/moby/buildkit/releases/download/v${BUILDKIT_VERSION}/buildkit-v${BUILDKIT_VERSION}.linux-amd64.tar.gz ${BUILDKIT_PACK}"
        "https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz ${HELM_PACK}"
    )
    # Each entry is "URL LOCALFILE"; neither part ever contains spaces.
    for entry in "${artifacts[@]}"; do
        url=${entry%% *}
        pack=${entry##* }
        [ -e "$pack" ] || curl -x "$GLOBAL_HTTPS_PROXY" -fsSL "$url" -o "$pack"
    done
    color "packages download complete." 0
}

# Write containerd certs.d hosts.toml files: the private registry gets its
# own host entry, and docker.io / quay.io are redirected to the private
# mirror (DOCKER_REGISTRY_URL), all trusting the CA in PRIV_REGISTRY_CAFILE.
# NOTE(review): despite the original "$1..$3" note, this function takes no
# positional arguments -- it reads the CONTAINERD_* / DOCKER_REGISTRY_URL
# globals only.
config_containerd_priv_registry () { # uses globals; no positional args
   judge_file_exist $PRIV_REGISTRY_CAFILE 
   create_dir "$CONTAINERD_CONFIG_PATH/certs.d/$CONTAINERD_PRIV_REGISTRY" "$CONTAINERD_CONFIG_PATH/certs.d/certs" "$CONTAINERD_CONFIG_PATH/certs.d/docker.io" "$CONTAINERD_CONFIG_PATH/certs.d/quay.io" 
   # \cp bypasses a possible `cp -i` alias so the copy never prompts.
   \cp $PRIV_REGISTRY_CAFILE "$CONTAINERD_CONFIG_PATH/certs.d/certs/"
   cat > "$CONTAINERD_CONFIG_PATH/certs.d/$CONTAINERD_PRIV_REGISTRY/hosts.toml" <<__EOF__
server = "$CONTAINERD_PRIV_REGISTRY_URL"

[host."$CONTAINERD_PRIV_REGISTRY_URL"]
  capabilities = ["pull", "resolve", "push"]
    ca = "$CONTAINERD_CONFIG_PATH/certs.d/certs/$PRIV_REGISTRY_CAFILE"
    skip_verify = false
__EOF__

    # set docker registry 
    # (server stays docker.io; the host entry points pulls at the mirror)
    cat > $CONTAINERD_CONFIG_PATH/certs.d/docker.io/hosts.toml <<__EOF__
server = "https://docker.io"

[host."${DOCKER_REGISTRY_URL}"]
  capabilities = ["pull", "resolve", "push"]
    ca = "$CONTAINERD_CONFIG_PATH/certs.d/certs/$PRIV_REGISTRY_CAFILE"
    skip_verify = false
__EOF__
    # set quay.io registry
    cat > $CONTAINERD_CONFIG_PATH/certs.d/quay.io/hosts.toml <<__EOF__
server = "https://quay.io"

[host."${DOCKER_REGISTRY_URL}"]
  capabilities = ["pull", "resolve", "push"]
    ca = "$CONTAINERD_CONFIG_PATH/certs.d/certs/$PRIV_REGISTRY_CAFILE"
    skip_verify = false
__EOF__
}

# Unpack containerd and install runc, generate the default config.toml, then
# patch it for our custom data/state paths, socket location, runc binary,
# certs.d registry config dir, sandbox image and SystemdCgroup.
install_containerd_and_runc () {
    create_dir "$CONTAINERD_INSTALL_PATH" "$CONTAINERD_DATA_PATH" "$CONTAINERD_STATE_PATH" "$RUNC_INSTALL_BIN_PATH" "$CONTAINERD_CONFIG_PATH"
    tar -xvf "$CONTAINERD_PACK" -C "$CONTAINERD_INSTALL_PATH"
    # \cp bypasses a possible `cp -i` alias so the copy never prompts.
    \cp $RUNC_PACK $RUNC_INSTALL_BIN_PATH/runc
    # generate containerd config file
    $CONTAINERD_INSTALL_PATH/bin/containerd config default > $CONTAINERD_CONFIG_PATH/config.toml
    # change data dir,state dir,sock file,runc path,registory conf,sandbox_image
    # (the "/pattern/,+N" addresses restrict each substitution to the N lines
    # following the matched section header, so only that section is edited)
    sed -i -e "/^root = /c root = \"$CONTAINERD_DATA_PATH\"" \
           -e "/^state = /c state = \"$CONTAINERD_STATE_PATH\"" \
           -e "/\[grpc\]/,+1s@address = \".*\"@address = \"$CONTAINERD_STATE_PATH/containerd.sock\"@" \
           -e "/containerd.runtimes.runc.options/,+1s@BinaryName = \".*\"@BinaryName = \"$RUNC_INSTALL_BIN_PATH/runc\"@" \
           -e "/\[plugins.\"io.containerd.grpc.v1.cri\".registry\]/,+1s@config_path = \".*\"@config_path = \"${CONTAINERD_CONFIG_PATH}/certs.d\"@" \
           -e "/sandbox_image/s@sandbox_image = \".*\"@sandbox_image = \"$SANDBOX_IMAGE\"@" \
           $CONTAINERD_CONFIG_PATH/config.toml
    # user SystemdCgroup (kubelet's cgroup driver defaults to systemd)
    sed -i "/SystemdCgroup/s/false/true/" $CONTAINERD_CONFIG_PATH/config.toml

    # set priv registry
    judge_true_false $CONTAINERD_SET_PRIV_REGISTRY_FLAG  config_containerd_priv_registry
    
    chown -R 0:0 $CONTAINERD_INSTALL_PATH $RUNC_INSTALL_BIN_PATH $CONTAINERD_CONFIG_PATH
    chmod 755 $CONTAINERD_INSTALL_PATH/bin/* $RUNC_INSTALL_BIN_PATH/runc
    color "containerd and runc install success." 0
}

# Unpack the CNI plugins and point containerd's cri.cni section at our
# non-default bin/conf directories.
install_cni () {
    create_dir "$CNI_BIN_PATH" "$CNI_CONF_PATH"
    tar -xvf $CNI_PACK -C $CNI_BIN_PATH
    # Only rewrite bin_dir/conf_dir within 2 lines of the [..cri".cni] header.
    sed -i -e "/plugins.\"io.containerd.grpc.v1.cri\".cni]/,+2s@bin_dir = \".*\"@bin_dir = \"$CNI_BIN_PATH\"@" \
           -e "/plugins.\"io.containerd.grpc.v1.cri\".cni]/,+2s@conf_dir = \".*\"@conf_dir = \"$CNI_CONF_PATH\"@" \
           $CONTAINERD_CONFIG_PATH/config.toml
    chown -R 0:0 $CNI_BIN_PATH
    chmod 755 $CNI_BIN_PATH/*
    color "cni  plugin  install success." 0
}

install_crictl () {
    # Unpack crictl, give it root ownership and exec bits, and point it at
    # the containerd CRI socket via /etc/crictl.yaml.
    create_dir "$CRICTL_INSTALL_BIN_PATH" "${CRICTL_CONF%/*}"
    tar -xvf "$CRICTL_PACK" -C "$CRICTL_INSTALL_BIN_PATH/"
    chown 0:0 "$CRICTL_INSTALL_BIN_PATH/crictl"
    chmod 755 "$CRICTL_INSTALL_BIN_PATH/crictl"
    cat > "$CRICTL_CONF" <<__EOF__
runtime-endpoint: $CONTAINERD_STATE_PATH/containerd.sock
image-endpoint: $CONTAINERD_STATE_PATH/containerd.sock
timeout: 10
debug: true
__EOF__
    color "$CRICTL_INSTALL_BIN_PATH/crictl install success" 0
}

install_nerdctl () {
    # Install the nerdctl binary and publish a profile.d snippet exporting
    # the CNI paths, containerd/buildkit socket addresses and PATH additions
    # that nerdctl and the other tools rely on.
    create_dir "$NERDCTL_INSTALL_BIN_PATH"
    tar -xvf "$NERDCTL_PACK" -C "$NERDCTL_INSTALL_BIN_PATH/" nerdctl
    chown 0:0 "$NERDCTL_INSTALL_BIN_PATH/nerdctl"
    chmod 755 "$NERDCTL_INSTALL_BIN_PATH/nerdctl"

    # \$PATH is escaped so it expands at login time, not here.
    cat > /etc/profile.d/containerd_nerdctl.sh <<__EOF__
export CNI_PATH="$CNI_BIN_PATH"
export NETCONFPATH="$CNI_CONF_PATH"
export CONTAINERD_ADDRESS="$CONTAINERD_STATE_PATH/containerd.sock"
export BUILDKIT_HOST="unix://$BUILDKIT_STATE_PATH/buildkitd.sock"
export PATH="\$PATH:$CONTAINERD_INSTALL_PATH/bin:$RUNC_INSTALL_BIN_PATH:$BUILDKIT_INSTALL_PATH/bin"
__EOF__
    echo -e "need run \E[33;1m source /etc/profile.d/containerd_nerdctl.sh\E[0m"
    color "$NERDCTL_INSTALL_BIN_PATH/nerdctl install success" 0
}

install_helm () {
    # Extract just the helm binary (stripping the linux-amd64/ prefix) into
    # HELM_INSTALL_BIN_PATH and normalize ownership/permissions.
    # Fix: chown/chmod previously targeted $NERDCTL_INSTALL_BIN_PATH
    # (copy-paste from install_nerdctl) -- it only worked because both
    # happen to be /usr/local/bin; they can diverge.
    create_dir "$HELM_INSTALL_BIN_PATH"
    tar -xvf "$HELM_PACK" --strip-components=1 -C "$HELM_INSTALL_BIN_PATH" linux-amd64/helm
    chown 0:0 "$HELM_INSTALL_BIN_PATH/helm"
    chmod 755 "$HELM_INSTALL_BIN_PATH/helm"
    # report success like the sibling install_* functions do
    color "$HELM_INSTALL_BIN_PATH/helm install success" 0
}

install_buildkit () {
    # Unpack the buildkit release into its install prefix and normalize
    # ownership and exec permissions on its binaries.
    create_dir "$BUILDKIT_INSTALL_PATH" "$BUILDKIT_DATA_PATH" "$BUILDKIT_STATE_PATH"
    tar -xvf "$BUILDKIT_PACK" -C "$BUILDKIT_INSTALL_PATH/"
    chown 0:0 "$BUILDKIT_INSTALL_PATH"/bin/*
    chmod 755 "$BUILDKIT_INSTALL_PATH"/bin/*
}

start_buildkit () {
    # Render systemd socket+service units for buildkitd and (re)start them.
    # Stop any running instances first so the fresh unit files take effect.
    if systemctl status buildkit.socket &> /dev/null;then
        systemctl stop buildkit.socket
    fi
    if systemctl status buildkit.service &> /dev/null;then
        systemctl stop buildkit.service
    fi

    cat >  /lib/systemd/system/buildkit.socket <<__EOF__
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit

[Socket]
ListenStream=$BUILDKIT_STATE_PATH/buildkitd.sock
SocketMode=0660

[Install]
WantedBy=sockets.target
__EOF__

    # NOTE(review): in an unquoted here-doc, backslash-newline is a line
    # continuation, so ExecStart is written to the unit as one long line --
    # harmless for systemd, just surprising when reading the generated file.
    cat >  /lib/systemd/system/buildkit.service <<__EOF__
[Unit]
Description=BuildKit
Requires=buildkit.socket
After=buildkit.socket
Documentation=https://github.com/moby/buildkit

[Service]
Type=notify
ExecStart=$BUILDKIT_INSTALL_PATH/bin/buildkitd --addr fd:// \
          --root "$BUILDKIT_DATA_PATH" \
          --containerd-worker-addr  "$CONTAINERD_STATE_PATH/containerd.sock" \
          --containerd-cni-binary-dir "$CNI_BIN_PATH"

[Install]
WantedBy=multi-user.target
__EOF__
    systemctl daemon-reload
    systemctl enable buildkit.socket buildkit.service
    # Fix: the old `start && color ... || { color ...; exit; }` form ran the
    # failure branch even when only the success `color` failed, and the bare
    # `exit` reused color's status (0) so a failed start exited with success.
    if systemctl start buildkit.socket buildkit.service; then
        color "buildkit.socket,buildkit.service start sucess!" 0
    else
        color "buildkit.socket,buildkit.service  start fail." 1
        exit 1
    fi
}

start_containerd () {
    # Render the containerd systemd unit (with proxy env and our custom
    # config path) and (re)start the service.
    if systemctl status containerd.service &> /dev/null;then
        systemctl stop containerd.service
    fi
    cat > /lib/systemd/system/containerd.service <<EOF
# https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
Environment="HTTP_PROXY=$GLOBAL_HTTP_PROXY"
Environment="HTTPS_PROXY=$GLOBAL_HTTPS_PROXY"
Environment="NO_PROXY=$CONTAINERD_NO_PROXY"
Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$CONTAINERD_INSTALL_PATH/bin:$RUNC_INSTALL_BIN_PATH:$BUILDKIT_INSTALL_PATH/bin"
ExecStartPre=-/sbin/modprobe overlay
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStart=$CONTAINERD_INSTALL_PATH/bin/containerd -c $CONTAINERD_CONFIG_PATH/config.toml

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable containerd.service
    # Fix: the old `start && color ... || { color ...; exit; }` form ran the
    # failure branch even when only the success `color` failed, and the bare
    # `exit` reused color's status (0) so a failed start exited with success.
    if systemctl start containerd.service; then
        color "containerd.service start sucess!" 0
    else
        color "containerd.service  start fail." 1
        exit 1
    fi
}

# Persist the kernel modules (overlay/br_netfilter for containerd, ip_vs* for
# kube-proxy ipvs mode, netfilter/ipset bits for the CNIs) and the sysctl
# tuning kubernetes expects, then load both.
# NOTE(review): ip_vs_sh appears twice in the module list and
# net.ipv4.tcp_max_syn_backlog twice in the sysctl file (the later value
# wins) -- harmless duplicates, kept as-is here.
set_kernel_args () {
    cat > /etc/modules-load.d/k8s-kernel-modules.conf <<__EOF__
overlay
br_netfilter
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
__EOF__
    # "99-z-" prefix sorts this file last so it overrides earlier sysctl files.
    cat > /etc/sysctl.d/99-z-k8s-kernel-args.conf <<__EOF__
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
net.ipv4.conf.all.route_localnet = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
__EOF__
    # load the module list now, then apply all sysctl files
    systemctl restart systemd-modules-load.service
    sysctl --system

    color "linux kernel args set complete." 0
}

k8s_install_containerd_runtime () {
    # Install the container runtime stack. containerd+runc are mandatory;
    # cni/crictl/buildkit/nerdctl/helm are installed only when their tarball
    # is present, otherwise a warning is printed.
    # Fix: the old `[ -e pack ] && install || color warn` form also printed
    # the misleading "not exist" warning whenever the *install* step failed;
    # explicit if/else keeps the two outcomes separate.
    judge_file_exist "$CONTAINERD_PACK" "$RUNC_PACK"
    install_containerd_and_runc
    if [ -e "$CNI_PACK" ]; then
        install_cni
    else
        color "CNI_PACK is not exist or set." 2
    fi
    start_containerd

    if [ -e "$CRICTL_PACK" ]; then
        install_crictl
    else
        color "CRICTL_PACK is not exist or set." 2
    fi
    if [ -e "$BUILDKIT_PACK" ]; then
        install_buildkit
        start_buildkit
    else
        color "BUILDKIT_PACK is not exist or set." 2
    fi
    if [ -e "$NERDCTL_PACK" ]; then
        install_nerdctl
    else
        color "NERDCTL_PACK is not exist or set." 2
    fi
    if [ -e "$HELM_PACK" ]; then
        install_helm
    else
        color "HELM_PACK is not exist or set." 2
    fi
}

k8s_set_hostname () {
    # Rewrite /etc/hosts with the API endpoint plus every cluster node, and
    # set this machine's hostname from its cluster IP.
    # Fix: resolve and validate the current node's hostname up front --
    # previously an IP missing from KUBE_CLUSTER_IP_ARRAY made hostnamectl
    # set the hostname to the literal sentinel "error_ip_nomatch_arr".
    local current_hostname
    current_hostname="$(ip_to_hostname "${KUBE_CURRENT_NODE_IP}")"
    if [[ $current_hostname =~ ^error_ip.*$ ]];then
        color "current node ip ${KUBE_CURRENT_NODE_IP} has no hostname mapping. error code is $current_hostname" 1
        exit 1
    fi
    echo "change /etc/hosts file ..."
    tee /etc/hosts << __EOF__
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
${KUBE_API_IP} ${KUBE_API_DNS_DOMAIN}
__EOF__
    local i
    for((i=0;i<${#KUBE_CLUSTER_IP_ARRAY[@]};i++));do
        local __TEMP_TARGET_HOSTNAME
        __TEMP_TARGET_HOSTNAME="$(ip_to_hostname "${KUBE_CLUSTER_IP_ARRAY[i]}")"
        if [[ $__TEMP_TARGET_HOSTNAME =~ ^error_ip.*$ ]];then
            color "ip or hostname set error. error code is $__TEMP_TARGET_HOSTNAME" 1
            exit 1
        else
            echo "${KUBE_CLUSTER_IP_ARRAY[i]}  ${__TEMP_TARGET_HOSTNAME}" | tee -a /etc/hosts
        fi
    done

    hostnamectl set-hostname "$current_hostname"
}

# Configure /etc/hosts + hostname, add the upstream pkgs.k8s.io apt repo for
# the KUBE_VERSION minor release, then install and hold kubelet/kubeadm/kubectl.
k8s_install_kubeadm () {
    # set /etc/hosts
    k8s_set_hostname
    sudo apt-get update
    # apt-transport-https may be a dummy package; if so, you can skip installing it
    sudo apt-get install -y apt-transport-https ca-certificates curl gpg \
        sysstat iproute2 iputils-tracepath iputils-ping \
        libseccomp2 conntrack ipvsadm ipset 
    # If the /etc/apt/keyrings directory does not exist, it must be created
    # before the curl command below.
    # sudo mkdir -p -m 755 /etc/apt/keyrings
    create_dir /etc/apt/keyrings
    # remove a stale keyring first so `gpg --dearmor` never prompts to overwrite
    if [ -e "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" ];then
        rm -f "/etc/apt/keyrings/kubernetes-apt-keyring.gpg"
    fi
    # ${KUBE_VERSION%.*} strips the patch level: 1.30.5 -> repo channel v1.30
    curl -x "$GLOBAL_HTTPS_PROXY" -fsSL https://pkgs.k8s.io/core:/stable:/v${KUBE_VERSION%.*}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

    # This overwrites any existing configuration in /etc/apt/sources.list.d/kubernetes.list.
    echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${KUBE_VERSION%.*}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list

    # install kubelet ,kubeadm,kubectl (through the proxy; held to pin the version)
    # NOTE(review): first line uses `apt update` without sudo while the others
    # use sudo -- fine since judge_current_user enforces root, but inconsistent.
    https_proxy=${GLOBAL_HTTPS_PROXY} apt update
    https_proxy=${GLOBAL_HTTPS_PROXY} sudo apt-get install -y kubelet kubeadm kubectl
    https_proxy=${GLOBAL_HTTPS_PROXY} sudo apt-mark hold kubelet kubeadm kubectl
    color "kubelet kubeadm kubectl install success." 0
}

k8s_check_before_reset_cluster () {
    # If a kubeconfig is already present, assume a cluster exists and wipe it
    # (kubeadm reset, ipvs/iptables flush, state dirs) after a 15-second
    # countdown; ctrl-c aborts, doing nothing.
    [ -e "$HOME/.kube/config" ] || return 0
    color "find $HOME/.kube/config" 2
    echo -e "\E[31;1m the k8s cluster may installed already,Would you want clear it ? you need decide in 15 seconds! default is clear,ctrl + c to cancel.\E[0m"
    local remaining
    for remaining in {15..1}; do
        sleep 1
        echo "Countdown $remaining second"
    done
    color "begin clear k8s cluster" 2
    kubeadm reset -f
    ipvsadm --clear
    iptables -F
    rm -rf "$HOME/.kube" /etc/kubernetes/ "${KUBE_ETCD_DATA_PATH}" /var/lib/kubelet
    color "k8s cluster clear success." 0
}

k8s_install_network_plugin () { # $1 is plugin type,item: flannel,calico,cilium
    # Download and apply the selected CNI, patched for our non-default CNI
    # bin/conf directories and pod network settings.
    case $1 in
    flannel)
    # https://github.com/flannel-io/flannel
        curl -x "$GLOBAL_HTTPS_PROXY" -fsSL https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml -o "$HOME/.kube/kube-flannel-${CLUSTER_NAME}.yml"
        # point the hostPath mounts at our CNI dirs, inject SubnetLen, swap
        # the default pod CIDR and registry prefix, then apply
        sed -i "/hostPath/,+1{s@/opt/cni/bin@${CNI_BIN_PATH}@}"  "$HOME/.kube/kube-flannel-${CLUSTER_NAME}.yml"
        sed -i "/hostPath/,+1{s@/etc/cni/net.d@${CNI_CONF_PATH}@}"  "$HOME/.kube/kube-flannel-${CLUSTER_NAME}.yml"
        sed -i "/EnableNFTables/a\      \"SubnetLen\": ${KUBE_PODS_SUBNET_LEN}," "$HOME/.kube/kube-flannel-${CLUSTER_NAME}.yml"
        sed -i "/10.244.0.0\/16./s@10.244.0.0/16@${KUBE_PODS_NET}@" "$HOME/.kube/kube-flannel-${CLUSTER_NAME}.yml"
        sed -i "/docker.io/s@docker.io@${DOCKER_SUB_PROJECT_REGISTRY}@" "$HOME/.kube/kube-flannel-${CLUSTER_NAME}.yml"
        kubectl apply -f "$HOME/.kube/kube-flannel-${CLUSTER_NAME}.yml"
        ;;
    calico)
    # https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico
        curl -x "$GLOBAL_HTTPS_PROXY" -fsSL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico-typha.yaml -o "$HOME/.kube/calico-typha-${CLUSTER_NAME}.yml"

        sed -i "/docker.io\/calico/s@docker.io/calico@docker.io/library/calico@" "$HOME/.kube/calico-typha-${CLUSTER_NAME}.yml"
        sed -i "/hostPath/,+1s@/opt/cni/bin@${CNI_BIN_PATH}@" "$HOME/.kube/calico-typha-${CLUSTER_NAME}.yml"    
        sed -i "/hostPath/,+1s@/etc/cni/net.d@${CNI_CONF_PATH}@" "$HOME/.kube/calico-typha-${CLUSTER_NAME}.yml"    
        sed -i "/__KUBECONFIG_FILEPATH__/s@__KUBECONFIG_FILEPATH__@${CNI_CONF_PATH}/calico-kubeconfig@" "$HOME/.kube/calico-typha-${CLUSTER_NAME}.yml" 
        kubectl create -f "$HOME/.kube/calico-typha-${CLUSTER_NAME}.yml" 
        # replace the default IPPool with one matching our pod CIDR
        kubectl apply -f - <<__EOF__
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
  name: default-ipv4-ippool
spec:
  cidr: ${KUBE_PODS_NET}
  blockSize: ${KUBE_PODS_SUBNET_LEN}
  ipipMode: Always
  vxlanMode: Never
  natOutgoing: true
__EOF__
        ;;
    cilium)
    # https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/
        local CILIUM_CLI_VERSION
        local CLI_ARCH
        CILIUM_CLI_VERSION=$(curl -s  -x "$GLOBAL_HTTPS_PROXY" https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
        CLI_ARCH=amd64;if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
        curl -x "$GLOBAL_HTTPS_PROXY" -fsSL  https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz | tar -zxvf - -C  /usr/local/bin/
        # Fix: helm value keys are case-sensitive; the old
        # `ipam.Operator.ClusterPoolIPv4MaskSize` was silently ignored and the
        # mask size stayed at cilium's default. Correct key is lower-camel.
        /usr/local/bin/cilium install --version 1.16.1 \
            --set kubeProxyReplacement=true \
            --set ipam.mode=kubernetes \
            --set routingMode=tunnel \
            --set tunnelProtocol=vxlan \
            --set ipam.operator.clusterPoolIPv4PodCIDRList=${KUBE_PODS_NET} \
            --set ipam.operator.clusterPoolIPv4MaskSize=${KUBE_PODS_SUBNET_LEN} \
            --set ingressController.enabled=true \
            --set ingressController.loadbalancerMode=dedicated \
            --set ingressController.default=true     \
            --set cni.binPath=${CNI_BIN_PATH} \
            --set cni.confPath=${CNI_CONF_PATH}
        ;;
    *)
        color "network plugin not installed! unknown network plugin type." 1
        return 1
        ;;
    esac
    color "complete apply $1 CNI" 0
}

# Initialize the first control-plane node with kubeadm:
#  1. optionally wipe an existing cluster (interactive countdown),
#  2. render an InitConfiguration/ClusterConfiguration YAML,
#  3. pre-pull the control-plane images,
#  4. run `kubeadm init` and capture the printed join commands,
#  5. install the selected CNI plugin.
k8s_init_cluster_kubeadm () {
    k8s_check_before_reset_cluster
    create_dir "${HOME}/.kube"
    local KUBEADM_CONFIGFILE
    KUBEADM_CONFIGFILE="${HOME}/.kube/kubeadmin-config-${CLUSTER_NAME}.yaml"

    # The YAML below (including its comments) is written verbatim into the
    # kubeadm config file.
    cat > "$KUBEADM_CONFIGFILE" <<__EOF__
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
# 自定义token，需要满足 "\\A([a-z0-9]{6})\\.([a-z0-9]{16})\\z"
  token: ${KUBE_TOKEN}
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
# 这里的地址即为初始化的控制平面第一个节点的IP地址；
  advertiseAddress: ${KUBE_CURRENT_NODE_IP}
  bindPort: 6443
nodeRegistration:
  criSocket: unix://$CONTAINERD_STATE_PATH/containerd.sock
  imagePullPolicy: IfNotPresent
# 第一个控制平面节点的主机名称；
  name: $(ip_to_hostname "${KUBE_CURRENT_NODE_IP}")
  taints:
    - effect: NoSchedule
      key: node-role.kubernetes.io/master
    - effect: NoSchedule
      key: node-role.kubernetes.io/control-plane
---
apiServer:
  timeoutForControlPlane: 4m0s
# 将下面配置中的certSANS列表中的值，修改为客户端接入API Server时可能会使用的各类目标地址；
  certSANs:
    - ${KUBE_API_DNS_DOMAIN}
apiVersion: kubeadm.k8s.io/v1beta3
# 控制平面的接入端点，我们这里选择适配到${KUBE_API_DNS_DOMAIN}这一域名上
controlPlaneEndpoint: "${KUBE_API_DNS_DOMAIN}:${KUBE_API_DNS_DOMAIN_PORT}"
certificatesDir: /etc/kubernetes/pki
clusterName: ${CLUSTER_NAME}
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: ${KUBE_ETCD_DATA_PATH}
imageRepository: ${KUBE_INIT_REPOSITORY}
kind: ClusterConfiguration
# 更改此处版本号与实际的版本一致
kubernetesVersion: ${KUBE_VERSION}
networking:
#  集群要使用的域名，默认为cluster.local
  dnsDomain: ${CLUSTER_DNS_DOMAIN}
# service网络地址
  serviceSubnet: ${KUBE_SERVICE_NET}
# pod网络的地址，flannel网络插件默认使用10.244.0.0/16
  podSubnet: ${KUBE_PODS_NET}
scheduler: {}
__EOF__
    # migrate the v1beta3 config to whatever schema the installed kubeadm expects
    kubeadm config migrate --old-config "${KUBEADM_CONFIGFILE}" --new-config "${KUBEADM_CONFIGFILE}"

    if [ "${KUBE_NET_PLUGIN_TYPE}" = "cilium" ];then
        # when network plugin is cilium,then skip kube-proxy
        sed -i '/InitConfiguration/a skipPhases:\n  - addon/kube-proxy'  "${KUBEADM_CONFIGFILE}"
    else
        # when network plugin is not cilium,then install kube-proxy,and turn mode to ipvs
        cat >> "$KUBEADM_CONFIGFILE" <<__EOF__
---
# 用于配置kube-proxy上为Service指定的代理模式，默认为iptables；
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
__EOF__
    fi

    echo "try to pull needed images..."
    if kubeadm config images list --config "${KUBEADM_CONFIGFILE}";then
        kubeadm config images pull --config "${KUBEADM_CONFIGFILE}"
    else
        color "get images fail." 1
        # NOTE(review): bare `exit` propagates the status of `color` (0);
        # consider `exit 1` here.
        exit
    fi

    color "images pull success,begining init cluster first master node" 0
    # `tee /dev/tty` keeps the live kubeadm output visible while grep captures
    # the two printed join commands: the master one spans 3 lines (includes
    # --control-plane --certificate-key), the worker one spans 2 lines.
    # These __TEMP/__MASTER/__WORKER vars are intentionally global.
    __TEMP_KUBE_JOIN_COMMAND_PRE="$(kubeadm init --config "${KUBEADM_CONFIGFILE}" --upload-certs | tee /dev/tty | grep -m 2 -A 2 "kubeadm join ${KUBE_API_DNS_DOMAIN}:${KUBE_API_DNS_DOMAIN_PORT}")"
    __MASTER_NODE_JOIN_COMMAND="$(echo "$__TEMP_KUBE_JOIN_COMMAND_PRE" | head -n 3)"
    __WORKER_NODE_JOIN_COMMAND="$(echo "$__TEMP_KUBE_JOIN_COMMAND_PRE" | tail -n 2)"
    # \cp bypasses a possible `cp -i` alias so the copy never prompts.
    \cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
    color "kubeadm init install success,then install ${KUBE_NET_PLUGIN_TYPE} CNI plugin..." 0
    k8s_install_network_plugin ${KUBE_NET_PLUGIN_TYPE}
    echo -e "\E[32;1m ######*****************k8s cluster ${CLUSTER_NAME} init complete**********************###### \E[0m"
    echo "for get network information:          kubectl get pods -n kube-flannel
                                                kubectl get pods -n kube-system
                                                cilium status"
    echo "for get nodes status:                 kubectl get nodes"
    echo
    echo "for other master nodes join cluster:  ${__MASTER_NODE_JOIN_COMMAND}"
    echo
    echo "for other worker nodes join cluster:  ${__WORKER_NODE_JOIN_COMMAND}"
}

# Entry point: root check, downloads, kernel prep, runtime install, then the
# optional kubeadm install / cluster init controlled by the flag vars.
main () {
    judge_current_user                 # must run as root
    package_download                   # fetch any missing release tarballs
    set_kernel_args                    # kernel modules + sysctl for k8s
    k8s_install_containerd_runtime     # containerd/runc plus optional tools
    judge_true_false "$KUBEADM_INSTALL_FLAG" k8s_install_kubeadm
    judge_true_false "$KUBE_INIT_FLAG" k8s_init_cluster_kubeadm
}

main
