#!/bin/bash
# ********************************************************************
# 试验环境：CentOS 7.x
# Date: 2025-05-06
# Description：部署 Kubernetes v1.22.0 高可用集群(3个Master 节点)
# author Gavin
# email: lushuan2071@126.com
# command: sh init_k8s_install_cluster_ha.sh|tee  init_k8s_install_cluster_ha.log
# ********************************************************************

# Abort unless running as root — every later step (yum, systemctl, sysctl,
# writes under /etc) requires root privileges.
if [ "$(id -u)" -ne 0 ]; then
    echo "错误：必须使用root权限执行此脚本！"
    exit 1
fi

# First master node address (could also be set to the VIP); here k8s-master1.
K8S_MASTER1_IP="192.168.143.201"
# haproxy listen endpoint fronting the three kube-apiservers. haproxy runs on
# the same three hosts as the masters, so it listens on 6444 to avoid clashing
# with the apiserver's own 6443.
CONTROL_PLANE_ENDPOINT="192.168.143.220:6444"
K8S_MASTER_NAME="k8s-master1"
# ingress-nginx on a private cloud: expose the controller on an externally
# reachable node IP / VIP instead of depending on a cloud LoadBalancer
# (avoids cost/complexity). Value is a YAML flow list injected via sed later.
INGRESS_EXTERNAL_IPS="['192.168.143.220']"

KUBE_VERSION="1.22.0"
#KUBE_VERSION="1.24.3"
#KUBE_VERSION="1.25.3"
#KUBE_VERSION="1.26.2"
#KUBE_VERSION="1.27.3"
#KUBE_VERSION="1.29.3"
#KUBE_VERSION="1.30.0"

# RPM release string used by yum (e.g. kubeadm-1.22.0-0).
KUBE_RELEASE="${KUBE_VERSION}-0"

# China mirror of k8s.gcr.io / registry.k8s.io used for all image pulls.
IMAGE_URL="registry.cn-hangzhou.aliyuncs.com/google_containers"

# Cluster node addresses. LOCAL_IP takes the first address from `hostname -I`
# — assumes the primary NIC is listed first; verify on multi-homed hosts.
LOCAL_IP=$(hostname -I | awk '{print $1}')
K8S_MASTER2_IP="192.168.143.202"
K8S_MASTER3_IP="192.168.143.203"

# Pod network CIDR (default is fine; must match the CNI manifest).
POD_NETWORK="10.244.0.0/16"

# Container runtime version.
DOCKER_VERSION=20.10.9

# Calico CNI plugin version.
CALICO_VERSION=v3.22
#CALICO_VERSION=v3.24.1
#CALICO_VERSION=v3.26.1
#CALICO_VERSION=v3.27.3

# Flannel CNI plugin version (alternative to Calico).
FLANNEL_VERSION=v0.20.1

# ingress-nginx controller version for external access.
INGRESS_NGINX_VERSION=v1.1.3

# Manual step: set each node's hostname — run ONE matching line per node.
# NOTE(review): calling this function as-is on a single host executes all
# three hostnamectl commands in sequence, leaving that host named k8s-master3;
# it is intentionally not wired into main().
function install_prepare() {
  # Run the line that matches the node you are on:
  hostnamectl set-hostname k8s-master1
  hostnamectl set-hostname k8s-master2
  hostnamectl set-hostname k8s-master3
}

# Verify the host runs CentOS and that the major release is 7; abort otherwise.
function check() {
  # Every CentOS install ships /etc/centos-release.
  if [ ! -f /etc/centos-release ]; then
      echo "错误：此脚本仅支持 CentOS 系统。"
      exit 1
  fi

  # Extract the numeric major version from the release string.
  local release_major
  release_major=$(grep -oP '(?<=release )\d+' /etc/centos-release)
  CENTOS_VERSION=$release_major

  # Only CentOS 7 is supported by this script.
  if [ "$release_major" != "7" ]; then
      echo "错误：此脚本仅支持 CentOS 7，当前版本是 CentOS $CENTOS_VERSION。"
      exit 1
  fi

  echo "检测通过：当前系统是 CentOS 7。"
}

# Download common utilities and perform other one-time initialisation.
function init() {
  echo -e "\n >>>>   下载常用组件,及其它初始化操作..."
  # BUGFIX: -p so reruns do not fail — the original plain mkdir returned
  # non-zero when the directory already existed, which skipped the cd.
  mkdir -p /root/kubernetes_install && cd /root/kubernetes_install
  mkdir -p /data/docker /data/etcd
  #yum clean all && yum makecache  && yum repolist
  yum install -y vim net-tools  bash-completion wget lrzsz yum-utils

# Host name resolution, all nodes.
# NOTE: appends unconditionally — rerunning the script duplicates these lines.
cat >> /etc/hosts <<EOF
$K8S_MASTER1_IP k8s-master1
$K8S_MASTER2_IP k8s-master2
$K8S_MASTER3_IP k8s-master3
EOF

}

# Disable SELinux (the permanent change takes effect after a reboot).
function close_SELinux() {
  echo -e "\n >>>>   关闭SELinux..."
  # Permanent: flip the mode in the config read at boot.
  sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
  # Immediate: drop to permissive for the current boot; stderr silenced
  # because setenforce fails when SELinux is already disabled.
  setenforce 0 2>/dev/null
}

# Stop firewalld now and keep it disabled across reboots (kubeadm preflight
# expects the required ports to be reachable between nodes).
function close_firewalld() {
  echo -e "\n >>>>   关闭防火墙..."
  systemctl stop firewalld
  systemctl disable firewalld
}

# Disable the swap partition (kubelet refuses to run with swap enabled
# under its default configuration).
function close_swap() {
    echo -e "\n >>>>   关闭swap 交换分区..."
    # Immediate: turn off all swap for the current boot.
    swapoff -a
    # Permanent: comment out every fstab line mentioning swap.
    sed  -i '/swap/ s/^\(.*\)$/#\1/g' /etc/fstab
    # Verify: no output means swap is fully disabled.
    swapon -s # no output indicates swap was disabled successfully
}


# Kernel tuning for Kubernetes nodes: writes two sysctl drop-ins, applies
# them, and loads the kernel modules container networking depends on.
function kernel_optimizations() {
    echo -e "\n >>>>   内核优化...."
  # overlay: overlayfs used by the container storage driver;
  # br_netfilter: makes bridged traffic visible to iptables (needed for the
  # bridge-nf-call settings written below).
  modprobe overlay
  modprobe br_netfilter
cat > /etc/sysctl.d/kubernetes.conf <<EOF
# 允许桥接流量通过 iptables（K8s CNI 需要）
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
# 允许 IP 转发（K8s 网络插件如 Calico/Flannel 需要）
net.ipv4.ip_forward=1
# 调整虚拟内存管理（减少 swap 使用，K8s 推荐禁用 swap）
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
# 优化文件描述符限制
fs.file-max = 2097152
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 89100
EOF

sysctl -p /etc/sysctl.d/kubernetes.conf

# General network/VM tuning drop-in.
# NOTE(review): fs.file-max is set three times across the two files
# (2097152 above, 131072 and 4194304 below) — the last value applied wins;
# consider consolidating.
cat > /etc/sysctl.d/99-optimize.conf << 'EOF'
# 内存相关
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216

# 文件描述符
fs.file-max = 131072

# TCP优化
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
net.ipv4.tcp_no_metrics_save = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.tcp_max_tw_buckets = 2000000

# 网络队列
net.core.netdev_max_backlog = 3072

# 端口范围
net.ipv4.ip_local_port_range = 1024 65535

# 重试次数
net.ipv4.tcp_retries1 = 2
net.ipv4.tcp_retries2 = 5
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2

# 安全设置
net.ipv4.tcp_syncookies = 1
kernel.panic = 10

#防止防火墙iptables报错table full, dropping packet，提升
net.netfilter.nf_conntrack_max = 6553500
#net.netfilter.nf_conntrack_buckets = 100000
net.netfilter.nf_conntrack_tcp_timeout_established = 600

fs.file-max = 4194304
fs.nr_open = 4194304

#LVS arp real server 设置
net.ipv4.ip_forward = 1
#net.ipv4.conf.lo.arp_ignore = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2

#修改swap内存使用条件为0，尽可能减少系统对swap使用
vm.swappiness = 0

# 避免 TIME_WAIT 过多导致端口耗尽
#net.ipv4.tcp_tw_recycle = 0  # 在 NAT 环境下可能导致问题，建议关闭
net.ipv4.tcp_max_orphans = 16384

# 优化 ARP 缓存
net.ipv4.neigh.default.gc_thresh1 = 1024
net.ipv4.neigh.default.gc_thresh2 = 2048
net.ipv4.neigh.default.gc_thresh3 = 4096

EOF

# Apply the tuning drop-in.
sysctl -p /etc/sysctl.d/99-optimize.conf

# vxlan kernel module (used by flannel/calico vxlan backends).
# Load immediately (if not already loaded).
sudo modprobe vxlan
# Persist across reboots via /etc/modules-load.d/.
echo "vxlan" | sudo tee /etc/modules-load.d/vxlan.conf


}

# Install and configure IPVS on every master and node (needed for
# kube-proxy's ipvs mode, enabled later in kube-config.yaml).
function ipvs_set() {
  echo -e "\n >>>>   所有 master 节点和 node 节点安装配置IPVS..."
  yum install  -y ipvsadm ipset sysstat conntrack libseccomp
# Generate the module-load script.
# BUGFIX: the heredoc delimiter is now quoted ('EOF') so nothing expands at
# write time. The original escaped \$ipvs_modules but left $? unescaped, so
# the outer shell expanded $? immediately and the generated file contained a
# constant (e.g. `if [ 0 -eq 0 ]`), defeating the modinfo existence check.
cat > /etc/sysconfig/modules/ipvs.modules <<'EOF'
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack ip_tables ip_set xt_set ipt_set ipt_rpfilter ipt_REJECT ipip "
for kernel_module in ${ipvs_modules}; do
  if /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1; then
    /sbin/modprobe ${kernel_module}
  fi
done
EOF

  # Make executable and run once now.
  chmod 755 /etc/sysconfig/modules/ipvs.modules
  sh /etc/sysconfig/modules/ipvs.modules
  # Verify the ip_vs modules are loaded.
  lsmod | grep ip_vs
}

# Install the HA proxy layer (run on ALL master nodes; manual execution
# recommended). NOTE: only haproxy is installed here — keepalived, which
# owns the VIP 192.168.143.220 used in CONTROL_PLANE_ENDPOINT, must still
# be installed and configured separately.
function install_haproxy_keepalived() {
  echo -e "\n >>>>   安装高可用代理 haproxy && keepalived..."
  sudo yum install -y haproxy
  # Keep a backup of the stock configuration before overwriting it.
  cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak

# BUGFIX: the frontend now listens on 6444 (matching CONTROL_PLANE_ENDPOINT)
# — the apiservers already occupy 6443 on these same hosts, so the original
# `bind *:6443` would conflict. The original extra line
# `bind 192.168.143.300:6443` was also removed: .300 is not a valid IPv4
# address and haproxy would fail to start on it.
cat > /etc/haproxy/haproxy.cfg << 'EOF'
global
  log /dev/log local0
  maxconn 5000
  user haproxy
  group haproxy
  daemon

defaults
  log global
  mode tcp
  timeout connect 5s
  timeout client 30s
  timeout server 30s

frontend k8s-apiserver
  bind *:6444
  mode tcp
  default_backend k8s-masters

backend k8s-masters
  mode tcp
  balance roundrobin  # round-robin load balancing
  option tcp-check
  server k8s-master1 192.168.143.201:6443 check
  server k8s-master2 192.168.143.202:6443 check
  server k8s-master3 192.168.143.203:6443 check
EOF

  # BUGFIX: the original wrote the config but never started the service.
  systemctl enable haproxy && systemctl restart haproxy
}

# cri-dockerd is only required from Kubernetes v1.24 onward (when the
# in-tree dockershim was removed); optional for the v1.22 used here.
function install_cri_dockerd() {
  echo -e "\n >>>> Kubernetes-v1.24之前版本无需安装cri-dockerd (可选)..."
  # Fetch cri-dockerd v0.2.6 built for el7.
  wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.6/cri-dockerd-0.2.6-3.el7.x86_64.rpm

  # Install the RPM package.
  sudo rpm -ivh cri-dockerd-0.2.6-3.el7.x86_64.rpm

  sudo systemctl enable cri-docker
  sudo systemctl start cri-docker
  # Should report `active (running)`.
  systemctl status cri-docker

  # When cri-dockerd is in use, kubeadm init must point at its socket:
  #sudo kubeadm init \
  #--pod-network-cidr=10.244.0.0/16 \
  #--cri-socket=unix:///var/run/cri-dockerd.sock
}

# Install and configure Docker on every master and node.
function install_docker() {
  echo -e "\n >>>> 所有 master 节点和 node 节点安装配置Docker..."
  # Aliyun docker-ce repo (more complete mirror for mainland China).
  yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  yum repolist
  # Install the pinned version.
  sudo yum install docker-ce-$DOCKER_VERSION docker-ce-cli-$DOCKER_VERSION containerd.io -y

  mkdir -p /data/docker

# Enable and start the service.
systemctl enable docker && systemctl start docker && systemctl status docker

# Daemon config: systemd cgroup driver (must match kubelet), registry
# mirrors, data root on /data/docker, json-file log rotation, and one
# insecure private registry.
# NOTE(review): "bip" 223.223.0.1/16 and default-address-pools base
# 221.80.0.0/16 are PUBLIC (non-RFC1918) ranges — confirm these never
# collide with real destinations reachable from these hosts.
cat > /etc/docker/daemon.json << END
{
  "exec-opts": [
    "native.cgroupdriver=systemd"
  ],
  "bip": "223.223.0.1/16",
  "registry-mirrors": [
    "https://fz5yth0r.mirror.aliyuncs.com",
    "https://docker.registry.cyou",
    "https://docker-cf.registry.cyou",
    "https://dockercf.jsdelivr.fyi",
    "https://docker.jsdelivr.fyi",
    "https://dockertest.jsdelivr.fyi",
    "https://mirror.aliyuncs.com",
    "https://dockerproxy.com",
    "https://mirror.baidubce.com",
    "https://docker.m.daocloud.io",
    "https://docker.nju.edu.cn",
    "https://docker.mirrors.sjtug.sjtu.edu.cn",
    "https://docker.mirrors.ustc.edu.cn",
    "https://mirror.iscas.ac.cn",
    "https://dockerhub.icu",
    "https://docker.rainbond.cc"
  ],
  "data-root": "/data/docker",
  "default-address-pools": [
    {
      "base": "221.80.0.0/16",
      "size": 24
    }
  ],
  "storage-driver": "overlay2",
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  },
  "insecure-registries": [
    "192.168.143.102:8000"
  ]
}
END

# Restart so the new daemon.json takes effect.
systemctl restart docker && systemctl status docker

docker -v
docker info|grep Cgroup
}

# Install kubeadm/kubelet/kubectl on all master and node machines.
# Masters need all three; workers need kubelet (kubeadm recommended for
# maintenance, kubectl optional).
function install_kubeadm() {
  echo -e "\n >>>> 1. Master 节点：必须安装 kubeadm、kubelet、kubectl。 2. Worker 节点：必须安装 kubelet，建议安装 kubeadm（方便维护），kubectl 可选..."
# Aliyun mirror of the Kubernetes yum repo (gpg checks intentionally off).
cat  > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

  # BUGFIX: -y added — a bare `yum update` prompts for confirmation and
  # hangs this non-interactive script.
  yum -y update

  sleep 6
  # List the available kubeadm versions (goes to the log for reference).
  yum list kubeadm --showduplicates | sort -r

  # Install the pinned release (KUBE_RELEASE, e.g. 1.22.0-0).
  yum install -y kubeadm-$KUBE_RELEASE kubelet-$KUBE_RELEASE kubectl-$KUBE_RELEASE --disableexcludes=kubernetes

  # worker-node-only install example
  #yum install -y kubeadm-1.22.0 kubelet-1.22.0

  # kubelet's cgroup driver defaults to systemd; docker was configured with
  # native.cgroupdriver=systemd, so nothing to change here.
  #$ sed -i "s/cgroup-driver=systemd/cgroup-driver=cgroupfs/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

  # Enable kubelet on every node; it cannot fully start until kubeadm
  # init/join has run, so a failed start here is expected.
  systemctl enable kubelet && systemctl start kubelet && systemctl status kubelet

  # kubectl bash completion: activate now and persist globally.
  source /etc/profile.d/bash_completion.sh
  kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null
  source ~/.bashrc

  # Sanity checks.
  kubeadm version
  kubelet --version
  kubectl version --short  # masters or any node used for debugging

  # kubelet extra args: stick with the docker runtime (pre-1.24 dockershim).
cat <<EOF | sudo tee /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--container-runtime=docker --runtime-request-timeout=15m"
EOF

  # Restart kubelet so the extra args are picked up.
  sudo systemctl daemon-reload
  sudo systemctl restart kubelet
  sudo systemctl status kubelet
}

# Pre-pull the Kubernetes component images (run on master nodes).
function pre_pull_k8s_images() {
  echo -e "\n >>>> 准备安装kubernetes 等相关镜像,master 节点执行..."
  # List the images kubeadm needs for this version.
  kubeadm config images list --kubernetes-version=$KUBE_VERSION
  # Pull ahead of time through a China mirror.
  #kubeadm config images pull --kubernetes-version=$KUBE_VERSION --image-repository registry.aliyuncs.com/google_containers
  # or
  kubeadm config images pull --kubernetes-version=$KUBE_VERSION --image-repository $IMAGE_URL
  # Show the pulled images (ctr lists the k8s.io namespace on containerd
  # setups; this script uses Docker, so `docker images` is the relevant one).
  ctr -n=k8s.io i ls
  docker images
}

# Run ONLY on the first master node (manual execution recommended).
function kubeadm_init() {
  echo -e "\n >>>> 只在master 第一个节点上执行,建议手动安装..."
  pre_pull_k8s_images
  # Generate the default kubeadm init configuration, then patch it in place.
  kubeadm config print init-defaults > kube-config.yaml
  sed -i "s/advertiseAddress: 1.2.3.4/advertiseAddress: $LOCAL_IP/g" kube-config.yaml
  # BUGFIX: -i was missing, so controlPlaneEndpoint never reached the file
  # (the sed output only went to stdout).
  sed -i "/kind: ClusterConfiguration/a \controlPlaneEndpoint: $CONTROL_PLANE_ENDPOINT" kube-config.yaml
  sed -i "s/name: node/name: $K8S_MASTER_NAME/g" kube-config.yaml
  sed -ri "s#imageRepository: k8s.gcr.io#imageRepository: $IMAGE_URL#g" kube-config.yaml
  sed -ri "s#imageRepository: registry.k8s.io#imageRepository: $IMAGE_URL#g" kube-config.yaml

  # Append the pod CIDR right below serviceSubnet.
  sed -i  "/serviceSubnet/a\  podSubnet: $POD_NETWORK" kube-config.yaml

  # kubernetesVersion defaults to v1.22.0; adjust here if needed.
  #sed -i  "s/kubernetesVersion: 1.22.0/kubernetesVersion: 1.22.1/g" kube-config.yaml

# Append kube-proxy IPVS mode and the kubelet cgroup driver.
cat << EOF >> kube-config.yaml
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
EOF

  cat kube-config.yaml
  # Initialise the cluster from the config file. --upload-certs shares the
  # control-plane certificates between master instances (joins fail without it).
  # BUGFIX: --control-plane-endpoint is no longer passed on the CLI —
  # kubeadm refuses to mix --config with that flag, and the endpoint is now
  # written into kube-config.yaml above.
  kubeadm init --config  kube-config.yaml --upload-certs|tee  kubeadm_init.log

  # After a successful init, set up kubectl access for this user.
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
  export KUBECONFIG=/etc/kubernetes/admin.conf
  # Nodes stay NotReady until a CNI plugin (Calico/Flannel) is installed.
  kubectl get node
}

# Install the Flannel CNI plugin (choose either Flannel or Calico, not both).
function flannel_install() {
  echo -e "\n >>>>  安装 cni flannel 网络插件..."
  cd /root/kubernetes_install
  # Fetch the pinned Flannel deployment manifest.
  wget https://raw.githubusercontent.com/flannel-io/flannel/$FLANNEL_VERSION/Documentation/kube-flannel.yml
  cp kube-flannel.yml kube-flannel.yml.bak
  # Patch net-conf.json: add a trailing comma after the vxlan Type entry so
  # the DirectRouting key inserted on the next line yields valid JSON.
  sed -i 's|"Type": "vxlan"|"Type": "vxlan",|g' kube-flannel.yml
  # DirectRouting: nodes on the same subnet use host-gw (fast path),
  # others fall back to vxlan.
  sed -i '/Type/a \        "DirectRouting": true' kube-flannel.yml

  # Show the images the manifest pulls.
  grep image: kube-flannel.yml|grep -v apply|uniq
  #image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
  #image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.1

  # Apply on a master where kubectl works.
  kubectl apply -f kube-flannel.yml
  sleep 5
  # Check rollout status.
  kubectl get pods -n kube-system -l app=flannel
  # Inspect the flannel routes.
  ip route | grep flannel

}


# Install the Calico CNI plugin (run on the first master; Calico XOR Flannel).
function calico_install() {
  echo -e "\n >>>>  安装calico v3.22 网络插件,在第一个master 节点配置"
  cd /root/kubernetes_install
  curl https://projectcalico.docs.tigera.io/archive/$CALICO_VERSION/manifests/calico.yaml -O
  # Pin node-IP autodetection to the expected interface names.
  sed  -i '/value: "Always"/a \            - name: IP_AUTODETECTION_METHOD\n              value: "interface=eth0,ens*,bond0"' calico.yaml
  # BUGFIX: namespace typo fixed — was "kubs-system", which made this
  # pre-check always fail.
  kubectl -n kube-system get pod
  # Pre-pull the Calico images referenced by the manifest.
  cat calico.yaml |grep image:|awk '{print "docker pull " $2}'|uniq|sh -

  kubectl apply -f calico.yaml
  sleep 5
  kubectl get nodes
  kubectl get pods -n kube-system
  # Inspect the Calico IP pool.
  kubectl get ippool -o yaml
  # Restart the Calico DaemonSet if needed:
  # kubectl rollout restart daemonset calico-node -n kube-system

  # Confirm the host loaded the kernel modules Calico depends on.
  lsmod|egrep "ip_set|xt_set"
  # Check the node IP in the calico-node daemonset parameters.
  kubectl describe ds calico-node -n kube-system | grep -A 5 'IP:'
  # Show the calico-node image version currently running.
  kubectl get pods -n kube-system -l k8s-app=calico-node -o jsonpath='{.items[0].spec.containers[0].image}'
}

# Install calicoctl (optional) for direct inspection of Calico routing state.
function calicoctl_install() {
  # calicoctl must match the cluster's Calico version.
  echo -e "\n >>>>  安装 calicoctl(可选)，方便对 calico 进行配置管理 ..."
  # NOTE(review): v3.22.5 is hardcoded — keep in sync with CALICO_VERSION
  # (currently v3.22) when bumping versions.
  wget https://github.com/projectcalico/calico/releases/download/v3.22.5/calicoctl-linux-amd64
  mv calicoctl-linux-amd64 /usr/local/bin/calicoctl
  chmod +x /usr/local/bin/calicoctl

  # BUGFIX: -p so reruns do not fail when ~/.calico already exists.
  mkdir -p ~/.calico

# NOTE(review): the "~" inside the kubeconfig value below is written
# literally — confirm calicoctl expands it, or use an absolute path.
cat > ~/.calico/calicoctl.cfg << EOF
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "kubernetes"
  kubeconfig: "~/.kube/config"  # 替换为实际路径
EOF

  # Sanity checks.
  calicoctl version
  # List calico nodes.
  calicoctl get node
  # Show calico routing / IP pool information.
  calicoctl get ippool -o yaml
  # or
  kubectl get ippools default-ipv4-ippool -o yaml

}

# Manually mirror the ingress-nginx images into a private registry (optional).
function pre_pull_ingress_images() {
  echo -e "\n >>>>   手动拉取镜像，并推送到自己的镜像仓库下(可选)..."
  local mirror_repo="registry.cn-hangzhou.aliyuncs.com/google_containers"
  local private_repo="your-registry.com/ingress-nginx"

  # Pull from the Aliyun mirror.
  docker pull $mirror_repo/ingress-nginx-controller:v1.8.0
  docker pull $mirror_repo/kube-webhook-certgen:v20230407

  # Re-tag for the private registry.
  docker tag $mirror_repo/ingress-nginx-controller:v1.8.0 $private_repo/controller:v1.8.0
  docker tag $mirror_repo/kube-webhook-certgen:v20230407 $private_repo/kube-webhook-certgen:v20230407

  # Push into the private registry.
  docker push $private_repo/controller:v1.8.0
  docker push $private_repo/kube-webhook-certgen:v20230407

  # Point the deployment manifest at the private registry.
  sed -i 's|registry.k8s.io/ingress-nginx|your-registry.com/ingress-nginx|g' ingress-nginx.yaml
}

# Deploy the ingress-nginx controller using mainland-China image mirrors.
function ingress_nginx_install() {
  echo -e "\n >>>>   部署ingress-Nginx 控制器，使用国内镜像代理..."
  cd /root/kubernetes_install
  wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-$INGRESS_NGINX_VERSION/deploy/static/provider/cloud/deploy.yaml
  # deploy.yaml targets public clouds; ingress-nginx.yaml is the private-cloud
  # copy that receives the customisations below.
  cp deploy.yaml ingress-nginx.yaml
  # Show the upstream images.
  grep image: deploy.yaml|uniq

  # Optionally mirror the images into a private registry first.
  #pre_pull_ingress_images

  # Option 1 (Aliyun mirror): swap the controller image.
  sed -i "s|k8s.gcr.io/ingress-nginx/controller:$INGRESS_NGINX_VERSION@sha256:31f47c1e202b39fadecf822a9b76370bd4baed199a005b3e7d4d1455f4fd3fe2|registry.cn-hangzhou.aliyuncs.com/chenby/controller:$INGRESS_NGINX_VERSION|g" ingress-nginx.yaml

  # Swap the webhook certgen image (appears twice, same reference).
  sed -i "s|k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660|registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.1.1|g" ingress-nginx.yaml

  grep image: ingress-nginx.yaml|uniq

  # Option 2 (USTC mirror), kept for reference:
  #sed -i 's|registry.k8s.io/ingress-nginx/controller|quay.mirrors.ustc.edu.cn/ingress-nginx/controller|g' ingress-nginx.yaml
  #sed -i 's|registry.k8s.io/ingress-nginx/kube-webhook-certgen|quay.mirrors.ustc.edu.cn/ingress-nginx/kube-webhook-certgen|g' ingress-nginx.yaml

  # Private-cloud adjustments (skip on public clouds):
  # Cluster routes external traffic to pods on any node; Local only to pods
  # on the receiving node.
  sed -i 's|externalTrafficPolicy: Local|externalTrafficPolicy: Cluster|g' ingress-nginx.yaml
  # Bind the externally reachable IP(s) to the Service so outside traffic
  # reaches the controller without a cloud LoadBalancer or MetalLB.
  sed -i "/externalTrafficPolicy/a\  externalIPs: $INGRESS_EXTERNAL_IPS" ingress-nginx.yaml
  # Review the result (duplicate pattern in the original egrep removed).
  egrep 'image:|externalTrafficPolicy|externalIPs' ingress-nginx.yaml

  # Deploy.
  kubectl apply -f ingress-nginx.yaml
  sleep 5
  # BUGFIX: the original used `kubectl get pods ... -w`, which watches
  # forever and blocks the rest of the script; take a one-shot snapshot.
  kubectl get pods -n ingress-nginx
  # Confirm the image substitutions took effect.
  kubectl get pods -n ingress-nginx -o jsonpath='{.items[*].spec.containers[*].image}' | tr ' ' '\n'

}

# Join additional master nodes to the cluster (manual procedure).
function master_join() {
  pre_pull_k8s_images
  # On the FIRST master: print a fresh join command (token + CA cert hash).
  kubeadm token create --print-join-command
  # On the FIRST master: re-upload control-plane certs and print the
  # certificate key.
  kubeadm init  phase upload-certs --upload-certs
  # Example for the NEW master — do NOT run as-is. BUGFIX: the original left
  # this uncommented, so calling the function executed a join with a stale,
  # node-specific token/hash/key. Combine the two outputs printed above.
  # kubeadm join 192.168.143.220:6444 --token 6hed03.twyzg4nm2qfvo0jq --discovery-token-ca-cert-hash sha256:2c70bdc2dfe8ae0c75730474ac6207407b91690db49ccc55a633dab97667a26f  --control-plane --certificate-key c09bac786d4f1f8bb1289b4308e6fd8a1985e36c3ce2c9b4860973f7ca147472

}

# Join worker nodes to the cluster (manual procedure).
function node_join() {
  # Run on the new worker the join command printed on a master by:
  #   kubeadm token create --print-join-command
  # BUGFIX: the original executed a hardcoded example join with a stale token.
  # NOTE: the original example also targeted 192.168.143.220:6443, bypassing
  # the haproxy HA endpoint on port 6444 (CONTROL_PLANE_ENDPOINT).
  echo "Run the 'kubeadm join' command printed by 'kubeadm token create --print-join-command' on a master node."
  # Example — do NOT run as-is (stale token/hash):
  # kubeadm join 192.168.143.220:6444 --token abcdef.0123456789abcdef \
  #   --discovery-token-ca-cert-hash sha256:dddb841d885dd66e295e87f263b0637b0b3e4f34260eed3ec828e6bdc6d57379
}

# Tear down an existing cluster on this node and re-initialise (optional).
function kubernetes_reset() {
    # 1. Force reset: removes /etc/kubernetes/ configs and cleans kubelet and
    #    iptables state; does NOT remove runtime containers or images.
    sudo kubeadm reset --force
    # 2. Remove leftover files (optional).
    sudo rm -rf /etc/kubernetes/ /var/lib/kubelet/ /var/lib/etcd/ $HOME/.kube/config /etc/cni/net.d/
    # Remove CNI configs (Calico/Flannel). NOTE(review): /etc/cni/net.d/ was
    # already deleted on the previous line — redundant but harmless.
    sudo rm -rf /etc/cni/net.d/
    # 3. Restart kubelet.
    sudo systemctl restart kubelet
    sudo systemctl status kubelet  # check status
    # 4. Re-initialise the master.
    # BUGFIX: --control-plane-endpoint dropped — kubeadm refuses to mix
    # --config with that flag; the endpoint lives in kube-config.yaml
    # (written by kubeadm_init).
    kubeadm init --config  kube-config.yaml --upload-certs|tee  kubeadm_init.log
}

# Smoke-test the cluster: taint removal, TLS secrets, and an nginx
# Deployment/Service/Ingress reachable via two HTTPS hostnames.
function test_deploy() {
  kubectl describe node k8s-master2 | grep Taints

  # Make the master nodes schedulable — for testing only.
  kubectl taint nodes k8s-master2 node-role.kubernetes.io/master:NoSchedule-
  kubectl taint nodes k8s-master3 node-role.kubernetes.io/master:NoSchedule-

  # TLS material for the Ingress hosts:
  # generate self-signed certificates for both domains.
  openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout mynginx-tls.key -out mynginx-tls.crt -subj "/C=CN/ST=BJ/L=BJ/O=nginx/CN=www.lushuan.org"
  openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout mynginx2-tls.key -out mynginx2-tls.crt -subj "/C=CN/ST=BJ/L=BJ/O=nginx/CN=www.lushuan.com"

  # Create the TLS secrets referenced by the Ingress below.
  kubectl create secret tls mynginx-tls-secret --key mynginx-tls.key --cert mynginx-tls.crt
  kubectl create secret tls mynginx2-tls-secret --key mynginx2-tls.key --cert mynginx2-tls.crt
  # List secrets.
  kubectl get secret

# Run on all three master nodes: resolve the test hosts to the VIP.
cat >> /etc/hosts <<EOF
192.168.143.220 www.lushuan.org www.lushuan.com
EOF

# Show certificate expiry dates.
echo | openssl s_client -showcerts -connect www.lushuan.org:443 -servername api 2>/dev/null | openssl x509 -noout -enddate
echo | openssl s_client -showcerts -connect www.lushuan.com:443 -servername api 2>/dev/null | openssl x509 -noout -enddate


cat > nginx-deploy.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.25.4  # nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
        resources:   # 必须配置资源限制
          limits:    # 限制最高使用cpu
            cpu: 300m
          requests:  # 限制请求cpu
            cpu: 100m
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: NodePort
  #type: ClusterIP
  #type: LoadBalancer
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-ingress
  annotations:
    # 使用 Nginx Ingress 控制器
    kubernetes.io/ingress.class: "nginx"
    # 启用 HTTPS 重定向（HTTP -> HTTPS）
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    # 自定义 Nginx 配置（示例：增加客户端 body 大小限制）
    nginx.ingress.kubernetes.io/proxy-body-size: "20m"
    nginx.ingress.kubernetes.io/use-regex: "true" ##指定后面rules定义的path可以使用正则表达式
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "600" ##连接超时时间,默认为5s
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600" ##后端服务器回转数据超时时间,默认为60s
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600" ##后端服务器响应超时时间,默认为60s
    nginx.ingress.kubernetes.io/rewrite-target: /   ##URL重写
    nginx.ingress.kubernetes.io/app-root: /index.html
spec:
  tls:
  - hosts:
    - www.lushuan.org
    secretName: mynginx-tls-secret  # 证书 Secret（由 Cert-Manager 自动创建）
    #namespace: ingress-nginx  # 指定 Secret 所在的命名空间
  - hosts:
    - www.lushuan.com
    secretName: mynginx2-tls-secret  # 证书 Secret（由 Cert-Manager 自动创建）
    #namespace: ingress-nginx  # 指定 Secret 所在的命名空间
  rules:
    - host: www.lushuan.org
      http:
        paths:
        - path: /
          pathType: Prefix
          backend:
            service:
              name: nginx-service
              port:
                number: 80
        - path: /api
          pathType: Prefix
          backend:
            service:
              name: nginx2-service
              port:
                number: 80
    - host: www.lushuan.com  # 多域名访问
      http:
        paths:
        - path: /
          pathType: Prefix
          backend:
            service:
              name: nginx-service
              port:
                number: 80
        - path: /api
          pathType: Prefix
          backend:
            service:
              name: nginx2-service
              port:
                number: 80
EOF

  kubectl apply -f nginx-deploy.yaml
  kubectl get pod,svc
  # Test access — NOTE(review): "port" here is a literal placeholder;
  # substitute the NodePort assigned to nginx-service.
  curl 192.168.143.201:port
  # Windows clients: edit C:\Windows\System32\Drivers\etc\hosts as administrator.
  # TODO: how would blue-green / canary releases be done through ingress?
}


# Timer wrapper: run "$@" and report wall-clock start/end time and duration.
# BUGFIX: the original piped fractional timestamps through `bc`, which this
# script never installs; whole-second bash arithmetic removes the dependency.
# Also quotes all expansions and propagates the wrapped command's exit status.
run_with_timer() {
    local function_name=$1
    local start_time end_time elapsed_time status
    start_time=$(date +%s)

    echo "Starting function: $function_name at $(date -d "@$start_time")"

    # Execute the wrapped function/command with its arguments.
    "$@"
    status=$?

    end_time=$(date +%s)
    elapsed_time=$(( end_time - start_time ))

    echo "Function $function_name finished at $(date -d "@$end_time")"
    echo "Execution time: ${elapsed_time} seconds"
    return "$status"
}

# Orchestrate the full deployment. Steps that must run manually or are
# optional stay commented out; enable them deliberately, per node role.
function main() {
  echo -e ">>>> 部署 Kubernetes v1.22.0 高可用集群(3个Master 节点) ...."
  # OS version check.
  run_with_timer check

  # Common packages and one-time init.
  run_with_timer init

  # install docker v20.10 (all master && nodes)
  run_with_timer install_docker

  # close selinux  (all master && nodes)
  run_with_timer close_SELinux

  # close firewalld (all master && nodes)
  run_with_timer close_firewalld

  # close swap (all master && nodes)
  run_with_timer close_swap

  # Kernel parameter tuning (all master && nodes).
  run_with_timer kernel_optimizations

  # Install and configure IPVS (all master && nodes).
  run_with_timer ipvs_set

  # Install kubeadm/kubelet/kubectl (all master nodes).
  run_with_timer install_kubeadm

  # Pre-pull the k8s component images (all master nodes).
  run_with_timer pre_pull_k8s_images

  # kubeadm init — manual execution recommended (the first master only).
  #kubeadm_init

  # install calico(the first master, cni calico 和 flannel 二选一)
  #run_with_timer calico_install

  # install flannel(the first master, cni calico 和 flannel 二选一)
  #run_with_timer flannel_install


  # Optional steps below — enable as needed.
  # Deploy the ingress-nginx controller via China mirrors (optional).
  # run_with_timer ingress_nginx_install

  # Install the HA proxy layer (all masters; manual execution recommended).
  # run_with_timer install_haproxy_keepalived

  # cri-dockerd — not needed before Kubernetes v1.24 (optional).
  # run_with_timer install_cri_dockerd

  # Install calicoctl (optional) for inspecting calico routes.
  #run_with_timer calicoctl_install

  # Tear down an old cluster (optional).
  #run_with_timer kubernetes_reset

  # Master join — manual procedure.
  #master_join

  # Node join — manual procedure.
  #node_join

  # Test the cluster (the first master).
  #test_deploy

}

# Entry point — run the whole pipeline, timed as a single unit.
run_with_timer main
