#!/bin/bash
# *************************************
# 功能: 网络解决方案的功能函数库
# 作者: 王树森
# 联系: wangshusen@sswang.com
# 版本: v0.1
# 日期: 2024-04-22
# *************************************

# User-interaction flow when deploying a network solution on demand.
# Arguments: $1 - desired network type (e.g. flannel|calico|cilium)
# Reads the currently deployed type, and either skips, installs
# directly, or asks the user before replacing the existing solution.
k8s_cluster_network_deploy_user_interact(){
  # Receive the parameter
  local network_type="$1"

  # Detect which network solution is currently deployed
  local current_network_type=$(k8s_cluster_get_network_type)
  
  # Decide how to proceed based on the current deployment state
  if [ "${current_network_type}" == "${network_type}" ]; then
    print::msg "all" "warning" "${current_network_type}网络解决方案已部署，无需重复部署!!!"
  elif [ "${current_network_type}" == "none" ]; then
    # Nothing deployed yet - install the requested solution directly
    k8s_network_install "${network_type}"
  else
    print::msg "console" "warning" "k8s集群当前的网络解决方案是: ${current_network_type}"
    local user_define=$(user_define_yes_no_interact "是否要部署${network_type}网络解决方案" "no")
    if [ "${user_define}" == "yes" ]; then
      # Remove the currently deployed solution first
      k8s_cluster_network_clean "${current_network_type}"
      # Remote-host liveness check - judged unnecessary, kept for reference
      #  while true; do
      #   local remote_status=$(remote_host_exist_check "${master1}")
      #   [ "${remote_status}" == "is_exist"  ] && break
      #   print::msg "console" "warning" "集群节点正在重启，等待k8s控制节点运行，请稍后..."
      #   sleep 3
      # done
      # Wait until the k8s cluster itself reports running again
      while true; do
        local k8s_cluster_status=$(k8s_cluster_exist_check)
        [ "${k8s_cluster_status}" == "is_running"  ] && break
        print::msg "console" "warning" "集群节点重启成功，等待k8s服务正常运行，请稍后..."
        sleep 5
      done
      print::msg "console" "warning" "k8s服务运行成功，开始部署${network_type}网络解决方案"
      # Install the requested solution
      k8s_network_install "${network_type}"
    else
      print::msg "all" "warning" "继续使用${current_network_type}网络解决方案, 暂时不改变!!!"
    fi
  fi
}


# Check whether a downloaded file is usable (exists and is non-empty).
# Arguments: $1 - path to the downloaded file
# Outputs:   "normal" when the file exists with size > 0, else "abnormal"
k8s_cluster_network_yaml_wget_check(){
  # Receive the parameter
  local path_to_file="$1"
  local file_status

  # [ -s ] is true only for an existing, non-empty file. This replaces
  # the fragile `ls -l | awk` parsing, which left the size variable
  # empty when the file was missing (breaking the numeric test) and
  # leaked `file_size` as a global.
  if [ -s "${path_to_file}" ]; then
    file_status="normal"
  else
    file_status="abnormal"
  fi
  echo "${file_status}"
}

# Fetch a network yaml manifest when no local copy exists yet.
# Arguments: $1 - local path for the file
#            $2 - download url
k8s_cluster_network_yaml_get(){
  # Receive the parameters
  local path_to_file="$1"
  local url_to_file="$2"
  local file_name
  file_name=$(basename "${path_to_file}")

  # Skip the download when the file is already cached locally
  if [ -f "${path_to_file}" ]; then
    print::msg "all" "warning" "${file_name} 已存在，无需重复获取!!!"
  else
    # Retry the download up to 3 times, pausing between attempts
    local wget_num=0
    local file_status="abnormal"
    while [ "${wget_num}" -lt 3 ]; do
      # Download to a "-bak" name so a partial file never clobbers
      # the real target
      wget --timeout=3 --waitretry=2 --tries=3 "${url_to_file}" -O "${path_to_file}-bak"
      # Verify the download produced a non-empty file
      file_status=$(k8s_cluster_network_yaml_wget_check "${path_to_file}-bak")
      [ "${file_status}" == "normal" ] && break
      sleep 3
      wget_num=$((wget_num + 1))
    done
    # Bug fix: only promote the file and report success when the
    # download actually worked; the original copied the (possibly
    # empty/missing) -bak file and claimed success unconditionally.
    if [ "${file_status}" == "normal" ]; then
      # Keep the -bak copy as a safety net against editing mistakes
      cp "${path_to_file}-bak" "${path_to_file}"
      print::msg "all" "success" "${file_name} 已下载成功，可以正常使用!!!"
    else
      print::msg "all" "error" "${file_name} 下载失败，请检查网络后重试!!!"
    fi
  fi
}

# Customize the flannel manifest: point its images at the local harbor.
# Arguments: $1 - local path to the flannel yaml file
# Globals:   harbor_addr, harbor_k8s_repo (read)
k8s_cluster_network_flannel_yaml_conf(){
  # Receive the parameter
  local local_path_to_file="$1"
  local yaml_image_repo="docker.io/flannel"
  local yaml_image_harbor="${harbor_addr}/${harbor_k8s_repo}"

  # Rewrite the image repository; the file path is now quoted so
  # paths containing spaces cannot be word-split (it was unquoted)
  sed -i "s#${yaml_image_repo}#${yaml_image_harbor}#g" "${local_path_to_file}"
}

# Customize the calico manifest: switch CNI IPAM to host-local,
# inject pool/autodetection env vars, and point images at harbor.
# Arguments: $1 - local path to the calico yaml file
# Globals:   harbor_addr, harbor_k8s_repo, K8S_POD_CIDR_DEFINE,
#            K8S_NODE_NET_DEV (read)
k8s_cluster_network_calico_yaml_conf(){
  # Receive the parameter
  local local_path_to_file="$1"
  local yaml_image_repo="docker.io/calico"
  local yaml_image_harbor="${harbor_addr}/${harbor_k8s_repo}"

  # Switch the CNI IPAM plugin to host-local using per-node pod CIDRs
  sed -i 's#"type": "calico-ipam"#"type": "host-local",\
              "subnet": "usePodCidr"#' "${local_path_to_file}"
  # Insert the pool / autodetection environment variables right
  # before CALICO_DISABLE_FILE_LOGGING
  sed -i '/CALICO_DISABLE_FILE_LOGGING/i \
            - name: CALICO_IPV4POOL_CIDR \
              value: "K8S_POD_CIDR_DEFINE" \
            - name: CALICO_IPV4POOL_BLOCK_SIZE \
              value: "24" \
            - name: USE_POD_CIDR \
              value: "true" \
            - name: IP_AUTODETECTION_METHOD \
              value: interface=K8S_NODE_NET_DEV' "${local_path_to_file}"
  # Substitute the placeholders with this deployment's actual values
  sed -i "s#K8S_POD_CIDR_DEFINE#${K8S_POD_CIDR_DEFINE}#" "${local_path_to_file}"
  sed -i "s#K8S_NODE_NET_DEV#${K8S_NODE_NET_DEV}#" "${local_path_to_file}"

  # Rewrite the image repository; bug fix: the file path was the only
  # unquoted sed target in this function - quote it for consistency
  sed -i "s#${yaml_image_repo}#${yaml_image_harbor}#g" "${local_path_to_file}"
}

# Verify that helm on the remote master matches the expected version.
# Globals:  login_user, master1, helm_ver (read)
# Outputs:  "is_success" when the versions match, otherwise "is_false"
k8s_cluster_network_helm_check(){
  # Extract the version number from the remote helm binary
  local remote_ver
  remote_ver=$(ssh "${login_user}@${master1}" "helm version" | awk -F'v|-' '{print $3}')

  # Emit the comparison result
  if [ "${remote_ver}" == "${helm_ver}" ]; then
    echo "is_success"
  else
    echo "is_false"
  fi
}

# Deploy the bundled helm binary to the remote master host.
# Globals:  login_user, master1, helm_dir, helm_tar (read)
k8s_cluster_network_helm_deploy(){
  # Local (extracted) and remote locations of the helm binary
  local src_file="/tmp/linux-amd64/helm"
  local dst_file="/usr/local/bin/helm"

  # Ask the remote host whether helm is already in place
  local remote_state
  remote_state=$(ssh "${login_user}@${master1}" [ -f ${dst_file} ] \
                      && echo "is_exist" || echo "not_exist")

  if [ "${remote_state}" != "is_exist" ]; then
    # Unpack the bundled helm archive locally ...
    tar xf "${helm_dir}/${helm_tar}" -C /tmp
    # ... and push the binary to the master host
    k8s_cluster_network_yaml_scp "${src_file}" "${dst_file}"
  else
    print::msg "log" "warning" "目标主机已存在helm环境，无需重复部署!!!"
  fi
}

# Fetch the cilium chart's values.yaml via helm on the remote master
# and copy it back to the deploy host for later customization.
# Arguments: $1 - local path to store values.yaml
#            $2 - remote values.yaml path (stale copy removed first)
# Globals:   login_user, master1, cilium_ver (read)
k8s_cluster_network_chart_values_get(){
  # Receive the parameters
  local local_cilium_values_file="$1"
  local remote_cilium_values_file="$2"

  # Probe the remote helm environment: is helm usable, and is the
  # cilium repo already registered?  NOTE(review): "not_esist" is a
  # typo but harmless - only "is_exist" is ever compared against.
  local helm_status=$(k8s_cluster_network_helm_check)
  local repo_status=$(ssh "${login_user}@${master1}" helm repo list \
                     | grep cilium >>/dev/null 2>&1 \
                     && echo "is_exist" || echo "not_esist")
  
  # Remote command fragments assembled below
  local add_repo_cmd="helm repo add cilium https://helm.cilium.io/"
  local update_repo_cmd="helm repo update"
  local get_chart_cmd="helm pull cilium/cilium --version=${cilium_ver} --untar"
  local tail_cmd="rm -rf ~/cilium"
  # Sync the repo and pull the chart
  if [ "${helm_status}" == "is_success" ]; then
    # Skip "helm repo add" when the cilium repo is already present
    if [ "${repo_status}" == "is_exist" ]; then
      local heml_repo_exec="${update_repo_cmd}; ${get_chart_cmd}"
    else
      local heml_repo_exec="${add_repo_cmd}; ${update_repo_cmd}; ${get_chart_cmd}"
    fi
    print::msg "console" "info" "需要和cilium的helm仓库同步信息，时间可能稍长，请耐心等待..."
    # Clear any stale copy on the remote host, then pull the chart
    ssh "${login_user}@${master1}" "[ -f ${remote_cilium_values_file} ] && rm -rf ${remote_cilium_values_file} ~/cilium; \
                                   ${heml_repo_exec}"

    # Copy the chart's values.yaml back to the deploy host so later
    # steps can customize it, then clean up the remote chart dir
    local cilium_values_dir=$(dirname "${local_cilium_values_file}")
    [ -d "${cilium_values_dir}" ] || mkdir -p "${cilium_values_dir}"
    scp "${login_user}@${master1}:~/cilium/values.yaml" "${local_cilium_values_file}"
    ssh "${login_user}@${master1}" "${tail_cmd}"
    print::msg "all" "success" "已获取cilium values.yaml文件!!!"
  else
    print::msg "all" "error" "helm在远程主机未部署成功，请确保helm环境正常!!!"
  fi
}

# Customize the cilium values.yaml for this deployment (images,
# IPAM mode, routing mode, eBPF, kube-proxy replacement).
# Arguments: $1 - local path to the cilium values.yaml
# Globals:   harbor_addr, harbor_k8s_repo, cilium_ipam_type,
#            K8S_POD_CIDR_DEFINE, target_net, cilium_net_type,
#            cilium_use_ebpf, cilium_deploy_type, master1,
#            K8S_SVC_PORT (read)
k8s_cluster_network_cilium_values_conf(){
  # Receive the parameter
  local cilium_values_file="$1"
  # Keep a pristine backup (values.yaml.bak) before editing
  cp ${cilium_values_file}{,.bak}

  # Point every upstream image registry at the local harbor
  local cilium_harbor_repo="${harbor_addr}/${harbor_k8s_repo}"
  for repo_name in quay.io/cilium docker.io/library ghcr.io/spiffe; do
    sed -i "s#${repo_name}#${cilium_harbor_repo}#g" "${cilium_values_file}"
  done
  # Digests refer to the upstream images, so disable digest pinning
  sed -i "s#useDigest: true#useDigest: false#g" "${cilium_values_file}"

  # Configure the IPAM management mode
  if [ "${cilium_ipam_type}" == "kubernetes" ]; then
    sed -i "s@mode: \"cluster-pool\"@mode: \"${cilium_ipam_type}\"@" "${cilium_values_file}"
    sed -i "s@10.0.0.0/8@${K8S_POD_CIDR_DEFINE}@" "${cilium_values_file}"
    sed -i "s@ipv4NativeRoutingCIDR: \"@ipv4NativeRoutingCIDR: \"${K8S_POD_CIDR_DEFINE}@" "${cilium_values_file}"
  elif [ "${cilium_ipam_type}" == "cluster-pool" ]; then
    # Avoid a pool clash when the hosts already live in 10.0.0.0/8
    if [ "${target_net}" == "10.0.0" ]; then
       sed -i 's#10.0.0.0/8#11.0.0.0/8#' "${cilium_values_file}"
    fi
  fi

  # Routing mode: the value written is "native" (the original comment
  # said vxlan; NOTE(review): confirm the intended routing mode)
  if [ "${cilium_net_type}" == "native" ]; then
    sed -i "s@routingMode: \"\"@routingMode: \"${cilium_net_type}\"@" "${cilium_values_file}"
    sed -i "s#autoDirectNodeRoutes: false#autoDirectNodeRoutes: true#" "${cilium_values_file}"
  fi

  # Enable eBPF masquerading. The nodePort "enabled" flag is located
  # by line number via grep; NOTE(review): grep -r on a single file,
  # and multiple matches would yield an invalid sed address - verify
  # against the actual values.yaml layout.
  if [ "${cilium_use_ebpf}" == "yes" ]; then
    sed -i "s@masquerade: ~@masquerade: true@" "${cilium_values_file}" 
    local edit_num=$(grep -nir -A2 "^nodePort:" "${cilium_values_file}" | awk -F'-' '/enabled:/{print $1}')
    sed -i "${edit_num}s@enabled: false@enabled: true@" "${cilium_values_file}"
  fi

  # kube-proxy-free mode: point cilium straight at the apiserver
  if [ "${cilium_deploy_type}" == "nokubeproxy" ]; then
    sed -i "s@^#kubeProxyR.*@kubeProxyReplacement: \"true\"@" "${cilium_values_file}"
    sed -i "s@^k8sServiceHost:.*@k8sServiceHost: \"${master1}\"@" "${cilium_values_file}"
    sed -i "s@^k8sServicePort:.*@k8sServicePort: \"${K8S_SVC_PORT}\"@" "${cilium_values_file}"
  fi
}

# Dispatch manifest customization by network solution type.
# Arguments: $1 - network type (flannel|calico|cilium)
#            $2 - local path to the manifest/values file
k8s_cluster_network_yaml_conf(){
  # Receive the parameters
  local net_type="$1"
  local manifest_file="$2"

  # Hand the file to the matching customization helper
  case "${net_type}" in
    flannel) k8s_cluster_network_flannel_yaml_conf "${manifest_file}" ;;
    calico)  k8s_cluster_network_calico_yaml_conf "${manifest_file}" ;;
    cilium)  k8s_cluster_network_cilium_values_conf "${manifest_file}" ;;
    *)
      print::msg "console" "warning" "k8s项目暂不支持 ${net_type} 方案"
      print::msg "console" "warning" "请使用如下几种网络解决方案: "
      print::msg "console" "warning" "Flannel、Calico、Cilium" ;;
  esac
}

# Transfer a config file to the remote master host.
# Arguments: $1 - local file path
#            $2 - remote destination path
# Globals:   login_user, master1 (read)
k8s_cluster_network_yaml_scp(){
  # Receive the parameters
  local local_path_to_file="$1"
  local remote_path_to_file="$2"
  # Bug fix: quote the expansions so paths containing spaces are not
  # word-split by basename/dirname (they were unquoted)
  local remote_file=$(basename "${remote_path_to_file}")
  local remote_dir=$(dirname "${remote_path_to_file}")

  # Ensure the remote directory exists and drop any stale copy
  ssh "${login_user}@${master1}" "[ ! -d ${remote_dir} ] && mkdir ${remote_dir} -p; \
         [ -f ${remote_path_to_file} ] && rm -f ${remote_path_to_file}"
  scp "${local_path_to_file}" "${login_user}@${master1}:${remote_dir}"
  print::msg "all" "success" "${remote_file} 已传递到远程的master主机 ${remote_dir} 目录!!!"
}

# Mirror one image into the local harbor registry if it is missing.
# Arguments: $1 - upstream image reference (repo/name:tag)
# Globals:   harbor_addr, harbor_k8s_repo, harbor_images_list_file,
#            login_user, master1 (read)
k8s_cluster_network_image_get_logic(){
  # Receive the parameter
  local image_repo="$1"

  # Rewrite the image reference to point at harbor
  local image_name=$(echo "${image_repo}" | awk -F '/' '{print $NF}')
  local image_new_name="${harbor_addr}/${harbor_k8s_repo}/${image_name}"

  # Idiom fix: use grep -q instead of redirecting stdout; 2>/dev/null
  # also silences the error when the list file does not exist yet
  local check_image_status=$(grep -q "${image_new_name}" "${harbor_images_list_file}" 2>/dev/null \
                             && echo "存在" || echo "不存在")
  # Skip images harbor already has; otherwise pull/retag/push
  if [ "${check_image_status}" == "存在" ]; then
    print::msg "all" "warning" "${image_new_name} 镜像文件在harbor镜像仓库已存在，不用重复获取!!!"
  else
    # Pull on the master, retag for harbor, push, then clean up
    ssh "${login_user}@${master1}" "docker pull ${image_repo}; \
       docker tag ${image_repo} ${image_new_name}; \
       docker push ${image_new_name}; \
       docker rmi ${image_repo}"
  fi
}

# Build the list of images referenced by the cilium values file.
# Arguments: $1 - path to the cilium values.yaml
# Outputs:   one "repo:tag" per line in /tmp/cilium_image_list.txt
k8s_cluster_network_cilium_get_images_name(){
  # Receive the parameter
  local file_name="$1"
  local cilium_image_list="/tmp/cilium_image_list.txt"

  # Pair each repository with its tag and emit "repo:tag" lines.
  # Running awk directly via process substitution removes the
  # original eval of awk command strings (an injection hazard if the
  # file path ever contained shell metacharacters).
  > "${cilium_image_list}"
  paste <(awk -F'"' '/repository:/{print $2}' "${file_name}") \
        <(awk -F'"' '/tag:/{print $2}' "${file_name}") \
  | while read -r repo_name image_tag; do
      echo "${repo_name}:${image_tag}" >> "${cilium_image_list}"
  done
}

# Collect the images a manifest depends on and mirror them to harbor.
# Arguments: $1 - local manifest/values file
#            $2 - network solution type
k8s_cluster_network_image_get(){
  # Receive the parameters
  local manifest_file="$1"
  local net_type="$2"

  print::msg "all" "warning" "获取网络解决方案依赖镜像并提交到本地harbor镜像仓库!!!"
  # cilium keeps its images in values.yaml; flannel/calico embed
  # them in " image:" lines of the manifest
  local network_image
  if [ "${net_type}" == "cilium" ]; then
    local cilium_image_list="/tmp/cilium_image_list.txt"
    k8s_cluster_network_cilium_get_images_name "${manifest_file}"
    network_image=$(cat ${cilium_image_list})
  else
    network_image=$(grep ' image:' ${manifest_file} | uniq | awk '{print $NF}')
  fi

  # Refresh the harbor project's image inventory
  get_proj_image_list "${harbor_k8s_repo}"

  # Mirror each image that harbor is missing
  for i in ${network_image}; do
    k8s_cluster_network_image_get_logic "${i}"
  done
}

# Apply the network manifest (or helm chart) on the remote master.
# Arguments: $1 - remote manifest/values path
#            $2 - network solution type
k8s_cluster_network_yaml_apply(){
  # Receive the parameters
  local remote_file="$1"
  local net_type="$2"

  # Choose the command that installs the solution
  local exec_cmd
  if [ "${net_type}" == "cilium" ]; then
    # When cilium replaces kube-proxy, remove kube-proxy first and
    # flush its iptables rules on every node
    if [ "${cilium_deploy_type}" == "nokubeproxy" ]; then
      ssh "${login_user}@${master1}" "kubectl  delete ds/kube-proxy cm/kube-proxy -n kube-system"
      for node in ${all_k8s_list}; do
        ssh "${login_user}@${node}" "iptables-save | grep -v KUBE | iptables-restore"
      done
    fi
    exec_cmd="helm install cilium cilium/cilium -n kube-system -f ${remote_file}"
  else
    exec_cmd="kubectl apply -f ${remote_file}"
  fi

  # Run the install command on the master
  ssh "${login_user}@${master1}" "${exec_cmd}"
}

# Poll a network daemonset until at least one pod reports ready.
# Arguments: $1 - namespace of the network daemonset
#            $2 - daemonset name
# Outputs:   "running" when ready pods were observed, else "notrun"
k8s_network_status_check(){
  # Receive the parameters
  local network_ns="$1"
  local network_ds_name="$2"

  # Probe up to 3 times, 3 seconds apart
  local network_status="notrun"
  local attempt
  for attempt in 1 2 3; do
    # Ask the master for the daemonset's ready-pod count
    local ready_num
    ready_num=$(ssh "${login_user}@${master1}" \
                  "kubectl get ds ${network_ds_name} -n ${network_ns} -o json" 2>/dev/null \
                  | jq ".status.numberReady")
    # Guard the numeric test against an empty result
    [ -z "${ready_num}" ] && ready_num=0
    if [ ${ready_num} -gt 0 ]; then
      network_status="running"
      break
    fi
    # Wait before the next probe
    sleep 3
  done
  echo "${network_status}"
}

# Display the current node status of the k8s cluster on the console.
# Globals: login_user, master1 (read)
k8s_cluster_network_nodes_list(){
  # Announce the check, pause briefly, then frame the node table
  print::msg "console" "warning" "正在检查当前集群的节点网络状态:"
  waiting 3
  echo_tag "=" 50
  ssh "${login_user}@${master1}" "/usr/bin/kubectl get nodes"
  echo_tag "=" 50
  # Reset terminal colors after the table output
  echo -e "\e[0m"
}

# Report whether the network solution's daemonset came up after deploy.
# Arguments: $1 - network solution type (used in messages)
#            $2 - daemonset namespace
#            $3 - daemonset name
k8s_cluster_network_status_check(){
  # Receive the parameters
  local net_type="$1"
  local ds_ns="$2"
  local ds_name="$3"

  # Probe the daemonset state
  local ds_state
  ds_state=$(k8s_network_status_check "${ds_ns}" "${ds_name}")

  # Report the outcome
  if [ "${ds_state}" == "running" ]; then
    print::msg "log" "success" "当前k8s集群 ${net_type} 网络解决方案部署成功!!!"
  else
    print::msg "log" "warning" "当前k8s集群 ${net_type} 网络解决方案部署失败"
    print::msg "log" "warning" "pod创建时间稍长，可能出现该提示，属于正常，请自行检查效果!!!"
  fi
  # Always show the node list for manual inspection
  k8s_cluster_network_nodes_list
}

# Check deployment status for the chosen network solution type.
# Arguments: $1 - network type (flannel|calico|cilium)
k8s_cluster_network_install_status(){
  # Receive the parameter
  local net_type="$1"

  # Map the type to its namespace / daemonset globals
  case "${net_type}" in
    flannel)
      k8s_cluster_network_status_check "${net_type}" "${flannel_ns}" "${flannel_ds_name}" ;;
    calico)
      k8s_cluster_network_status_check "${net_type}" "${calico_ns}" "${calico_ds_name}" ;;
    cilium)
      k8s_cluster_network_status_check "${net_type}" "${cilium_ns}" "${cilium_ds_name}" ;;
    *)
      print::msg "console" "warning" "k8s项目暂不支持 ${net_type} 方案"
      print::msg "console" "warning" "请使用如下几种网络解决方案: "
      print::msg "console" "warning" "Flannel、Calico、Cilium" ;;
  esac
}

# Unpack the bundled cilium cli and push it to the master host.
# Globals: cilium_cli_dir, cilium_cli_tar (read)
k8s_cluster_network_cilium_files_scp(){
  # Extract the cilium cli binary locally, then transfer it
  tar xf "${cilium_cli_dir}/${cilium_cli_tar}" -C /tmp
  k8s_cluster_network_yaml_scp "/tmp/cilium" "/usr/local/bin/cilium"
}

# Check whether the cluster's first master node reports Ready.
# Globals:  login_user, master1 (read)
# Outputs:  "is_running" when the node is Ready, else "not_running"
remote_k8s_node_network_status_check(){
  # Query the master's node table and take the first master row's state
  local node_state
  node_state=$(ssh "${login_user}@${master1}" "kubectl get nodes" \
                 | awk '/master/{print $2}' | head -n1)
  if [ "${node_state}" == "Ready" ]; then
    echo "is_running"
  else
    echo "not_running"
  fi
}


# Orchestrate the full network solution install on the cluster.
# Arguments: $1 - network type (flannel|calico|cilium)
#            $2 - local manifest/values path
#            $3 - download url ("-" for cilium)
#            $4 - remote manifest/values path
k8s_cluster_network_install_logic(){
  # Receive the parameters
  local k8s_cluster_network_type="$1"
  local local_path_to_file="$2"
  local url_to_file="$3"
  local remote_path_to_file="$4"

  # 1. Obtain the manifest / values file
  if [ "${k8s_cluster_network_type}" == "cilium" ]; then
    # cilium: fetch values.yaml through helm on the master
    k8s_cluster_network_helm_deploy
    k8s_cluster_network_chart_values_get "${local_path_to_file}" "${remote_path_to_file}"
  else
    # flannel / calico: plain download from the internet
    k8s_cluster_network_yaml_get "${local_path_to_file}" "${url_to_file}"
  fi

  # 2. Mirror the required images into harbor
  k8s_cluster_network_image_get "${local_path_to_file}" "${k8s_cluster_network_type}"

  # 3. Customize the manifest / values file
  k8s_cluster_network_yaml_conf "${k8s_cluster_network_type}" "${local_path_to_file}"

  # 4. Ship the file(s) to the master host.
  # Bug fix: the original tested the LITERAL string
  # "k8s_cluster_network_type" (missing $), which can never equal
  # "cilium", so the cilium cli was never transferred.
  if [ "${k8s_cluster_network_type}" == "cilium" ]; then
    # cilium additionally needs the cilium cli on the master
    k8s_cluster_network_cilium_files_scp
  fi
  k8s_cluster_network_yaml_scp "${local_path_to_file}" "${remote_path_to_file}"

  # 5. Apply the manifest on the cluster
  k8s_cluster_network_yaml_apply "${remote_path_to_file}" "${k8s_cluster_network_type}"

  # Pods need time to start - wait until the control node is Ready
  while true; do
    local remote_status=$(remote_k8s_node_network_status_check)
    [ "${remote_status}" == "is_running"  ] && break
    print::msg "console" "warning" "集群网络解决方案已部署，等待服务启动，请稍后..."
    sleep 5
  done

  # 6. Verify the deployment result
  k8s_cluster_network_install_status "${k8s_cluster_network_type}"
}

# CentOS kernel upgrade: ship rpms, install, set boot order, reboot.
# Arguments: $1 - target node
# Globals:   login_user, remote_kernel_dir, kernel_centos_dir (read)
centos_kernel_update_logic(){
  # Receive the parameter
  local node="$1"
  
  # Recreate the remote staging directory for the rpm packages
  ssh "${login_user}@${node}" "[ -d ${remote_kernel_dir} ] && rm -rf ${remote_kernel_dir}; \
                                mkdir -p ${remote_kernel_dir}"
  # Transfer the kernel rpm packages
  scp ${kernel_centos_dir}/*.rpm ${login_user}@${node}:${remote_kernel_dir}/

  # Install the new kernel, boot it by default (entry 0), and reboot
  ssh "${login_user}@${node}" "yum install -y ${remote_kernel_dir}/*.rpm; \
                               grub2-set-default 0; reboot"
}
# Compare a CentOS node's running kernel against the expected version
# and trigger an upgrade when it is too old.
# Arguments: $1 - target node
# Globals:   login_user, kernel_version (read)
centos_kernel_number_check_logic(){
  # Receive the parameter
  local node="$1"
  
  # Running kernel, e.g. "3.10.0" -> major 3, minor 10
  local kernel_num=$(ssh ${login_user}@${node} "uname -r" | awk -F'-' '{print $1}')
  local kernel_main_num=$(echo "${kernel_num}" | awk -F'.' '{print $1}')
  local kernel_second_num=$(echo "${kernel_num}" | awk -F'.' '{print $2}')
  local kernel_except_main_num=$(echo "${kernel_version}" | awk -F'.' '{print $1}')
  
  # Upgrade when the major version is too low, or when majors match
  # but the minor version is below 9.
  # NOTE(review): the minor threshold 9 is hard-coded rather than
  # derived from ${kernel_version} - presumably cilium's >=4.9 kernel
  # requirement; confirm against the expected kernel_version value.
  if [ ${kernel_except_main_num} -gt ${kernel_main_num} ]; then
    print::msg "all" "warning" "CentOS主机 ${node} 内核主版本偏低，需要执行升级内核操作!!!"
    centos_kernel_update_logic "${node}"
  elif [ ${kernel_except_main_num} -eq ${kernel_main_num} ]; then
    if [ ${kernel_second_num} -lt 9 ]; then
      print::msg "all" "warning" "CentOS主机 ${node} 内核次版本偏低，需要执行升级内核操作!!!"
      centos_kernel_update_logic "${node}"
    fi
  fi
}

# Ensure every CentOS node's kernel satisfies cilium's requirements.
# Globals: all_k8s_list (read)
k8s_network_cilium_centos_update_kernel_logic(){
  # Walk the cluster hosts and handle only the CentOS ones
  for node in ${all_k8s_list}; do
    local node_type=$(get_remote_os_type "remote" "${node}")
    [ "${node_type}" != "CentOS" ] && continue
    # NOTE: it is enough that the kernel upgrade gets triggered;
    # whether each CentOS host finishes rebooting here is irrelevant,
    # because k8s deploys cilium onto whichever hosts are alive.
    centos_kernel_number_check_logic "${node}"
  done
}

# Entry point: install the requested network solution on the cluster.
# Arguments: $1 - network type (flannel|calico|cilium)
k8s_network_install(){
  # Receive the parameter
  local net_type="$1"
  
  print::msg "all" "warning" "开始执行k8s集群定制网络解决方案操作!!!"
  # Deploy the chosen network solution if it is not already running
  if [ "${net_type}" == "flannel"  ];then
    local current_flannel_status=$(k8s_network_status_check "${flannel_ns}" "${flannel_ds_name}")
    if [ "${current_flannel_status}" == "notrun" ]; then
      local local_path_to_file="${addons_flannel}/${flannel_yaml}"
      local url_to_file="${flannel_url}/${flannel_yaml}"
      local remote_path_to_file="${remote_dir}/flannel/${flannel_yaml}"
      k8s_cluster_network_install_logic "${net_type}" "${local_path_to_file}" \
                                        "${url_to_file}" "${remote_path_to_file}"
    else
      print::msg "all" "warning" "k8s集群flannel网络解决方案已运行，无需重复执行!!!"
    fi
  elif [ "${net_type}" == "calico" ];then
    local current_calico_status=$(k8s_network_status_check "${calico_ns}" "${calico_ds_name}")
    if [ "${current_calico_status}" == "notrun" ]; then
      local local_path_to_file="${addons_calico}/${calico_yaml}"
      local url_to_file="${calico_url}"
      local remote_path_to_file="${remote_dir}/calico/${calico_yaml}"
      k8s_cluster_network_install_logic "${net_type}" "${local_path_to_file}" \
                                        "${url_to_file}" "${remote_path_to_file}"
    else
      # Bug fix: this message previously said "flannel" (copy-paste)
      print::msg "all" "warning" "k8s集群calico网络解决方案已运行，无需重复执行!!!"
    fi
  elif [ "${net_type}" == "cilium" ];then
    local current_cilium_status=$(k8s_network_status_check "${cilium_ns}" "${cilium_ds_name}")
    if [ "${current_cilium_status}" == "notrun" ]; then
      # Make sure CentOS hosts run a kernel new enough for cilium
      k8s_network_cilium_centos_update_kernel_logic
      print::msg "all" "warning" "k8s集群所有节点的内核版本满足cilium的部署要求，开始部署cilium!!!"
      # Deploy the cilium environment
      local local_path_to_file="${cilium_values_dir}/${cilium_yaml}"
      local url_to_file="-"
      local remote_path_to_file="${remote_dir}/cilium/${cilium_yaml}"
      k8s_cluster_network_install_logic "${net_type}" "${local_path_to_file}" \
                                        "${url_to_file}" "${remote_path_to_file}"
    else
      # Bug fix: this message previously said "flannel" (copy-paste)
      print::msg "all" "warning" "k8s集群cilium网络解决方案已运行，无需重复执行!!!"
    fi
  else
    print::msg "console" "error" "目前该脚本暂不支持其他类型的网络解决方案，有需求可以联系："
    print::msg "console" "error" "   抖音号：sswang_yys, B站：自学自讲\n"
  fi
  print::msg "all" "success" "指定网络解决方案已经部署完毕!!!\n"
}
