#!/bin/bash

#==============================================================#
# File      :   setup5.sh
# Desc      :   只在部署节点执行，开始部署K8S集群
# 
# Usage     :   bash setup5.sh
# Author    :   KoalaAn (zhongweibest@outlook.com)
# License   :   AGPLv3
#==============================================================#


# Resolve the directory this script lives in, then load the shared cluster
# configuration (CLUSTER_NAME, KUBE_PATH, node IP lists, cni, cri, ...).
CURRENT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# Quote the paths so a checkout under a directory containing spaces still works.
. "${CURRENT_DIR}/config_common.sh"
. "${CURRENT_DIR}/config_node.sh"








# Initialize a kubeasz cluster configuration named ${CLUSTER_NAME}.
# Remove any stale config of the same name first so `ezctl new` starts clean.
sudo rm -rf "/etc/kubeasz/clusters/${CLUSTER_NAME}"
echo "${KUBE_PATH}"
# docker exec -it kubeasz ezctl new ${CLUSTER_NAME}
# Check the command's status directly instead of inspecting $? afterwards.
if ! sudo "${KUBE_PATH}/ezctl" new "${CLUSTER_NAME}"; then
    echo "cluster name [${CLUSTER_NAME}] already exists in ${KUBE_PATH}/clusters/${CLUSTER_NAME}." >&2
    exit 1
fi



# Set the installation source parameter in config.yml.
# Offline install docs: https://github.com/easzlab/kubeasz/blob/3.6.2/docs/setup/offline_install.md
# NOTE(review): the original comment said "enable OFFLINE install", but the
# value written below is "online" — confirm which install source is intended.
sudo sed -i 's/^INSTALL_SOURCE.*$/INSTALL_SOURCE: "online"/g' ${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml



############################################################
# /etc/kubeasz/clusters/${CLUSTER_NAME}/config.yml edits
############################################################


# Write the cluster name into config.yml (CLUSTER_NAME: "<name>").
sudo sed -ri "s+^(CLUSTER_NAME:).*$+\1 \"${CLUSTER_NAME}\"+g" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml"

# Keep kubelet/docker data and the NFS share on a dedicated disk layout
# (pattern borrowed from Alibaba Cloud's recommendations).
sudo mkdir -p /var/lib/container/{kubelet,docker,nfs_dir} /var/lib/{kubelet,docker} /nfs_dir


# docker data dir
DOCKER_STORAGE_DIR="/var/lib/container/docker"
# NOTE(review): this sed and the containerd one below both match the same
# pattern '^STORAGE_DIR:' in the same config.yml, so the second invocation
# rewrites every line the first one just set — the docker value written here
# cannot survive.  Presumably config.yml carries two distinct STORAGE_DIR
# lines (one per runtime) and each sed should be anchored to its own section;
# verify against the kubeasz config.yml layout before changing.
sudo sed -ri "s+^(STORAGE_DIR:).*$+STORAGE_DIR: \"${DOCKER_STORAGE_DIR}\"+g" ${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml

# containerd data dir
CONTAINERD_STORAGE_DIR="/var/lib/container/containerd"
sudo sed -ri "s+^(STORAGE_DIR:).*$+STORAGE_DIR: \"${CONTAINERD_STORAGE_DIR}\"+g" ${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml

# kubelet logs dir
KUBELET_ROOT_DIR="/var/lib/container/kubelet"
sudo sed -ri "s+^(KUBELET_ROOT_DIR:).*$+KUBELET_ROOT_DIR: \"${KUBELET_ROOT_DIR}\"+g" ${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml

if [[ $CLUSTER_NAME != 'aws' ]]; then
    # Outside AWS, point docker at the Aliyun registry mirror.
    REG_MIRRORS="https://pqbap4ya.mirror.aliyuncs.com"
    sudo sed -ri "s+^REG_MIRRORS:.*$+REG_MIRRORS: \'[\"${REG_MIRRORS}\"]\'+g" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml"
fi

# [docker] trusted insecure registries: replace the template's 127.0.0.1/8
# entry with this deployment's subnet.  Dots are escaped so the pattern only
# matches the literal template address.
sudo sed -ri "s+127\.0\.0\.1/8+${NET_NUM}.0/24+g" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml"
# Disable the automatic dashboard install.
sudo sed -ri "s+^(dashboard_install:).*$+\1 \"no\"+g" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml"

# Generate the apiserver access domain (e.g. testk8s.boge.com when
# CLUSTER_NAME=test).  The deploy scripts sign the apiserver certificate for
# this name, so kube-apiserver can later be reached through any IP the domain
# is resolved to — more flexible than pinning an address.
CLUSTER_WEBSITE="${CLUSTER_NAME}k8s.${DOMAIN_NAME}"
config_yml="${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml"
# Locate the MASTER_CERT_HOSTS: line; the two lines after it hold the
# template's sample hostnames.
lb_num=$(grep -wn '^MASTER_CERT_HOSTS:' "${config_yml}" | awk -F: '{print $1}')
if [[ -z ${lb_num} ]]; then
    # Without the anchor, the line-addressed seds below would have an empty
    # address and rewrite every line of the file — abort instead.
    echo "MASTER_CERT_HOSTS not found in ${config_yml}." >&2
    exit 1
fi
lb_num1=$((lb_num + 1))
lb_num2=$((lb_num + 2))
# First sample entry becomes our domain; the second is commented out.
sudo sed -ri "${lb_num1}s+.*$+  - ${CLUSTER_WEBSITE}+g" "${config_yml}"
sudo sed -ri "${lb_num2}s+(.*)$+#\1+g" "${config_yml}"

# Maximum pods per worker node.
MAX_PODS="120"
sudo sed -ri "s+^(MAX_PODS:).*$+\1 ${MAX_PODS}+g" "${config_yml}"

# calico 自建机房都在二层网络可以设置 CALICO_IPV4POOL_IPIP=“off”,以提高网络性能; 公有云上VPC在三层网络，需设置CALICO_IPV4POOL_IPIP: "Always"开启ipip隧道
#sed -ri "s+^(CALICO_IPV4POOL_IPIP:).*$+\1 \"off\"+g" ${KUBE_PATH}/clusters/${CLUSTER_NAME}/config.yml



############################################################
# /etc/kubeasz/clusters/${CLUSTER_NAME}/hosts 修改
############################################################


# Rewrite the binary-install hosts inventory:
# drop the template's placeholder node IPs (192.168.1.1 .. 192.168.1.5).
hosts_file="${KUBE_PATH}/clusters/${CLUSTER_NAME}/hosts"
for placeholder_ip in 192.168.1.1 192.168.1.2 192.168.1.3 192.168.1.4 192.168.1.5; do
    # Escape the dots so the pattern matches only the literal address.
    sudo sed -ri "/${placeholder_ip//./\\.}/d" "${hosts_file}"
done

# Seed the [etcd] group with the ETCD cluster members.
# Note: `sed /.../a` inserts each IP directly below the section header, so the
# final file lists them in reverse of the array order (harmless for ansible).
for item_ip in "${ETCD_LIST[@]}"; do
    sudo sed -i "/\[etcd/a $item_ip" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/hosts"
done

# Seed the [kube_master] group.
for item_ip in "${MASTER_NODE_IP_LIST[@]}"; do
    sudo sed -i "/\[kube_master/a $item_ip" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/hosts"
done

# Seed the [kube_node] group.
for item_ip in "${WORKER_NODE_IP_LIST[@]}"; do
    sudo sed -i "/\[kube_node/a $item_ip" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/hosts"
done

# Validate and write the CNI plugin choice.
# ${cni} must be one of: flannel, calico, cilium.
if [[ ! " flannel calico cilium " =~ " ${cni} " ]]; then
    echo "cni need be flannel or calico or cilium." >&2
    exit 1
fi
sudo sed -ri "s+^CLUSTER_NETWORK=.*$+CLUSTER_NETWORK=\"${cni}\"+g" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/hosts"

# Validate and write the container runtime choice (containerd or docker).
if [[ ! " containerd docker " =~ " ${cri} " ]]; then
    echo "cri need be containerd or docker." >&2
    exit 1
fi
sudo sed -ri "s+^CONTAINER_RUNTIME=.*$+CONTAINER_RUNTIME=\"${cri}\"+g" "${KUBE_PATH}/clusters/${CLUSTER_NAME}/hosts"

# Install a daily cron job that backs up the cluster's ETCD data and prunes
# backups older than 3 days.
# https://github.com/easzlab/kubeasz/blob/master/docs/op/cluster_restore.md
# NB: cron executes entries with /bin/sh, which does not understand bash's
# '&>' redirection — use the portable '>/dev/null 2>&1' form in the entry.
# NOTE(review): unlike the rest of the script these commands run without sudo,
# so this section only works when the script itself runs as root — confirm.
backup_job="00 00 * * * /usr/local/bin/ansible-playbook -i /etc/kubeasz/clusters/${CLUSTER_NAME}/hosts -e @/etc/kubeasz/clusters/${CLUSTER_NAME}/config.yml /etc/kubeasz/playbooks/94.backup.yml > /dev/null 2>&1; find /etc/kubeasz/clusters/${CLUSTER_NAME}/backup/ -type f -name '*.db' -mtime +3|xargs rm -f"
if cat /etc/redhat-release &>/dev/null; then
    cron_file='/var/spool/cron/root'
    cron_service='crond'
else
    cron_file='/var/spool/cron/crontabs/root'
    cron_service='cron'
fi
# Append the job only once; key on the playbook name.
if ! grep -w '94.backup.yml' "${cron_file}" &>/dev/null; then
    echo "${backup_job}" >> "${cron_file}"
else
    echo exists
fi
# NOTE(review): the 'crontab' group exists on Debian-family systems only; on
# RedHat this chown fails (original used deprecated 'root.crontab' too) —
# confirm the intended ownership per distro.
chown root:crontab "${cron_file}"
chmod 600 "${cron_file}"
# Remove whichever stale reboot marker exists (rm -f ignores missing files).
rm -f /var/run/cron.reboot /var/run/crond.reboot
service "${cron_service}" restart




#---------------------------------------------------------------------------------------------------
# Trim the kubeasz tree before deploying: drop docs/assets and markdown files.
# ${KUBE_PATH:?} aborts instead of expanding to '' (and hitting '/') if the
# variable is unset; the duplicate 'dockerfiles' entry was removed.
sudo rm -rf "${KUBE_PATH:?}"/{dockerfiles,docs,.gitignore,pics} &&
find "${KUBE_PATH}/" -name '*.md' -exec sudo rm -f -- {} +

# Pause so the operator can confirm before deployment starts.
read -p "Enter to continue deploy k8s to all nodes >>>"

# now start deploy k8s cluster
# one-shot alternative (deploy everything with a single command):
# sudo ${KUBE_PATH}/ezctl setup ${CLUSTER_NAME}  all

# Step-by-step deployment:
#   01  prepare CA/certs & kubeconfig & other system settings
#   02  setup the etcd cluster
#   03  setup the container runtime (docker or containerd)
#   04  setup the master nodes
#   05  setup the worker nodes
#   06  setup the network plugin (flannel, calico, ...)
#   07  setup other useful plugins (metrics-server, coredns, ...)
for setup_step in 01 02 03 04 05 06 07; do
    sudo ${KUBE_PATH}/ezctl setup ${CLUSTER_NAME} ${setup_step}
    sleep 1
done

# [可选]对集群所有节点进行操作系统层面的安全加固  https://github.com/dev-sec/ansible-os-hardening
# sudo ansible-playbook roles/os-harden/os-harden.yml
# sleep 1
# cd `dirname ${software_packet:-/tmp}`


k8s_bin_path='/etc/kubeasz/bin'


# Print a section banner, optionally run a kubectl subcommand, then a blank
# line — the repeated banner/command/echo pattern factored into one place.
print_section() {
    local banner=$1
    shift
    echo "$banner"
    if [ "$#" -gt 0 ]; then
        sudo ${k8s_bin_path}/kubectl "$@"
    fi
    echo
}

print_section "-------------------------  k8s version list  ---------------------------" version
print_section "-------------------------  All Healthy status check  -------------------" get componentstatus
print_section "-------------------------  k8s cluster info list  ----------------------" cluster-info
print_section "-------------------------  k8s all nodes list  -------------------------" get node -o wide
print_section "-------------------------  k8s all-namespaces's pods list   ------------" get pod --all-namespaces
print_section "-------------------------  k8s all-namespaces's service network   ------" get svc --all-namespaces
print_section "-------------------------  k8s welcome for you   -----------------------"

# Optional: alias kubectl to 'k' for convenience.
# echo "alias k=kubectl && complete -F __start_kubectl k" >> ~/.bashrc

# Append the dashboard URL (last field of the cluster-info line that mentions
# "dashboard") to /root/k8s_results for later reference.
sudo ${k8s_bin_path}/kubectl cluster-info|grep dashboard|awk '{print $NF}'|sudo tee -a /root/k8s_results

# Append the admin-user login token: find the admin-user secret in
# kube-system, describe it, and extract the 'token:' value.
# NOTE(review): service-account token secrets are no longer auto-created on
# Kubernetes >= 1.24 — confirm this works on the deployed version.
sudo ${k8s_bin_path}/kubectl -n kube-system describe secret $(sudo ${k8s_bin_path}/kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')|grep 'token:'|awk '{print $NF}'|sudo tee -a /root/k8s_results
echo
echo "you can look again dashboard and token info at  >>> /root/k8s_results <<<"
echo ">>>>>>>>>>>>>>>>> You need to excute command [ reboot ] to restart all nodes <<<<<<<<<<<<<<<<<<<<"
#find / -type f -name "kubeasz*.tar.gz" -o -name "k8s_install_new.sh"|xargs rm -f

























