#!/bin/bash
# This script builds a standalone etcd cluster (run as kubelet static pods)
# that serves as an external etcd service for another Kubernetes cluster.
# It must run as root on the first etcd host (HOST0) and assumes passwordless
# root ssh (via sshpass) to the other members.
echo "************************"
echo "重置Kubeadm"
echo "************************"
# Wipe any previous kubeadm state so certificates and manifests are
# regenerated cleanly; --cri-socket targets cri-dockerd (Docker runtime).
kubeadm reset --force --cri-socket=unix:///var/run/cri-dockerd.sock

echo "************************"
echo "配置kubelet服务"
echo "************************"
echo "************************"
echo "1 生成: kubelet.conf"
echo "************************"
# Fix: the systemd drop-in directory is not guaranteed to exist on a fresh
# host; create it so the redirection below cannot fail with
# "No such file or directory".
mkdir -p /etc/systemd/system/kubelet.service.d
# KubeletConfiguration for a standalone kubelet that only runs static pods
# from staticPodPath (no API server). Heredoc content is written verbatim.
cat << EOF > /etc/systemd/system/kubelet.service.d/kubelet.conf
# 将下面的 "systemd" 替换为你的容器运行时所使用的 cgroup 驱动。
# kubelet 的默认值为 "cgroupfs"。
# 如果需要的话，将 "containerRuntimeEndpoint" 的值替换为一个不同的容器运行时。
#
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
    anonymous:
        enabled: false
    webhook:
        enabled: false
authorization:
    mode: AlwaysAllow
cgroupDriver: systemd
address: 127.0.0.1
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
staticPodPath: /etc/kubernetes/manifests
EOF

echo "**************************************"
echo "2. 生成: 20-etcd-service-manager.conf"
echo "**************************************"
# systemd drop-in that overrides kubelet's ExecStart so it runs standalone
# with the KubeletConfiguration written above. The empty "ExecStart=" line
# clears the unit's original ExecStart before redefining it.
cat << EOF > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf
[Service]
ExecStart=
ExecStart=/usr/bin/kubelet --config=/etc/systemd/system/kubelet.service.d/kubelet.conf
Restart=always
EOF

# echo "***********************************"
# echo "3. Restart the kubelet service"
# echo "***********************************"
# (Disabled: superseded by step 6 at the end of this script, which restarts
# kubelet only after the static-pod manifests exist.)
# systemctl daemon-reload
# systemctl restart kubelet
# # check kubelet status
# systemctl status kubelet

echo "***********************************"
echo "配置ETCD服务"
echo "***********************************"
echo "***********************************"
echo "1. 配置环境变量"
echo "***********************************"
# etcd member topology: HOSTn is a member's host IP address and NAMEn is its
# hostname. Adjust these to match your environment before running.
export HOST0=192.168.1.165
export HOST1=192.168.1.166
export HOST2=192.168.1.167
export NAME0="cn1"
export NAME1="cn2"
export NAME2="cn3"



echo "***********************************"
echo "2. 生成kubeadmcfg.yaml"
echo "***********************************"
# Create temp directories to store files that will end up on other hosts
mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/
#HOSTS=(192.168.1.165 192.168.1.166 192.168.1.167)
HOSTS=(${HOST0} ${HOST1} ${HOST2})
NAMES=(${NAME0} ${NAME1} ${NAME2})

# For every etcd member, render a kubeadm config that pins the member's name,
# advertise address and the full initial-cluster peer list. The heredoc body
# stays at column 0 because the delimiter is plain EOF (not <<-), and it is
# unquoted so that ${HOST}/${NAME}/${HOSTS[...]} expand at write time.
for i in "${!HOSTS[@]}"; do
HOST=${HOSTS[$i]}
NAME=${NAMES[$i]}

# NOTE(review): imageRepository points at a private mirror (bg9:8083) —
# confirm it is reachable from every member before running.
cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml
---
apiVersion: "kubeadm.k8s.io/v1beta3"
kind: InitConfiguration
nodeRegistration:
    name: ${NAME}
    criSocket: unix:///var/run/cri-dockerd.sock
localAPIEndpoint:
    advertiseAddress: ${HOST}
---
apiVersion: "kubeadm.k8s.io/v1beta3"
kind: ClusterConfiguration
kubernetesVersion: 1.28.2
imageRepository: bg9:8083/registry.k8s.io
etcd:
    local:
        serverCertSANs:
        - "${HOST}"
        peerCertSANs:
        - "${HOST}"
        extraArgs:
            initial-cluster: ${NAMES[0]}=https://${HOSTS[0]}:2380,${NAMES[1]}=https://${HOSTS[1]}:2380,${NAMES[2]}=https://${HOSTS[2]}:2380
            initial-cluster-state: new
            name: ${NAME}
            listen-peer-urls: https://${HOST}:2380
            listen-client-urls: https://${HOST}:2379
            advertise-client-urls: https://${HOST}:2379
            initial-advertise-peer-urls: https://${HOST}:2380
EOF
done

echo "***********************************"
echo "3. 通过'kubeadm init'生成ETCD证书"
echo "***********************************"
# Generate the shared etcd CA once, then per-member server/peer/client certs.
# We work from the last host down to HOST0 so the reusable CA key pair stays
# in /etc/kubernetes/pki between iterations while per-member certs are staged
# under /tmp/<ip>/ for copying.

# Generate the certificates for HOST2.
# (Fix: this header was a bare, uncommented Chinese line that bash attempted
# to execute as a command.)
kubeadm init phase certs etcd-ca --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
cp -R /etc/kubernetes/pki /tmp/${HOST2}/
# cleanup non-reusable certificates so the next member gets fresh ones
find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete

# Generate the certificates for HOST1
kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
cp -R /etc/kubernetes/pki /tmp/${HOST1}/
find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete

# Generate the certificates for HOST0
kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml
# No need to move the certs because they are for HOST0

# clean up certs that should not be copied off this host (the CA private key
# must never leave the machine that generated it)
find /tmp/${HOST2} -name ca.key -type f -delete
find /tmp/${HOST1} -name ca.key -type f -delete

echo "***********************************"
echo "4. 复制ETCD证书到各个节点"
echo "***********************************"
# Push each member's staged pki directory and kubeadm config to that member.
for idx in "${!HOSTS[@]}"; do
    node=${HOSTS[$idx]}
    # Member 0 is this machine — its certs are already in /etc/kubernetes/pki,
    # so the pki copy is only needed for the remote members.
    if (( idx > 0 )); then
        echo "current i: $idx"
        sshpass -p root scp -r /tmp/${node}/*pki "root@${node}:/etc/kubernetes/"
    fi
    sshpass -p root scp -r "/tmp/${node}/kubeadmcfg.yaml" "root@${node}:/root"
done

echo "***********************************"
echo "5. 创建静态的pod清单文件"
echo "***********************************"
echo "***********************************"
echo "5.1 创建HOST0静态的pod清单文件"
echo "***********************************"
# Write the etcd static-pod manifest locally on HOST0; kubelet will launch it
# from staticPodPath once the service is (re)started in step 6.
kubeadm init phase etcd local --config=/tmp/${HOST0}/kubeadmcfg.yaml --v=5
echo "***********************************"
echo "5.2 创建HOST1静态的pod清单文件"
echo "***********************************"
# Remote members use the kubeadmcfg.yaml copied to /root in step 4.
sshpass -p root ssh root@${HOST1} kubeadm init phase etcd local --config=/root/kubeadmcfg.yaml
echo "***********************************"
echo "5.3 创建HOST2静态的pod清单文件"
echo "***********************************"
sshpass -p root ssh root@${HOST2} kubeadm init phase etcd local --config=/root/kubeadmcfg.yaml


echo "***********************************"
echo "6. 重启Kubelet服务"
echo "***********************************"

# Restart the local kubelet so it picks up the drop-in config and starts the
# etcd static pod.
systemctl daemon-reload
systemctl restart kubelet
# check kubelet status
systemctl status kubelet

# Fix: the remote command must be quoted as a single ssh argument; previously
# the unquoted '&&' chain meant only 'systemctl daemon-reload' ran on the
# remote host while the restart/status commands executed on the LOCAL machine.
sshpass -p root ssh root@${HOST1} "systemctl daemon-reload && systemctl restart kubelet && systemctl status kubelet"
sshpass -p root ssh root@${HOST2} "systemctl daemon-reload && systemctl restart kubelet && systemctl status kubelet"

echo "***********************************"
echo "COMPLETE!"
echo "***********************************"