#!/bin/bash

# Kubernetes offline installation script - Debian 11
# Installs Kubernetes from pre-staged binary tarballs and generates the whole
# certificate chain with CFSSL. Every component is managed via systemctl.
# NOTE(review): strict mode (`set -euo pipefail`) is deliberately not enabled
# because several steps (ufw, swapoff, modprobe) are allowed to fail on some
# hosts — confirm before adding it.

# Environment configuration (exported so that child processes and heredocs
# expanded later in the script can read them).
export K8S_VERSION="1.26.0"
export CNI_VERSION="v1.7.1"
export ETCD_VERSION="3.6.3"
export CONTAINERD_VERSION="1.6.39"
# Split declaration and export so the exit status of the command substitution
# is not masked (ShellCheck SC2155).
HOST_IP=$(hostname -I | awk '{print $1}')
export HOST_IP
export MASTER_IP="$HOST_IP"    # single-node layout: the master is this host
export K8S_DIR="/opt/kubernetes"
export CERT_DIR="/etc/kubernetes/pki"
export CFSSL_DIR="/usr/local/bin"

# Create the required directory layout (quoted defensively).
mkdir -p "$K8S_DIR/bin" "$CERT_DIR" "$K8S_DIR/manifests" "$K8S_DIR/config"

# 1. Install the CFSSL tool chain
# cfssl-tools.tar.gz must be staged in the current directory beforehand.
install_cfssl() {
    echo "正在安装CFSSL工具..."
    mkdir -p "$CFSSL_DIR"
    # Abort if the tarball is missing/corrupt instead of chmod-ing nothing.
    tar -zxvf cfssl-tools.tar.gz -C "$CFSSL_DIR" || return 1
    chmod +x "$CFSSL_DIR/cfssl" "$CFSSL_DIR/cfssljson" "$CFSSL_DIR/cfssl-certinfo"
    # -f makes the links idempotent: a plain `ln -s` fails on re-runs when
    # the symlink already exists.
    ln -sf "$CFSSL_DIR/cfssl" /usr/bin/cfssl
    ln -sf "$CFSSL_DIR/cfssljson" /usr/bin/cfssljson
    ln -sf "$CFSSL_DIR/cfssl-certinfo" /usr/bin/cfssl-certinfo
}

# Unpack the binary tarballs (assumed to be staged in the current directory)
# and copy the binaries into $K8S_DIR/bin.
unpackage() {
  echo "正在解压etcd..."
  tar -zxvf "etcd-v$ETCD_VERSION-linux-amd64.tar.gz" -C /tmp/ || return 1
  cp "/tmp/etcd-v$ETCD_VERSION-linux-amd64/"etcd* "$K8S_DIR/bin/"

  echo "正在解压Kubernetes..."
  tar -zxvf kubernetes-server-linux-amd64.tar.gz -C /tmp/ || return 1
  cp /tmp/kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kubelet,kube-proxy} "$K8S_DIR/bin/"

  # BUG FIX: the original used single quotes, so the literal text
  # 'export PATH=$PATH:$K8S_DIR/bin' was written to /etc/profile; login
  # shells never define K8S_DIR, leaving a broken PATH entry. Expand
  # K8S_DIR now, keep \$PATH escaped so it expands when the profile is
  # sourced, and skip the append if the line is already present.
  grep -qxF "export PATH=\$PATH:$K8S_DIR/bin" /etc/profile 2>/dev/null ||
    echo "export PATH=\$PATH:$K8S_DIR/bin" >> /etc/profile
  # Only affects this script's own shell; user shells pick it up on login.
  source /etc/profile
}

# 2. Generate the cluster CA certificate
# Purpose: signs every other Kubernetes component certificate.
generate_ca_cert() {
    echo "正在生成CA证书..."
    # SC2164: abort instead of writing certificates into the wrong directory
    # when the cd fails.
    cd "$CERT_DIR" || return 1

    # CA signing policy: a single "kubernetes" profile, valid one year
    # (8760h), usable for both server and client authentication.
    cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF

    # CA certificate signing request.
    cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "CA"
    }
  ]
}
EOF

    # Produce the self-signed CA certificate and key (ca.pem / ca-key.pem).
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
}

# 3. Generate the etcd CA certificate
# Purpose: dedicated CA for the etcd cluster.
generate_etcd_ca_cert() {
    echo "正在生成etcd CA证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # etcd CA certificate signing request.
    cat > etcd-ca-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "CA"
    }
  ]
}
EOF

    # Produce the self-signed etcd CA certificate and private key.
    cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca -
}

# 4. Generate the etcd server certificate
# Purpose: authenticates the etcd server to its clients.
generate_etcd_server_cert() {
    echo "正在生成etcd服务器证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # Server CSR; the SAN list covers loopback plus the master address so
    # clients may connect either way.
    cat > etcd-server-csr.json <<EOF
{
  "CN": "etcd-server",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Server"
    }
  ],
  "hosts": [
    "127.0.0.1",
    "::1",
    "$MASTER_IP",
    "localhost"
  ]
}
EOF

    # Sign with the etcd CA using the shared "kubernetes" profile.
    cfssl gencert \
      -ca=etcd-ca.pem \
      -ca-key=etcd-ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      etcd-server-csr.json | cfssljson -bare etcd-server
}

# 5. Generate the etcd peer certificate
# Purpose: mutual authentication between etcd cluster members.
generate_etcd_peer_cert() {
    echo "正在生成etcd节点间通信证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # Peer CSR; SANs cover loopback plus the master address.
    cat > etcd-peer-csr.json <<EOF
{
  "CN": "etcd-peer",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Peer"
    }
  ],
  "hosts": [
    "127.0.0.1",
    "::1",
    "$MASTER_IP",
    "localhost"
  ]
}
EOF

    # Sign with the etcd CA using the shared "kubernetes" profile.
    cfssl gencert \
      -ca=etcd-ca.pem \
      -ca-key=etcd-ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      etcd-peer-csr.json | cfssljson -bare etcd-peer
}

# 6. Generate the etcd client certificate
# Purpose: used by kube-apiserver to authenticate against etcd.
generate_etcd_client_cert() {
    echo "正在生成etcd客户端证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # Client CSR — no "hosts" entry needed for a pure client certificate.
    cat > etcd-client-csr.json <<EOF
{
  "CN": "etcd-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Client"
    }
  ]
}
EOF

    # Sign with the etcd CA using the shared "kubernetes" profile.
    cfssl gencert \
      -ca=etcd-ca.pem \
      -ca-key=etcd-ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      etcd-client-csr.json | cfssljson -bare etcd-client
}

# 7. Generate the API Server certificate
# Purpose: TLS identity of kube-apiserver.
generate_apiserver_cert() {
    echo "正在生成API Server证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # API server CSR. BUG FIX: the SAN list now also includes 10.96.0.1 —
    # the first address of the service-cluster-ip-range (10.96.0.0/12)
    # configured for kube-apiserver — otherwise in-cluster clients reaching
    # the "kubernetes" service IP fail TLS verification.
    cat > apiserver-csr.json <<EOF
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "APIServer"
    }
  ],
  "hosts": [
    "127.0.0.1",
    "::1",
    "$MASTER_IP",
    "10.96.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ]
}
EOF

    # Sign with the cluster CA using the "kubernetes" profile.
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      apiserver-csr.json | cfssljson -bare apiserver
}

# 8. Generate the ServiceAccount signing key pair
# Purpose: signs and verifies ServiceAccount tokens.
generate_service_account_keys() {
    echo "正在生成service-account密钥对..."
    # BUG FIX: unlike the other generators, this function never changed into
    # $CERT_DIR, so the key pair landed in whatever the current working
    # directory happened to be — while kube-apiserver is configured to read
    # $CERT_DIR/service-account{,-key}.pem.
    cd "$CERT_DIR" || return 1
    openssl genrsa -out service-account-key.pem 2048
    openssl rsa -in service-account-key.pem -pubout -out service-account.pem
}

# 9. Generate the Controller Manager certificate
# Purpose: client identity of kube-controller-manager; the CN/O values map
# to the built-in system:kube-controller-manager RBAC role.
generate_controller_manager_cert() {
    echo "正在生成Controller Manager证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # Controller Manager CSR.
    cat > controller-manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "ControllerManager"
    }
  ]
}
EOF

    # Sign with the cluster CA using the "kubernetes" profile.
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      controller-manager-csr.json | cfssljson -bare controller-manager
}

# 10. Generate the Scheduler certificate
# Purpose: client identity of kube-scheduler (system:kube-scheduler RBAC role).
generate_scheduler_cert() {
    echo "正在生成Scheduler证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # Scheduler CSR.
    cat > scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Scheduler"
    }
  ]
}
EOF

    # Sign with the cluster CA using the "kubernetes" profile.
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      scheduler-csr.json | cfssljson -bare scheduler
}

# 11. Generate the Admin certificate
# Purpose: cluster-admin user identity (O=system:masters grants full access).
generate_admin_cert() {
    echo "正在生成Admin证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # Admin CSR.
    cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Admin"
    }
  ]
}
EOF

    # Sign with the cluster CA using the "kubernetes" profile.
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      admin-csr.json | cfssljson -bare admin
}

# 12. Generate the Kubelet certificate
# Purpose: node identity for the kubelet (CN must be system:node:<hostname>
# to satisfy the Node authorizer).
generate_kubelet_cert() {
    echo "正在生成Kubelet证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # Split declaration and assignment so the command substitution's exit
    # status is not masked (SC2155).
    local node_name
    node_name=$(hostname)

    # Kubelet CSR; SANs cover the node name and its primary IP.
    cat > kubelet-csr.json <<EOF
{
  "CN": "system:node:$node_name",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:nodes",
      "OU": "Kubelet"
    }
  ],
  "hosts": [
    "$node_name",
    "$HOST_IP"
  ]
}
EOF

    # Sign with the cluster CA using the "kubernetes" profile.
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      kubelet-csr.json | cfssljson -bare kubelet
}

# 13. Generate the Kube Proxy certificate
# Purpose: client identity of kube-proxy (system:node-proxier RBAC role).
generate_kube_proxy_cert() {
    echo "正在生成Kube Proxy证书..."
    # SC2164: abort instead of writing files into the wrong directory.
    cd "$CERT_DIR" || return 1

    # Kube Proxy CSR.
    cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:node-proxier",
      "OU": "KubeProxy"
    }
  ]
}
EOF

    # Sign with the cluster CA using the "kubernetes" profile.
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      kube-proxy-csr.json | cfssljson -bare kube-proxy
}

# 14. Generate the kubeconfig files
# Purpose: tells each component how to connect to the Kubernetes API server.
generate_kubeconfig() {
    echo "正在生成Kubeconfig文件..."
    # SC2164: abort instead of writing kubeconfigs into the wrong directory.
    cd "$K8S_DIR/config" || return 1

    # Helper: build one kubeconfig (cluster + credentials + context + default
    # context) for a given user. The original repeated this 4-command block
    # five times verbatim.
    #   $1 = user name for the credentials/context
    #   $2 = certificate file prefix under $CERT_DIR ($2.pem / $2-key.pem)
    #   $3 = output kubeconfig file name
    _write_kubeconfig() {
        local user=$1 cert_prefix=$2 kubeconfig=$3
        kubectl config set-cluster kubernetes \
          --certificate-authority="$CERT_DIR/ca.pem" \
          --server="https://$MASTER_IP:6443" \
          --embed-certs=true \
          --kubeconfig="$kubeconfig"
        kubectl config set-credentials "$user" \
          --client-certificate="$CERT_DIR/$cert_prefix.pem" \
          --client-key="$CERT_DIR/$cert_prefix-key.pem" \
          --embed-certs=true \
          --kubeconfig="$kubeconfig"
        kubectl config set-context default \
          --cluster=kubernetes \
          --user="$user" \
          --kubeconfig="$kubeconfig"
        kubectl config use-context default --kubeconfig="$kubeconfig"
    }

    # One kubeconfig per component, all pointing at the same API server.
    _write_kubeconfig "system:node:$(hostname)" kubelet kubelet.kubeconfig
    _write_kubeconfig system:kube-controller-manager controller-manager controller-manager.kubeconfig
    _write_kubeconfig system:kube-scheduler scheduler scheduler.kubeconfig
    _write_kubeconfig admin admin admin.kubeconfig
    _write_kubeconfig system:kube-proxy kube-proxy kube-proxy.kubeconfig
}

# 15. Install the CNI plugins
# Purpose: provides network connectivity for Pods. The plugin tarball must be
# staged in the current directory.
install_cni_plugins() {
    echo "正在安装CNI插件..."
    local cni_bin_dir="/opt/cni/bin"
    mkdir -p "$cni_bin_dir"
    tar -zxvf "cni-plugins-linux-amd64-${CNI_VERSION}.tgz" -C "${cni_bin_dir}/"
}

# 16. Install etcd
# Purpose: serves as the storage backend for Kubernetes.
# NOTE(review): this function only writes the config and unit files — the
# etcd binary itself is copied into $K8S_DIR/bin by unpackage().
install_etcd() {
    echo "正在安装etcd..."
    
    # Create the etcd data directory.
    mkdir -p /var/lib/etcd
    
    # Write the etcd configuration: a single-member cluster bound to the
    # master IP, with TLS on both the client (2379) and peer (2380) ports
    # backed by the etcd CA generated earlier.
    cat > $K8S_DIR/config/etcd.config.yml <<EOF
name: etcd-server
data-dir: /var/lib/etcd
listen-client-urls: https://$MASTER_IP:2379,https://127.0.0.1:2379
advertise-client-urls: https://$MASTER_IP:2379
listen-peer-urls: https://$MASTER_IP:2380
initial-advertise-peer-urls: https://$MASTER_IP:2380
initial-cluster: etcd-server=https://$MASTER_IP:2380
initial-cluster-token: etcd-cluster-0
initial-cluster-state: new
client-transport-security:
  cert-file: $CERT_DIR/etcd-server.pem
  key-file: $CERT_DIR/etcd-server-key.pem
  client-cert-auth: true
  trusted-ca-file: $CERT_DIR/etcd-ca.pem
peer-transport-security:
  cert-file: $CERT_DIR/etcd-peer.pem
  key-file: $CERT_DIR/etcd-peer-key.pem
  client-cert-auth: true
  trusted-ca-file: $CERT_DIR/etcd-ca.pem
EOF

    # Write the systemd unit for etcd; the service is started later by
    # start_services().
    cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=etcd service
Documentation=https://github.com/coreos/etcd
After=network.target

[Service]
Type=notify
ExecStart=$K8S_DIR/bin/etcd --config-file=$K8S_DIR/config/etcd.config.yml
Restart=always
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
}

# 17. Install the Kubernetes components
# Purpose: writes configuration files and systemd units for the control-plane
# and node components (binaries were staged into $K8S_DIR/bin by unpackage()).
install_kubernetes_components() {
    echo "正在安装Kubernetes组件..."
    
    # kube-apiserver systemd unit: serves TLS with the apiserver cert, talks
    # to etcd with the etcd client cert, and signs ServiceAccount tokens with
    # the service-account key pair.
    cat > /etc/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=$K8S_DIR/bin/kube-apiserver \\
  --advertise-address=$MASTER_IP \\
  --bind-address=0.0.0.0 \\
  --secure-port=6443 \\
  --etcd-servers=https://$MASTER_IP:2379 \\
  --etcd-cafile=$CERT_DIR/etcd-ca.pem \\
  --etcd-certfile=$CERT_DIR/etcd-client.pem \\
  --etcd-keyfile=$CERT_DIR/etcd-client-key.pem \\
  --service-cluster-ip-range=10.96.0.0/12 \\
  --service-node-port-range=30000-32767 \\
  --client-ca-file=$CERT_DIR/ca.pem \\
  --tls-cert-file=$CERT_DIR/apiserver.pem \\
  --tls-private-key-file=$CERT_DIR/apiserver-key.pem \\
  --authorization-mode=Node,RBAC \\
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \\
  --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP \\
  --kubelet-client-certificate=$CERT_DIR/kubelet.pem \\
  --kubelet-client-key=$CERT_DIR/kubelet-key.pem \\
  --enable-aggregator-routing=true \\
  --allow-privileged=true \\
  --event-ttl=1h \\
  --kubelet-certificate-authority=$CERT_DIR/ca.pem \\
  --service-account-key-file=$CERT_DIR/service-account.pem \\
  --service-account-signing-key-file=$CERT_DIR/service-account-key.pem \\
  --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
  --enable-bootstrap-token-auth=true \\
  --v=2
Restart=always
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

    # kube-controller-manager configuration file.
    # NOTE(review): upstream documents kube-controller-manager configuration
    # via flags; a "controller-manager.config.k8s.io/v1" config file consumed
    # through --config is not a GA feature — verify this release actually
    # accepts it, otherwise fold these settings into flags in the unit below.
    cat > $K8S_DIR/config/kube-controller-manager.config.yml <<EOF
apiVersion: controller-manager.config.k8s.io/v1
kind: KubeControllerManagerConfiguration
address: 0.0.0.0
port: 10257
kubeconfig: $K8S_DIR/config/controller-manager.kubeconfig
clusterSigningCertFile: $CERT_DIR/ca.pem
clusterSigningKeyFile: $CERT_DIR/ca-key.pem
rootCAFile: $CERT_DIR/ca.pem
serviceAccountPrivateKeyFile: $CERT_DIR/service-account-key.pem
useServiceAccountCredentials: true
controllers:
- '*'
- '-cloud-provider'
leaderElection:
  leaderElect: true
  leaseDuration: 15s
  renewDeadline: 10s
  retryPeriod: 2s
nodeMonitorGracePeriod: 40s
podEvictionTimeout: 5m0s
terminatedPodGCThreshold: 12500
horizontalPodAutoscalerSyncPeriod: 15s
nodeStartupGracePeriod: 1m0s
nodeEvictionRate: 0.1
v: 2
EOF

    # kube-controller-manager systemd unit. Several settings are passed both
    # via --config and as flags; flags take precedence where duplicated.
    cat > /etc/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=$K8S_DIR/bin/kube-controller-manager \\
  --config=$K8S_DIR/config/kube-controller-manager.config.yml \\
  --authentication-kubeconfig=$K8S_DIR/config/controller-manager.kubeconfig \\
  --authorization-kubeconfig=$K8S_DIR/config/controller-manager.kubeconfig \\
  --client-ca-file=$CERT_DIR/ca.pem \\
  --cluster-signing-cert-file=$CERT_DIR/ca.pem \\
  --cluster-signing-key-file=$CERT_DIR/ca-key.pem \\
  --service-account-private-key-file=$CERT_DIR/service-account-key.pem \\
  --use-service-account-credentials=true \\
  --v=2 \\
  --bind-address=0.0.0.0 \\
  --allocate-node-cidrs=true \\
  --cluster-cidr=192.168.0.0/16 \\
  --cluster-name=kubernetes \\
  --leader-elect=true \\
  --root-ca-file=$CERT_DIR/ca.pem \\
  --service-cluster-ip-range=10.96.0.0/12 \\
  --kube-api-qps=50 \\
  --kube-api-burst=100
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

    # kube-scheduler configuration file (kubescheduler.config.k8s.io/v1).
    cat > $K8S_DIR/config/kube-scheduler.config.yml <<EOF
apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: $K8S_DIR/config/scheduler.kubeconfig
leaderElection:
  leaderElect: true
EOF

    # kube-scheduler systemd unit.
    cat > /etc/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=$K8S_DIR/bin/kube-scheduler \\
  --config=$K8S_DIR/config/kube-scheduler.config.yml \\
  --authentication-kubeconfig=$K8S_DIR/config/scheduler.kubeconfig \\
  --authorization-kubeconfig=$K8S_DIR/config/scheduler.kubeconfig \\
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

    # kubelet configuration file: webhook authn/authz against the cluster CA,
    # systemd cgroup driver (must match containerd's SystemdCgroup=true).
    cat > $K8S_DIR/config/kubelet.config.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 0
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: $CERT_DIR/ca.pem
authorization:
  mode: Webhook
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
resolvConf: /etc/resolv.conf
runtimeRequestTimeout: 10m0s
serializeImagePulls: false
cgroupDriver: systemd
hairpinMode: promiscuous-bridge
fileCheckFrequency: 20s
httpCheckFrequency: 20s
staticPodPath: $K8S_DIR/manifests
rotateCertificates: true
serverTLSBootstrap: true
EOF

    # kubelet systemd unit.
    # NOTE(review): --container-runtime and --image-pull-progress-deadline are
    # deprecated dockershim-era flags that have been removed in recent kubelet
    # releases — verify kubelet 1.26 still accepts them before shipping, or
    # the service will fail to start with an unknown-flag error.
    cat > /etc/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
Requires=containerd.service

[Service]
ExecStart=$K8S_DIR/bin/kubelet \\
  --config=$K8S_DIR/config/kubelet.config.yml \\
  --kubeconfig=$K8S_DIR/config/kubelet.kubeconfig \\
  --container-runtime=remote \\
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \\
  --image-pull-progress-deadline=2m \\
  --v=2
Restart=always
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

    # kube-proxy configuration file: IPVS mode with round-robin scheduling.
    cat > $K8S_DIR/config/kube-proxy.config.yml <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clientConnection:
  kubeconfig: $K8S_DIR/config/kube-proxy.kubeconfig
mode: "ipvs"
ipvs:
  scheduler: "rr"
  strictARP: true
clusterCIDR: 10.96.0.0/12
healthzBindAddress: 0.0.0.0:10256
metricsBindAddress: 0.0.0.0:10249
conntrack:
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
EOF

    # kube-proxy systemd unit.
    cat > /etc/systemd/system/kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=$K8S_DIR/bin/kube-proxy \\
  --config=$K8S_DIR/config/kube-proxy.config.yml \\
  --v=2
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
}

# 18. Configure the container runtime (containerd)
# Purpose: provides the container runtime environment for Kubernetes.
# The containerd tarball and a staged runc.amd64 binary must be in the
# current directory.
configure_container_runtime() {
    echo "正在配置容器运行时(containerd)..."
    
    # Install containerd (the tarball ships a bin/ subtree that lands in
    # /usr/local/bin). Abort on a missing/corrupt archive.
    tar -zxvf "containerd-$CONTAINERD_VERSION-linux-amd64.tar.gz" -C /usr/local || return 1
    # BUG FIX: `cp` keeps whatever mode the staged runc.amd64 has, and a
    # freshly downloaded release artifact is typically not executable.
    # `install -m 755` copies and sets the execute bit in one step.
    install -m 755 runc.amd64 /usr/local/bin/runc
    
    # Generate the default containerd configuration.
    mkdir -p /etc/containerd
    containerd config default > /etc/containerd/config.toml
    
    # Switch to the systemd cgroup driver — must match the kubelet's
    # `cgroupDriver: systemd` setting.
    sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
    
    # systemd unit for containerd (mirrors the upstream reference unit).
    cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
# ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF
}

# 19. Configure the system environment
# Purpose: prepares a Debian 11 host for running Kubernetes.
configure_system() {
    echo "正在配置系统环境..."
    
    # Disable swap (the kubelet refuses to run with swap enabled by default).
    swapoff -a
    sed -i '/swap/d' /etc/fstab
    
    # Kernel modules required for container networking.
    cat > /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF

    modprobe overlay
    modprobe br_netfilter
    
    # Bridge traffic must traverse iptables, and IP forwarding must be on.
    cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

    sysctl --system
    
    # Firewall rules. Guarded with command -v so hosts without ufw installed
    # do not emit a cascade of errors (the original called ufw blindly).
    # NOTE(review): 10251/10252 are the legacy insecure scheduler/controller
    # ports; current releases use 10259/10257 — confirm which are needed.
    if command -v ufw >/dev/null 2>&1; then
        ufw allow 6443/tcp
        ufw allow 2379-2380/tcp
        ufw allow 10250/tcp
        ufw allow 10251/tcp
        ufw allow 10252/tcp
        ufw allow 10255/tcp
        ufw allow 80/tcp
        ufw allow 443/tcp
        ufw reload
    fi
    
    # kubectl shell completion — append only once so that re-running the
    # installer does not duplicate the lines in ~/.bashrc.
    if ! grep -qF 'kubectl completion bash' ~/.bashrc 2>/dev/null; then
        echo "source <(kubectl completion bash)" >> ~/.bashrc
        echo "alias k=kubectl" >> ~/.bashrc
        echo "complete -o default -F __start_kubectl k" >> ~/.bashrc
    fi
}

# 20. Start the services
# Purpose: enables and starts every Kubernetes-related systemd unit, runtime
# and datastore first, then the control-plane and node components.
start_services() {
    echo "正在启动服务..."

    # Pick up the freshly written unit files.
    systemctl daemon-reload

    # The runtime and the datastore come up before anything that depends on
    # them; the loop preserves the original one-by-one ordering.
    local unit
    for unit in containerd etcd; do
        systemctl enable "$unit"
        systemctl start "$unit"
    done

    # Control-plane and node components are enabled/started as a batch.
    systemctl enable kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
    systemctl start kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy

    echo "检查服务状态..."
    systemctl status containerd etcd kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy --no-pager
}

# 21. Install the network plugin (Calico)
# Purpose: prints the manual follow-up step; the Calico manifest itself must
# be staged in the offline environment ahead of time.
install_network_plugin() {
    printf '%s\n' \
        "请在安装完成后手动应用网络插件:" \
        "kubectl apply -f calico.yaml" \
        "提示: Calico配置文件需要提前下载到离线环境"
}

# Entry point: runs the whole installation in four phases.
main() {
    echo "开始离线安装Kubernetes $K8S_VERSION..."

    # Root is required for package extraction, systemd units and sysctl.
    if [ "$(id -u)" -ne 0 ]; then
        echo "请使用root权限运行此脚本"
        exit 1
    fi

    # Phase 1: host preparation and tooling.
    configure_system
    install_cfssl
    unpackage
    configure_container_runtime

    # Phase 2: PKI material for every component, in dependency order
    # (CAs first, then the certificates they sign).
    local step
    for step in \
        generate_ca_cert \
        generate_etcd_ca_cert \
        generate_etcd_server_cert \
        generate_etcd_peer_cert \
        generate_etcd_client_cert \
        generate_apiserver_cert \
        generate_service_account_keys \
        generate_controller_manager_cert \
        generate_scheduler_cert \
        generate_admin_cert \
        generate_kubelet_cert \
        generate_kube_proxy_cert; do
        "$step"
    done

    # Phase 3: kubeconfigs, plugins, configuration and unit files.
    generate_kubeconfig
    install_cni_plugins
    install_etcd
    install_kubernetes_components

    # Phase 4: bring everything up.
    start_services
    install_network_plugin

    echo "Kubernetes $K8S_VERSION 离线安装完成!"
    echo "请执行以下命令验证安装:"
    echo "kubectl get nodes"
    echo "kubectl get pods -A"
}

# Run the installer.
main "$@"
