#!/bin/bash
# Offline KubeSphere v3.0.0 deployment driver: verifies the local image
# registry, provisions Vagrant VMs, and installs the cluster with kubekey.
BaseDir=$(cd "$(dirname "$0")"; pwd)
# Quote the expansion and fail fast if the directory change does not work
# (errexit is not enabled yet at this point).
cd "${BaseDir}" || exit 1

set -x
set -o errexit

# Make sure the local docker registry container is running; with errexit
# enabled, the script aborts here if grep finds no match.
docker ps | grep kubekey-registry

# Stage the cluster config and the SSH private key into the unpacked
# offline-installer directory, then bring up the Vagrant VMs.
installer_dir="../kubesphere-all-v3.0.0-offline-linux-amd64"
for f in kubesphere3_cfg.yaml private_key; do
  cp -f "$f" "${installer_dir}/."
done

vagrant up

# Run the offline installer from the unpacked distribution directory.
pushd ../kubesphere-all-v3.0.0-offline-linux-amd64

# Initialize OS dependencies on all nodes and start the local image registry.
sudo ./kk init os -f kubesphere3_cfg.yaml -s ./dependencies/ --add-images-repo

# Make the registry hostname resolve locally. Appending to /etc/hosts needs
# root, so route the write through `sudo tee -a` (a plain `>>` redirection
# would run with the invoking user's privileges and fail for non-root).
grep -qis 'dockerhub.kubekey.local' /etc/hosts || \
  echo "192.168.99.1 dockerhub.kubekey.local" | sudo tee -a /etc/hosts
# Sanity-check that the registry answers over TLS and the host is reachable.
curl -k https://dockerhub.kubekey.local/v2/
ping -c 1 dockerhub.kubekey.local

# Deploy the cluster.
sudo ./kk create cluster -f kubesphere3_cfg.yaml

# Console access details.
echo "Console: http://192.168.99.10:30880
Account: admin
Password: P@88w0rd
"
popd

# Install kubectl shell completion inside each VM; output is discarded.
for vm in kubesphere-dev kubesphere-node1; do
  vagrant ssh "$vm" -c "/home/vagrant/cmd-completion.sh &> /dev/null"
done

:<<EOF
错误诊断
$ kubectl get po -A
NAMESPACE           NAME                                          READY   STATUS             RESTARTS   AGE
kube-system         calico-kube-controllers-677cbc8557-5zvxx      1/1     Running            0          14m
kube-system         calico-node-997zb                             1/1     Running            0          14m
kube-system         calico-node-kjgx9                             0/1     CrashLoopBackOff   7          13m
kube-system         coredns-79878cb9c9-pb648                      1/1     Running            0          14m
kube-system         coredns-79878cb9c9-stwkv                      1/1     Running            0          14m
kube-system         kube-apiserver-kubesphere-dev                 1/1     Running            0          14m
kube-system         kube-controller-manager-kubesphere-dev        1/1     Running            0          14m
kube-system         kube-proxy-bzj9j                              1/1     Running            0          13m
kube-system         kube-proxy-jrbt4                              1/1     Running            0          14m
kube-system         kube-scheduler-kubesphere-dev                 1/1     Running            0          14m
kube-system         metrics-server-98546f9bd-w922k                0/1     CrashLoopBackOff   6          11m
kube-system         nodelocaldns-6hwmj                            1/1     Running            0          14m
kube-system         nodelocaldns-lw5wc                            1/1     Running            0          13m
kube-system         openebs-localpv-provisioner-5cd9579c5-dqr4f   1/1     Running            0          13m
kube-system         openebs-ndm-l7zzc                             1/1     Running            0          13m
kube-system         openebs-ndm-operator-6656f85b86-hzqls         1/1     Running            1          13m
kube-system         openebs-ndm-r6ckp                             0/1     CrashLoopBackOff   7          13m
kubesphere-system   ks-installer-78745765f5-cppch                 1/1     Running            0          13m

kubectl logs -n kube-system calico-node-kjgx9
2020-11-03 00:21:38.147 [INFO][8] startup/startup.go 374: Hit error connecting to datastore - retry error=Get https://10.233.0.1:443/api/v1/nodes/foo: dial tcp 10.233.0.1:443: connect: connection refused

kubectl logs -n kube-system metrics-server-98546f9bd-w922k 
Get https://10.233.0.1:443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication: dial tcp 10.233.0.1:443: connect: connection refused

kubectl logs -n kube-system openebs-ndm-r6ckp 
Get https://10.233.0.1:443/api?timeout=32s: dial tcp 10.233.0.1:443: connect: connection refused

kubectl describe pods -n kube-system openebs-ndm-r6ckp 
  node-disk-manager:
    Container ID:  docker://b0c59109a566e4af5aec124a1b42dd7bbc24104fadcba12af5e705071875d279
    Image:         dockerhub.kubekey.local/kubesphere/node-disk-manager:0.5.0
    Image ID:      docker-pullable://dockerhub.kubekey.local/kubesphere/node-disk-manager@sha256:3359bf4ab78ca961b321d291158b98d384021c34fdc9c7d62d82c0c4ea5b3bea
    Port:          <none>
    Host Port:     <none>
    Args:
      -v=4
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Exit Code:    1
      Started:      Tue, 03 Nov 2020 00:37:38 +0000
      Finished:     Tue, 03 Nov 2020 00:37:38 +0000
    Ready:          False
    Restart Count:  10
    Liveness:       exec [pgrep ndm] delay=30s timeout=1s period=60s #success=1 #failure=3
    Environment:
      NAMESPACE:          kube-system (v1:metadata.namespace)
      NODE_NAME:           (v1:spec.nodeName)
      SPARSE_FILE_DIR:    /var/openebs/sparse
      SPARSE_FILE_SIZE:   10737418240
      SPARSE_FILE_COUNT:  0
    Mounts:
      /host/node-disk-manager.config from config (ro,path="node-disk-manager.config")
      /host/proc from procmount (ro)
      /run/udev from udev (rw)
      /var/openebs/ndm from basepath (rw)
      /var/openebs/sparse from sparsepath (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from openebs-maya-operator-token-ngcnh (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      openebs-ndm-config
    Optional:  false
  udev:
    Type:          HostPath (bare host directory volume)
    Path:          /run/udev
    HostPathType:  Directory
  procmount:
    Type:          HostPath (bare host directory volume)
    Path:          /proc
    HostPathType:  Directory
  basepath:
    Type:          HostPath (bare host directory volume)
    Path:          /var/openebs/ndm
    HostPathType:  DirectoryOrCreate
  sparsepath:
    Type:          HostPath (bare host directory volume)
    Path:          /var/openebs/sparse
    HostPathType:  
  openebs-maya-operator-token-ngcnh:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  openebs-maya-operator-token-ngcnh
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/disk-pressure:NoSchedule
                 node.kubernetes.io/memory-pressure:NoSchedule
                 node.kubernetes.io/network-unavailable:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute
                 node.kubernetes.io/pid-pressure:NoSchedule
                 node.kubernetes.io/unreachable:NoExecute
                 node.kubernetes.io/unschedulable:NoSchedule
Events:
  Type     Reason     Age                   From                       Message
  ----     ------     ----                  ----                       -------
  Normal   Scheduled  29m                   default-scheduler          Successfully assigned kube-system/openebs-ndm-r6ckp to kubesphere-node1
  Normal   Pulled     28m (x4 over 28m)     kubelet, kubesphere-node1  Successfully pulled image "dockerhub.kubekey.local/kubesphere/node-disk-manager:0.5.0"
  Normal   Created    28m (x4 over 28m)     kubelet, kubesphere-node1  Created container node-disk-manager
  Normal   Started    28m (x4 over 28m)     kubelet, kubesphere-node1  Started container node-disk-manager
  Normal   Pulling    27m (x5 over 29m)     kubelet, kubesphere-node1  Pulling image "dockerhub.kubekey.local/kubesphere/node-disk-manager:0.5.0"
  Warning  BackOff    4m2s (x121 over 28m)  kubelet, kubesphere-node1  Back-off restarting failed container

kubectl exec -n kube-system -it openebs-ndm-r6ckp sh

$ sudo docker ps -a|grep openebs-ndm
e4ed5470d4a2        dockerhub.kubekey.local/kubesphere/node-disk-manager   "/usr/local/bin/entr…"   About a minute ago   Exited (1) About a minute ago                         k8s_node-disk-manager_openebs-ndm-r6ckp_kube-system_052d7e42-8d8b-4eb2-b8d3-66e897dc7109_11
2410fa1bba4d        dockerhub.kubekey.local/kubesphere/pause:3.1           "/pause"                 32 minutes ago       Up 32 minutes                                         k8s_POD_openebs-ndm-r6ckp_kube-system_052d7e42-8d8b-4eb2-b8d3-66e897dc7109_0
[vagrant@kubesphere-node1 ~]$ sudo docker log
login   logout  logs    
[vagrant@kubesphere-node1 ~]$ sudo docker logs k8s_node-disk-manager_openebs-ndm-r6ckp_kube-system_052d7e42-8d8b-4eb2-b8d3-66e897dc7109_11
[entrypoint.sh] launching ndm process.
I1103 00:42:48.812592       6 commands.go:64] Starting Node Device Manager Daemon...
I1103 00:42:48.812684       6 commands.go:65] Version Tag : v0.5.0
I1103 00:42:48.812690       6 commands.go:66] GitCommit : 63ca87a283e5feea87821c4f96c8b38c038343e3
Get https://10.233.0.1:443/api?timeout=32s: dial tcp 10.233.0.1:443: connect: connection refused

EOF

# Extract the admin client certificate and key from the kubeconfig and
# bundle them into a browser-importable PKCS#12 file (empty export password).
sudo awk '/client-certificate-data/ {print $2; exit}' /etc/kubernetes/admin.conf | base64 -d > kubecfg.crt
sudo awk '/client-key-data/ {print $2; exit}' /etc/kubernetes/admin.conf | base64 -d > kubecfg.key
openssl pkcs12 -export -clcerts -inkey kubecfg.key -in kubecfg.crt -out kubecfg.p12 -passout pass:"" -name "kubernetes-client"
# curl can reach the apiserver directly with the extracted cert/key pair.
curl -k --cert ./kubecfg.crt --key ./kubecfg.key https://lb.kubesphere.local:6443/api/v1/nodes

#Alternatively, the pfx/p12 bundle can be imported into a browser, or used from another machine as shown below
:<<EOF
node机器上的诊断
scp -i private_key -o StrictHostKeyChecking=no kubecfg.p12 vagrant@192.168.99.11:/home/vagrant/kubecfg.p12

openssl pkcs12 -clcerts -nokeys -in kubecfg.p12 -password pass:"" -out client.pem
openssl pkcs12 -nocerts -nodes -in kubecfg.p12 -password pass:"" -out key.pem
curl -k --cert ./client.pem --key ./key.pem https://lb.kubesphere.local:6443/api/v1/nodes
EOF
