
# Cluster deployment requires passwordless SSH to every node beforehand.
# Single node: sh start.sh "192.168.1.1"
# Cluster:     sh start.sh "192.168.1.1 192.168.1.2 192.168.1.3"
# NOTE(review): consider adding '#!/bin/bash' and 'set -e' as the first lines.

# $1 is the space-separated node IP list; init_node.sh prepares each node.
bash init_node.sh "$1"

# Install the kubeconfig for root and as the cluster admin conf.
mkdir -p ~/.kube && cp config ~/.kube/config && cp ~/.kube/config /etc/kubernetes/admin.conf
mkdir -p kubeconfig && echo "" > kubeconfig/dev-kubeconfig
# Install kubectl v1.24.0 into /usr/bin and /usr/local/bin.
curl -LO https://dl.k8s.io/release/v1.24.0/bin/linux/amd64/kubectl && chmod +x kubectl  && cp kubectl /usr/bin/ && mv kubectl /usr/local/bin/
# Label the first node with every platform role.
node=`kubectl  get node -o wide |grep -v NAME |awk '{print $1}'| head -n 1`
# FIX: a space was missing before --overwrite ("harbor=true--overwrite"), so the
# flag was swallowed into the label value and the command failed on re-runs.
kubectl label node $node train=true cpu=true notebook=true service=true org=public istio=true kubeflow=true kubeflow-dashboard=true mysql=true redis=true monitoring=true logging=true harbor=true --overwrite

# Create the platform namespaces (see create_ns_secret.sh)
sh create_ns_secret.sh
# Deploy the kubernetes dashboard
kubectl apply -f dashboard/v2.2.0-cluster.yaml
# Deploy mysql
kubectl create -f mysql/pv-pvc-hostpath.yaml
kubectl create -f mysql/service.yaml
kubectl create -f mysql/configmap-mysql.yaml
kubectl create -f mysql/deploy.yaml
# Deploy redis
kubectl create -f redis/pv-hostpath.yaml
kubectl create -f redis/configmap.yaml
kubectl create -f redis/service.yaml
# If you need a PV to persist the redis queue data, edit master.yaml
kubectl create -f redis/master.yaml
# Deploy kube-batch (currently disabled)
#kubectl create -f kube-batch/deploy.yaml

# Deploy prometheus (operator, alertmanager, exporters, grafana, adapter)
cd prometheus
# Delete/re-apply the operator CRD so a re-run starts from a clean definition
# (the delete fails harmlessly on a first install).
kubectl delete -f ./operator/operator-crd.yml
sleep 5
kubectl apply -f ./operator/operator-crd.yml
kubectl apply -f ./operator/operator-rbac.yml
# Wait for the CRD to be registered before creating resources that use it.
kubectl wait crd/podmonitors.monitoring.coreos.com --for condition=established --timeout=60s
kubectl apply -f ./operator/operator-dp.yml
kubectl apply -f ./alertmanater/alertmanager-main-sa.yml
kubectl apply -f ./alertmanater/alertmanager-main-secret.yml
kubectl apply -f ./alertmanater/alertmanager-main-svc.yml
kubectl apply -f ./alertmanater/alertmanager-main.yml
kubectl apply -f ./node-exporter/node-exporter-sa.yml
kubectl apply -f ./node-exporter/node-exporter-rbac.yml
kubectl apply -f ./node-exporter/node-exporter-svc.yml
kubectl apply -f ./node-exporter/node-exporter-ds.yml
kubectl apply -f ./kube-state-metrics/kube-state-metrics-sa.yml
kubectl apply -f ./kube-state-metrics/kube-state-metrics-rbac.yml
kubectl apply -f ./kube-state-metrics/kube-state-metrics-svc.yml
kubectl apply -f ./kube-state-metrics/kube-state-metrics-dp.yml
kubectl apply -f ./grafana/pv-pvc-hostpath.yml
kubectl apply -f ./grafana/grafana-sa.yml
kubectl apply -f ./grafana/grafana-source.yml
kubectl apply -f ./grafana/grafana-datasources.yml
kubectl apply -f ./grafana/grafana-admin-secret.yml
kubectl apply -f ./grafana/grafana-svc.yml
# Recreate the grafana configmaps from local files (delete first because
# "create configmap --from-file" fails if the map already exists).
kubectl delete configmap grafana-config all-grafana-dashboards --namespace=monitoring
kubectl create configmap grafana-config --from-file=./grafana/grafana.ini --namespace=monitoring
kubectl create configmap all-grafana-dashboards --from-file=./grafana/dashboard --namespace=monitoring
# Recreate the grafana deployment so it picks up the new configmaps.
kubectl delete -f ./grafana/grafana-dp.yml
sleep 5
kubectl apply -f ./grafana/grafana-dp.yml
kubectl apply -f ./service-discovery/kube-controller-manager-svc.yml
kubectl apply -f ./service-discovery/kube-scheduler-svc.yml
kubectl apply -f ./prometheus/prometheus-secret.yml
kubectl apply -f ./prometheus/prometheus-rules.yml
kubectl apply -f ./prometheus/prometheus-rbac.yml
kubectl apply -f ./prometheus/prometheus-svc.yml
# The Prometheus CRD must exist before the prometheus-main resource.
kubectl wait crd/prometheuses.monitoring.coreos.com --for condition=established --timeout=60s
kubectl delete -f ./prometheus/prometheus-main.yml
sleep 5
kubectl apply -f ./prometheus/pv-pvc-hostpath.yaml
kubectl apply -f ./prometheus/prometheus-main.yml
kubectl apply -f ./servicemonitor/alertmanager-sm.yml
kubectl apply -f ./servicemonitor/coredns-sm.yml
kubectl apply -f ./servicemonitor/kube-apiserver-sm.yml
kubectl apply -f ./servicemonitor/kube-controller-manager-sm.yml
kubectl apply -f ./servicemonitor/kube-scheduler-sm.yml
kubectl apply -f ./servicemonitor/kubelet-sm.yml
kubectl apply -f ./servicemonitor/kubestate-metrics-sm.yml
kubectl apply -f ./servicemonitor/node-exporter-sm.yml
kubectl apply -f ./servicemonitor/prometheus-operator-sm.yml
kubectl apply -f ./servicemonitor/prometheus-sm.yml
kubectl apply -f ./servicemonitor/pushgateway-sm.yml
kubectl apply -f ./prometheus_adapter/metric_rule.yaml
kubectl apply -f ./prometheus_adapter/prometheus_adapter.yaml
cd ../


# Deploy GPU monitoring (nvidia device plugin + DCGM exporter)
kubectl apply -f gpu/nvidia-device-plugin.yml
kubectl apply -f gpu/dcgm-exporter.yaml
kubectl apply -f gpu/dcgm-exporter-sm.yaml

# Deploy frameworkcontroller, used by NNI hyper-parameter search
kubectl create serviceaccount frameworkcontroller --namespace kubeflow
kubectl create clusterrolebinding frameworkcontroller-kubeflow --clusterrole=cluster-admin --user=system:serviceaccount:kubeflow:frameworkcontroller
kubectl create -f frameworkcontroller/frameworkcontroller-with-default-config.yaml
sleep 5
kubectl wait crd/frameworks.frameworkcontroller.microsoft.com --for condition=established --timeout=60s

# frameworkbarrier service accounts + read-only RBAC in each job namespace
kubectl create serviceaccount frameworkbarrier --namespace pipeline
kubectl create serviceaccount frameworkbarrier --namespace automl
kubectl create serviceaccount frameworkbarrier --namespace kubeflow
kubectl create clusterrole frameworkbarrier --verb=get,list,watch --resource=frameworks
kubectl create clusterrolebinding frameworkbarrier-pipeline --clusterrole=frameworkbarrier  --user=system:serviceaccount:pipeline:frameworkbarrier
kubectl create clusterrolebinding frameworkbarrier-automl --clusterrole=frameworkbarrier  --user=system:serviceaccount:automl:frameworkbarrier
kubectl create clusterrolebinding frameworkbarrier-kubeflow --clusterrole=frameworkbarrier  --user=system:serviceaccount:kubeflow:frameworkbarrier

# Deploy volcano (batch scheduler); delete the previous install and its
# admission secret first so a re-run starts clean.
kubectl delete -f volcano/volcano-development.yaml
kubectl delete secret volcano-admission-secret -n kubeflow
kubectl apply -f volcano/volcano-development.yaml
kubectl wait crd/jobs.batch.volcano.sh --for condition=established --timeout=60s

# Deploy istio. Column 5 of "kubectl get node" is VERSION (e.g. v1.24.0);
# the awk pipeline extracts the minor version: k8s 1.21+ gets istio 1.15,
# older clusters get the legacy CRD + install manifests.
if [ `kubectl get node|awk '{print $5}'|grep -v VER|awk -F "." '{print $2}'|head -n 1` -gt 20 ];then
  kubectl apply -f istio/install-1.15.0.yaml
else
  kubectl apply -f istio/install-crd.yaml
  kubectl wait crd/envoyfilters.networking.istio.io --for condition=established --timeout=60s
  kubectl apply -f istio/install.yaml
fi

# k8s 1.21+ (manual alternative, kept for reference)
# kubectl delete -f istio/install.yaml
# kubectl apply -f istio/install-1.15.0.yaml

# Deploy kfp (kubeflow pipelines)
kubectl apply -f kubeflow/secret.yaml
kubectl apply -f kubeflow/sa-rbac.yaml
kubectl create -f kubeflow/pipeline/minio-pv-hostpath.yaml
kubectl apply -f kubeflow/pipeline/minio-artifact-secret.yaml
kubectl apply -f kubeflow/pipeline/pipeline-runner-rolebinding.yaml

cd kubeflow/pipeline/1.6.0/kustomize/

# "kubectl apply -k" replaces the explicit kustomize build | apply pipeline.
#kustomize build cluster-scoped-resources/ | kubectl apply -f -
kubectl apply -k cluster-scoped-resources
kubectl wait crd/applications.app.k8s.io --for condition=established --timeout=60s
#kustomize build env/platform-agnostic/  | kubectl apply -f -
kubectl apply -k env/platform-agnostic
cd ../../../../
kubectl  apply -f kubeflow/pipeline-runner-rbac.yaml

# Deploy trainjob operators: tfjob/pytorchjob/mpijob/mxnetjob/xgboostjobs
kubectl apply -k kubeflow/train-operator/manifests/overlays/standalone
# Deploy sparkjob operator
kubectl apply -f spark/install.yaml
# Deploy paddlejob operator
kubectl apply -f paddle/crd.yaml
kubectl apply -f paddle/operator.yaml

# Deploy the management platform.
# Recreate the kubeconfig configmap in each namespace the platform uses
# (delete first because "create --from-file" fails on an existing map).
kubectl delete configmap kubernetes-config -n infra
kubectl create configmap kubernetes-config --from-file=kubeconfig -n infra

kubectl delete configmap kubernetes-config -n pipeline
kubectl create configmap kubernetes-config --from-file=kubeconfig -n pipeline

kubectl delete configmap kubernetes-config -n automl
kubectl create configmap kubernetes-config --from-file=kubeconfig -n automl

# Per-namespace PV/PVC pairs
kubectl create -f pv-pvc-infra.yaml
kubectl create -f pv-pvc-jupyter.yaml
kubectl create -f pv-pvc-automl.yaml
kubectl create -f pv-pvc-pipeline.yaml
kubectl create -f pv-pvc-service.yaml

# Recreate the platform (cube) from its kustomize overlays.
kubectl delete -k cube/overlays
kubectl apply -k cube/overlays

# The istio CRDs must exist before applying gateway/virtualservice below.
kubectl wait crd/virtualservices.networking.istio.io --for condition=established --timeout=60s
kubectl wait crd/gateways.networking.istio.io --for condition=established --timeout=60s

kubectl apply -f gateway.yaml
kubectl apply -f sa-rbac.yaml
kubectl apply -f virtual.yaml

# Label each node with a distinct pvrole (node1, node2, ...) so hostpath PVs
# can be pinned to specific nodes.
if [ `kubectl get node|grep -v NAME|wc -l` -gt 2 ];then
  # FIX: node_idx was (re)initialized inside the loop, so every node was
  # labelled pvrole=node1; initialize the counter once before the loop.
  node_idx=1
  for node_name in `kubectl get node|awk '{print $1}'|grep -v NAME`;do
    kubectl label node ${node_name} pvrole=node${node_idx}
    node_idx=$((node_idx+1))
  done
else
  # Single-node: $node holds the first node name captured at the top of the script.
  kubectl label node $node pvrole=node1
fi



# Deploy EFK. The default es is single-node, which works for both single-node
# and cluster deployments; scale it to 3 replicas for a real cluster.
kubectl create ns logging
kubectl apply -f efk/pv/
kubectl apply -f efk/

# Deploy the kafka (strimzi) operator — cluster mode only (>2 nodes).
if [ `kubectl get node|grep -v NAME|wc -l` -gt 2 ];then
  # FIX: three of these commands were misspelled "kubeclt", so the storage
  # classes, external service and kafka cluster were never applied.
  kubectl apply -f kafka/cluster/storage-class-zk.yaml -f kafka/cluster/storage-class-kafka.yaml
  kubectl apply -f kafka/cluster/pv-zk1.yaml -f kafka/cluster/pv-zk2.yaml -f kafka/cluster/pv-zk3.yaml
  kubectl apply -f kafka/cluster/pv-kafka1.yaml -f kafka/cluster/pv-kafka2.yaml -f kafka/cluster/pv-kafka3.yaml

  kubectl apply -f kafka/cluster/kafka-operator.yaml
  # The Kafka CRD must be registered before the cluster resource is applied.
  kubectl wait crd/kafkas.kafka.strimzi.io --for condition=established --timeout=90s
  kubectl apply -f kafka/cluster/kafka-external-svc.yaml
  kubectl apply -f kafka/cluster/kafka-cluster.yaml
else
  echo "install single kafka"
fi



# Deploy the flink cluster
kubectl delete -k flink
kubectl apply -k flink

# Deploy alluxio + minio (cluster flavour when >2 nodes, otherwise single).
cp alluxio/helm /usr/local/bin && chmod o+x /usr/local/bin/helm
if [ `kubectl get node|grep -v NAME|wc -l` -gt 2 ];then
  kubectl apply -f minio/cluster/

  # Poll until enough minio pods exist, then install alluxio; give up at 300s.
  sleep_time=5
  while true;do
    if [ `kubectl get pod -n infra|grep minio|wc -l` -gt 4 ];then
      echo "alluxio cluster start install"
      # FIX: run helm in a subshell so the script's working directory is
      # unchanged. Previously a bare "cd alluxio/2.9" here was compensated by
      # an unconditional "cd ../.." after the loop, which moved the script two
      # directories above its root whenever the timeout path was taken (no cd
      # had happened) and broke every later relative path.
      (cd alluxio/2.9 && helm install alluxio ./alluxio -n infra)
      break
    fi
    sleep 5
    echo "wait minio is running wait_time:${sleep_time}"

    sleep_time=$((sleep_time+5))
    if [ $sleep_time -gt 300 ];then
      echo "minio cluster at 300s is not running,exit this install,please check minio config"
      break
    fi
  done
  # Automatically create an "alluxio" bucket inside minio (stores alluxio data).
  kubectl apply -f minio/minio-mc.yaml
  kubectl -n infra wait sts/minio --for=jsonpath='{.status.readyReplicas}'=6 --timeout=600s
  # Point the kubeflow pipeline dir at the alluxio fuse mount on every node.
  # FIX: the loop variable was named "node", clobbering the global $node
  # captured at the top of the script; renamed to node_ip.
  for node_ip in $1;do
    ssh root@${node_ip} rm -rvf /data/k8s/kubeflow/pipeline
    ssh root@${node_ip} mkdir -pv /data/k8s/kubeflow
    ssh root@${node_ip} ln -s /data/alluxio /data/k8s/kubeflow/pipeline
    #ssh root@${node_ip} docker restart kubelet
  done
  kubectl wait daemonset/alluxio-fuse --for=jsonpath='{.status.numberAvailable}'=3  --timeout=600s -n infra
  mkdir -pv  /data/k8s/kubeflow/pipeline/{workspace,archives}
else
    # Single-node minio and alluxio
    kubectl apply -f minio/single/
    kubectl -n infra wait sts/minio --for=jsonpath='{.status.readyReplicas}'=1 --timeout=600s
    # Automatically create an "alluxio" bucket inside minio (stores alluxio data).
    kubectl apply -f minio/minio-mc.yaml
    echo "alluxio single start install"
    # Subshell keeps the script's working directory unchanged (no cd ../.. needed).
    (cd alluxio/single && helm install alluxio ./alluxio -n infra)
    kubectl wait daemonset/alluxio-fuse --for=jsonpath='{.status.numberAvailable}'=1  --timeout=600s -n infra
    # Point the kubeflow pipeline dir at the alluxio fuse mount.
    mkdir -pv  /data/k8s/kubeflow/
    ln -s /data/alluxio /data/k8s/kubeflow/pipeline
    # NOTE(review): StatefulSets expose no "Available" condition — verify this
    # wait actually succeeds, or switch to a readyReplicas jsonpath wait.
    kubectl -n infra wait sts/alluxio-fuse --for condition=Available --timeout=1600s
    mkdir -pv  /data/k8s/kubeflow/pipeline/{workspace,archives}
fi
# Expose the alluxio-proxy service on port 39999
kubectl apply -f  alluxio/alluxio-proxy.yaml

# Deploy harbor (image registry)
mkdir -pv /data/harbor/{pg,redis,trivy,registry} && chmod -R 777 /data/harbor/registry
kubectl apply -f harbor/k8s/svc
kubectl apply -f harbor/k8s/cm
kubectl apply -f harbor/k8s/sc
kubectl apply -f harbor/k8s/pv
kubectl apply -f harbor/k8s/secret
kubectl apply -f harbor/k8s/sts
# Give the stateful components time to start before the deployments.
# NOTE(review): a fixed sleep is fragile — a "kubectl wait" on the
# statefulsets would be more reliable.
sleep 120
kubectl apply -f harbor/k8s/dep

# Configure the entry point: expose the istio ingressgateway on the node IP
# passed as the first script argument.
#ip=`ifconfig eth1 | grep 'inet '| awk '{print $2}' | head -n 1`
kubectl patch svc istio-ingressgateway -n istio-system -p '{"spec":{"externalIPs":["'"$1"'"]}}'

# Add the host entry on your local machine manually
echo "打开网址：http://$1"



