package phases

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/pkg/errors"
	agentv1beta1 "gopkg.openfuyao.cn/bkeagent/api/v1beta1"
	backupPlugin "gopkg.openfuyao.cn/bkeagent/pkg/job/builtin/backup"
	certPlugin "gopkg.openfuyao.cn/bkeagent/pkg/job/builtin/kubeadm/certs"
	"gopkg.openfuyao.cn/bkeagent/utils/etcd"
	"gopkg.openfuyao.cn/bkecommon"
	bkeaddon "gopkg.openfuyao.cn/bkecommon/cluster/addon"
	"gopkg.openfuyao.cn/bkecommon/cluster/api/v1beta1"
	confv1beta1 "gopkg.openfuyao.cn/bkecommon/cluster/api/v1beta1"
	"gopkg.openfuyao.cn/bkecommon/cluster/imagehelper"
	bkeinit "gopkg.openfuyao.cn/bkecommon/cluster/initialize"
	bkenode "gopkg.openfuyao.cn/bkecommon/cluster/node"
	bkevalidate "gopkg.openfuyao.cn/bkecommon/cluster/validation"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"net"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
	ctrl "sigs.k8s.io/controller-runtime"

	bkev1beta1 "gopkg.openfuyao.cn/cluster-api-provider-bke/api/v1beta1"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/certs"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/command"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/kube"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/mergecluster"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe/phaseutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/annotation"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/clusterutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/condition"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/constant"
)

const (
	// EnsureClusterManageName is the name of the EnsureClusterManage phase
	EnsureClusterManageName confv1beta1.BKEClusterPhase = "EnsureClusterManage"

	// manageClusterEtcdCertDirAnnotationKey is the BKECluster annotation under
	// which collectBaseInfo stores the discovered etcd certificate directory,
	// read back later by collectAgentInfo.
	manageClusterEtcdCertDirAnnotationKey = "etcd-cert-dir"
)

// EnsureClusterManage is the phase that takes over (manages) an existing,
// externally created cluster: it collects cluster information, pushes the BKE
// agent, and — for bocloud clusters — performs a fake bootstrap so the cluster
// becomes fully controlled by BKE.
type EnsureClusterManage struct {
	phaseframe.BasePhase
	// remoteClient talks to the target (managed) cluster's API server.
	remoteClient kube.RemoteKubeClient
}

// NewEnsureClusterManage constructs the EnsureClusterManage phase from the
// given phase context.
func NewEnsureClusterManage(ctx *phaseframe.PhaseContext) phaseframe.Phase {
	return &EnsureClusterManage{
		BasePhase: phaseframe.NewBasePhase(ctx, EnsureClusterManageName),
	}
}

// Execute runs the cluster-manage flow: it collects cluster information,
// pushes the BKE agent, and — for bocloud clusters whose cluster-api
// resources exist — prepares and fake-bootstraps the cluster so it ends up
// fully controlled by BKE. Returns a requeue result while takeover is still
// in progress.
func (e *EnsureClusterManage) Execute() (_ ctrl.Result, err error) {
	e.Ctx.Log.Info(constant.ClusterManagingReason, "start to manage cluster %s", utils.ClientObjNS(e.Ctx.BKECluster))

	if err = e.getRemoteClient(); err != nil {
		return ctrl.Result{}, err
	}
	// Collect the cluster's base information.
	// include: k8s version, node info, cluster network, etc.
	if err = e.collectBaseInfo(); err != nil {
		return ctrl.Result{}, err
	}
	// Push the agent to the nodes via a launcher daemonset.
	if err = e.pushAgent(); err != nil {
		return ctrl.Result{}, err
	}

	// Collect further cluster information through the agent.
	if err = e.collectAgentInfo(); err != nil {
		return ctrl.Result{}, err
	}

	// Other cluster types do not need the subsequent takeover steps.
	if !clusterutil.IsBocloudCluster(e.Ctx.BKECluster) {
		return ctrl.Result{}, nil
	}
	// The cluster-api resources have not been created yet; requeue and retry.
	if e.Ctx.BKECluster.OwnerReferences == nil {
		return ctrl.Result{Requeue: true}, nil
	}
	if err = e.Ctx.RefreshCtxCluster(); err != nil {
		return ctrl.Result{}, err
	}
	if e.Ctx.Cluster == nil {
		return ctrl.Result{}, nil
	}

	// Prepare for subsequent cluster management (backup, certs, env); must succeed.
	if err = e.bocloudClusterManagePrepare(); err != nil {
		return ctrl.Result{}, err
	}
	// Fake bootstrap: fully hand the cluster over to BKE management.
	if err = e.reconcileFakeBootstrap(); err != nil {
		return ctrl.Result{}, err
	}
	// Takeover finished; mark fully controlled so other phases can run normally.
	clusterutil.MarkClusterFullyControlled(e.Ctx.BKECluster)
	// requeue
	return ctrl.Result{Requeue: true}, nil
}

// NeedExecute reports whether the manage phase should run for the transition
// from old to new. Native BKE clusters and clusters that are already fully
// controlled are skipped; otherwise the phase status is set to waiting.
func (e *EnsureClusterManage) NeedExecute(old *bkev1beta1.BKECluster, new *bkev1beta1.BKECluster) (needExecute bool) {
	switch {
	case !e.BasePhase.NormalNeedExecute(old, new):
		return false
	case clusterutil.IsBKECluster(new):
		return false
	case clusterutil.FullyControlled(new):
		return false
	}

	e.SetStatus(bkev1beta1.PhaseWaiting)
	return true
}

// collectBaseInfo gathers the managed cluster's base information (k8s
// version, nodes, networking, control-plane endpoint, container runtime)
// through the remote client and persists it onto the BKECluster status/spec.
// It is a no-op once the base info has already been collected.
func (e *EnsureClusterManage) collectBaseInfo() error {
	_, c, bkeCluster, _, log := e.Ctx.Untie()
	if clusterutil.ClusterBaseInfoHasCollected(bkeCluster) {
		return nil
	}
	log.Debug("collect cluster base info")
	collectRes, warns, errs := e.remoteClient.Collect()
	if len(errs) > 0 {
		err := kerrors.NewAggregate(errs)
		log.Error(constant.CollectClusterInfoFailedReason, "failed to collect cluster info for BKECluster %s: %v", utils.ClientObjNS(bkeCluster), err)
		return err
	}
	for _, warn := range warns {
		log.Warn("collect cluster info for BKECluster %s: %v", utils.ClientObjNS(bkeCluster), warn)
	}

	// patchFunc is applied while syncing status so the collected data and the
	// "base info collected" marker land on the cluster atomically.
	patchFunc := func(bkeCluster *bkev1beta1.BKECluster) {
		clusterutil.MarkClusterBaseInfoCollected(bkeCluster)
		// Remember where the target cluster keeps its etcd certs; read back
		// later by collectAgentInfo.
		annotation.SetAnnotation(bkeCluster, manageClusterEtcdCertDirAnnotationKey, collectRes.EtcdCertificatesDir)
		bkeCluster.Spec.ClusterConfig.Nodes = collectRes.Nodes
		bkeCluster.Spec.ClusterConfig.Cluster.Networking = collectRes.Networking
		bkeCluster.Spec.ControlPlaneEndpoint = collectRes.ControlPlaneEndpoint
		bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion = collectRes.KubernetesVersion
		bkeCluster.Status.KubernetesVersion = collectRes.KubernetesVersion
		bkeCluster.Spec.ClusterConfig.Cluster.ContainerRuntime = collectRes.ContainerRuntime
		bkeCluster.Status.AgentStatus.Reset()
		// Fall back to the default image repo when the collected one is invalid.
		if bkevalidate.ValidateRepo(bkeCluster.Spec.ClusterConfig.Cluster.ImageRepo) != nil {
			bkeinit.SetDefaultImageRepo(&bkeCluster.Spec.ClusterConfig.Cluster)
		}
		condition.ConditionMark(bkeCluster, bkev1beta1.NodesEnvCondition, confv1beta1.ConditionTrue, constant.NodesEnvReadyReason, "it's necessary set true used to fake boot strap cluster for bocloud cluster")

		// need init status node
		nodeStates := confv1beta1.NodesStates{}
		for _, node := range collectRes.Nodes {
			nodeStates = append(nodeStates, confv1beta1.NodeState{
				State: bkev1beta1.NodeManaging,
				Node:  node,
			})
		}
		bkeCluster.Status.NodesStatus = nodeStates
	}

	// BUG FIX: the old condition `collectRes.Nodes != nil || len(...) > 0`
	// also marked the condition for a non-nil but empty slice; only mark it
	// when at least one node was actually collected.
	if len(collectRes.Nodes) > 0 {
		condition.ConditionMark(bkeCluster, bkev1beta1.NodesInfoCondition, confv1beta1.ConditionTrue, constant.NodesInfoReadyReason, "")
	}

	return mergecluster.SyncStatusUntilComplete(c, bkeCluster, patchFunc)
}

// pushAgent deploys the BKE agent to every cluster node via a short-lived
// launcher daemonset, then pings each agent and records per-node
// pushed/ready flags on the BKECluster. It is skipped when an agent has
// already been pushed to any node.
func (e *EnsureClusterManage) pushAgent() error {
	ctx, c, bkeCluster, scheme, log := e.Ctx.Untie()

	// Skip as soon as one node already carries the "agent pushed" flag —
	// the daemonset run covers all nodes at once.
	needPushAgent := true
	for _, node := range bkeCluster.Spec.ClusterConfig.Nodes {
		if bkeCluster.GetNodeStateFlag(node.IP, bkev1beta1.NodeAgentPushedFlag) {
			needPushAgent = false
		}
	}
	if !needPushAgent {
		return nil
	}

	log.Debug("step 2 push agent to nodes")
	// get localkubeconfig from secret
	localKubeConfig, err := phaseutil.GetLocalKubeConfig(ctx, c)
	if err != nil {
		if apierrors.IsNotFound(err) {
			log.Error(constant.BKEAgentNotReadyReason, "Local kubeconfig secret not found")
			return errors.Errorf("local kubeconfig secret not found")
		}
		log.Error(constant.BKEAgentNotReadyReason, "Failed to get local kubeconfig secret, err：%v", err)
		return errors.Errorf("failed to get local kubeconfig secret, err：%v", err)
	}

	// Resolve the launcher image: default from the image repo, overridable
	// through the "bkeagent-launcher-image" custom extra.
	cfg := bkeinit.BkeConfig(*bkeCluster.Spec.ClusterConfig)
	launcherImage := imagehelper.GetFullImageName(cfg.ImageRepo(), "bkeagent-launcher", "latest")
	if v, ok := cfg.CustomExtra["bkeagent-launcher-image"]; ok && v != "" {
		launcherImage = v
	}
	log.Info(constant.ClusterManagingReason, "BKEAgent launcher image: %s", launcherImage)
	log.Info(constant.ClusterManagingReason, "ntpserver: %s", bkeCluster.Spec.ClusterConfig.Cluster.NTPServer)

	// push agent (use bkeagent launcher daemonset to push agent)
	launcherAddonT := &bkeaddon.AddonTransfer{
		Addon: &v1beta1.Product{
			Name:    "bkeagent",
			Version: "latest",
			Param: map[string]string{
				"clusterName":   bkeCluster.Name,
				"ntpServer":     bkeCluster.Spec.ClusterConfig.Cluster.NTPServer,
				"debug":         "true",
				"kubeconfig":    string(localKubeConfig),
				"launcherImage": launcherImage,
			},
			Block: true,
		},
		Operate: bkeaddon.CreateAddon,
	}

	if err = e.remoteClient.InstallAddon(bkeCluster, launcherAddonT, nil); err != nil {
		condition.ConditionMark(bkeCluster, bkev1beta1.BKEAgentCondition, confv1beta1.ConditionFalse, constant.BKEAgentNotReadyReason, "failed create BKEAgent launcher ds")
		log.Finish(constant.BKEAgentNotReadyReason, "Failed to push bke agent to cluster, err：%v", err)
		return err
	}

	// The launcher daemonset has done its work; remove it (best effort).
	launcherAddonT.Operate = bkeaddon.RemoveAddon
	if err := e.remoteClient.InstallAddon(bkeCluster, launcherAddonT, nil); err != nil {
		log.Warn(constant.ReconcileErrorReason, "(Ignore)Failed to delete bke agent launcher daemonset, err：%v", err)
	}

	// Mark every node as "agent pushed"; the ping below may revoke the flag
	// for unreachable agents.
	for _, node := range bkeCluster.Spec.ClusterConfig.Nodes {
		bkeCluster.MarkNodeStateFlag(node.IP, bkev1beta1.NodeAgentPushedFlag)
	}

	// step 5 ping agent
	err, successNodes, failedNodes := phaseutil.PingBKEAgent(ctx, c, scheme, bkeCluster)
	if err != nil {
		log.Warn(constant.BKEAgentNotReadyReason, "(Ignore)Failed to ping bke agent in the flow nodes: %v, err：%v", failedNodes, err)
	}

	for _, node := range failedNodes {
		nodeIP := phaseutil.GetNodeIPFromCommandWaitResult(node)
		// BUG FIX: was fmt.Sprintf with no format arguments (vet S1039);
		// a plain string literal is equivalent.
		bkeCluster.SetNodeStateWithMessage(nodeIP, bkev1beta1.NodeInitFailed, "Failed ping bkeagent")
		// ping agent failed, unmark the pushed flag so a later run retries
		bkeCluster.UnmarkNodeState(nodeIP, bkev1beta1.NodeAgentPushedFlag)
	}

	for _, node := range successNodes {
		nodeIP := phaseutil.GetNodeIPFromCommandWaitResult(node)
		bkeCluster.SetNodeStateMessage(nodeIP, "BKEAgent is ready")
		// ping agent success, mark both the pushed and the ready flag
		bkeCluster.MarkNodeStateFlag(nodeIP, bkev1beta1.NodeAgentPushedFlag)
		bkeCluster.MarkNodeStateFlag(nodeIP, bkev1beta1.NodeAgentReadyFlag)
	}

	if err := mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
		return err
	}

	condition.ConditionMark(bkeCluster, bkev1beta1.BKEAgentCondition, confv1beta1.ConditionTrue, constant.BKEAgentReadyReason, "")

	return nil
}

// collectAgentInfo uses the already-pushed BKE agent to collect the target
// cluster's certificates and container-runtime configuration. It runs only
// after the base info has been collected and is a no-op once the agent info
// has already been gathered.
func (e *EnsureClusterManage) collectAgentInfo() error {
	ctx, c, bkeCluster, scheme, log := e.Ctx.Untie()

	if clusterutil.ClusterAgentInfoHasCollected(bkeCluster) || !clusterutil.ClusterBaseInfoHasCollected(bkeCluster) {
		return nil
	}
	bkeNode := bkenode.Nodes(bkeCluster.Spec.ClusterConfig.Nodes)
	// BUG FIX: guard against an empty master list before indexing below,
	// which previously panicked with index-out-of-range.
	masters := bkeNode.Master()
	if masters.Length() == 0 {
		return errors.Errorf("no master node found for cluster %s, cannot collect cluster cert", bkeCluster.Name)
	}

	k8sCertDir := bkeCluster.Spec.ClusterConfig.Cluster.CertificatesDir
	// Fall back to the k8s cert dir when collectBaseInfo did not record a
	// dedicated etcd cert dir.
	etcdCertDir, ok := annotation.HasAnnotation(bkeCluster, manageClusterEtcdCertDirAnnotationKey)
	if !ok {
		etcdCertDir = k8sCertDir
	}

	// collect target cluster cert
	collectCommand := command.Collect{
		BaseCommand: command.BaseCommand{
			Ctx:             ctx,
			NameSpace:       bkeCluster.Namespace,
			Client:          c,
			Scheme:          scheme,
			OwnerObj:        bkeCluster,
			ClusterName:     bkeCluster.Name,
			Unique:          true,
			RemoveAfterWait: true,
		},
		Node:                &masters[0],
		EtcdCertificatesDir: etcdCertDir,
		K8sCertificatesDir:  k8sCertDir,
	}
	if err := collectCommand.New(); err != nil {
		log.Error(constant.CommandCreateFailedReason, "failed to collect cert for cluster %s: %v", bkeCluster.Name, err)
		return err
	}
	err, _, failedNode := collectCommand.Wait()
	if err != nil {
		log.Error(constant.CommandWaitFailedReason, "failed to wait for collect cert for cluster %s: %v", bkeCluster.Name, err)
		return err
	}
	if len(failedNode) > 0 {
		commandErrs, err := phaseutil.LogCommandFailed(*collectCommand.Command, failedNode, log, constant.CommandExecFailedReason)
		phaseutil.MarkNodeStatusByCommandErrs(bkeCluster, commandErrs)
		return errors.Errorf("failed to collect cert for cluster %s: %v, err: %v", bkeCluster.Name, failedNode, err)
	}

	if err = e.getContainerRuntimeConfigFromCollectCommand(collectCommand.Command); err != nil {
		return errors.Errorf("failed to get container runtime config from collect command: %v", err)
	}

	log.Info(constant.CollectClusterInfoSucceedReason, "finish collect cluster info")
	return nil
}

// reconcileFakeBootstrap runs the "fake bootstrap" for a managed bocloud
// cluster: the cluster and its machines are mirrored as CRDs in the current
// cluster so that, once the fake bootstrap completes, the machines can be
// managed from here. It is a no-op for non-bocloud or already fully
// controlled clusters, or when cluster-api resources do not exist yet.
func (e *EnsureClusterManage) reconcileFakeBootstrap() error {
	bkeCluster := e.Ctx.BKECluster
	if !clusterutil.IsBocloudCluster(bkeCluster) || clusterutil.FullyControlled(bkeCluster) {
		return nil
	}
	// No owner reference means the cluster-api resources were never created.
	if bkeCluster.OwnerReferences == nil {
		return nil
	}
	if err := e.Ctx.RefreshCtxCluster(); err != nil {
		return err
	}
	if e.Ctx.Cluster == nil {
		return nil
	}

	// First the control-plane machines, then the workers.
	if err := e.fakeBootstrapMaster(); err != nil {
		return errors.Errorf("failed to fake bootstrap master: %v", err)
	}
	if err := e.fakeBootstrapWorker(); err != nil {
		return errors.Errorf("failed to fake bootstrap worker: %v", err)
	}
	return nil
}

// fakeBootstrapMaster fake-bootstraps the control plane: it sets the
// KubeadmControlPlane replicas to the cluster's master count, marks the
// BKECluster ready so cluster-api starts reconciling, waits for the
// infrastructure to be ready, then waits until every master machine has a
// NodeRef and finally flags the master nodes as booted.
func (e *EnsureClusterManage) fakeBootstrapMaster() error {
	ctx, c, bkeCluster, _, log := e.Ctx.Untie()
	bkeNodes := bkenode.Nodes(bkeCluster.Spec.ClusterConfig.Nodes)

	// Step 1: set kubeadmControlPlane.replicas to the cluster's master count.
	kcp, err := phaseutil.GetClusterAPIKubeadmControlPlane(ctx, c, e.Ctx.Cluster)
	if err != nil {
		return err
	}
	expectKcpReplicas := int32(bkeNodes.Master().Length())
	kcp.Spec.Replicas = &expectKcpReplicas
	if err = phaseutil.ResumeClusterAPIObj(ctx, c, kcp); err != nil {
		return err
	}

	// Step 2: kick off cluster-api by setting bkecluster.status.ready = true,
	// then wait for cluster.status.infrastructureReady.
	bkeCluster.Status.Ready = true
	if err = mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
		return err
	}
	ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Minute)
	err = wait.PollImmediateUntil(1*time.Second, func() (bool, error) {
		if err = e.Ctx.RefreshCtxCluster(); err != nil {
			return false, err
		}
		return e.Ctx.Cluster.Status.InfrastructureReady, nil
	}, ctxWithTimeout.Done())
	cancel()
	if err != nil {
		return errors.Errorf("failed to wait for cluster %s infrastructure ready: %v", utils.ClientObjNS(e.Ctx.Cluster), err)
	}

	// Step 3: wait until every master node finishes the fake bootstrap.
	// Because this is a managed cluster, cluster-api finds the target cluster
	// directly reachable, skips master-init and sets
	// clusterv1.ControlPlaneInitializedCondition on the Cluster resource
	// itself; we only start checking machines once that condition is true.
	ctxWithTimeout, cancel = context.WithTimeout(ctx, 4*time.Minute)
	masterInitFlag := false
	masterNodes := bkeNodes.Master()
	successJoinMasterNodes := map[int]confv1beta1.Node{}
	err = wait.PollImmediateUntil(2*time.Second, func() (bool, error) {
		if !masterInitFlag {
			if err := e.Ctx.RefreshCtxCluster(); err == nil {
				if conditions.IsTrue(e.Ctx.Cluster, clusterv1.ControlPlaneInitializedCondition) {
					masterInitFlag = true
				} else {
					return false, nil
				}
			}
		}
		// A master counts as bootstrapped once its Machine has a NodeRef.
		for i, node := range masterNodes {
			if _, ok := successJoinMasterNodes[i]; ok {
				continue
			}
			machine, err := phaseutil.NodeToMachine(ctx, c, bkeCluster, node)
			if err != nil {
				continue
			}
			if machine.Status.NodeRef != nil {
				log.Info(constant.ClusterManagingReason, "node %s fake bootstrap success", phaseutil.NodeInfo(node))
				successJoinMasterNodes[i] = node
			}
		}
		if len(successJoinMasterNodes) != int(expectKcpReplicas) {
			return false, nil
		}
		return true, nil
	}, ctxWithTimeout.Done())
	cancel()
	if err != nil {
		return errors.Errorf("failed to wait for cluster %s master nodes fake bootstrap ready: %v", utils.ClientObjNS(e.Ctx.BKECluster), err)
	}

	// CLEANUP: an empty `if masterInitFlag {}` block used to sit here; its
	// comment suggested marking master0 with a master-init flag, but the body
	// was never written. Removed as dead code — TODO(review): confirm whether
	// that flag is still required anywhere.

	if err := e.Ctx.RefreshCtxBKECluster(); err != nil {
		return err
	}

	for _, node := range successJoinMasterNodes {
		e.Ctx.BKECluster.MarkNodeStateFlag(node.IP, bkev1beta1.NodeBootFlag)
		e.Ctx.BKECluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeNotReady, "Fake bootstrap success")
	}

	if err := mergecluster.SyncStatusUntilComplete(c, e.Ctx.BKECluster); err != nil {
		return err
	}
	return nil
}

// fakeBootstrapWorker fake-bootstraps the workers: it sets the
// MachineDeployment replicas to the cluster's worker count, waits until every
// worker machine has a NodeRef, and flags the worker nodes as booted. It is a
// no-op when the cluster has no workers.
func (e *EnsureClusterManage) fakeBootstrapWorker() error {
	ctx, c, bkeCluster, _, log := e.Ctx.Untie()
	bkeNodes := bkenode.Nodes(bkeCluster.Spec.ClusterConfig.Nodes)

	workerNum := bkeNodes.Worker().Length()
	if workerNum == 0 {
		return nil
	}
	expectMDReplicas := int32(workerNum)
	// CLEANUP: the old `if expectMDReplicas != 0` wrapper was always true
	// after the early return above; the redundant nesting has been flattened.

	// Step 4: set machineDeployment.replicas to the cluster's worker count.
	md, err := phaseutil.GetClusterAPIMachineDeployment(ctx, c, e.Ctx.Cluster)
	if err != nil {
		return err
	}
	md.Spec.Replicas = &expectMDReplicas
	if err = phaseutil.ResumeClusterAPIObj(ctx, c, md); err != nil {
		return err
	}
	// Step 5: wait until every worker node finishes the fake bootstrap.
	// Key condition: the number of bootstrapped workers equals the
	// machineDeployment replica count set above.
	workerNodes := bkeNodes.Worker()
	successJoinWorkerNodes := map[int]confv1beta1.Node{}
	ctxWithTimeout, cancel := context.WithTimeout(ctx, 4*time.Minute)
	defer cancel()
	err = wait.PollImmediateUntil(2*time.Second, func() (bool, error) {
		// A worker counts as bootstrapped once its Machine has a NodeRef.
		for i, node := range workerNodes {
			if _, ok := successJoinWorkerNodes[i]; ok {
				continue
			}
			machine, err := phaseutil.NodeToMachine(ctx, c, bkeCluster, node)
			if err != nil {
				continue
			}
			if machine.Status.NodeRef != nil {
				log.Info(constant.ClusterManagingReason, "node %s fake bootstrap success", phaseutil.NodeInfo(node))
				successJoinWorkerNodes[i] = node
			}
		}
		if len(successJoinWorkerNodes) != int(expectMDReplicas) {
			return false, nil
		}
		return true, nil
	}, ctxWithTimeout.Done())
	if err != nil {
		return errors.Errorf("failed to wait for cluster %s worker nodes fake bootstrap ready: %v", utils.ClientObjNS(e.Ctx.BKECluster), err)
	}
	if err := e.Ctx.RefreshCtxBKECluster(); err != nil {
		return err
	}

	for _, node := range successJoinWorkerNodes {
		e.Ctx.BKECluster.MarkNodeStateFlag(node.IP, bkev1beta1.NodeBootFlag)
		e.Ctx.BKECluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeNotReady, "Fake bootstrap success")
	}
	if err := mergecluster.SyncStatusUntilComplete(c, e.Ctx.BKECluster); err != nil {
		return err
	}
	return nil
}

// compatibilityPatch applies ansible→bke compatibility fixes: it annotates
// every etcd pod with its advertise-client-urls so that subsequent BKE
// cluster upgrades can locate the etcd members. Per-pod failures are
// collected and reported as warnings rather than aborting the phase.
func (e *EnsureClusterManage) compatibilityPatch() error {
	ctx, _, _, _, log := e.Ctx.Untie()
	bkeNodes := bkenode.Nodes(e.Ctx.BKECluster.Spec.ClusterConfig.Nodes)
	// BUG FIX: the KubeClient error was previously discarded, which could
	// leave clientSet nil and panic on the List call below.
	clientSet, err := e.remoteClient.KubeClient()
	if err != nil {
		return err
	}
	etcdPods, err := clientSet.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{
		LabelSelector: "component=etcd",
	})
	if err != nil {
		return err
	}

	failedUpdateEtcdNode := []string{}
	for _, pod := range etcdPods.Items {
		nodeName := pod.Spec.NodeName
		nodes := bkeNodes.Filter(bkenode.FilterOptions{"Hostname": nodeName})
		if nodes.Length() == 0 {
			log.Warn(constant.ClusterManageWarningReason, "etcd pod node %s not found in BKECluster nodes fields, skip", nodeName)
			failedUpdateEtcdNode = append(failedUpdateEtcdNode, nodeName)
			// BUG FIX: without this continue, nodes[0] below panicked with an
			// index-out-of-range whenever the pod's node was not found.
			continue
		}
		if _, ok := annotation.HasAnnotation(&pod, annotation.EtcdAdvertiseClientUrlsAnnotationKey); ok {
			continue
		}
		annotation.SetAnnotation(&pod, annotation.EtcdAdvertiseClientUrlsAnnotationKey, etcd.GetClientURLByIP(nodes[0].IP))
		// update etcd pod
		_, err = clientSet.CoreV1().Pods(metav1.NamespaceSystem).Update(ctx, &pod, metav1.UpdateOptions{})
		if err != nil {
			log.Warn(constant.ClusterManageWarningReason, "failed to update etcd pod %s: %v", utils.ClientObjNS(&pod), err)
			failedUpdateEtcdNode = append(failedUpdateEtcdNode, nodeName)
		}
	}
	if len(failedUpdateEtcdNode) > 0 {
		log.Warn(constant.ClusterManageWarningReason, "following etcd node failed to set annotation: %v,this will cause subsequent cluster upgrades to fail", failedUpdateEtcdNode)
		log.Warn(constant.ClusterManageWarningReason, "please manually update the etcd pod and set the annotation %q, eg: bkeagent.bocloud.com/etcd.advertise-client-urls: https://<node ip>:2379", annotation.EtcdAdvertiseClientUrlsAnnotationKey)
		log.Warn(constant.ClusterManageWarningReason, "before upgrading the cluster, please make sure that the etcd pod has been set annotation %q", annotation.EtcdAdvertiseClientUrlsAnnotationKey)
	}
	return nil
}

// getRemoteClient builds the remote kube client for the managed cluster,
// wires up its loggers and stores it on the phase.
func (e *EnsureClusterManage) getRemoteClient() error {
	ctx, c, bkeCluster, _, log := e.Ctx.Untie()
	rc, err := kube.NewRemoteClientByBKECluster(ctx, c, bkeCluster)
	if err != nil {
		log.Error(constant.InternalErrorReason, "failed to get BKECluster %q remote cluster client", utils.ClientObjNS(bkeCluster))
		return err
	}
	rc.SetBKELogger(log)
	rc.SetLogger(log.NormalLogger)
	e.remoteClient = rc
	return nil
}

// bocloudClusterManagePrepare 升级前准备
// 备份数据、分发证 等
func (e *EnsureClusterManage) bocloudClusterManagePrepare() error {
	bkeNodes := bkenode.Nodes(e.Ctx.BKECluster.Spec.ClusterConfig.Nodes)
	// todo  reconcile load balancer or other

	masterNode := bkeNodes.Master()
	//补全证书
	certsGenerator := certs.NewKubernetesCertGenerator(e.Ctx.Context, e.Ctx.Client, e.Ctx.BKECluster)
	certsGenerator.ConfigKubeConfig(net.JoinHostPort(masterNode[0].IP, "6443"))
	if err := certsGenerator.LookUpOrGenerate(); err != nil {
		return err
	}

	// backup data
	if err := e.backupBocloudClusterData(bkeNodes); err != nil {
		e.Ctx.Log.Warn(constant.BocloudClusterDataBackupFailedReason, "failed to backup cluster %s data: %v", utils.ClientObjNS(e.Ctx.BKECluster), err)
	}

	// distribute target cluster certs
	if err := e.distributeTargetClusterCerts(bkeNodes); err != nil {
		return err
	}

	// 环境初始化，hosts文件、运行时配置等
	if err := e.initBocloudClusterEnv(); err != nil {
		return err
	}
	return nil
}

// distributeTargetClusterCerts 分发证书给目标集群所有节点
func (e *EnsureClusterManage) distributeTargetClusterCerts(bkeNodes bkenode.Nodes) error {
	// 如果推断集群是bke集群，那么就不需要分发证书了
	if v, ok := condition.HasCondition(bkev1beta1.TypeOfManagementClusterGuessCondition, e.Ctx.BKECluster); ok {
		clusterType := v.Reason
		if clusterType == bkecommon.BKEClusterFromAnnotationValueBKE {
			return nil
		}
	}

	// 分发证书给master节点
	if bkeNodes.Master().Length() != 0 {
		if err := e.distributeMasterNodesCerts(bkeNodes); err != nil {
			return err
		}
	}
	// 分发证书给worker节点
	if bkeNodes.Worker().Length() != 0 {
		if err := e.distributeWorkerNodesCerts(bkeNodes); err != nil {
			return err
		}
	}
	return nil
}

// distributeMasterNodesCerts 分发证书给master节点
func (e *EnsureClusterManage) distributeMasterNodesCerts(bkeNodes bkenode.Nodes) error {
	ctx, c, bkeCluster, scheme, log := e.Ctx.Untie()
	if _, ok := condition.HasCondition(bkev1beta1.BocloudClusterMasterCertDistributionCondition, bkeCluster); ok {
		return nil
	}

	log.Info(constant.ClusterManagingReason, "Distribute master nodes certs")
	certCommandName := fmt.Sprintf("distribute-master-cert-%s", bkeCluster.Name)
	certCommandSpec := command.GenerateDefaultCommandSpec()
	certCommandSpec.Commands = []agentv1beta1.ExecCommand{
		{
			ID: "cert",
			Command: []string{
				certPlugin.Name,
				fmt.Sprintf("clusterName=%s", bkeCluster.Name),
				fmt.Sprintf("namespace=%s", bkeCluster.Namespace),
				fmt.Sprintf("certificatesDir=%s", bkeCluster.Spec.ClusterConfig.Cluster.CertificatesDir),

				"generate=false",
				"generateKubeConfig=true",
				"loadCACert=false",
				"loadTargetClusterCert=true",
				"loadAdminKubeconfig=false",
				"uploadCerts=false",
			},
			Type:          agentv1beta1.CommandBuiltIn,
			BackoffDelay:  3,
			BackoffIgnore: false,
		},
	}
	choseNodes := bkeNodes.Master()
	certCommand := command.Custom{
		BaseCommand: command.BaseCommand{
			Ctx:             ctx,
			Client:          c,
			Scheme:          scheme,
			OwnerObj:        bkeCluster,
			NameSpace:       bkeCluster.Namespace,
			ClusterName:     bkeCluster.Name,
			RemoveAfterWait: true,
			Unique:          true,
		},
		Nodes:        choseNodes,
		CommandName:  certCommandName,
		CommandSpec:  certCommandSpec,
		CommandLabel: command.BKEClusterLabel,
	}

	if err := certCommand.New(); err != nil {
		return err
	}

	err, _, failed := certCommand.Wait()
	if err != nil {
		log.Error(constant.CommandWaitFailedReason, "failed to wait command %q, err: %v", certCommandName, err)
		return err
	}
	if failed != nil || len(failed) > 0 {
		commandErrs, err := phaseutil.LogCommandFailed(*certCommand.Command, failed, log, constant.BocloudClusterMasterCertDistributionFailedReason)
		phaseutil.MarkNodeStatusByCommandErrs(bkeCluster, commandErrs)
		log.Error(constant.CommandExecFailedReason, "failed to distribute certificate on flow master nodes %q，err: %v", failed, err)
		return errors.Errorf("failed to distribute certificate on flow worker nodes %q，err: %v", failed, err)
	}
	condition.ConditionMark(bkeCluster, bkev1beta1.BocloudClusterMasterCertDistributionCondition, confv1beta1.ConditionTrue, constant.BocloudClusterMasterCertDistributionSuccessReason, "Distribute master nodes certs success")
	log.Info(constant.BocloudClusterMasterCertDistributionSuccessReason, "Distribute master nodes certs success")
	return nil
}

// distributeWorkerNodesCerts 分发证书给worker节点
func (e *EnsureClusterManage) distributeWorkerNodesCerts(bkeNodes bkenode.Nodes) error {
	ctx, c, bkeCluster, scheme, log := e.Ctx.Untie()
	if _, ok := condition.HasCondition(bkev1beta1.BocloudClusterWorkerCertDistributionCondition, bkeCluster); ok {
		return nil
	}

	log.Info(constant.ClusterManagingReason, "Distribute worker nodes certs")
	certCommandName := fmt.Sprintf("distribute-worker-cert-%s", bkeCluster.Name)
	certCommandSpec := command.GenerateDefaultCommandSpec()
	certCommandSpec.Commands = []agentv1beta1.ExecCommand{
		{
			ID: "cert",
			Command: []string{
				certPlugin.Name,
				fmt.Sprintf("clusterName=%s", bkeCluster.Name),
				fmt.Sprintf("namespace=%s", bkeCluster.Namespace),
				fmt.Sprintf("certificatesDir=%s", bkeCluster.Spec.ClusterConfig.Cluster.CertificatesDir),

				"generate=false",
				"generateKubeConfig=false",
				"loadCACert=true",
				"caCertNames=ca,proxy",
				"loadTargetClusterCert=false",
				"loadAdminKubeconfig=true",
				"uploadCerts=false",
			},
			Type:          agentv1beta1.CommandBuiltIn,
			BackoffDelay:  3,
			BackoffIgnore: false,
		},
	}
	choseNodes := bkeNodes.Worker()
	certCommand := command.Custom{
		BaseCommand: command.BaseCommand{
			Ctx:             ctx,
			Client:          c,
			Scheme:          scheme,
			OwnerObj:        bkeCluster,
			NameSpace:       bkeCluster.Namespace,
			ClusterName:     bkeCluster.Name,
			RemoveAfterWait: true,
			Unique:          true,
		},
		Nodes:        choseNodes,
		CommandName:  certCommandName,
		CommandSpec:  certCommandSpec,
		CommandLabel: command.BKEClusterLabel,
	}

	if err := certCommand.New(); err != nil {
		return err
	}

	err, _, failed := certCommand.Wait()
	if err != nil {
		log.Error(constant.CommandWaitFailedReason, "failed to wait command %q, err: %v", certCommandName, err)
		return err
	}
	if failed != nil || len(failed) > 0 {
		commandErrs, err := phaseutil.LogCommandFailed(*certCommand.Command, failed, log, constant.BocloudClusterWorkerCertDistributionFailedReason)
		phaseutil.MarkNodeStatusByCommandErrs(bkeCluster, commandErrs)
		log.Error(constant.CommandExecFailedReason, "failed to distribute certificate on flow worker node %q，err: %v", failed, err)
		return errors.Errorf("failed to distribute certificate on flow worker node %q，err: %v", failed, err)
	}
	condition.ConditionMark(bkeCluster, bkev1beta1.BocloudClusterWorkerCertDistributionCondition, confv1beta1.ConditionTrue, constant.BocloudClusterWorkerCertDistributionSuccessReason, "Distribute worker nodes certs success")
	log.Info(constant.BocloudClusterWorkerCertDistributionSuccessReason, "Distribute worker nodes certs success")
	return nil
}

// backupBocloudClusterData backup bocloud cluster data
// include: old pki dir、etcd pki dir, etc
func (e *EnsureClusterManage) backupBocloudClusterData(bkeNodes bkenode.Nodes) error {
	ctx, c, bkeCluster, scheme, log := e.Ctx.Untie()
	if _, ok := condition.HasCondition(bkev1beta1.BocloudClusterDataBackupCondition, bkeCluster); ok {
		return nil
	}

	log.Info(constant.ClusterManagingReason, "backup boCloud cluster data")
	dirs := []string{
		// kubernetes dir
		"/etc/kubernetes",
		// etcd cert dir
		"/etc/etcd/ssl",
	}
	files := []string{
		"",
	}

	backupCommandName := fmt.Sprintf("backup-bocloud-cluster-data-%s", bkeCluster.Name)
	backupCommandSpec := command.GenerateDefaultCommandSpec()
	backupCommandSpec.Commands = []agentv1beta1.ExecCommand{
		{
			ID: "backup",
			Command: []string{
				backupPlugin.Name,
				fmt.Sprintf("backupDirs=%s", strings.Join(dirs, ",")),
				fmt.Sprintf("backupFiles=%s", strings.Join(files, ",")),
			},
			Type:          agentv1beta1.CommandBuiltIn,
			BackoffIgnore: false,
		},
	}

	choseNodes := bkeNodes
	backupCommand := command.Custom{
		BaseCommand: command.BaseCommand{
			Ctx:             ctx,
			Client:          c,
			Scheme:          scheme,
			OwnerObj:        bkeCluster,
			NameSpace:       bkeCluster.Namespace,
			ClusterName:     bkeCluster.Name,
			RemoveAfterWait: true,
			Unique:          true,
		},
		Nodes:        choseNodes,
		CommandName:  backupCommandName,
		CommandSpec:  backupCommandSpec,
		CommandLabel: command.BKEClusterLabel,
	}

	if err := backupCommand.New(); err != nil {
		return err
	}
	err, _, failed := backupCommand.Wait()
	if err != nil {
		log.Error(constant.CommandWaitFailedReason, "failed to wait command %q, err: %v", backupCommandName, err)
		return err
	}
	if failed != nil || len(failed) > 0 {
		commandErrs, err := phaseutil.LogCommandFailed(*backupCommand.Command, failed, log, "BackupBocloudClusterDataFailed")
		phaseutil.MarkNodeStatusByCommandErrs(bkeCluster, commandErrs)
		log.Error(constant.CommandExecFailedReason, "failed to backup on flow master node %q，err: %v", failed, err)
		return errors.Errorf("failed to distribute certificate on flow master node %q，err: %v", failed, err)
	}
	condition.ConditionMark(bkeCluster, bkev1beta1.BocloudClusterDataBackupCondition, confv1beta1.ConditionTrue, constant.BocloudClusterDataBackupSuccessReason, "backup bocloud cluster data success")
	log.Info(constant.CommandExecSuccessReason, "backup bocloud cluster data success")
	return nil
}

// initBocloudClusterEnv init bocloud cluster env
func (e *EnsureClusterManage) initBocloudClusterEnv() error {
	ctx, c, bkeCluster, scheme, log := e.Ctx.Untie()

	bkeNodes := phaseutil.GetNeedInitEnvNodes(bkeCluster)
	if bkeNodes.Length() == 0 {
		return nil
	}

	log.Info(constant.ClusterManagingReason, "init bocloud cluster env, scope: hosts file, http repo")
	envCommandName := fmt.Sprintf("init-bocloud-cluster-env-%s", bkeCluster.Name)
	envCommandSpec := command.GenerateDefaultCommandSpec()
	envCommandSpec.Commands = []agentv1beta1.ExecCommand{
		{
			ID: "init bocloud cluster env",
			Command: []string{
				"K8sEnvInit",
				"init=true",
				"check=true",
				"scope=hosts,httpRepo,registry",
				fmt.Sprintf("bkeConfig=%s:%s", bkeCluster.Namespace, bkeCluster.Name),
			},
			Type:          agentv1beta1.CommandBuiltIn,
			BackoffDelay:  3,
			BackoffIgnore: false,
		},
	}
	envCommand := command.Custom{
		BaseCommand: command.BaseCommand{
			Ctx:             ctx,
			Client:          c,
			Scheme:          scheme,
			OwnerObj:        bkeCluster,
			NameSpace:       bkeCluster.Namespace,
			ClusterName:     bkeCluster.Name,
			RemoveAfterWait: true,
			Unique:          true,
		},
		Nodes:        bkeNodes,
		CommandName:  envCommandName,
		CommandSpec:  envCommandSpec,
		CommandLabel: command.BKEClusterLabel,
	}

	if err := envCommand.New(); err != nil {
		return err
	}

	err, success, failed := envCommand.Wait()
	if err != nil {
		log.Error(constant.CommandWaitFailedReason, "failed to wait command %q, err: %v", envCommandName, err)
		return err
	}

	// 标记成功节点状态
	for _, node := range success {
		nodeIP := phaseutil.GetNodeIPFromCommandWaitResult(node)
		bkeCluster.MarkNodeStateFlag(nodeIP, bkev1beta1.NodeEnvFlag)
		bkeCluster.SetNodeStateMessage(nodeIP, "Nodes env is ready")
	}

	// 标记失败节点状态
	for _, node := range failed {
		nodeIP := phaseutil.GetNodeIPFromCommandWaitResult(node)
		bkeCluster.SetNodeStateWithMessage(nodeIP, bkev1beta1.NodeInitFailed, "Failed to check k8s env")
	}

	if err := mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
		return err
	}

	// 只要有失败的就返回，不同于集群部署
	if len(failed) > 0 {
		commandErrs, err := phaseutil.LogCommandFailed(*envCommand.Command, failed, log, constant.BocloudClusterEnvInitFailedReason)
		phaseutil.MarkNodeStatusByCommandErrs(bkeCluster, commandErrs)
		errInfo := fmt.Sprintf("failed to init bocloud cluster env on flow nodes %q, err: %v", failed, err)
		condition.ConditionMark(bkeCluster, bkev1beta1.BocloudClusterEnvInitCondition, confv1beta1.ConditionFalse, constant.BocloudClusterEnvInitFailedReason, errInfo)
		log.Error(constant.CommandExecFailedReason, errInfo)
		return errors.Errorf(errInfo)
	}

	condition.ConditionMark(bkeCluster, bkev1beta1.BocloudClusterEnvInitCondition, confv1beta1.ConditionTrue, constant.BocloudClusterEnvInitSuccessReason, "init bocloud cluster env success")
	for _, node := range bkeNodes {
		bkeCluster.MarkNodeStateFlag(node.IP, bkev1beta1.NodeEnvFlag)
	}

	log.Info(constant.BocloudClusterEnvInitSuccessReason, "init bocloud cluster env success")
	return nil
}

// getContainerRuntimeConfigFromCollectCommand derives the container runtime
// configuration (low-level runtime, cgroup driver, data root), the kubelet
// root dir and the inferred original cluster type from the per-node stdout of
// the collect command, picking the most common value across nodes. It then
// patches the results into the BKECluster spec and syncs the status until the
// patch completes.
func (e *EnsureClusterManage) getContainerRuntimeConfigFromCollectCommand(collectCommand *agentv1beta1.Command) error {
	_, c, bkeCluster, _, log := e.Ctx.Untie()

	lowLevelRuntimes := []string{}
	cgroupDrivers := []string{}
	dataRoots := []string{}
	guessClusterTypes := []string{}
	kubeletRootDirs := []string{}

	for _, cmd := range collectCommand.Status {
		// Guard against node results that carry no condition at all;
		// indexing Conditions[0] on an empty slice would panic.
		if len(cmd.Conditions) == 0 {
			continue
		}
		stdOut := cmd.Conditions[0].StdOut
		if stdOut == nil {
			continue
		}

		// Agents of different versions may emit a different number of output
		// fields, so parse positionally and tolerate missing/empty entries.
		for i, v := range stdOut {
			if v == "" {
				continue
			}
			switch i {
			case 0:
				lowLevelRuntimes = append(lowLevelRuntimes, v)
			case 1:
				cgroupDrivers = append(cgroupDrivers, v)
			case 2:
				dataRoots = append(dataRoots, v)
			case 3:
				guessClusterTypes = append(guessClusterTypes, v)
			case 4:
				kubeletRootDirs = append(kubeletRootDirs, v)
			}
		}
	}

	// Fall back to the documented defaults when a field was never reported by
	// any node (MostCommonChar returns "" for an empty candidate list).
	finalLowLevelRuntime := mostCommonOrDefault(lowLevelRuntimes, bkeinit.DefaultRuntime)
	finalCgroupDriver := mostCommonOrDefault(cgroupDrivers, bkeinit.DefaultCgroupDriver)
	finalDataRoot := mostCommonOrDefault(dataRoots, bkeinit.DefaultCRIDockerDataRootDir)
	finalClusterType := mostCommonOrDefault(guessClusterTypes, bkecommon.BKEClusterFromAnnotationValueBocloud)
	finalKubeletRootDir := mostCommonOrDefault(kubeletRootDirs, bkeinit.DefaultKubeletRootDir)

	log.Info(constant.ClusterManagingReason, "bocloud cluster container runtime is %q. low level runtime: %q, cgroup driver: %q, data root: %q",
		bkeCluster.Spec.ClusterConfig.Cluster.ContainerRuntime.CRI, finalLowLevelRuntime, finalCgroupDriver, finalDataRoot)
	log.Info(constant.ClusterManagingReason, "bocloud cluster kubelet root dir is %q", finalKubeletRootDir)

	log.Info(constant.ClusterManagingReason, "infer the original cluster was created by %q", finalClusterType)

	containerRuntime := confv1beta1.ContainerRuntime{
		CRI:     bkeCluster.Spec.ClusterConfig.Cluster.ContainerRuntime.CRI,
		Runtime: finalLowLevelRuntime,
		Param: map[string]string{
			"cgroupDriver": finalCgroupDriver,
			"data-root":    finalDataRoot,
		},
	}

	patchFunc := func(bkeCluster *bkev1beta1.BKECluster) {
		// Mark agent info as collected and drop the temporary etcd cert dir
		// annotation used during the manage flow.
		clusterutil.MarkClusterAgentInfoCollected(bkeCluster)
		annotation.RemoveAnnotation(bkeCluster, manageClusterEtcdCertDirAnnotationKey)

		bkeCluster.Spec.ClusterConfig.Cluster.ContainerRuntime = containerRuntime
		// Record the discovered kubelet root dir: update the existing
		// "kubelet-root-dir" extra volume if present, otherwise create it.
		if bkeCluster.Spec.ClusterConfig.Cluster.Kubelet.ExtraVolumes != nil {
			for i, v := range bkeCluster.Spec.ClusterConfig.Cluster.Kubelet.ExtraVolumes {
				if v.Name == "kubelet-root-dir" {
					bkeCluster.Spec.ClusterConfig.Cluster.Kubelet.ExtraVolumes[i].HostPath = finalKubeletRootDir
				}
			}
		} else {
			bkeCluster.Spec.ClusterConfig.Cluster.Kubelet.ExtraVolumes = []confv1beta1.HostPathMount{
				{
					Name:     "kubelet-root-dir",
					HostPath: finalKubeletRootDir,
				},
			}
		}
		condition.ConditionMark(bkeCluster, bkev1beta1.TypeOfManagementClusterGuessCondition, confv1beta1.ConditionTrue, finalClusterType, "")
	}

	return mergecluster.SyncStatusUntilComplete(c, bkeCluster, patchFunc)
}

// mostCommonOrDefault returns the most frequent value among candidates, or
// def when no candidates were collected.
func mostCommonOrDefault(candidates []string, def string) string {
	if v := utils.MostCommonChar(candidates); v != "" {
		return v
	}
	return def
}
