package controllers

import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/blang/semver"
	"github.com/pkg/errors"
	"go.uber.org/zap"
	confv1beta1 "gopkg.openfuyao.cn/bkecommon/cluster/api/v1beta1"
	bkenode "gopkg.openfuyao.cn/bkecommon/cluster/node"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/apimachinery/pkg/util/wait"
	kubedrain "k8s.io/kubectl/pkg/drain"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/cluster-api/util/version"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	bkev1beta1 "gopkg.openfuyao.cn/cluster-api-provider-bke/api/v1beta1"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/command"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/kube"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/mergecluster"
	metricrecord "gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/metrics/record"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe/phaseutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/annotation"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/clusterutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/condition"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/constant"
	labelhelper "gopkg.openfuyao.cn/cluster-api-provider-bke/utils/label"
)

// reconcileBootstrap runs the bootstrap flow for a BKEMachine.
// It selects an available node from BKECluster.spec, records the bootstrap
// phase, and either fakes the bootstrap (when adopting a cluster that is not
// yet fully controlled) or creates a Bootstrap Command executed by bkeagent.
func (r *BKEMachineReconciler) reconcileBootstrap(ctx context.Context, machine *clusterv1.Machine, cluster *clusterv1.Cluster, bkeMachine *bkev1beta1.BKEMachine, bkeCluster *bkev1beta1.BKECluster, log *zap.SugaredLogger) (ctrl.Result, error) {
	// Nothing to do once the machine has already bootstrapped.
	if bkeMachine.Status.Bootstrapped {
		return ctrl.Result{}, nil
	}

	patchHelper, err := patch.NewHelper(bkeMachine, r.Client)
	if err != nil {
		return ctrl.Result{}, err
	}
	// Always attempt to Patch the bkeMachine object and Status after each reconciliation.
	defer func() {
		if err := patchBKEMachine(ctx, patchHelper, bkeMachine); err != nil {
			log.Error("failed to patch bkeMachine", err)
			return
		}
	}()

	// if bkeMachine has WorkerNodeHost or MasterNodeHost label
	// means it is at bootstrap process
	// the Status.ProviderID and Status.Ready field will be processed
	if _, ok := labelhelper.CheckBKEMachineLabel(bkeMachine); ok {
		return ctrl.Result{}, nil
	}

	// The next steps are only for the machine that is reconciled for the first time

	// check bootstrap data is ready
	// Deprecated: BKE does not use bootstrap data to boot machines, so the
	// bootstrap-data readiness check below is no longer performed.
	// TODO: Cluster API requires bootstrap data to be ready before continuing;
	// consider building a dedicated bootstrap provider to speed this up.
	//if machine.Spec.Bootstrap.DataSecretName == nil {
	//	if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
	//		log.Info("Waiting for the control plane to be initialized")
	//		conditions.MarkFalse(bkeMachine, utils.BootstrapSucceededCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
	//		return ctrl.Result{}, nil
	//	}
	//
	//	log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
	//	conditions.MarkFalse(bkeMachine, utils.BootstrapSucceededCondition, utils.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
	//	return ctrl.Result{}, nil
	//}
	if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
		log.Info("Waiting for the control plane to be initialized")
		conditions.MarkFalse(bkeMachine, bkev1beta1.BootstrapSucceededCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
		return ctrl.Result{}, nil
	}

	// capi-kubeadm-control-plane forcefully compares the kubeadmConfig content,
	// so align the kubeadmConfig with the KCP spec here.
	// A failure here is tolerated and does not block deployment.
	if util.IsControlPlaneMachine(machine) {
		// Fetch the kubeadmConfig referenced by the machine's bootstrap config.
		kubeadmConfig := &bootstrapv1.KubeadmConfig{}
		if err := r.Client.Get(ctx, client.ObjectKey{Namespace: machine.Namespace, Name: machine.Spec.Bootstrap.ConfigRef.Name}, kubeadmConfig); err == nil {
			helper, _ := patch.NewHelper(kubeadmConfig, r.Client)
			if helper != nil {
				kcp, _ := phaseutil.GetClusterAPIKubeadmControlPlane(ctx, r.Client, cluster)
				if kcp != nil {
					clusterConfiguration := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.DeepCopy()
					initConfiguration := kcp.Spec.KubeadmConfigSpec.InitConfiguration.DeepCopy()
					joinConfiguration := kcp.Spec.KubeadmConfigSpec.JoinConfiguration.DeepCopy()
					kubeadmConfig.Spec.ClusterConfiguration = clusterConfiguration
					kubeadmConfig.Spec.InitConfiguration = initConfiguration
					kubeadmConfig.Spec.JoinConfiguration = joinConfiguration
					_ = helper.Patch(ctx, kubeadmConfig)
				}
			}
		}
	}

	// get machine role
	role := bkenode.WorkerNodeRole
	if util.IsControlPlaneMachine(machine) {
		role = bkenode.MasterNodeRole
	}

	// get role nodes
	roleNodes := phaseutil.GetReadyBootstrapNodes(bkeCluster)
	if role == bkenode.MasterNodeRole {
		roleNodes = roleNodes.Master()
	} else {
		roleNodes = roleNodes.Worker()
	}

	if len(roleNodes) == 0 {
		// No error value is available here (the previous message formatted a nil err).
		r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "No available nodes in bkeCluster.spec")
		return ctrl.Result{}, nil
	}

	phase, err := r.getBootstrapPhase(ctx, machine, cluster)
	if err != nil {
		r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to get bootstrap phase: %v", err)
		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
	}

	// we need to know which Nodes are already in use
	// then choose an available node bind to bkeMachine
	node, err := r.filterAvailableNode(ctx, roleNodes, bkeCluster, phase)
	if err != nil {
		r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to get available node in bkeCluster.spec, err: %v", err)
		return ctrl.Result{}, nil
	}
	if phase == bkev1beta1.InitControlPlane {
		// TODO: this marks the init node, but in a multi-master setup the node may be deleted later.
		bkeCluster.MarkNodeStateFlag(node.IP, bkev1beta1.MasterInitFlag)
	}
	bkeCluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeBootStrapping, "Start bootstrap")
	if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster); err != nil {
		return ctrl.Result{}, err
	}

	// record deployment phase
	if err := r.recordBootstrapPhaseEvent(cluster, bkeCluster, node, phase, log); err != nil {
		r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to record bootstrap phase event: %v", err)
		return ctrl.Result{}, nil
	}

	// Before the cluster is fully controlled (cluster adoption), fake the
	// bootstrap and do not create a Command.
	if !clusterutil.FullyControlled(bkeCluster) {

		// The node is no longer considered booting once this fake bootstrap completes.
		defer func() {
			r.mux.Lock()
			delete(r.nodesBootRecord, node.IP)
			r.mux.Unlock()
		}()

		helper, err := patch.NewHelper(machine, r.Client)
		if err != nil {
			// Previously this error was ignored, and the deferred helper.Patch
			// would have panicked on a nil helper.
			r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to create machine patch helper: %v", err)
			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
		}
		defer func() {
			if err := helper.Patch(ctx, machine); err != nil {
				log.Errorf("failed to patch machine: %v", err)
			}
		}()

		providerID := phaseutil.GenerateProviderID(bkeCluster, *node)

		// patch remote node spec.ProviderID
		realProviderID, err := r.patchOrGetRemoteNodeProviderID(ctx, bkeCluster, node, providerID)
		if err != nil {
			r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to patch remote node providerID, retry after 5 second : %v", err)
			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
		}
		// For master machines, set the certificate-expiry annotation so that
		// cluster-api's certificate expiry check does not break.
		if util.IsControlPlaneMachine(machine) {
			config, err := phaseutil.GetMachineAssociateKubeadmConfig(ctx, r.Client, machine)
			if err != nil {
				r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to get machine %q kubeadm config: %v", utils.ClientObjNS(machine), err)
			}
			if config != nil {
				helper, err := patch.NewHelper(config, r.Client)
				if err == nil {
					// Set the certificate-expiry annotation.
					annotations := config.GetAnnotations()
					if annotations == nil {
						annotations = make(map[string]string)
					}
					// 100 years from now.
					annotations[clusterv1.MachineCertificatesExpiryDateAnnotation] = time.Now().AddDate(100, 0, 0).Format(time.RFC3339)
					config.SetAnnotations(annotations)
					if err := helper.Patch(ctx, config); err != nil {
						r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to patch kubeadm config: %v", err)
					}
				} else {
					r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to new kubeadm config patch helper: %v", err)
				}
			}
		}

		if err = r.markBKEMachineBootstrapReady(bkeCluster, bkeMachine, *node, realProviderID, log); err != nil {
			r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to mark bkeMachine bootstrap ready: %v", err)
			return ctrl.Result{}, nil
		}

		if err = r.reconcileBKEMachine(ctx, bkeCluster, bkeMachine, *node, log); err != nil {
			r.logWarningAndEvent(log, bkeCluster, constant.ReconcileErrorReason, "Failed to reconcile bkeMachine: %v", err)
			return ctrl.Result{}, nil
		}

		// Save the generated providerID in annotations: when adopting an
		// existing BKE cluster the providerID on the node usually differs from
		// the generated one, so keep the generated value for reference.
		annotation.SetAnnotation(machine, annotation.BKEMachineProviderIDAnnotationKey, providerID)
		annotation.SetAnnotation(bkeMachine, annotation.BKEMachineProviderIDAnnotationKey, providerID)

		// Label the bkeMachine to mark it as bootstrapping on this node, which
		// prevents the node from being used by another machine.
		labelhelper.SetBKEMachineLabel(bkeMachine, role, node.IP)

		return ctrl.Result{}, nil
	}

	// Start bootstrap
	bootstrapCommand := command.Bootstrap{
		BaseCommand: command.BaseCommand{
			Ctx:             ctx,
			NameSpace:       bkeMachine.Namespace,
			Client:          r.Client,
			Scheme:          r.Scheme,
			OwnerObj:        bkeMachine,
			ClusterName:     bkeCluster.Name,
			Unique:          true,
			RemoveAfterWait: false,
		},
		Node:      node,
		BKEConfig: bkeCluster.Name,
		Phase:     phase,
	}

	if err := bootstrapCommand.New(); err != nil {
		errInfo := "Failed to create bootstrap command"
		log.Errorf("%s: %v", errInfo, err)
		r.Recorder.AnnotatedEventf(bkeCluster, annotation.BKENormalEventAnnotation(), corev1.EventTypeWarning, constant.CommandCreateFailedReason,
			"%s: %v", errInfo, err)
		condition.ConditionMark(bkeCluster, bkev1beta1.TargetClusterBootCondition, confv1beta1.ConditionFalse, constant.CommandCreateFailedReason, errInfo)
		bkeCluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeBootStrapFailed, errInfo)

		if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster); err != nil {
			return ctrl.Result{}, err
		}

		return ctrl.Result{}, err
	}

	log.Infof("Create Bootstrap command for node %q succeeded, waiting for the command to be finished", phaseutil.NodeInfo(*node))
	// now we need to set the label for bkeMachine, which means this node is already in used
	labelhelper.SetBKEMachineLabel(bkeMachine, role, node.IP)

	r.logInfoAndEvent(log, bkeCluster, constant.TargetClusterBootingReason, "waiting node %q (role %q) finish bootstrap", phaseutil.NodeInfo(*node), node.Role)

	// then wait Command reconcile by bkeagent until have the Status that we want
	// this controller will catch the Command Status changes
	return ctrl.Result{}, nil
}

// reconcileCommand reconciles all the Commands created for this BKEMachine
// and updates the machine/cluster state according to the command results.
func (r *BKEMachineReconciler) reconcileCommand(ctx context.Context, machine *clusterv1.Machine, cluster *clusterv1.Cluster, bkeMachine *bkev1beta1.BKEMachine, bkeCluster *bkev1beta1.BKECluster, log *zap.SugaredLogger) (ctrl.Result, error) {
	patchHelper, err := patch.NewHelper(bkeMachine, r.Client)
	if err != nil {
		return ctrl.Result{}, err
	}
	// Always attempt to Patch the BKEMachine object and Status after each
	// reconciliation, unless its finalizer has been removed (the object is
	// being deleted and has already been patched explicitly).
	defer func(bkeMachine *bkev1beta1.BKEMachine) {
		if !controllerutil.ContainsFinalizer(bkeMachine, bkev1beta1.BKEMachineFinalizer) {
			return
		}
		if err := patchBKEMachine(ctx, patchHelper, bkeMachine); err != nil {
			log.Error("failed to patch bkeMachine", err)
			return
		}
	}(bkeMachine)

	commands, err := getBKEMachineAssociateCommands(ctx, r.Client, bkeCluster, bkeMachine)
	if err != nil {
		log.Error(err, "list commands failed")
		return ctrl.Result{}, err
	}
	if len(commands) == 0 {
		return ctrl.Result{}, nil
	}

	// Use whichever node list is longer: spec nodes or the nodes recorded in
	// status (status may still contain nodes already removed from the spec).
	var nodes bkenode.Nodes

	n1 := bkenode.Nodes(bkeCluster.Spec.ClusterConfig.Nodes)
	n2 := phaseutil.GetBKENodesFromNodesStatus(bkeCluster.Status.NodesStatus)
	if n1.Length() < n2.Length() {
		nodes = n2
	} else {
		nodes = n1
	}

	hostIP, found := labelhelper.CheckBKEMachineLabel(bkeMachine)
	if !found {
		return ctrl.Result{}, nil
	}

	res := ctrl.Result{}
	errs := []error{}
	for _, cmd := range commands {
		commandNodes := nodes.Filter(bkenode.FilterOptions{"IP": hostIP})
		if len(commandNodes) == 0 {
			// reset command do not need to check node
			if !strings.HasPrefix(cmd.Name, command.ResetNodeCommandNamePrefix) {
				log.Warnf("The node %s in this command %s cannot be matched from the known Nodes", cmd.Spec.NodeName, cmd.Name)
			}
			// NOTE(review): this continue also skips reset commands whose node is
			// gone, which makes the empty-node branch in the reset case below
			// unreachable — confirm whether reset commands should fall through
			// here so the finalizer can be released.
			continue
		}
		currentNode := commandNodes[0]
		complete, successNodes, failedNodes := command.CheckCommandStatus(&cmd)
		switch {
		case strings.HasPrefix(cmd.Name, command.BootstrapCommandNamePrefix):
			// The node is no longer booting once a bootstrap command reports back.
			r.mux.Lock()
			delete(r.nodesBootRecord, currentNode.IP)
			r.mux.Unlock()

			if bkeMachine.Status.Bootstrapped {
				continue
			}
			if bkeCluster.Status.ClusterStatus == bkev1beta1.ClusterDeleting {
				log.Infof("Cluster %q is deleting, skip bootstrap", bkeCluster.Name)
				continue
			}

			role := bkenode.WorkerNodeRole
			if util.IsControlPlaneMachine(machine) {
				role = bkenode.MasterNodeRole
			}

			if complete && len(failedNodes) > 0 {
				metricrecord.NodeBootstrapFailedCountRecord(bkeCluster)

				r.logWarningAndEvent(log, bkeCluster, constant.NodeBootStrapFailedReason, "Failed to bootstrap node %q", phaseutil.NodeInfo(currentNode))
				output := r.LogCommandFailed(cmd, bkeCluster, failedNodes, log, constant.NodeBootStrapFailedReason)

				bkeCluster.SetNodeStateWithMessage(currentNode.IP, bkev1beta1.NodeBootStrapFailed, output)

				metricrecord.NodeBootstrapDurationRecord(bkeCluster, currentNode, cmd.CreationTimestamp.Time, output)

				// The reason was previously passed twice, which made it the
				// message format string; pass the format/args directly instead.
				conditions.MarkFalse(bkeMachine, bkev1beta1.BootstrapSucceededCondition, constant.NodeBootStrapFailedReason, clusterv1.ConditionSeverityWarning, "Bootstrap failed err: %s", output)
				// Ignore this error; the point is to surface the information.
				_ = r.reconcileBKEMachine(ctx, bkeCluster, bkeMachine, currentNode, log)

				annotation.SetAnnotation(&cmd, annotation.CommandReconciledAnnotationKey, "true")
				if err := r.Client.Update(ctx, &cmd); err != nil {
					log.Errorf("failed to mark command reconciled, err: %s", err.Error())
					errs = append(errs, err)
				}

				// The cluster master is not initialized; later deployment cannot proceed.
				if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
					// if master not init pause cluster deployment
					r.logWarningAndEvent(log, bkeCluster, constant.TargetClusterBootingFailedReason, "The control plane initialization encountered some errors, which are beyond the control scope of bke, the next deployment cannot proceed, and the cluster deployment has been paused")
					// user can fix the problem in error node, and restart bkeagent to rerun command, then controller can snap command
					r.logWarningAndEvent(log, bkeCluster, constant.TargetClusterBootingFailedReason, "You can check the BKEAgent log on the error node (/var/log/bkeagent.log) and manually resolve the problem. Then restart the BKEAgent on the node")
					r.logWarningAndEvent(log, bkeCluster, constant.TargetClusterBootingFailedReason, "If problem can not be resolved, you can delete the cluster by delete BKECluster resource %q directly", utils.ClientObjNS(bkeCluster))

					return ctrl.Result{}, mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster)
				}

				// Remove the bkeMachine label so bootstrap can be retried; removal
				// of the master-init label is handled by the ensuremasterinit phase.
				labelhelper.RemoveBKEMachineLabel(bkeMachine, role)

				// The cluster master is initialized; later deployment can continue.
				r.logWarningAndEvent(log, bkeCluster, constant.TargetClusterBootingFailedReason, "You can check the BKEAgent log on the error node (/var/log/bkeagent.log) and manually resolve the problem. Then restart the BKEAgent on the node")
				r.logWarningAndEvent(log, bkeCluster, constant.TargetClusterBootingFailedReason, "If problem can not be resolved, you can delete the error node config in BKECluster.spec.clusterConfig.Nodes")
				r.logWarningAndEvent(log, bkeCluster, constant.TargetClusterNotReadyReason, "Cluster %q Control plane already init, the next deployment can continue", bkeCluster.Name)

				return ctrl.Result{}, mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster)

			}

			// bootstrapCommand only contains one node
			if complete && len(failedNodes) == 0 && len(successNodes) == 1 {
				// check provider ID
				r.logInfoAndEvent(log, bkeCluster, constant.TargetClusterBootingReason, "Attempting to connect to target cluster node %q, please wait", phaseutil.NodeInfo(currentNode))
				err = wait.PollImmediate(5*time.Second, 4*time.Minute, func() (bool, error) {
					bkeCluster, err = mergecluster.GetCombinedBKECluster(ctx, r.Client, bkeCluster.Namespace, bkeCluster.Name)
					if err != nil {
						return false, err
					}
					if bkeCluster.Status.ClusterStatus == bkev1beta1.ClusterDeleting {
						return true, nil
					}
					if err := r.checkTargetClusterNode(ctx, bkeCluster, cluster, machine, currentNode, log); err != nil {
						log.Warnf("(ignore) Failed to check target cluster node %q, retrying...,err: %s", phaseutil.NodeInfo(currentNode), err.Error())
						return false, nil
					}
					return true, nil
				})
				if err != nil {
					errInfo := fmt.Sprintf("4 Minute after, the target cluster node %q is still unavailable,err: %s", phaseutil.NodeInfo(currentNode), err.Error())
					bkeCluster.SetNodeStateWithMessage(currentNode.IP, bkev1beta1.NodeBootStrapFailed, errInfo)
					r.logErrorAndEvent(log, bkeCluster, constant.NodeBootStrapFailedReason, errInfo)
					r.logErrorAndEvent(log, bkeCluster, constant.NodeBootStrapFailedReason, "It seems that the kubernetes component of node %q did not start properly", phaseutil.NodeInfo(currentNode))
					condition.ConditionMark(bkeCluster, bkev1beta1.TargetClusterBootCondition, confv1beta1.ConditionFalse, constant.NodeBootStrapFailedReason, errInfo)
					res = util.LowestNonZeroResult(res, ctrl.Result{RequeueAfter: 5 * time.Second})
					if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster); err != nil {
						log.Errorf("mergecluster.SyncStatusUntilComplete failed, err: %s", err.Error())
						res = util.LowestNonZeroResult(res, ctrl.Result{RequeueAfter: 5 * time.Second})
					}
					continue
				}
				if bkeCluster.Status.ClusterStatus == bkev1beta1.ClusterDeleting {
					return ctrl.Result{}, nil
				}
				providerID := phaseutil.GenerateProviderID(bkeCluster, currentNode)
				if err := r.markBKEMachineBootstrapReady(bkeCluster, bkeMachine, currentNode, providerID, log); err != nil {
					errs = append(errs, err)
				}

				metricrecord.NodeBootstrapSuccessCountRecord(bkeCluster)
				metricrecord.NodeBootstrapDurationRecord(bkeCluster, currentNode, cmd.CreationTimestamp.Time, "success")

				annotation.SetAnnotation(&cmd, annotation.CommandReconciledAnnotationKey, "true")
				if err := r.Client.Update(ctx, &cmd); err != nil {
					log.Errorf("failed to mark command reconciled, err: %s", err.Error())
					errs = append(errs, err)
				}

				if err = r.reconcileBKEMachine(ctx, bkeCluster, bkeMachine, currentNode, log); err != nil {
					errs = append(errs, err)
				}
			}

		case strings.HasPrefix(cmd.Name, command.ResetNodeCommandNamePrefix):
			// NOTE(review): commandNodes is never empty here because the loop
			// continues above when no node matches; this branch is kept as a
			// safety net — see the note near the continue above.
			if len(commandNodes) == 0 {
				controllerutil.RemoveFinalizer(bkeMachine, bkev1beta1.BKEMachineFinalizer)
				if err := patchBKEMachine(ctx, patchHelper, bkeMachine); err != nil {
					log.Error("failed to patch bkeMachine", err)
				}
				return ctrl.Result{Requeue: true}, nil
			}
			if complete && len(failedNodes) == 0 && len(successNodes) == 1 {

				bkeCluster.RemoveNodeState(currentNode.IP)
				// sync bkeCluster Status
				if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster); err != nil {
					log.Errorf("failed to sync bkeCluster Status: %v", err)
					res = util.LowestNonZeroResult(res, ctrl.Result{RequeueAfter: 5 * time.Second})
					continue
				}
				controllerutil.RemoveFinalizer(bkeMachine, bkev1beta1.BKEMachineFinalizer)
				if err := patchBKEMachine(ctx, patchHelper, bkeMachine); err != nil {
					log.Error("failed to patch bkeMachine", err)
				}
				return ctrl.Result{Requeue: true}, nil
			}
			// todo: add more command here
			// case
		}
	}
	return res, kerrors.NewAggregate(errs)
}

// reconcileBKEMachine aggregates the bootstrap state of all BKEMachines of the
// cluster and updates the BKECluster TargetClusterBoot condition accordingly.
func (r *BKEMachineReconciler) reconcileBKEMachine(ctx context.Context, bkeCluster *bkev1beta1.BKECluster, bkemachine *bkev1beta1.BKEMachine, n confv1beta1.Node, log *zap.SugaredLogger) error {
	// Nothing to do once the boot condition is already true.
	if condition.HasConditionStatus(bkev1beta1.TargetClusterBootCondition, bkeCluster, confv1beta1.ConditionTrue) {
		return nil
	}

	nodes := bkenode.Nodes(bkeCluster.Spec.ClusterConfig.Nodes)

	bkeMachines, err := phaseutil.GetBKEClusterAssociateBKEMachines(ctx, r.Client, bkeCluster)
	if err != nil {
		return err
	}

	bkeMachineNum := len(bkeMachines)
	nodesNum := nodes.Length()

	bootStrapNodeFailed := false

	// The cluster has not finished booting when bkeMachines is empty or its
	// count differs from the node count.
	clusterReady := bkeMachineNum != 0 && bkeMachineNum == nodesNum
	for i, bm := range bkeMachines {
		// Use the in-memory copy of the machine currently being reconciled, as
		// it may carry a newer status than the listed object.
		if bm.Name == bkemachine.Name {
			bm = *bkemachine
			bkeMachines[i] = *bkemachine
		}

		if conditions.GetReason(&bm, bkev1beta1.BootstrapSucceededCondition) == constant.NodeBootStrapFailedReason {
			bootStrapNodeFailed = true
		}
		if !bm.Status.Bootstrapped {
			clusterReady = false
			break
		}
	}
	log.Debugf("bkeMachineNum: %d, nodesNum: %d, clusterReady: %v", bkeMachineNum, nodesNum, clusterReady)

	// All machines have reported a bootstrap result (success or failure) once
	// the failed+success count reaches the expected node count.
	expectedBootNum := nodesNum
	failedBootNodeNum, successBootNodeNum := phaseutil.CalculateBKEMachineBootNum(bkeMachines)
	allBootFlag := failedBootNodeNum+successBootNodeNum == expectedBootNum

	if allBootFlag {
		bkeCluster.Status.KubernetesVersion = bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion
		if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster); err != nil {
			log.Error("failed to patch BKECluster", err)
			return err
		}
	} else {
		r.logInfoAndEvent(log, bkeCluster, constant.TargetClusterBootingReason, "Waiting for Cluster-API to reconcile")
	}

	if bootStrapNodeFailed && allBootFlag {
		condition.ConditionMark(bkeCluster, bkev1beta1.TargetClusterBootCondition, confv1beta1.ConditionFalse, constant.NodeBootStrapFailedReason, "")
		r.logWarningAndEvent(log, bkeCluster, constant.NodeBootStrapFailedReason, "The target cluster %q has finished booting, but some node bootstrap failed", bkeCluster.Name)
		return nil
	}

	if clusterReady && allBootFlag {
		condition.ConditionMark(bkeCluster, bkev1beta1.TargetClusterBootCondition, confv1beta1.ConditionTrue, constant.TargetClusterBootReadyReason, "")
		// Emit the completion event once (it was previously logged twice).
		r.logInfoAndEvent(log, bkeCluster, constant.TargetClusterBootReadyReason, "The target cluster %q has finished booting", bkeCluster.Name)

		metricrecord.ClusterBootstrapDurationRecord(bkeCluster)

		if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster); err != nil {
			log.Error("failed to patch BKECluster", err)
			return err
		}
		return nil
	}

	condition.ConditionMark(bkeCluster, bkev1beta1.TargetClusterBootCondition, confv1beta1.ConditionFalse, constant.TargetClusterBootingReason, fmt.Sprintf("bootstrap node %q", phaseutil.NodeInfo(n)))
	if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster); err != nil {
		log.Error("failed to patch BKECluster", err)
		return err
	}
	return nil
}

// filterAvailableNode returns the first node in roleNodes that is neither
// bound to a BKEMachine nor currently booting. For the InitControlPlane phase
// the first master node is always used.
func (r *BKEMachineReconciler) filterAvailableNode(ctx context.Context, roleNodes bkenode.Nodes, bkeCluster *bkev1beta1.BKECluster, phase confv1beta1.BKEClusterPhase) (*confv1beta1.Node, error) {
	r.mux.Lock()
	defer r.mux.Unlock()

	// The init control plane always bootstraps on the first master node.
	if phase == bkev1beta1.InitControlPlane {
		r.nodesBootRecord[roleNodes[0].IP] = struct{}{}
		return &roleNodes[0], nil
	}

	bkeMachineList := &bkev1beta1.BKEMachineList{}
	var availableNode *confv1beta1.Node

	if err := r.Client.List(ctx, bkeMachineList, phaseutil.GetListFiltersByBKECluster(bkeCluster)...); err != nil {
		return nil, err
	}

	for i := range roleNodes {
		node := roleNodes[i]
		// When several workers boot concurrently the BKEMachineLabel may not be
		// applied yet, so also track in-flight nodes in nodesBootRecord to
		// prevent one node from being assigned to multiple machines.
		if _, ok := r.nodesBootRecord[node.IP]; ok {
			continue
		}

		nodeBind := false
		// Check whether the node has already been assigned to a bkeMachine.
		for j := range bkeMachineList.Items {
			if v, ok := labelhelper.CheckBKEMachineLabel(&bkeMachineList.Items[j]); ok && v == node.IP {
				nodeBind = true
				break
			}
		}
		// Return the node if it is unassigned. Take the address of the slice
		// element rather than the loop variable to avoid aliasing issues.
		if !nodeBind {
			availableNode = &roleNodes[i]
			r.nodesBootRecord[node.IP] = struct{}{}
			break
		}
	}

	if availableNode == nil {
		return nil, errors.New("no available node")
	}

	return availableNode, nil
}

// checkTargetClusterNode uses the remote client to check that the target
// cluster node with the generated providerID has been registered, then
// cordons dedicated master nodes (unless the annotation forbids it) and sets
// the node role label.
func (r *BKEMachineReconciler) checkTargetClusterNode(ctx context.Context, bkeCluster *bkev1beta1.BKECluster, cluster *clusterv1.Cluster, machine *clusterv1.Machine, currentNode confv1beta1.Node, log *zap.SugaredLogger) error {
	targetClusterClient, err := kube.NewRemoteClusterClient(ctx, r.Client, bkeCluster)
	if err != nil {
		return err
	}
	providerID := phaseutil.GenerateProviderID(bkeCluster, currentNode)

	nodeList := &corev1.NodeList{}

	if err = targetClusterClient.List(ctx, nodeList); err != nil {
		return err
	}

	// A dedicated master node must be cordoned.
	// A master/worker node is not cordoned but gets the master-worker role label.
	// A worker node is not cordoned but gets the worker role label.
	node := bkenode.Node(currentNode)
	if len(node.Role) == 0 {
		// Previously node.Role[0] was indexed unconditionally and would panic
		// for a node without any role.
		return errors.Errorf("node %q has no role", currentNode.IP)
	}
	cordon := false
	// Whether the annotation allows cordoning; allowed by default.
	annotationCordonFlag := true
	nodeRole := node.Role[0]
	if node.IsMasterWorker() {
		nodeRole = bkenode.MasterWorkerNodeRole
		cordon = false
	}
	if node.IsMaster() {
		nodeRole = bkenode.MasterNodeRole
		cordon = true
	}
	if node.IsWorker() {
		nodeRole = bkenode.WorkerNodeRole
	}
	// Skip cordoning a master node when the annotation marks it schedulable.
	if v, ok := annotation.HasAnnotation(bkeCluster, annotation.MasterSchedulableAnnotationKey); ok && v == "true" {
		annotationCordonFlag = false
	}

	// check providerID already exists or not
	if len(nodeList.Items) == 0 {
		// If for whatever reason the index isn't registered or available, we fall back to loop over the whole list.
		nl := corev1.NodeList{}
		for {
			if err := targetClusterClient.List(ctx, &nl, client.Continue(nl.Continue)); err != nil {
				return err
			}
			for _, n := range nl.Items {
				if n.Spec.ProviderID == "" {
					continue
				}
				if providerID == n.Spec.ProviderID {
					if cordon && annotationCordonFlag {
						if err := r.cordonMasterNode(ctx, bkeCluster, &n, log); err != nil {
							log.Warnf("failed to cordon master node %q", n.Name)
						}
					}

					if err := setTargetClusterNodeRole(ctx, targetClusterClient, &n, nodeRole); err != nil {
						return errors.Wrapf(err, "failed patch target cluster node, providerID %q", providerID)
					}
					if err := MockKubeadmConfigConfigmap(ctx, targetClusterClient); err != nil {
						return errors.Wrap(err, "failed to create mock kubeadm-config configmap")
					}
					if err := MockKubeletConfigConfigmap(ctx, targetClusterClient, bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion); err != nil {
						return errors.Wrap(err, "failed to create mock kubelet-config configmap")
					}
					// The providerID is already successfully registered.
					return nil
				}
			}
			if nl.Continue == "" {
				break
			}
		}
		return errors.Errorf("could not find node with providerID %s ", providerID)
	}

	// Collect the nodes matching the providerID; exactly one is expected.
	ns := []corev1.Node{}
	for _, n := range nodeList.Items {
		if n.Spec.ProviderID == "" {
			continue
		}
		if providerID == n.Spec.ProviderID {
			ns = append(ns, n)
		}
	}
	if len(ns) != 1 {
		return errors.Errorf("unexpectedly found more than one Node matching the providerID %s", providerID)
	}

	// Exactly one match means the providerID is already successfully registered.
	if cordon && annotationCordonFlag {
		if err := r.cordonMasterNode(ctx, bkeCluster, &ns[0], log); err != nil {
			// Pass the string value, not a *string, to %q.
			log.Warnf("failed to cordon master node %q", ns[0].Name)
		}
	}

	if err := setTargetClusterNodeRole(ctx, targetClusterClient, &ns[0], nodeRole); err != nil {
		return errors.Wrapf(err, "failed patch target cluster node, providerID %q", providerID)
	}

	if err := MockKubeadmConfigConfigmap(ctx, targetClusterClient); err != nil {
		return errors.Wrap(err, "failed to create mock kubeadm-config configmap")
	}
	if err := MockKubeletConfigConfigmap(ctx, targetClusterClient, bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion); err != nil {
		return errors.Wrap(err, "failed to create mock kubelet-config configmap")
	}
	return nil
}

// markBKEMachineBootstrapReady marks the BKEMachine as ready and bootstrapped:
// it records the machine addresses and providerID, sets the Bootstrap
// condition to true, updates the node state on the BKECluster
// (booted flag + NodeNotReady message), and syncs the BKECluster status.
func (r *BKEMachineReconciler) markBKEMachineBootstrapReady(bkeCluster *bkev1beta1.BKECluster, bkeMachine *bkev1beta1.BKEMachine, assocNode confv1beta1.Node, providerID string, log *zap.SugaredLogger) error {
	// Record the node addresses and providerID on the machine, then flip the
	// readiness flags and mark the bootstrap condition as succeeded.
	setMachineAddress(bkeMachine, assocNode)
	setProviderID(bkeMachine, providerID)
	bkeMachine.Status.Bootstrapped = true
	bkeMachine.Status.Ready = true
	conditions.MarkTrue(bkeMachine, bkev1beta1.BootstrapSucceededCondition)

	r.logInfoAndEvent(log, bkeCluster, constant.TargetClusterBootingReason, "node %q, role %v bootstrap succeeded", phaseutil.NodeInfo(assocNode), assocNode.Role)

	// Flag the node as booted on the cluster; it stays NodeNotReady until the
	// target cluster reports it healthy.
	bkeCluster.MarkNodeStateFlag(assocNode.IP, bkev1beta1.NodeBootFlag)
	bkeCluster.SetNodeStateWithMessage(assocNode.IP, bkev1beta1.NodeNotReady, "Bootstrap Succeeded")

	err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster)
	if err == nil {
		return nil
	}
	log.Errorf("failed to update bkeCluster Status, err: %s", err.Error())
	return errors.Errorf("failed to update bkeCluster Status, err: %s", err.Error())
}

// patchOrGetRemoteNodeProviderID patches the remote node's boc-version label
// and, only when it is still empty, its spec.ProviderID, then returns the
// providerID actually present on the remote node (which may differ from the
// generated one on an adopted cluster).
func (r *BKEMachineReconciler) patchOrGetRemoteNodeProviderID(ctx context.Context, bkeCluster *bkev1beta1.BKECluster, node *confv1beta1.Node, providerID string) (string, error) {
	// patch remote node providerID
	targetClusterClient, err := kube.NewRemoteClusterClient(ctx, r.Client, bkeCluster)
	if err != nil {
		return "", errors.Wrap(err, "failed to create target cluster client")
	}

	if err = MockKubeadmConfigConfigmap(ctx, targetClusterClient); err != nil {
		return "", errors.Wrap(err, "failed to create mock kubeadm-config configmap")
	}
	if err = MockKubeletConfigConfigmap(ctx, targetClusterClient, bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion); err != nil {
		return "", errors.Wrap(err, "failed to create mock kubelet-config configmap")
	}

	// get remote node
	remoteNode := &corev1.Node{}
	if err = targetClusterClient.Get(ctx, client.ObjectKey{Name: node.Hostname}, remoteNode); err != nil {
		// Wrap the underlying error instead of discarding it.
		return "", errors.Wrapf(err, "failed to get remote node %q", phaseutil.NodeInfo(*node))
	}
	p := client.MergeFrom(remoteNode.DeepCopy())

	bocVersion, found := os.LookupEnv(constant.BocVersionEnvKey)
	if !found {
		bocVersion = constant.DefaultBocVersion
	}
	labelhelper.SetBocVersionLabel(remoteNode, bocVersion)
	// Keep an existing providerID untouched; only set it when empty.
	if remoteNode.Spec.ProviderID == "" {
		remoteNode.Spec.ProviderID = providerID
	}
	if err := targetClusterClient.Patch(ctx, remoteNode, p); err != nil {
		return "", errors.Wrapf(err, "failed to patch remote node %q", phaseutil.NodeInfo(*node))
	}

	return remoteNode.Spec.ProviderID, nil
}

// setMachineAddress copies the node's hostname and IP into the
// BKEMachine status address list.
func setMachineAddress(bkeMachine *bkev1beta1.BKEMachine, node confv1beta1.Node) {
	addresses := make([]bkev1beta1.MachineAddress, 0, 3)
	addresses = append(addresses,
		bkev1beta1.MachineAddress{Type: bkev1beta1.MachineHostName, Address: node.Hostname},
		bkev1beta1.MachineAddress{Type: bkev1beta1.MachineInternalIP, Address: node.IP},
		// The same node IP is reported for both internal and external addresses.
		bkev1beta1.MachineAddress{Type: bkev1beta1.MachineExternalIP, Address: node.IP},
	)
	bkeMachine.Status.Addresses = addresses
}

// setProviderID stores providerID on BKEMachine.spec.providerID.
func setProviderID(bkeMachine *bkev1beta1.BKEMachine, providerID string) {
	id := providerID
	bkeMachine.Spec.ProviderID = &id
}

// recordBootstrapPhaseEvent records the bootstrap phase events for the BKEMachine.
// When the BKECluster is not fully controlled, the phase is downgraded to its
// "fake" counterpart before being logged. Always returns nil; the error result
// is kept for signature compatibility with callers.
func (r *BKEMachineReconciler) recordBootstrapPhaseEvent(cluster *clusterv1.Cluster, bkeCluster *bkev1beta1.BKECluster, node *confv1beta1.Node, phase confv1beta1.BKEClusterPhase, log *zap.SugaredLogger) error {
	finalPhase := phase

	// Hoist the invariant check: only a fully controlled cluster performs the
	// real init/join phases; otherwise the corresponding fake phase is reported.
	if !clusterutil.FullyControlled(bkeCluster) {
		switch phase {
		case bkev1beta1.JoinWorker:
			finalPhase = bkev1beta1.FakeJoinWorker
		case bkev1beta1.JoinControlPlane:
			finalPhase = bkev1beta1.FakeJoinControlPlane
		case bkev1beta1.InitControlPlane:
			finalPhase = bkev1beta1.FakeInitControlPlane
		}
	}
	r.logInfoAndEvent(log, bkeCluster, constant.TargetClusterBootingReason, "cluster %q in the %s phase, node %q (host: %q, role %q)", cluster.Name, finalPhase, node.Hostname, node.IP, node.Role)
	return nil
}

// locker is a struct that holds the lock information,
// cloned from cluster-api/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go information struct
type locker struct {
	// MachineName is the name of the machine that currently holds the
	// control-plane init lock, as serialized by cluster-api.
	MachineName string `json:"machineName"`
}

// lockKey is the ConfigMap data key under which cluster-api stores the
// serialized lock information.
const lockKey = "lock-information"

// getBootstrapPhase returns the deployment phase by viewing the lock information generated by clusterAPI.
// Worker machines always join as workers. For control-plane machines, the
// cluster-api init lock ConfigMap ("<cluster>-lock") decides whether this
// machine initializes the control plane or joins an existing one.
func (r *BKEMachineReconciler) getBootstrapPhase(ctx context.Context, machine *clusterv1.Machine, cluster *clusterv1.Cluster) (confv1beta1.BKEClusterPhase, error) {
	if !util.IsControlPlaneMachine(machine) {
		return bkev1beta1.JoinWorker, nil
	}

	// From here on, machine is known to be a control-plane machine, so the
	// original re-check of IsControlPlaneMachine (and its dead worker branch)
	// has been removed.
	cmLock := &corev1.ConfigMap{}
	key := client.ObjectKey{
		Namespace: cluster.Namespace,
		Name:      fmt.Sprintf("%s-lock", cluster.Name),
	}
	if err := r.Client.Get(ctx, key, cmLock); err != nil {
		if apierrors.IsNotFound(err) {
			// No lock yet: init when the control plane has never been
			// initialized, otherwise join the existing control plane.
			if conditions.IsFalse(cluster, clusterv1.ControlPlaneInitializedCondition) {
				return bkev1beta1.InitControlPlane, nil
			}
			return bkev1beta1.JoinControlPlane, nil
		}
		// Wrap the underlying error so the cause is preserved.
		return "", errors.Wrapf(err, "failed to get the lock configmap %s", key.String())
	}
	if cmLock.Data == nil {
		return "", errors.Errorf("lock data is nil,lock configmap %s", cmLock.Name)
	}
	// The machine named in the lock is the one performing the init.
	l := &locker{}
	if err := json.Unmarshal([]byte(cmLock.Data[lockKey]), l); err != nil {
		return "", errors.Wrapf(err, "failed to unmarshal lock information")
	}
	if l.MachineName == machine.Name {
		return bkev1beta1.InitControlPlane, nil
	}
	return bkev1beta1.JoinControlPlane, nil
}

// cordonMasterNode marks the given node in the target cluster as unschedulable
// using the kubectl drain helper.
func (r *BKEMachineReconciler) cordonMasterNode(ctx context.Context, bkeCluster *bkev1beta1.BKECluster, node *corev1.Node, log *zap.SugaredLogger) error {
	remoteClient, err := kube.NewRemoteClientByBKECluster(ctx, r.Client, bkeCluster)
	if err != nil {
		return err
	}
	// Do not discard the error: a nil clientset would panic inside the drainer.
	cs, err := remoteClient.KubeClient()
	if err != nil {
		return errors.Wrap(err, "failed to get kube client for target cluster")
	}
	bkeLogger := bkev1beta1.NewBKELogger(log, r.Recorder, bkeCluster)
	drainer := phaseutil.NewDrainer(ctx, cs, nil, false, bkeLogger)
	return kubedrain.RunCordonOrUncordon(drainer, node, true)
}

// setTargetClusterNodeRole sets the role label(s) matching nodeRole plus the
// boc version label on the target cluster node, then persists the change with
// an Update. (The original doc comment mentioned taints; only labels are set.)
func setTargetClusterNodeRole(ctx context.Context, c client.Client, node *corev1.Node, nodeRole string) error {
	switch nodeRole {
	case bkenode.WorkerNodeRole:
		labelhelper.SetWorkerRoleLabel(node)
	case bkenode.MasterNodeRole:
		labelhelper.SetMasterRoleLabel(node)
	case bkenode.MasterWorkerNodeRole:
		// A combined master+worker role carries both labels.
		labelhelper.SetMasterRoleLabel(node)
		labelhelper.SetWorkerRoleLabel(node)
	}
	// Fall back to the default boc version when the env variable is unset.
	bocVersion, found := os.LookupEnv(constant.BocVersionEnvKey)
	if !found {
		bocVersion = constant.DefaultBocVersion
	}
	labelhelper.SetBocVersionLabel(node, bocVersion)
	// Return the Update result directly instead of an if/return nil dance.
	return c.Update(ctx, node)
}

// MockKubeadmConfigConfigmap mocks the kubeadm-config configmap in the target cluster.
// This is used to avoid the cluster-api control-plane provider to fail when trying to fetch the kubeadm-config configmap.
func MockKubeadmConfigConfigmap(ctx context.Context, c client.Client) error {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: metav1.NamespaceSystem,
			Name:      constant.KubeadmConfigKey,
		},
		Data: map[string]string{
			"ClusterStatus":        constant.MockData,
			"ClusterConfiguration": constant.MockData,
		},
	}

	// Create-or-update: fall back to Update when the configmap already exists.
	err := c.Create(ctx, cm)
	if err == nil {
		return nil
	}
	if apierrors.IsAlreadyExists(err) {
		return c.Update(ctx, cm)
	}
	return err
}

// minVerUnversionedKubeletConfig is the minimum Kubernetes version that supports the unversioned kubelet configmap.
// Starting with this version, kubeadm names the ConfigMap "kubelet-config"
// without a major.minor suffix.
var minVerUnversionedKubeletConfig = semver.MustParse("1.24.0")

const (
	// UnversionedKubeletConfigMapName defines base kubelet configuration ConfigMap for kubeadm >= 1.24.
	UnversionedKubeletConfigMapName = "kubelet-config"
	// KubeletConfigMapName defines base kubelet configuration ConfigMap name for kubeadm < 1.24.
	// Format string: filled with the major and minor version numbers.
	KubeletConfigMapName = "kubelet-config-%d.%d"
)

// MockKubeletConfigConfigmap creates a mock kubelet ConfigMap for a given Kubernetes version in the target cluster.
// This is used to avoid the cluster-api control-plane provider to fail when trying to fetch the kubelet ConfigMap.
func MockKubeletConfigConfigmap(ctx context.Context, c client.Client, currentVersion string) error {
	parsed, err := version.ParseMajorMinorPatch(currentVersion)
	if err != nil {
		return err
	}
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			// The ConfigMap name depends on the Kubernetes version (see generateKubeletConfigName).
			Name:      generateKubeletConfigName(parsed),
			Namespace: metav1.NamespaceSystem,
		},
		Data: map[string]string{
			"kubelet": constant.MockData,
		},
	}
	// Create-or-update semantics: tolerate an already-existing configmap.
	err = c.Create(ctx, cm)
	if apierrors.IsAlreadyExists(err) {
		return c.Update(ctx, cm)
	}
	return err
}

// generateKubeletConfigName returns the name of the kubelet ConfigMap for a given Kubernetes version.
// used in MockKubeletConfigConfigmap which is created by kubeadm in the target cluster
func generateKubeletConfigName(version semver.Version) string {
	// Compare only major.minor against the cutoff version.
	stripped := semver.Version{Major: version.Major, Minor: version.Minor}
	if stripped.LT(minVerUnversionedKubeletConfig) {
		// kubeadm < 1.24 versions the ConfigMap name with major.minor.
		return fmt.Sprintf(KubeletConfigMapName, version.Major, version.Minor)
	}
	return UnversionedKubeletConfigMapName
}
