package phases

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/pkg/errors"
	confv1beta1 "gopkg.openfuyao.cn/bkecommon/cluster/api/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/utils/pointer"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util"
	ctrl "sigs.k8s.io/controller-runtime"

	bkev1beta1 "gopkg.openfuyao.cn/cluster-api-provider-bke/api/v1beta1"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/kube"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/mergecluster"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe/phaseutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/statusmanage"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/constant"
)

const (
	// EnsureMasterDeleteName is the phase name under which the master-node
	// deletion phase is registered in the phase framework.
	EnsureMasterDeleteName confv1beta1.BKEClusterPhase = "EnsureMasterDelete"
)

// EnsureMasterDelete is the phase that removes control-plane (master) nodes:
// it marks the backing cluster-api Machines for deletion, scales down the
// KubeadmControlPlane accordingly, and waits for the Machines to disappear.
type EnsureMasterDelete struct {
	phaseframe.BasePhase
	// machinesAndNodesToDelete maps machine name -> machine/node pair for
	// machines newly marked for deletion during reconcileMasterDelete.
	machinesAndNodesToDelete     map[string]phaseutil.MachineAndNode
	// machinesAndNodesToWaitDelete maps machine name -> machine/node pair for
	// machines found already in the Deleting phase (we only wait on these).
	machinesAndNodesToWaitDelete map[string]phaseutil.MachineAndNode
}

// NewEnsureMasterDelete builds the master-delete phase with empty bookkeeping
// maps and the shared base-phase wiring.
func NewEnsureMasterDelete(ctx *phaseframe.PhaseContext) phaseframe.Phase {
	phase := &EnsureMasterDelete{
		BasePhase:                    phaseframe.NewBasePhase(ctx, EnsureMasterDeleteName),
		machinesAndNodesToDelete:     map[string]phaseutil.MachineAndNode{},
		machinesAndNodesToWaitDelete: map[string]phaseutil.MachineAndNode{},
	}
	return phase
}

// Execute runs the master-delete phase: first reconcile (mark machines and
// scale down the control plane), then block until the machines are removed.
func (e *EnsureMasterDelete) Execute() (ctrl.Result, error) {
	if err := e.reconcileMasterDelete(); err != nil {
		return ctrl.Result{}, err
	}
	if err := e.waitMasterDelete(); err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}

// NeedExecute reports whether the master-delete phase has work to do: the
// default phase gate must pass and at least one master node must be marked
// for deletion. When execution is needed, the phase status is moved to
// Waiting as a side effect.
//
// Parameters are renamed from old/new to avoid shadowing the builtin `new`;
// parameter names are not part of the method's external interface in Go.
func (e *EnsureMasterDelete) NeedExecute(oldCluster *bkev1beta1.BKECluster, newCluster *bkev1beta1.BKECluster) (needExecute bool) {
	if !e.BasePhase.DefaultNeedExecute(oldCluster, newCluster) {
		return false
	}

	// Execute only if at least one master node is scheduled for deletion.
	if phaseutil.GetNeedDeleteMasterNodes(newCluster).Length() == 0 {
		return false
	}
	e.SetStatus(bkev1beta1.PhaseWaiting)
	return true
}

// reconcileMasterDelete marks the cluster-api Machines backing the master
// nodes scheduled for deletion and scales down the KubeadmControlPlane.
// Nodes with no associated Machine are treated as already deleted and are
// purged from the BKECluster status; Machines already in the Deleting phase
// are recorded so waitMasterDelete can wait on them.
func (e *EnsureMasterDelete) reconcileMasterDelete() error {
	// todo: keep in sync with the worker delete logic
	ctx, c, bkeCluster, _, log := e.Ctx.Untie()
	nodes := phaseutil.GetNeedDeleteMasterNodes(bkeCluster)

	log.Info(constant.MasterDeletingReason, "Start delete master nodes process")
	log.Info(constant.MasterDeletingReason, "Check whether the node has been associated with a Machine to avoid duplicate deletion")

	nodesInfos := []string{}
	nodesCount := 0
	// machineName -> machineAndNode
	machineToNodeDeleteMap := map[string]phaseutil.MachineAndNode{}
	machineToNodeWaitDeleteMap := map[string]phaseutil.MachineAndNode{}

	// Walk all nodes scheduled for deletion; a node that no longer has a
	// Machine is considered already deleted and must be removed from status.
	log.Debug("get associated machine for nodes, to avoid duplicate deletion")
	for _, node := range nodes {
		// Normally a Machine should be found; if not, the node has already
		// been deleted and needs to be dropped from status.
		machine, err := phaseutil.NodeToMachine(ctx, c, bkeCluster, node)
		if err != nil {
			log.Warn(constant.MasterDeletedReason, "Node %s has not been associated with a Machine, skip delete it", phaseutil.NodeInfo(node))
			// The node is still in status but has no Machine: purge it.
			bkeCluster.RemoveNodeState(node.IP)
			// Remove the node from the status manager cache as well.
			statusmanage.BKEClusterStatusManager.RemoveSingleNodeStatusCache(bkeCluster, node.IP)
			// remove node from AppointmentDeletedNodesAnnotationKey
			patchFunc := func(cluster *bkev1beta1.BKECluster) {
				phaseutil.RemoveAppointmentDeletedNodes(cluster, node.IP)
			}
			if err = mergecluster.SyncStatusUntilComplete(c, bkeCluster, patchFunc); err != nil {
				// Fix: was MasterJoinedReason (copy-paste from the join phase).
				log.Error(constant.MasterDeleteFailedReason, "Sync status failed. err: %v", err)
				return err
			}
			continue
		}
		switch machine.Status.Phase {
		case string(clusterv1.MachinePhaseDeleting):
			// Fix: was WorkerDeletedReason (copy-paste from the worker phase).
			log.Info(constant.MasterDeletingReason, "node %s is in deleting phase, skip delete it", phaseutil.NodeInfo(node))
			machineToNodeWaitDeleteMap[machine.Name] = phaseutil.MachineAndNode{Machine: machine, Node: node}
			continue
		case string(clusterv1.MachinePhaseDeleted):
			// Fix: was WorkerDeletedReason (copy-paste from the worker phase).
			log.Info(constant.MasterDeletedReason, "node %s is in deleted phase, skip delete it", phaseutil.NodeInfo(node))
			continue
		}
		nodesInfos = append(nodesInfos, phaseutil.NodeInfo(node))
		nodesCount++
		machineToNodeDeleteMap[machine.Name] = phaseutil.MachineAndNode{
			Machine: machine,
			Node:    node,
		}
	}

	// Record machines already deleting so the wait step covers them too.
	e.machinesAndNodesToWaitDelete = machineToNodeWaitDeleteMap

	// Nothing new to delete: done.
	if nodesCount == 0 {
		log.Info(constant.MasterDeletedReason, "No master nodes need to be deleted")
		return nil
	}
	log.Info(constant.MasterDeletingReason, "%d nodes need to deleted, nodes: %v", nodesCount, strings.Join(nodesInfos, ", "))
	log.Debug("get kubeadm control plane")
	scope, err := phaseutil.GetClusterAPIAssociateObjs(ctx, c, e.Ctx.Cluster)
	if err != nil || scope.KubeadmControlPlane == nil {
		log.Error(constant.MasterDeleteFailedReason, "Get cluster-api associate objs failed. err: %v", err)
		// cluster api object error, no need to continue
		return err
	}

	// Pause the KubeadmControlPlane so we can annotate specific machines for deletion.
	log.Debug("pause kubeadm control plane")
	if err = phaseutil.PauseClusterAPIObj(ctx, c, scope.KubeadmControlPlane); err != nil {
		log.Error(constant.MasterDeleteFailedReason, "Pause KubeadmControlPlane failed. err: %v", err)
		return err
	}
	log.Info(constant.MasterDeletingReason, "Pause KubeadmControlPlane success")

	// Snapshot the replica pointer from a deep copy so later mutation of the
	// live spec does not affect the rollback value.
	currentReplicas := scope.KubeadmControlPlane.Spec.DeepCopy().Replicas
	// On any failure below, restore the replica count and resume the paused
	// KubeadmControlPlane. NOTE: this also fires on the "all marks failed"
	// early return below (err is still non-nil there), so the paused object
	// is always resumed.
	defer func() {
		if err != nil {
			if currentReplicas != nil {
				// Fix: %d on the *int32 itself printed the pointer address,
				// not the replica count.
				log.Info(constant.MasterDeleteFailedReason, "Scale up KubeadmControlPlane replicas to %d.", *currentReplicas)
			}
			scope.KubeadmControlPlane.Spec.Replicas = currentReplicas
			// Fix: use a fresh variable so the captured err is not clobbered.
			if rerr := phaseutil.ResumeClusterAPIObj(ctx, c, scope.KubeadmControlPlane); rerr != nil {
				log.Error(constant.MasterDeleteFailedReason, "Rollback KubeadmControlPlane replicas failed. err: %v", rerr)
			}
		}
	}()

	// Mark each target machine for deletion; drop machines we cannot mark.
	log.Debug("mark machine for deletion")
	for _, machineAndNode := range machineToNodeDeleteMap {
		machine := machineAndNode.Machine
		if err = phaseutil.MarkMachineForDeletion(ctx, c, machine); err != nil {
			log.Error(constant.MasterDeleteFailedReason, "Can't delete node %s", phaseutil.NodeInfo(machineAndNode.Node))
			log.Error(constant.MasterDeleteFailedReason, "Mark machine %s for deletion failed. err: %v", utils.ClientObjNS(machine), err)
			delete(machineToNodeDeleteMap, machine.Name)
		}
	}

	// Every mark failed: return nil; the deferred rollback (err is non-nil
	// here) resumes the paused KubeadmControlPlane.
	if len(machineToNodeDeleteMap) == 0 {
		log.Info(constant.MasterDeleteFailedReason, "Some nodes cannot be completely deleted")
		return nil
	}
	e.machinesAndNodesToDelete = machineToNodeDeleteMap

	// Scale down the KubeadmControlPlane by the number of machines marked.
	// Fix: guard against a nil Replicas pointer before dereferencing.
	var base int32 = 1
	if currentReplicas != nil {
		base = *currentReplicas
	}
	exceptReplicas := base - int32(len(machineToNodeDeleteMap))
	// The replica count must never drop below 1.
	if exceptReplicas < 1 {
		exceptReplicas = 1
	}
	scope.KubeadmControlPlane.Spec.Replicas = &exceptReplicas

	log.Info(constant.MasterDeletingReason, "Scale down KubeadmControlPlane replicas to %d.", exceptReplicas)

	// Resume the KubeadmControlPlane with the reduced replica count.
	if err = phaseutil.ResumeClusterAPIObj(ctx, c, scope.KubeadmControlPlane); err != nil {
		// Fix: was MasterJoinFailedReason (copy-paste from the join phase).
		log.Error(constant.MasterDeleteFailedReason, "Scale down KubeadmControlPlane replicas failed. err: %v", err)
		// cluster api object error, no need to continue
		return err
	}

	return nil
}

// waitMasterDelete blocks (up to 4 minutes) until every Machine in the wait
// set — both the ones newly marked for deletion and the ones already in the
// Deleting phase — is gone, then purges the deleted nodes from the BKECluster
// status and best-effort force-deletes their leftover pods on the workload
// cluster.
// todo: this mirrors waitWorkerDelete almost exactly; extract a shared helper.
func (e *EnsureMasterDelete) waitMasterDelete() error {
	// Merge both sets into a fresh map. Fix: the original merged into the
	// receiver's machinesAndNodesToWaitDelete map via aliasing, mutating the
	// phase's bookkeeping state; also len()==0 already covers the nil case.
	toWait := make(map[string]phaseutil.MachineAndNode, len(e.machinesAndNodesToWaitDelete)+len(e.machinesAndNodesToDelete))
	for k, v := range e.machinesAndNodesToWaitDelete {
		toWait[k] = v
	}
	for k, v := range e.machinesAndNodesToDelete {
		toWait[k] = v
	}
	if len(toWait) == 0 {
		return nil
	}

	ctx, c, bkeCluster, _, log := e.Ctx.Untie()
	successDeletedNode := map[string]confv1beta1.Node{}
	ctxTimeout, cancel := context.WithTimeout(ctx, 4*time.Minute)
	defer cancel()
	err := wait.PollImmediateUntil(2*time.Second, func() (done bool, err error) {
		for machineName, machineWithNode := range toWait {
			// Skip machines already observed as deleted.
			if _, ok := successDeletedNode[machineName]; ok {
				continue
			}
			machine := machineWithNode.Machine
			if err = c.Get(ctx, util.ObjectKey(machine), machine); err != nil {
				if apierrors.IsNotFound(err) {
					log.Info(constant.MasterDeleteSucceedReason, "Machine %s has been deleted", utils.ClientObjNS(machine))
					successDeletedNode[machineName] = machineWithNode.Node
					continue
				}
				log.Error(constant.MasterDeleteFailedReason, "Get machine %s failed. err: %v", utils.ClientObjNS(machine), err)
				return false, err
			}
		}
		// Done only once every machine in the wait set has vanished.
		return len(successDeletedNode) == len(toWait), nil
	}, ctxTimeout.Done())

	if errors.Is(err, wait.ErrWaitTimeout) {
		return errors.Errorf("Wait master node delete failed")
	}
	if err != nil {
		return err
	}
	log.Info(constant.MasterDeleteSucceedReason, "Master nodes delete success")

	if len(successDeletedNode) == 0 {
		return nil
	}

	// Purge deleted nodes from the BKECluster status first. Fix: the original
	// skipped this (and the final status sync) whenever the remote client was
	// unreachable, even though status cleanup does not need the remote client.
	for _, node := range successDeletedNode {
		bkeCluster.RemoveNodeState(node.IP)
		// Remove the node from the status manager cache as well.
		statusmanage.BKEClusterStatusManager.RemoveSingleNodeStatusCache(bkeCluster, node.IP)
	}

	// Best-effort cleanup of leftover pods on the workload cluster.
	log.Info(constant.MasterDeletedReason, "Attempt to clean the legacy daemonset pod of the removed node")
	if remoteClient, rcErr := kube.NewRemoteClientByBKECluster(ctx, c, e.Ctx.BKECluster); rcErr != nil {
		log.Warn(constant.MasterDeletedReason, "Get remote client failed. err: %v", rcErr)
	} else if clientSet, csErr := remoteClient.KubeClient(); csErr != nil {
		// Fix: the KubeClient error was silently discarded, which could leave
		// clientSet nil and panic on first use.
		log.Warn(constant.MasterDeletedReason, "Get kube client failed. err: %v", csErr)
	} else {
		for _, node := range successDeletedNode {
			nodeName := node.Hostname
			// List all pods still scheduled on the removed node.
			pods, listErr := clientSet.CoreV1().Pods("").List(ctx, metav1.ListOptions{
				FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName),
			})
			if listErr != nil {
				log.Warn(constant.MasterDeletedReason, "List pods in node %s failed. err: %v", nodeName, listErr)
				continue
			}
			for i := range pods.Items {
				pod := &pods.Items[i]
				// Force delete the pod (grace period 0); failures are logged
				// and ignored — this is best-effort cleanup.
				if delErr := clientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{
					GracePeriodSeconds: pointer.Int64(0),
				}); delErr != nil {
					log.Warn(constant.MasterDeletedReason, "Delete pod %s failed. err: %v", utils.ClientObjNS(pod), delErr)
				}
			}
		}
	}
	return mergecluster.SyncStatusUntilComplete(c, bkeCluster)
}
