package phases

import (
	"context"
	"time"

	"github.com/pkg/errors"
	confv1beta1 "gopkg.openfuyao.cn/bkecommon/cluster/api/v1beta1"
	bkenode "gopkg.openfuyao.cn/bkecommon/cluster/node"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	kubedrain "k8s.io/kubectl/pkg/drain"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
	ctrl "sigs.k8s.io/controller-runtime"

	bkev1beta1 "gopkg.openfuyao.cn/cluster-api-provider-bke/api/v1beta1"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/command"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/kube"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/mergecluster"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe/phaseutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/annotation"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/constant"
	l "gopkg.openfuyao.cn/cluster-api-provider-bke/utils/log"
)

const (
	// EnsureWorkerUpgradeName is the BKECluster phase name under which the
	// worker-node upgrade phase is registered and logged.
	EnsureWorkerUpgradeName confv1beta1.BKEClusterPhase = "EnsureWorkerUpgrade"
)

// EnsureWorkerUpgrade is the phase that rolls the worker nodes of a
// BKECluster to the desired Kubernetes version, one node at a time.
// It embeds BasePhase for the shared phase-framework plumbing.
type EnsureWorkerUpgrade struct {
	phaseframe.BasePhase
}

// NewEnsureWorkerUpgrade constructs the EnsureWorkerUpgrade phase, first
// wiring a phase-scoped named logger into the shared phase context.
func NewEnsureWorkerUpgrade(ctx *phaseframe.PhaseContext) phaseframe.Phase {
	ctx.Log.NormalLogger = l.Named(EnsureWorkerUpgradeName.String())
	return &EnsureWorkerUpgrade{
		BasePhase: phaseframe.NewBasePhase(ctx, EnsureWorkerUpgradeName),
	}
}

// ExecutePreHook delegates to the framework's default pre-hook, which runs
// before Execute for every phase.
func (e *EnsureWorkerUpgrade) ExecutePreHook() error {
	// DefaultPreHook is promoted from the embedded BasePhase.
	return e.DefaultPreHook()
}

// Execute makes sure the "deployAction: k8s_upgrade" annotation (required by
// boc) is present on the BKECluster, then reconciles the worker upgrade.
func (e *EnsureWorkerUpgrade) Execute() (_ ctrl.Result, err error) {
	current, found := annotation.HasAnnotation(e.Ctx.BKECluster, "deployAction")
	if !found || current != "k8s_upgrade" {
		// Add the annotation required by boc.
		setDeployAction := func(bkeCluster *bkev1beta1.BKECluster) {
			annotation.SetAnnotation(bkeCluster, "deployAction", "k8s_upgrade")
		}
		if err = mergecluster.SyncStatusUntilComplete(e.Ctx.Client, e.Ctx.BKECluster, setDeployAction); err != nil {
			return ctrl.Result{}, err
		}
	}

	return e.reconcileWorkerUpgrade()
}

// NeedExecute reports whether the worker-upgrade phase should run for the
// transition from old to cur. It requires the framework's default
// preconditions, a cluster that is neither unhealthy nor unknown, an
// initialized control plane, and at least one worker node pending upgrade.
// As a side effect, it sets the phase status to PhaseWaiting when execution
// is needed.
//
// The second parameter is renamed from `new` to `cur`: `new` shadows the
// predeclared Go identifier (flagged by linters); Go parameter names are not
// part of the call-site interface, so callers are unaffected.
func (e *EnsureWorkerUpgrade) NeedExecute(old *bkev1beta1.BKECluster, cur *bkev1beta1.BKECluster) (needExecute bool) {
	if !e.BasePhase.DefaultNeedExecute(old, cur) {
		return false
	}

	// The cluster is not in a workable state: do not execute.
	if cur.Status.ClusterStatus == bkev1beta1.ClusterUnhealthy || cur.Status.ClusterStatus == bkev1beta1.ClusterUnknown {
		return false
	}

	// The control plane has not been initialized: do not execute.
	// NOTE(review): a RefreshCtxCluster error is tolerated here — the
	// condition is only consulted when the refresh succeeds. Presumably
	// intentional (best-effort check); confirm before tightening.
	if err := e.Ctx.RefreshCtxCluster(); err == nil {
		if !conditions.IsTrue(e.Ctx.Cluster, clusterv1.ControlPlaneInitializedCondition) {
			return false
		}
	}

	// No worker node needs an upgrade: nothing to do.
	nodes := phaseutil.GetNeedUpgradeWorkerNodes(cur)
	if nodes.Length() == 0 {
		return false
	}

	e.SetStatus(bkev1beta1.PhaseWaiting)
	return true
}

// reconcileWorkerUpgrade runs the rolling worker upgrade when the desired
// Kubernetes version differs from the version currently reported in status;
// otherwise it logs that no upgrade is needed and succeeds.
func (e *EnsureWorkerUpgrade) reconcileWorkerUpgrade() (ctrl.Result, error) {
	_, _, bkeCluster, _, log := e.Ctx.Untie()
	if bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion != bkeCluster.Status.KubernetesVersion {
		// Return the rollout result directly. The original fell through on
		// success, discarding the returned ctrl.Result and logging the
		// misleading "k8s version same" message even after an upgrade ran.
		return e.rolloutUpgrade()
	}
	log.Info(constant.WorkerUpgradedReason, "k8s version same, not need to upgrade work node")
	return ctrl.Result{}, nil
}

// rolloutUpgrade upgrades the worker nodes one at a time (rolling upgrade).
// Nodes whose BKE agent is not ready are skipped for this pass; per-node
// failures are recorded and reported at the end so the remaining nodes still
// get upgraded. A non-nil error is returned if any node failed, which blocks
// progression to the next phase.
func (e *EnsureWorkerUpgrade) rolloutUpgrade() (ctrl.Result, error) {
	ctx, c, bkeCluster, _, log := e.Ctx.Untie()

	needUpgradeNodes := bkenode.Nodes{}

	nodes := phaseutil.GetNeedUpgradeWorkerNodes(bkeCluster)
	for _, node := range nodes {
		if !bkeCluster.GetNodeStateFlag(node.IP, bkev1beta1.NodeAgentReadyFlag) {
			log.Info(constant.WorkerUpgradingReason, "agent is not ready at node %s, skip upgrade", phaseutil.NodeInfo(node))
			continue
		}
		needUpgradeNodes = append(needUpgradeNodes, node)
	}

	if len(needUpgradeNodes) == 0 {
		// Message corrected: this phase handles worker nodes, not masters.
		log.Info(constant.WorkerUpgradeFailedReason, "all the worker node BKEAgent is not ready")
		return ctrl.Result{Requeue: true}, errors.New("all the worker node BKEAgent is not ready")
	}

	// Upgrade the nodes sequentially.
	log.Info(constant.WorkerUpgradingReason, "Start upgrade worker nodes process, upgrade policy: rollingUpgrade")

	// The error was previously discarded, which could leave clientSet nil and
	// panic in the calls below; fail fast instead.
	clientSet, _, err := kube.GetTargetClusterClient(ctx, c, bkeCluster)
	if err != nil {
		log.Error(constant.WorkerUpgradeFailedReason, "get target cluster client failed, err: %v", err)
		return ctrl.Result{}, errors.Errorf("get target cluster client failed, err: %v", err)
	}
	drainer := phaseutil.NewDrainer(ctx, clientSet, nil, false, log)
	failedUpgradeNodes := []string{}

	for _, node := range needUpgradeNodes {
		remoteNode, err := phaseutil.GetRemoteNodeByBKENode(ctx, clientSet, node)
		if err != nil {
			log.Error(constant.WorkerUpgradeFailedReason, "get remote cluster Node resource failed, err: %v", err)
			return ctrl.Result{}, errors.Errorf("get remote cluster Node resource failed, err: %v", err)
		}
		// Nodes already at the desired version need no upgrade.
		if remoteNode.Status.NodeInfo.KubeletVersion == bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion {
			// Reason corrected from MasterUpgradeSucceedReason: this is the worker phase.
			log.Info(constant.WorkerUpgradeSucceedReason, "node %q is already the expected version %q, skip upgrade", phaseutil.NodeInfo(node), bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion)
			continue
		}
		// Mark node as upgrading and persist the state before starting.
		bkeCluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeUpgrading, "Upgrading")
		if err := mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
			return ctrl.Result{}, err
		}

		if err := e.upgradeNode(node, remoteNode, drainer); err != nil {
			// Record the failure but continue with the remaining nodes.
			failedUpgradeNodes = append(failedUpgradeNodes, phaseutil.NodeInfo(node))
			log.Warn(constant.WorkerUpgradeFailedReason, "upgrade node %q failed: %v", phaseutil.NodeInfo(node), err)
			bkeCluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeUpgradeFailed, err.Error())
			if err = mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
				return ctrl.Result{}, err
			}
			continue
		}
		// Mark node as upgraded; it transitions back to Ready once its
		// health check passes elsewhere.
		bkeCluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeNotReady, "Upgrading success")
		if err := mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
			return ctrl.Result{}, err
		}
	}
	if len(failedUpgradeNodes) == 0 {
		log.Info(constant.WorkerUpgradeSucceedReason, "upgrade all worker success")
		return ctrl.Result{}, nil
	}
	log.Warn(constant.WorkerUpgradeFailedReason, "upgrade worker process finished, but some nodes upgrade failed, will retry later nodes: %v", failedUpgradeNodes)
	// Some workers did not upgrade successfully: do not allow entering the next phase.
	return ctrl.Result{}, errors.Errorf("upgrade worker process finished, but some nodes upgrade failed, will retry later nodes: %v", failedUpgradeNodes)
}

// upgradeNode upgrades a single worker node: it creates and runs the upgrade
// command, waits for completion, then polls the remote cluster until the node
// passes the post-upgrade health check (2s interval, 5min timeout).
//
// The drainer parameter is currently unused because cordon/uncordon was
// removed from the upgrade flow (see the commented-out block below); it is
// kept so the call signature stays stable for callers.
func (e *EnsureWorkerUpgrade) upgradeNode(node confv1beta1.Node, remoteNode *corev1.Node, drainer *kubedrain.Helper) (err error) {
	ctx, c, bkeCluster, scheme, log := e.Ctx.Untie()

	// Cordoning is no longer required for upgrades.
	//log.Info(utils.WorkerUpgradingReason, "start cordon node %s", phaseutil.NodeInfo(node))
	//if err := kubedrain.RunCordonOrUncordon(drainer, remoteNode, true); err != nil {
	//	log.Error(utils.WorkerUpgradeFailedReason, "unable to cordon remote node %q, err: %v", phaseutil.NodeInfo(node), err)
	//	return errors.Errorf("unable to cordon node %q, err: %v", phaseutil.NodeInfo(node), err)
	//}
	//
	//defer func() {
	//	log.Info(utils.WorkerUpgradingReason, "start uncordon node %s", phaseutil.NodeInfo(node))
	//	if er := kubedrain.RunCordonOrUncordon(drainer, remoteNode, false); er != nil {
	//		log.Error(utils.WorkerUpgradeFailedReason, "unable to uncordon remote node %q, err: %v", phaseutil.NodeInfo(node), err)
	//		err = kerrors.NewAggregate([]error{err, errors.Errorf("unable to uncordon node %q, err: %v", phaseutil.NodeInfo(node), err)})
	//	}
	//}()

	upgrade := command.Upgrade{
		BaseCommand: command.BaseCommand{
			Ctx:         ctx,
			NameSpace:   bkeCluster.Namespace,
			Client:      c,
			Scheme:      scheme,
			OwnerObj:    bkeCluster,
			ClusterName: bkeCluster.Name,
			Unique:      true,
		},
		Node:       &node,
		BKEConfig:  bkeCluster.Name,
		Phase:      bkev1beta1.UpgradeWorker,
		BackUpEtcd: false,
	}

	log.Info(constant.WorkerUpgradingReason, "start upgrade node %s", phaseutil.NodeInfo(node))
	if err := upgrade.New(); err != nil {
		log.Error(constant.WorkerUpgradeFailedReason, "upgrade node %q failed: %v", phaseutil.NodeInfo(node), err)
		return errors.Errorf("create upgrade command，node: %q failed: %v", phaseutil.NodeInfo(node), err)
	}
	log.Info(constant.WorkerUpgradingReason, "wait upgrade node %s finish", phaseutil.NodeInfo(node))
	err, _, failedNodes := upgrade.Wait()
	if err != nil {
		log.Error(constant.WorkerUpgradeFailedReason, "wait upgrade command complete failed，node: %q, err: %v", phaseutil.NodeInfo(node), err)
		return errors.Errorf("wait upgrade command complete failed，node: %q, err: %v", phaseutil.NodeInfo(node), err)
	}
	if len(failedNodes) != 0 {
		// Fixed: `err` is provably nil on this path (the check above returned
		// on non-nil), so the original log/return formatted a nil %v. Report
		// the failed nodes instead, and do not silently drop the error from
		// LogCommandFailed (the original shadowed `err` and then returned it
		// as if it were the upgrade failure).
		log.Error(constant.WorkerUpgradeFailedReason, "upgrade node %q failed, failed nodes: %v", phaseutil.NodeInfo(node), failedNodes)
		commandErrs, logErr := phaseutil.LogCommandFailed(*upgrade.Command, failedNodes, log, constant.WorkerUpgradeFailedReason)
		if logErr != nil {
			log.Warn(constant.WorkerUpgradeFailedReason, "collect command failure details failed: %v", logErr)
		}
		phaseutil.MarkNodeStatusByCommandErrs(bkeCluster, commandErrs)
		return errors.Errorf("upgrade node %q failed, failed nodes: %v", phaseutil.NodeInfo(node), failedNodes)
	}
	log.Info(constant.WorkerUpgradeSucceedReason, "upgrade node %q operation succeed", phaseutil.NodeInfo(node))

	remoteClient, err := kube.NewRemoteClientByBKECluster(ctx, c, bkeCluster)
	if err != nil {
		log.Error(constant.WorkerUpgradeFailedReason, "get remote client for BKECluster %q failed", utils.ClientObjNS(bkeCluster))
		return errors.Errorf("get remote client for BKECluster %q failed: %v", utils.ClientObjNS(bkeCluster), err)
	}
	clientSet, _ := remoteClient.KubeClient()
	// Wait for the node to pass the post-upgrade health check.
	log.Info(constant.WorkerUpgradingReason, "wait for node %q pass healthy check", phaseutil.NodeInfo(node))
	err = wait.PollWithContext(ctx, 2*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
		if utils.CtxDone(ctx) {
			return false, nil
		}
		// Transient lookup/health-check failures are swallowed (return
		// false, nil) so polling continues until the timeout.
		remoteNode, err = clientSet.CoreV1().Nodes().Get(ctx, node.Hostname, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		if err := remoteClient.NodeHealthCheck(remoteNode, bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion, log.NormalLogger); err != nil {
			return false, nil
		}
		return true, nil
	})

	if err != nil {
		log.Error(constant.WorkerUpgradeFailedReason, "upgrade node %q failed: %v", phaseutil.NodeInfo(node), err)
		return errors.Errorf("wait for node %q pass healthy check failed: %v", phaseutil.NodeInfo(node), err)
	}
	log.Info(constant.WorkerUpgradingReason, "upgrade worker node %q success", phaseutil.NodeInfo(node))
	return nil
}
