package phases

import (
	"context"
	"time"

	"github.com/pkg/errors"
	"gopkg.openfuyao.cn/bkeagent/utils/mfutil"
	confv1beta1 "gopkg.openfuyao.cn/bkecommon/cluster/api/v1beta1"
	bkeinit "gopkg.openfuyao.cn/bkecommon/cluster/initialize"
	bkenode "gopkg.openfuyao.cn/bkecommon/cluster/node"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
	containerutil "sigs.k8s.io/cluster-api/util/container"
	ctrl "sigs.k8s.io/controller-runtime"

	bkev1beta1 "gopkg.openfuyao.cn/cluster-api-provider-bke/api/v1beta1"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/command"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/kube"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/mergecluster"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe/phaseutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/annotation"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/constant"
	l "gopkg.openfuyao.cn/cluster-api-provider-bke/utils/log"
)

const (
	// EnsureMasterUpgradeName is the phase name under which the
	// master-upgrade phase is registered in the phase framework.
	EnsureMasterUpgradeName confv1beta1.BKEClusterPhase = "EnsureMasterUpgrade"
)

// EnsureMasterUpgrade is the phase that rolls out a Kubernetes version
// upgrade across the cluster's master (control-plane) nodes, one node at a
// time, and afterwards bumps the recorded cluster/addon versions.
type EnsureMasterUpgrade struct {
	phaseframe.BasePhase
}

// NewEnsureMasterUpgrade constructs the master-upgrade phase, attaching a
// logger named after the phase to the shared phase context.
func NewEnsureMasterUpgrade(ctx *phaseframe.PhaseContext) phaseframe.Phase {
	ctx.Log.NormalLogger = l.Named(EnsureMasterUpgradeName.String())
	return &EnsureMasterUpgrade{
		BasePhase: phaseframe.NewBasePhase(ctx, EnsureMasterUpgradeName),
	}
}

// Execute makes sure the "deployAction" annotation required by boc is set on
// the BKECluster, then reconciles the master-node upgrade.
func (e *EnsureMasterUpgrade) Execute() (_ ctrl.Result, err error) {
	current, exists := annotation.HasAnnotation(e.Ctx.BKECluster, "deployAction")
	if !exists || current != "k8s_upgrade" {
		// Patch in the annotation required by boc.
		setDeployAction := func(bkeCluster *bkev1beta1.BKECluster) {
			annotation.SetAnnotation(bkeCluster, "deployAction", "k8s_upgrade")
		}
		if err = mergecluster.SyncStatusUntilComplete(e.Ctx.Client, e.Ctx.BKECluster, setDeployAction); err != nil {
			return ctrl.Result{}, err
		}
	}

	return e.reconcileMasterUpgrade()
}

// NeedExecute reports whether the master-upgrade phase should run for the
// transition from oldCluster to newCluster. It declines when the base phase
// declines, when the workload cluster's control plane has not been
// initialized, or when no master node needs an upgrade. When it returns true
// the phase status is set to waiting.
//
// Parameters were renamed from old/new: "new" shadowed the predeclared
// identifier. Go parameter names are not part of the call interface, so
// callers are unaffected.
func (e *EnsureMasterUpgrade) NeedExecute(oldCluster *bkev1beta1.BKECluster, newCluster *bkev1beta1.BKECluster) (needExecute bool) {
	if !e.BasePhase.DefaultNeedExecute(oldCluster, newCluster) {
		return false
	}

	// If the Cluster lacks the control-plane-initialized condition, there is
	// nothing to upgrade yet. A refresh error is deliberately ignored
	// (best-effort check; we fall through and rely on the node list below).
	if err := e.Ctx.RefreshCtxCluster(); err == nil {
		if !conditions.IsTrue(e.Ctx.Cluster, clusterv1.ControlPlaneInitializedCondition) {
			return false
		}
	}

	nodes := phaseutil.GetNeedUpgradeMasterNodes(newCluster)
	if nodes.Length() == 0 {
		return false
	}

	e.SetStatus(bkev1beta1.PhaseWaiting)
	return true
}

// reconcileMasterUpgrade triggers the rolling master upgrade when the desired
// Kubernetes version differs from the recorded status version; otherwise it
// logs that no upgrade is needed.
//
// Fix: the original ran the rollout, then fell through and logged the
// misleading "version same" message on success while discarding the rollout's
// ctrl.Result. Now the rollout's result is returned directly.
func (e *EnsureMasterUpgrade) reconcileMasterUpgrade() (ctrl.Result, error) {
	_, _, bkeCluster, _, log := e.Ctx.Untie()
	if bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion != bkeCluster.Status.KubernetesVersion {
		return e.rolloutUpgrade()
	}
	log.Info(constant.MasterUpgradedReason, "k8s version same, not need to upgrade master node")
	return ctrl.Result{}, nil
}

// rolloutUpgrade upgrades master nodes one by one (rolling upgrade): it skips
// nodes whose agent is not ready, backs up etcd on the first etcd node, runs
// the upgrade command per node, records the new Kubernetes version in status,
// and finally bumps the kubeproxy/kubectl addon versions when needed.
//
// Fixes: the error from kube.GetTargetClusterClient was discarded (a nil
// clientSet would panic in GetRemoteNodeByBKENode), and the remote-node fetch
// logged with WorkerUpgradeFailedReason in this master-only path.
func (e *EnsureMasterUpgrade) rolloutUpgrade() (ctrl.Result, error) {
	ctx, c, bkeCluster, _, log := e.Ctx.Untie()

	needUpgradeNodes := bkenode.Nodes{}

	nodes := phaseutil.GetNeedUpgradeMasterNodes(bkeCluster)
	for _, node := range nodes {
		if !bkeCluster.GetNodeStateFlag(node.IP, bkev1beta1.NodeAgentReadyFlag) {
			log.Info(constant.MasterUpgradingReason, "agent is not ready at node %s, skip upgrade", phaseutil.NodeInfo(node))
			continue
		}
		needUpgradeNodes = append(needUpgradeNodes, node)
	}

	if len(needUpgradeNodes) == 0 {
		log.Info(constant.MasterUpgradingReason, "all the master node BKEAgent is not ready")
		return ctrl.Result{Requeue: true}, errors.New("all the master node BKEAgent is not ready")
	}

	specNodes := bkenode.Nodes(bkeCluster.Spec.ClusterConfig.Nodes)

	// Back up etcd data on the first etcd node (if any) before upgrading.
	needBackupEtcd := false
	backEtcdNode := confv1beta1.Node{}
	etcdNodes := specNodes.Etcd()
	if etcdNodes.Length() != 0 {
		needBackupEtcd = true
		backEtcdNode = etcdNodes[0]
		log.Info(constant.MasterUpgradingReason, "backup etcd data to node %s", phaseutil.NodeInfo(backEtcdNode))
	}

	if err := e.ensureEtcdAdvertiseClientUrlsAnnotation(etcdNodes); err != nil {
		log.Error(constant.MasterUpgradeFailedReason, "ensure etcd advertise client urls annotation failed, err: %v", err)
		return ctrl.Result{}, errors.Errorf("ensure etcd advertise client urls annotation failed, err: %v", err)
	}

	// Upgrade the nodes sequentially.
	log.Info(constant.MasterUpgradingReason, "Start upgrade master nodes process, upgrade policy: rollingUpgrade")

	// Previously the error here was discarded; a nil clientSet would panic
	// on first use below.
	clientSet, _, err := kube.GetTargetClusterClient(ctx, c, bkeCluster)
	if err != nil {
		log.Error(constant.MasterUpgradeFailedReason, "get target cluster client failed, err: %v", err)
		return ctrl.Result{}, errors.Errorf("get target cluster client failed, err: %v", err)
	}
	for _, node := range needUpgradeNodes {
		remoteNode, err := phaseutil.GetRemoteNodeByBKENode(ctx, clientSet, node)
		if err != nil {
			// Was WorkerUpgradeFailedReason — this is the master path.
			log.Error(constant.MasterUpgradeFailedReason, "get remote cluster Node resource failed, err: %v", err)
			return ctrl.Result{}, errors.Errorf("get remote cluster Node resource failed, err: %v", err)
		}
		// Nodes already at the desired version need no upgrade.
		if remoteNode.Status.NodeInfo.KubeletVersion == bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion {
			log.Info(constant.MasterUpgradeSucceedReason, "node %q is already the expected version %q,skip upgrade", phaseutil.NodeInfo(node), bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion)
			continue
		}

		// Mark node as upgrading.
		bkeCluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeUpgrading, "Upgrading")
		if err := mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
			return ctrl.Result{}, err
		}

		if err := e.upgradeNode(needBackupEtcd, backEtcdNode, node, remoteNode); err != nil {
			// A master node blocks the rollout until it upgrades successfully.
			log.Error(constant.MasterUpgradeFailedReason, "upgrade node %q failed: %v", phaseutil.NodeInfo(node), err)
			bkeCluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeUpgradeFailed, err.Error())
			if err = mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
				return ctrl.Result{}, errors.Errorf("upgrade node %q failed: %v", phaseutil.NodeInfo(node), err)
			}
			return ctrl.Result{}, errors.Errorf("upgrade node %q failed: %v", phaseutil.NodeInfo(node), err)
		}
		// Mark the node as upgraded; it stays NotReady until health checks pass.
		bkeCluster.SetNodeStateWithMessage(node.IP, bkev1beta1.NodeNotReady, "Upgrading success")
		if err := mergecluster.SyncStatusUntilComplete(c, bkeCluster); err != nil {
			return ctrl.Result{}, err
		}
	}
	log.Info(constant.MasterUpgradeSucceedReason, "upgrade all master success")
	// Masters always finish last, so record the new version in status now.
	bkeCluster.Status.KubernetesVersion = bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion

	// Decide whether the kubeproxy / kubectl addon versions need bumping.
	kubeproxyNeedUpgrade := false
	kubectlNeedUpgrade := false
	for _, addon := range bkeCluster.Spec.ClusterConfig.Addons {
		if addon.Name == "kubeproxy" && addon.Version != bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion {
			log.Info(constant.MasterUpgradingReason, "kubeproxy need upgrade")
			kubeproxyNeedUpgrade = true
		}
		if addon.Name == "kubectl" && addon.Version != "v1.25" {
			log.Info(constant.MasterUpgradingReason, "kubectl need upgrade")
			kubectlNeedUpgrade = true
		}
	}

	patchFuncs := []mergecluster.PatchFunc{}

	if kubeproxyNeedUpgrade {
		if err := e.upgradeKubeProxy(bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion); err != nil {
			log.Error(constant.MasterUpgradeFailedReason, "upgrade kubeproxy failed: %v", err)
			return ctrl.Result{}, err
		}
		// Align the kubeproxy addon version (spec and status) with the new
		// cluster version.
		patchFunc := func(currentCombinedBkeCluster *bkev1beta1.BKECluster) {
			for i, d := range currentCombinedBkeCluster.Spec.ClusterConfig.Addons {
				if d.Name == "kubeproxy" {
					d.Version = currentCombinedBkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion
					currentCombinedBkeCluster.Spec.ClusterConfig.Addons[i] = d
				}
			}
			for i, d := range currentCombinedBkeCluster.Status.AddonStatus {
				addon := d.DeepCopy()
				if addon.Name == "kubeproxy" {
					addon.Version = currentCombinedBkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion
					currentCombinedBkeCluster.Status.AddonStatus[i] = *addon
				}
			}
		}
		patchFuncs = append(patchFuncs, patchFunc)
	}

	if kubectlNeedUpgrade {
		// Pin the kubectl addon to v1.25, adding the addon entry when absent.
		patchFunc := func(currentCombinedBkeCluster *bkev1beta1.BKECluster) {
			found := false
			for i, d := range currentCombinedBkeCluster.Spec.ClusterConfig.Addons {
				if d.Name == "kubectl" {
					found = true
					d.Version = "v1.25"
					currentCombinedBkeCluster.Spec.ClusterConfig.Addons[i] = d
				}
			}
			if !found {
				currentCombinedBkeCluster.Spec.ClusterConfig.Addons = append(currentCombinedBkeCluster.Spec.ClusterConfig.Addons, confv1beta1.Product{
					Name:    "kubectl",
					Version: "v1.25",
					Block:   false,
				})
			}
		}
		patchFuncs = append(patchFuncs, patchFunc)
	}

	if err := mergecluster.SyncStatusUntilComplete(c, bkeCluster, patchFuncs...); err != nil {
		return ctrl.Result{}, errors.Errorf("failed to upgrade addon version, err: %v", err)
	}
	return ctrl.Result{}, nil
}

// upgradeNode runs the upgrade command against a single master node
// (optionally backing up etcd first) and then polls until the node passes the
// remote health check for the target Kubernetes version.
//
// Fix: when the command reported failed nodes, the original logged and
// wrapped `err`, which is provably nil on that path (the err != nil branch
// already returned), yielding "failed: <nil>". The failed nodes are now
// reported instead.
func (e *EnsureMasterUpgrade) upgradeNode(needBackupEtcd bool, backEtcdNode confv1beta1.Node, node confv1beta1.Node, remoteNode *corev1.Node) error {
	ctx, c, bkeCluster, scheme, log := e.Ctx.Untie()
	upgrade := command.Upgrade{
		BaseCommand: command.BaseCommand{
			Ctx:         ctx,
			NameSpace:   bkeCluster.Namespace,
			Client:      c,
			Scheme:      scheme,
			OwnerObj:    bkeCluster,
			ClusterName: bkeCluster.Name,
			Unique:      true,
		},
		Node:      &node,
		BKEConfig: bkeCluster.Name,
		Phase:     bkev1beta1.UpgradeControlPlane,
	}

	// Only the designated backup node dumps etcd data.
	if needBackupEtcd && node.IP == backEtcdNode.IP {
		upgrade.BackUpEtcd = true
	}

	log.Info(constant.MasterUpgradingReason, "start upgrade node %s", phaseutil.NodeInfo(node))
	if err := upgrade.New(); err != nil {
		log.Error(constant.MasterUpgradeFailedReason, "upgrade node %q failed: %v", phaseutil.NodeInfo(node), err)
		return errors.Errorf("create upgrade command，node: %q failed: %v", phaseutil.NodeInfo(node), err)
	}
	log.Info(constant.MasterUpgradingReason, "wait upgrade node %s finish", phaseutil.NodeInfo(node))
	err, _, failedNodes := upgrade.Wait()
	if err != nil {
		log.Error(constant.MasterUpgradeFailedReason, "wait upgrade command complete failed，node: %q, err: %v", phaseutil.NodeInfo(node), err)
		return errors.Errorf("wait upgrade command complete failed，node: %q, err: %v", phaseutil.NodeInfo(node), err)
	}
	if len(failedNodes) != 0 {
		// err is nil here; report the failed nodes rather than "<nil>".
		log.Error(constant.MasterUpgradeFailedReason, "upgrade node %q failed, failed nodes: %v", phaseutil.NodeInfo(node), failedNodes)
		commandErrs, logErr := phaseutil.LogCommandFailed(*upgrade.Command, failedNodes, log, constant.MasterUpgradeFailedReason)
		phaseutil.MarkNodeStatusByCommandErrs(bkeCluster, commandErrs)
		if logErr != nil {
			return errors.Errorf("upgrade node %q failed: %v", phaseutil.NodeInfo(node), logErr)
		}
		return errors.Errorf("upgrade node %q failed, failed nodes: %v", phaseutil.NodeInfo(node), failedNodes)
	}
	log.Info(constant.MasterUpgradingReason, "upgrade node %q operation succeed", phaseutil.NodeInfo(node))

	remoteClient, err := kube.NewRemoteClientByBKECluster(ctx, c, bkeCluster)
	if err != nil {
		log.Error(constant.MasterUpgradeFailedReason, "get remote client for BKECluster %q failed", utils.ClientObjNS(bkeCluster))
		return errors.Errorf("get remote client for BKECluster %q failed: %v", utils.ClientObjNS(bkeCluster), err)
	}
	// NOTE(review): second return value of KubeClient is discarded — confirm
	// it is not an error that should abort the health check.
	clientSet, _ := remoteClient.KubeClient()

	// Poll every 2s for up to 5m until the node passes the health check for
	// the new version; transient Get / health-check failures are retried.
	log.Info(constant.MasterUpgradingReason, "wait for node %q pass healthy check", phaseutil.NodeInfo(node))
	err = wait.PollWithContext(ctx, 2*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
		if utils.CtxDone(ctx) {
			return false, nil
		}
		remoteNode, err = clientSet.CoreV1().Nodes().Get(ctx, node.Hostname, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		if err := remoteClient.NodeHealthCheck(remoteNode, bkeCluster.Spec.ClusterConfig.Cluster.KubernetesVersion, log.NormalLogger); err != nil {
			return false, nil
		}
		return true, nil
	})

	if err != nil {
		log.Error(constant.MasterUpgradeFailedReason, "upgrade node %q failed: %v", phaseutil.NodeInfo(node), err)
		return errors.Errorf("wait for node %q pass healthy check failed: %v", phaseutil.NodeInfo(node), err)
	}
	log.Info(constant.MasterUpgradingReason, "upgrade master node %q success", phaseutil.NodeInfo(node))
	return nil
}

// upgradeKubeProxy rewrites the kube-proxy DaemonSet's first container image
// so its repository matches the cluster's configured image repo and its tag
// matches expectVersion, then updates the DaemonSet in the target cluster.
//
// Fix: guard against a containerless pod template before indexing
// Containers[0], which would otherwise panic.
func (e *EnsureMasterUpgrade) upgradeKubeProxy(expectVersion string) error {
	ctx, c, bkeCluster, _, log := e.Ctx.Untie()
	clientSet, _, err := kube.GetTargetClusterClient(ctx, c, bkeCluster)
	if err != nil {
		return err
	}

	// Fetch the kube-proxy DaemonSet from the target cluster.
	ds, err := clientSet.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(ctx, "kube-proxy", metav1.GetOptions{})
	if err != nil {
		log.Error(constant.MasterUpgradeFailedReason, "get kube-proxy ds failed: %v", err)
		return err
	}
	if len(ds.Spec.Template.Spec.Containers) == 0 {
		log.Error(constant.MasterUpgradeFailedReason, "kube-proxy daemonset has no containers")
		return errors.New("kube-proxy daemonset has no containers")
	}

	cfg := bkeinit.BkeConfig(*bkeCluster.Spec.ClusterConfig)
	imageRepo := cfg.ImageRepo()

	// Rewrite the image repository first, then the tag.
	srcImage := ds.Spec.Template.Spec.Containers[0].Image
	srcImage, err = containerutil.ModifyImageRepository(srcImage, imageRepo)
	if err != nil {
		return err
	}
	dstImage, err := containerutil.ModifyImageTag(srcImage, expectVersion)
	if err != nil {
		return err
	}
	ds.Spec.Template.Spec.Containers[0].Image = dstImage

	if _, err = clientSet.AppsV1().DaemonSets(metav1.NamespaceSystem).Update(ctx, ds, metav1.UpdateOptions{}); err != nil {
		return err
	}
	log.Info(constant.MasterUpgradingReason, "update kube-proxy image to %q success", dstImage)
	return nil
}

// ensureEtcdAdvertiseClientUrlsAnnotation makes sure every static etcd pod in
// kube-system carries the advertise-client-urls annotation, adding it (built
// from the node IP) when missing or empty.
//
// Fix: Pod.GetAnnotations returns nil when the pod has no annotations, and
// the original wrote into that possibly-nil map, which panics. The map is now
// initialized before the write.
func (e *EnsureMasterUpgrade) ensureEtcdAdvertiseClientUrlsAnnotation(etcdNodes bkenode.Nodes) error {
	ctx, c, bkeCluster, _, _ := e.Ctx.Untie()
	clientSet, _, err := kube.GetTargetClusterClient(ctx, c, bkeCluster)
	if err != nil {
		return err
	}
	for _, n := range etcdNodes {
		etcdPodName := kube.StaticPodName(mfutil.Etcd, n.Hostname)
		podClient := clientSet.CoreV1().Pods(metav1.NamespaceSystem)
		pod, err := podClient.Get(ctx, etcdPodName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		annotations := pod.GetAnnotations()
		if v, ok := annotations[annotation.EtcdAdvertiseClientUrlsAnnotationKey]; ok && v != "" {
			continue
		}
		// GetAnnotations returns nil for pods without annotations; writing
		// to a nil map panics.
		if annotations == nil {
			annotations = map[string]string{}
		}
		annotations[annotation.EtcdAdvertiseClientUrlsAnnotationKey] = phaseutil.GetClientURLByIP(n.IP)
		pod.SetAnnotations(annotations)
		if _, err = podClient.Update(ctx, pod, metav1.UpdateOptions{}); err != nil {
			return err
		}
	}
	return nil
}
