/******************************************************************
 * Copyright (c) 2024 Bocloud Technologies Co., Ltd.
 * installer is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 ******************************************************************/

package controllers

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/pkg/errors"
	"gopkg.openfuyao.cn/bkeagent/utils/log"
	confv1beta1 "gopkg.openfuyao.cn/bkecommon/cluster/api/v1beta1"
	bkenode "gopkg.openfuyao.cn/bkecommon/cluster/node"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/annotations"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	bkev1beta1 "gopkg.openfuyao.cn/cluster-api-provider-bke/api/v1beta1"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/mergecluster"
	bkemetrics "gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/metrics"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe/phases"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/phaseframe/phaseutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/pkg/statusmanage"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/annotation"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/clustertracker"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/clusterutil"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/condition"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/config"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/constant"
	l "gopkg.openfuyao.cn/cluster-api-provider-bke/utils/log"
	"gopkg.openfuyao.cn/cluster-api-provider-bke/utils/predicates"
)

// BKEClusterReconciler reconciles a BKECluster object.
type BKEClusterReconciler struct {
	// Client is the controller-runtime client for the management cluster.
	client.Client
	// Scheme maps Go types to GroupVersionKinds for object (de)serialization.
	Scheme     *runtime.Scheme
	// Recorder emits Kubernetes events for reconciled BKECluster objects.
	Recorder   record.EventRecorder
	// RestConfig is the REST configuration this controller runs with;
	// it is propagated into the phase context in Reconcile.
	RestConfig *rest.Config
	// Tracker caches connections/watches against remote (workload) clusters
	// so node events there can re-trigger reconciliation here.
	Tracker    *clustertracker.ClusterCacheTracker
	// controller is the built controller, captured in SetupWithManager so it
	// can serve as the Watcher for dynamically registered remote watches.
	controller controller.Controller
}

// +kubebuilder:rbac:groups=bke.bocloud.com,resources=*,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cluster.x-k8s.io;controlplane.cluster.x-k8s.io;bootstrap.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=events;secrets;configmaps;namespaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=bkeagent.bocloud.com,resources=commands,verbs=get;list;watch;create;update;patch;delete
// Reconcile is the main logic of bke cluster controller.
//
// It loads the combined BKECluster for the request, refreshes agent and node
// status, then runs the phase state machine (phaseframe) to converge the
// cluster. If the remote cluster is trackable, it also installs a Node watch
// on that cluster so node-readiness changes re-enqueue this BKECluster.
func (r *BKEClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, err error) {
	bkeCluster, err := mergecluster.GetCombinedBKECluster(ctx, r.Client, req.Namespace, req.Name)
	if err != nil {
		// Object deleted between enqueue and reconcile: nothing left to do.
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	// NOTE(review): "0" appears to be the sentinel for "metrics disabled" —
	// confirm against how config.MetricsAddr is populated.
	if config.MetricsAddr != "0" {
		bkemetrics.MetricRegister.Register(utils.ClientObjNS(bkeCluster))
	}

	// Snapshot of the cluster as of the last recorded update, used below to
	// compute which phase transitions are needed.
	oldBkeCluster, err := mergecluster.GetLastUpdatedBKECluster(bkeCluster)
	if err != nil {
		return ctrl.Result{}, err
	}

	log := l.Named("Reconcile").With("bkeCluster", bkeCluster.Name, "namespace", bkeCluster.Namespace)
	bkeLogger := bkev1beta1.NewBKELogger(log, r.Recorder, bkeCluster)

	if err = r.computeAgentStatus(bkeCluster); err != nil {
		bkeLogger.Error(constant.InternalErrorReason, "failed set AgentStatus, err: %v", err)
		return ctrl.Result{}, err
	}

	if err = r.initNodeStatus(bkeCluster); err != nil {
		bkeLogger.Error(constant.InternalErrorReason, "failed set NodeStatus, err: %v", err)
		return ctrl.Result{}, err
	}

	// init phase ctx
	phaseCtx := phaseframe.NewReconcilePhaseCtx(ctx).
		SetClient(r.Client).
		SetRestConfig(r.RestConfig).
		SetScheme(r.Scheme).
		SetLogger(bkeLogger).
		SetBKECluster(bkeCluster)
	defer phaseCtx.Cancel()

	flow := phases.NewPhaseFlow(phaseCtx)

	err = flow.CalculatePhase(oldBkeCluster, bkeCluster)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Execution errors are only logged here: reconciliation continues so the
	// remote watch below is still installed and the requeue decision is made
	// from the status manager rather than from the raw error.
	res, err := flow.Execute()
	if err != nil {
		log.Warnf("Reconcile bkeCluster %q failed: %v", utils.ClientObjNS(bkeCluster), err)
	}

	if clustertracker.AllowTrackerRemoteCluster(phaseCtx.BKECluster) {
		// Watch the remote cluster's nodes; any node state change triggers a
		// cluster health check via nodeToBKEClusterMapFunc.
		watchInput := clustertracker.WatchInput{
			Name:          utils.ClientObjNS(phaseCtx.BKECluster),
			Cluster:       util.ObjectKey(phaseCtx.BKECluster),
			BKECluster:    phaseCtx.BKECluster,
			Watcher:       r.controller,
			Kind:          &corev1.Node{},
			EventHandler:  handler.EnqueueRequestsFromMapFunc(nodeToBKEClusterMapFunc(ctx, r.Client)),
			EventRecorder: r.Recorder,
			Predicates:    []predicate.Predicate{predicates.NodeNotReadyPredicate()},
		}

		// Watch failures are retried on a fixed delay instead of being
		// surfaced as reconcile errors.
		if err := r.Tracker.Watch(ctx, watchInput); err != nil {
			bkeLogger.Error(constant.ClusterTracker, "failed to watch node, err: %v", err)
			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
		}
	}

	// if need requeue, return
	if res.Requeue || res.RequeueAfter > 0 {
		return res, nil
	}

	return statusmanage.BKEClusterStatusManager.GetCtrlResult(phaseCtx.BKECluster), nil
}

// SetupWithManager sets up the controller with the Manager.
//
// Besides reconciling BKECluster objects directly, the controller maps CAPI
// Cluster events back to their owning BKECluster. The built controller is
// retained on the reconciler so Reconcile can register dynamic remote-cluster
// watches against it.
func (r *BKEClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
	c, err := ctrl.NewControllerManagedBy(mgr).
		For(&bkev1beta1.BKECluster{}).
		WithEventFilter(predicates.OR(
			predicates.BKEClusterAnnotationsChange(),
			predicates.BKEClusterSpecChange(),
		)).
		WithOptions(options).
		Watches(
			&clusterv1.Cluster{},
			handler.EnqueueRequestsFromMapFunc(clusterToBKEClusterMapFunc(ctx, bkev1beta1.GroupVersion.WithKind("BKECluster"), mgr.GetClient(), &bkev1beta1.BKECluster{})),
			builder.WithPredicates(predicates.ClusterUnPause()),
		).Build(r)
	if err != nil {
		// Wrap instead of Errorf("%v") so the original error stays in the
		// cause chain and remains inspectable with errors.Is/As/Cause.
		return errors.Wrap(err, "failed setting up with a controller manager")
	}
	r.controller = c
	return nil
}

// clusterToBKEClusterMapFunc builds a handler.MapFunc that maps a CAPI
// Cluster event to a reconcile request for the BKECluster referenced by the
// Cluster's InfrastructureRef. Events are dropped when the Cluster is being
// deleted, when the ref is missing or of an unexpected GroupKind, when the
// provider cluster cannot be fetched, or when it is externally managed.
func clusterToBKEClusterMapFunc(ctx context.Context, gvk schema.GroupVersionKind, c client.Client, providerCluster client.Object) handler.MapFunc {
	expectedGK := gvk.GroupKind()
	return func(ctx context.Context, o client.Object) []reconcile.Request {
		cluster, ok := o.(*clusterv1.Cluster)
		if !ok {
			return nil
		}

		// Ignore clusters that are being deleted.
		if !cluster.DeletionTimestamp.IsZero() {
			return nil
		}

		ref := cluster.Spec.InfrastructureRef
		if ref == nil {
			return nil
		}

		// Only map clusters whose infrastructure reference has the GroupKind
		// this provider owns.
		if ref.GroupVersionKind().GroupKind() != expectedGK {
			return nil
		}

		// Work on a deep copy so the prototype object is never mutated.
		obj := providerCluster.DeepCopyObject().(client.Object)
		if err := c.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: ref.Name}, obj); err != nil {
			l.Errorf("Failed to get %T err: %v", obj, err)
			return nil
		}

		if annotations.IsExternallyManaged(obj) {
			l.Errorf("%T is externally managed, skipping mapping", obj)
			return nil
		}

		return []reconcile.Request{{
			NamespacedName: client.ObjectKey{
				Namespace: cluster.Namespace,
				Name:      ref.Name,
			},
		}}
	}
}

// nodeToBKEClusterMapFunc builds a handler.MapFunc that maps a (remote)
// Node event to a reconcile request for the BKECluster owning that node.
// The owning Cluster is resolved from the node's cluster name/namespace
// annotations; events are dropped when the annotations are missing, the
// Cluster cannot be fetched, or it has no InfrastructureRef.
func nodeToBKEClusterMapFunc(ctx context.Context, c client.Client) handler.MapFunc {
	return func(ctx context.Context, o client.Object) []reconcile.Request {
		node, ok := o.(*corev1.Node)
		if !ok {
			return nil
		}

		clusterName, ok := annotation.HasAnnotation(node, clusterv1.ClusterNameAnnotation)
		if !ok {
			return nil
		}
		clusterNamespace, ok := annotation.HasAnnotation(node, clusterv1.ClusterNamespaceAnnotation)
		if !ok {
			return nil
		}
		cluster := &clusterv1.Cluster{}
		if err := c.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, cluster); err != nil {
			l.Errorf("Failed to get Cluster %s/%s err: %v", clusterNamespace, clusterName, err)
			return nil
		}

		if cluster.Spec.InfrastructureRef == nil {
			return nil
		}

		// The infrastructure ObjectReference often carries an empty Namespace
		// (it is namespace-local to the Cluster). Fall back to the Cluster's
		// own namespace — mirroring clusterToBKEClusterMapFunc — so the
		// enqueued request is always routable.
		namespace := cluster.Spec.InfrastructureRef.Namespace
		if namespace == "" {
			namespace = cluster.Namespace
		}

		return []reconcile.Request{
			{
				NamespacedName: client.ObjectKey{
					Namespace: namespace,
					Name:      cluster.Spec.InfrastructureRef.Name,
				},
			},
		}
	}
}

// computeAgentStatus refreshes Status.AgentStatus ("available/total" plus the
// reply counters) against the desired node list in Spec.ClusterConfig.Nodes,
// and syncs the status back to the API server only when something changed.
func (r *BKEClusterReconciler) computeAgentStatus(bkeCluster *bkev1beta1.BKECluster) error {
	statusCopy := bkeCluster.Status.AgentStatus.DeepCopy()
	specNodesNum := len(bkeCluster.Spec.ClusterConfig.Nodes)
	bkeCluster.Status.AgentStatus.Replies = int32(specNodesNum)
	if bkeCluster.Status.AgentStatus.Status == "" {
		// First reconcile: no agent has reported in yet, so all replies are
		// unavailable and the status starts at "0/<total>".
		bkeCluster.Status.AgentStatus.UnavailableReplies = int32(specNodesNum)
		bkeCluster.Status.AgentStatus.Status = fmt.Sprintf("%d/%d", 0, specNodesNum)
	} else {
		// Keep the reported available count but refresh the desired total
		// (the spec node list may have grown or shrunk).
		availableNodesNum := strings.Split(statusCopy.Status, "/")[0]
		bkeCluster.Status.AgentStatus.Status = fmt.Sprintf("%s/%d", availableNodesNum, specNodesNum)
	}
	if !statusCopy.Equal(&bkeCluster.Status.AgentStatus) {
		if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster); err != nil {
			return err
		}
	}
	return nil
}

// initNodeStatus reconciles Status.NodesStatus against the desired node list,
// applies retry-annotation cleanup, and derives the cluster-level health
// state (Deploying / Upgrading / Managing / Deleting). Any change is synced
// back to the API server before phase execution starts.
func (r *BKEClusterReconciler) initNodeStatus(bkeCluster *bkev1beta1.BKECluster) error {
	bkeNodes := bkenode.Nodes(bkeCluster.Spec.ClusterConfig.Nodes)
	statusNodes := phaseutil.GetBKENodesFromNodesStatus(bkeCluster.Status.NodesStatus)
	// First deployment if no node status has been recorded yet.
	deployFlag := len(statusNodes) == 0

	// Diff desired nodes against recorded status to get create/remove/update
	// operations per node.
	nodeT, nodeChangeFlag := bkenode.CompareBKEConfigNode(statusNodes, bkeNodes)
	if nodeChangeFlag {
		nodesStatus := bkeCluster.Status.NodesStatus.DeepCopy()
		needDeleteNodes := phaseutil.GetNeedDeleteWorkerNodes(bkeCluster)
		for _, t := range nodeT {
			switch t.Operate {
			case bkenode.CreateNode:
				// New node: record it with an Unknown state until an agent reports.
				nodesStatus = append(nodesStatus, confv1beta1.NodeState{
					State: bkev1beta1.NodeUnknown,
					Node:  *t.Node,
				})
				log.Debugf("新增节点 %s, 添加到status，当前状态：%s", phaseutil.NodeInfo(*t.Node), bkev1beta1.NodeUnknown)
			case bkenode.RemoveNode:
				// Only worker nodes eligible for deletion are transitioned.
				if needDeleteNodes.Filter(bkenode.FilterOptions{"IP": t.Node.IP}).Length() == 0 {
					continue
				}
				for i, node := range statusNodes {
					if node.IP == t.Node.IP {
						// Node slated for removal: set the deleting flag, clear
						// any failed flag, and mark the state as deleting.
						nodesStatus[i].StateCode |= bkev1beta1.NodeDeletingFlag
						nodesStatus[i].StateCode &= ^bkev1beta1.NodeFailedFlag
						nodesStatus[i].State = bkev1beta1.NodeDeleting
						log.Debugf("准备删除节点 %s，修改状态为：%s", phaseutil.NodeInfo(*t.Node), bkev1beta1.NodeDeleting)
					}
				}
			case bkenode.UpdateNode:
				for i, node := range statusNodes {
					if node.IP == t.Node.IP {
						nodesStatus[i].Node = *t.Node
						// Clear the failed flag so the node can be processed again.
						nodesStatus[i].StateCode &= ^bkev1beta1.NodeFailedFlag
						log.Debugf("更新节点 %s信息，当前状态为：%s", phaseutil.NodeInfo(*t.Node), nodesStatus[i].State)
					}
				}
			}
		}
		bkeCluster.Status.NodesStatus = nodesStatus
	}

	// Whether any node requires a cluster upgrade.
	upgradeFlag := phaseutil.GetNeedUpgradeNodes(bkeCluster).Length() > 0
	// Whether the cluster still needs to be taken under management.
	manageFlag := clusterutil.IsBocloudCluster(bkeCluster) && !clusterutil.FullyControlled(bkeCluster)

	deployFailedFlag := false
	upgradeFailedFlag := false
	manageFailedFlag := false
	// Read the cluster's last recorded health condition to detect failed
	// deploy/upgrade/manage attempts that should be re-entered.
	v, ok := condition.HasCondition(bkev1beta1.ClusterHealthyStateCondition, bkeCluster)
	if ok && v != nil {
		deployFailedFlag = v.Reason == string(bkev1beta1.Deploying) && v.Message == string(bkev1beta1.DeployFailed)
		upgradeFailedFlag = v.Reason == string(bkev1beta1.Upgrading) && v.Message == string(bkev1beta1.UpgradeFailed)
		manageFailedFlag = v.Reason == string(bkev1beta1.Managing) && v.Message == string(bkev1beta1.ManageFailed)
	}

	retryFlag := false
	patchFunc := func(cluster *bkev1beta1.BKECluster) { return }
	// retry annotation set need clean all failedFlag from nodes
	if retryNodeIPs, ok := annotation.HasAnnotation(bkeCluster, annotation.RetryAnnotationKey); ok {

		// If no specific nodes are listed, clear the failed state on all nodes.
		if retryNodeIPs == "" {
			log.Debugf("重试标记存在，清理所有节点的失败状态码")
			// Clear the failed flag on every node.
			for _, node := range statusNodes {
				if bkeCluster.GetNodeStateFlag(node.IP, bkev1beta1.NodeFailedFlag) {
					bkeCluster.UnmarkNodeState(node.IP, bkev1beta1.NodeFailedFlag)
				}
			}
			// Reset the status manager cache for the whole cluster.
			log.Debugf("重置状态管理器")
			statusmanage.BKEClusterStatusManager.RemoveClusterStatusManagerCache(bkeCluster)
		} else {
			// Annotation value is a comma-separated list of node IPs to retry.
			retryNodes := strings.Split(retryNodeIPs, ",")
			// Clear the failed state on the listed nodes only.
			for _, nodeIP := range retryNodes {
				log.Debugf("重试标记存在，清理节点 %s 的失败状态码", nodeIP)
				if bkeCluster.GetNodeStateFlag(nodeIP, bkev1beta1.NodeFailedFlag) {
					bkeCluster.UnmarkNodeState(nodeIP, bkev1beta1.NodeFailedFlag)
				}
				log.Debugf("重试标记存在，移除节点 %s 的状态缓存", nodeIP)
				statusmanage.BKEClusterStatusManager.RemoveSingleNodeStatusCache(bkeCluster, nodeIP)
			}
		}

		retryFlag = true
		patchFunc = func(cluster *bkev1beta1.BKECluster) {
			// Drop the retry annotation once it has been honored so a single
			// annotation triggers exactly one retry.
			annotation.RemoveAnnotation(cluster, annotation.RetryAnnotationKey)
		}

		// TODO: these two states are currently uncertain; retry does not reset them for now.
		// Reset failed states:
		//if deployFailedFlag {
		//	markBKEClusterHealthyStatus(bkeCluster, bkev1beta1.Deploying)
		//}
		//if upgradeFailedFlag {
		//	markBKEClusterHealthyStatus(bkeCluster, bkev1beta1.Upgrading)
		//}
		//if manageFailedFlag {
		//	markBKEClusterHealthyStatus(bkeCluster, bkev1beta1.Managing)
		//}
	}

	// First deployment (or a failed one being retried): mark as Deploying.
	if deployFlag || deployFailedFlag {
		markBKEClusterHealthyStatus(bkeCluster, bkev1beta1.Deploying)
	}
	// Pending (or failed) upgrade: mark as Upgrading.
	if upgradeFlag || upgradeFailedFlag {
		markBKEClusterHealthyStatus(bkeCluster, bkev1beta1.Upgrading)
	}
	// Pending (or failed) take-over: mark as Managing.
	if manageFlag || manageFailedFlag {
		markBKEClusterHealthyStatus(bkeCluster, bkev1beta1.Managing)
	}

	// Cluster deletion/reset: mark as Deleting.
	if phaseutil.IsDeleteOrReset(bkeCluster) {
		markBKEClusterHealthyStatus(bkeCluster, bkev1beta1.Deleting)
	}

	// NOTE(review): manageFlag (and the Deleting case) are not part of this
	// sync condition even though they mark the status above — confirm whether
	// those transitions are persisted elsewhere or should be added here.
	if retryFlag || nodeChangeFlag || deployFlag || deployFailedFlag || upgradeFlag || upgradeFailedFlag || manageFailedFlag {
		if err := mergecluster.SyncStatusUntilComplete(r.Client, bkeCluster, patchFunc); err != nil {
			return err
		}
	}

	return nil
}

// markBKEClusterHealthyStatus stamps the given health state onto the
// BKECluster in two places: the ClusterHealthyState condition (set to True
// with the state as its reason) and the top-level Status.ClusterHealthState
// field.
func markBKEClusterHealthyStatus(bkeCluster *bkev1beta1.BKECluster, status confv1beta1.ClusterHealthState) {
	log.Debugf("标记集群 %s 状态为 %s", utils.ClientObjNS(bkeCluster), status)
	condition.ConditionMark(bkeCluster, bkev1beta1.ClusterHealthyStateCondition, confv1beta1.ConditionTrue, string(status), "")
	bkeCluster.Status.ClusterHealthState = status
}
