/*
Copyright 2022 The Karmada Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
	controllerruntime "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/features"
	"github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
	"github.com/karmada-io/karmada/pkg/util"
	"github.com/karmada-io/karmada/pkg/util/fedinformer/keys"
	"github.com/karmada-io/karmada/pkg/util/helper"
	"github.com/karmada-io/karmada/pkg/util/indexregistry"
)

// TaintManagerName is the controller name that will be used when reporting events and metrics.
// It is also the name under which the controller is registered in SetupWithManager.
const TaintManagerName = "taint-manager"

// NoExecuteTaintManager listens to Taint/Toleration changes and is responsible for removing objects
// from Clusters tainted with NoExecute Taints.
// NoExecuteTaintManager listens to Taint/Toleration changes and is responsible for removing objects
// from Clusters tainted with NoExecute Taints.
type NoExecuteTaintManager struct {
	client.Client // used to operate Cluster resources.
	EventRecorder record.EventRecorder // records eviction-related events on bindings.

	ClusterTaintEvictionRetryFrequency time.Duration // requeue interval returned by syncCluster for tainted clusters.
	ConcurrentReconciles               int // number of goroutines each eviction worker runs with.
	RateLimiterOptions                 ratelimiterflag.Options // general rate-limiter settings shared by controller and workers.

	EnableNoExecuteTaintEviction    bool // when false, Reconcile and needEviction short-circuit and nothing is evicted.
	NoExecuteTaintEvictionPurgeMode string // global fallback purge mode used by getPurgeMode when a binding sets none.

	// EvictionQueueOptions contains options for dynamic rate limiting
	EvictionQueueOptions EvictionQueueOptions

	// bindingEvictionWorker handles ResourceBinding resources
	bindingEvictionWorker util.AsyncWorker
	// clusterBindingEvictionWorker handles the ClusterResourceBinding resource
	clusterBindingEvictionWorker util.AsyncWorker
}

// EvictionWorkerOptions configures a new EvictionWorker instance.
type EvictionWorkerOptions struct {
	// Name is the queue's name used for metrics and logging
	Name string

	// KeyFunc generates keys from objects for queue operations.
	// NOTE(review): Start passes nil here, so implementations presumably
	// tolerate a nil KeyFunc when keys are enqueued directly — confirm in NewEvictionWorker.
	KeyFunc util.KeyFunc

	// ReconcileFunc processes keys from the queue
	ReconcileFunc util.ReconcileFunc

	// ResourceKindFunc returns resource metadata for metrics collection
	ResourceKindFunc func(key interface{}) (clusterName, resourceKind string)

	// EvictionQueueOptions configures dynamic rate limiting behavior
	EvictionQueueOptions EvictionQueueOptions

	// RateLimiterOptions configures general rate limiter behavior
	RateLimiterOptions ratelimiterflag.Options
}

// EvictionQueueOptions holds the options that control the behavior of the graceful eviction queue based on the overall health of the clusters.
type EvictionQueueOptions struct {
	// ResourceEvictionRate is the number of resources to be evicted per second in a cluster failover scenario.
	ResourceEvictionRate float32
}

// Reconcile performs a full reconciliation for the object referred to by the Request.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (tc *NoExecuteTaintManager) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	klog.V(4).InfoS("Reconciling cluster for taint manager", "cluster", req.NamespacedName.Name)

	cluster := &clusterv1alpha1.Cluster{}
	err := tc.Client.Get(ctx, req.NamespacedName, cluster)
	if apierrors.IsNotFound(err) {
		// The Cluster has been deleted; there is nothing left to reconcile.
		return controllerruntime.Result{}, nil
	}
	if err != nil {
		return controllerruntime.Result{}, err
	}

	// Nothing to evict unless the feature is enabled and the cluster actually
	// carries NoExecute taints.
	if !tc.EnableNoExecuteTaintEviction || !helper.HasNoExecuteTaints(cluster.Spec.Taints) {
		return controllerruntime.Result{}, nil
	}

	return tc.syncCluster(ctx, cluster)
}

// syncCluster enqueues every ResourceBinding and ClusterResourceBinding that is
// scheduled to the given tainted cluster onto the corresponding eviction worker,
// then asks for another pass after ClusterTaintEvictionRetryFrequency.
func (tc *NoExecuteTaintManager) syncCluster(ctx context.Context, cluster *clusterv1alpha1.Cluster) (reconcile.Result, error) {
	clusterName := cluster.Name

	// Namespace-scoped bindings assigned to this cluster (via the cluster field index).
	bindings := &workv1alpha2.ResourceBindingList{}
	bindingSelector := client.MatchingFieldsSelector{
		Selector: fields.OneTermEqualSelector(indexregistry.ResourceBindingIndexByFieldCluster, clusterName),
	}
	if err := tc.List(ctx, bindings, bindingSelector); err != nil {
		klog.ErrorS(err, "Failed to list ResourceBindings", "cluster", clusterName)
		return controllerruntime.Result{}, err
	}
	for i := range bindings.Items {
		fedKey, err := keys.FederatedKeyFunc(clusterName, &bindings.Items[i])
		if err != nil {
			klog.ErrorS(err, "Failed to generate federated key for ResourceBinding", "gvk", bindings.Items[i].GetObjectKind().GroupVersionKind())
			continue
		}
		tc.bindingEvictionWorker.Add(fedKey)
	}

	// Cluster-scoped bindings assigned to this cluster.
	clusterBindings := &workv1alpha2.ClusterResourceBindingList{}
	clusterBindingSelector := client.MatchingFieldsSelector{
		Selector: fields.OneTermEqualSelector(indexregistry.ClusterResourceBindingIndexByFieldCluster, clusterName),
	}
	if err := tc.List(ctx, clusterBindings, clusterBindingSelector); err != nil {
		klog.ErrorS(err, "Failed to list ClusterResourceBindings", "cluster", clusterName)
		return controllerruntime.Result{}, err
	}
	for i := range clusterBindings.Items {
		fedKey, err := keys.FederatedKeyFunc(clusterName, &clusterBindings.Items[i])
		if err != nil {
			klog.ErrorS(err, "Failed to generate federated key for ClusterResourceBinding", "gvk", clusterBindings.Items[i].GetObjectKind().GroupVersionKind())
			continue
		}
		tc.clusterBindingEvictionWorker.Add(fedKey)
	}

	// Schedule the next periodic pass over this cluster's bindings.
	return controllerruntime.Result{RequeueAfter: tc.ClusterTaintEvictionRetryFrequency}, nil
}

// Start builds the two eviction workers (one for ResourceBindings, one for
// ClusterResourceBindings), runs them, and blocks until the context is
// cancelled. It is registered with the manager via mgr.Add in SetupWithManager.
func (tc *NoExecuteTaintManager) Start(ctx context.Context) error {
	// Both workers share the same metrics hook, dynamic-rate queue options and
	// rate limiter; only the name and reconcile function differ.
	newWorker := func(name string, reconcileFn util.ReconcileFunc) util.AsyncWorker {
		return NewEvictionWorker(EvictionWorkerOptions{
			Name:                 name,
			ReconcileFunc:        reconcileFn,
			ResourceKindFunc:     tc.getResourceKindFromKey,
			EvictionQueueOptions: tc.EvictionQueueOptions,
			RateLimiterOptions:   tc.RateLimiterOptions,
		})
	}

	// Eviction queue handling ResourceBindings; the queue dynamically adjusts
	// its processing rate based on cluster health.
	tc.bindingEvictionWorker = newWorker("binding-eviction", tc.syncBindingEviction)
	tc.bindingEvictionWorker.Run(ctx, tc.ConcurrentReconciles)

	// Eviction queue handling ClusterResourceBindings, configured identically.
	tc.clusterBindingEvictionWorker = newWorker("cluster-binding-eviction", tc.syncClusterBindingEviction)
	tc.clusterBindingEvictionWorker.Run(ctx, tc.ConcurrentReconciles)

	<-ctx.Done()
	return nil
}

// getResourceKindFromKey extracts the cluster name and resource kind from a
// queue key for eviction metrics collection. Keys of unexpected type yield
// empty strings.
func (tc *NoExecuteTaintManager) getResourceKindFromKey(key interface{}) (clusterName, resourceKind string) {
	fedKey, ok := key.(keys.FederatedKey)
	if !ok {
		return "", ""
	}

	// A namespaced key belongs to a ResourceBinding; a cluster-scoped key
	// (empty namespace) belongs to a ClusterResourceBinding.
	kind := "ClusterResourceBinding"
	if fedKey.Namespace != "" {
		kind = "ResourceBinding"
	}

	return fedKey.Cluster, kind
}

// syncBindingEviction processes one ResourceBinding eviction key: it decides
// whether the binding must be evicted from the tainted cluster now, later
// (after a toleration window), or not at all.
func (tc *NoExecuteTaintManager) syncBindingEviction(key util.QueueKey) error {
	fedKey, ok := key.(keys.FederatedKey)
	if !ok {
		err := fmt.Errorf("invalid key type %T", key)
		klog.ErrorS(err, "Failed to sync binding eviction due to invalid key", "key", key)
		return err
	}
	clusterName := fedKey.Cluster
	klog.V(4).InfoS("Begin syncing ResourceBinding for taint eviction", "binding", fedKey.ClusterWideKey.NamespaceKey(), "cluster", clusterName)

	rb := &workv1alpha2.ResourceBinding{}
	err := tc.Client.Get(context.TODO(), types.NamespacedName{Namespace: fedKey.Namespace, Name: fedKey.Name}, rb)
	if apierrors.IsNotFound(err) {
		// The binding is already gone; nothing left to evict.
		return nil
	}
	if err != nil {
		return fmt.Errorf("failed to get binding %s: %v", fedKey.NamespaceKey(), err)
	}

	// Skip bindings being deleted and bindings no longer scheduled to this cluster.
	if !rb.DeletionTimestamp.IsZero() || !rb.Spec.TargetContains(clusterName) {
		return nil
	}

	needEviction, tolerationTime, err := tc.needEviction(clusterName, rb.Annotations)
	if err != nil {
		klog.ErrorS(err, "Failed to check if binding needs eviction", "binding", fedKey.ClusterWideKey.NamespaceKey())
		return err
	}

	switch {
	case needEviction:
		// Evict right away: either a taint is untolerated or the toleration
		// window has already elapsed.
		purgeMode := tc.getPurgeMode(rb.Spec.Failover)
		var preservedLabelState map[string]string
		if features.FeatureGate.Enabled(features.StatefulFailoverInjection) &&
			rb.Spec.Failover != nil && rb.Spec.Failover.Cluster != nil {
			preservedLabelState, err = extractStateForEviction(rb.Spec.Failover.Cluster, rb.Status.AggregatedStatus, clusterName)
			if err != nil {
				klog.ErrorS(err, "Failed to extract preserved label state for eviction", "binding", rb.Name, "cluster", clusterName)
				return err
			}
		}

		// Record the eviction task on the binding and persist it.
		rb.Spec.GracefulEvictCluster(clusterName, workv1alpha2.NewTaskOptions(
			workv1alpha2.WithPurgeMode(purgeMode),
			workv1alpha2.WithProducer(workv1alpha2.EvictionProducerTaintManager),
			workv1alpha2.WithReason(workv1alpha2.EvictionReasonTaintUntolerated),
			workv1alpha2.WithPreservedLabelState(preservedLabelState)))

		if err = tc.Update(context.TODO(), rb); err != nil {
			helper.EmitClusterEvictionEventForResourceBinding(rb, clusterName, tc.EventRecorder, err)
			klog.ErrorS(err, "Failed to update binding", "binding", klog.KObj(rb))
			return err
		}
		klog.V(2).InfoS("Evicted cluster from ResourceBinding", "cluster", fedKey.Cluster, "binding", fedKey.ClusterWideKey.NamespaceKey())
	case tolerationTime > 0:
		// Tolerated for now; re-check once the toleration window expires.
		tc.bindingEvictionWorker.AddAfter(fedKey, tolerationTime)
	}

	return nil
}

// syncClusterBindingEviction processes one ClusterResourceBinding eviction key:
// it decides whether the binding must be evicted from the tainted cluster now,
// later (after a toleration window), or not at all. It mirrors
// syncBindingEviction for the cluster-scoped resource.
func (tc *NoExecuteTaintManager) syncClusterBindingEviction(key util.QueueKey) error {
	fedKey, ok := key.(keys.FederatedKey)
	if !ok {
		err := fmt.Errorf("invalid key type %T", key)
		klog.ErrorS(err, "Failed to sync cluster binding eviction due to invalid key", "key", key)
		return err
	}
	cluster := fedKey.Cluster
	klog.V(4).InfoS("Begin syncing ClusterResourceBinding with taintManager", "binding", fedKey.ClusterWideKey.NamespaceKey(), "cluster", cluster)

	binding := &workv1alpha2.ClusterResourceBinding{}
	if err := tc.Client.Get(context.TODO(), types.NamespacedName{Name: fedKey.Name}, binding); err != nil {
		// The resource no longer exists, in which case we stop processing.
		if apierrors.IsNotFound(err) {
			return nil
		}
		return fmt.Errorf("failed to get cluster binding %s: %v", fedKey.Name, err)
	}

	// Skip bindings being deleted and bindings no longer scheduled to this cluster.
	if !binding.DeletionTimestamp.IsZero() || !binding.Spec.TargetContains(cluster) {
		return nil
	}

	needEviction, tolerationTime, err := tc.needEviction(cluster, binding.Annotations)
	if err != nil {
		klog.ErrorS(err, "Failed to check if cluster binding needs eviction", "binding", binding.Name)
		return err
	}

	// Case 1: Need eviction now.
	// Case 2: Need eviction after toleration time.
	// Case 3: Tolerate forever, we do nothing.
	if needEviction {
		purgeMode := tc.getPurgeMode(binding.Spec.Failover)
		var preservedLabelState map[string]string
		if features.FeatureGate.Enabled(features.StatefulFailoverInjection) {
			if binding.Spec.Failover != nil && binding.Spec.Failover.Cluster != nil {
				preservedLabelState, err = extractStateForEviction(binding.Spec.Failover.Cluster, binding.Status.AggregatedStatus, cluster)
				if err != nil {
					klog.ErrorS(err, "Failed to extract preserved label state for eviction", "binding", binding.Name, "cluster", cluster)
					return err
				}
			}
		}

		// update final result to evict the target cluster
		binding.Spec.GracefulEvictCluster(cluster, workv1alpha2.NewTaskOptions(
			workv1alpha2.WithPurgeMode(purgeMode),
			workv1alpha2.WithProducer(workv1alpha2.EvictionProducerTaintManager),
			workv1alpha2.WithReason(workv1alpha2.EvictionReasonTaintUntolerated),
			workv1alpha2.WithPreservedLabelState(preservedLabelState)))
		if err = tc.Update(context.TODO(), binding); err != nil {
			helper.EmitClusterEvictionEventForClusterResourceBinding(binding, cluster, tc.EventRecorder, err)
			klog.ErrorS(err, "Failed to update cluster binding", "binding", binding.Name)
			return err
		}
		klog.V(2).InfoS("Evicted cluster from ClusterResourceBinding", "cluster", fedKey.Cluster, "binding", fedKey.ClusterWideKey.NamespaceKey())
	} else if tolerationTime > 0 {
		// Tolerated for now; re-check once the toleration window expires.
		// (The redundant trailing `return nil` was removed for consistency
		// with syncBindingEviction; the fallthrough below returns nil anyway.)
		tc.clusterBindingEvictionWorker.AddAfter(fedKey, tolerationTime)
	}

	return nil
}

// getPurgeMode determines the purge mode based on the binding's failover spec
// and the taint manager's global config. A per-binding cluster failover spec
// takes precedence; otherwise the manager-wide setting is consulted, with
// graceful purging as the ultimate default.
func (tc *NoExecuteTaintManager) getPurgeMode(failover *policyv1alpha1.FailoverBehavior) policyv1alpha1.PurgeMode {
	if failover != nil && failover.Cluster != nil {
		if mode := failover.Cluster.PurgeMode; mode != "" {
			return mode
		}
		// Cluster failover is configured but leaves the purge mode unset.
		return policyv1alpha1.PurgeModeGracefully
	}

	// No per-binding setting: fall back to the global flag; anything other
	// than "Directly" resolves to graceful purging.
	if tc.NoExecuteTaintEvictionPurgeMode == string(policyv1alpha1.PurgeModeDirectly) {
		return policyv1alpha1.PurgeModeDirectly
	}
	return policyv1alpha1.PurgeModeGracefully
}

// needEviction determines if a binding should be evicted from the specified cluster based on taints and tolerations.
// It is a thin wrapper around needEvictionWithCurrentTime evaluated at time.Now().
// Returns:
//   - needEviction: true if the binding should be evicted (when to evict is indicated by the second return value).
//   - tolerationTime: if needEviction is true, this is the duration to wait before eviction (0 means evict immediately,
//     >0 means evict after this duration).
//     If needEviction is false, a value < 0 indicates the binding should be tolerated indefinitely (no eviction).
//   - error: any error encountered during evaluation.
func (tc *NoExecuteTaintManager) needEviction(clusterName string, annotations map[string]string) (bool, time.Duration, error) {
	return tc.needEvictionWithCurrentTime(clusterName, annotations, time.Now())
}

// needEvictionWithCurrentTime determines if a binding should be evicted from the specified cluster based on taints and tolerations.
// This function accepts a currentTime parameter to enable deterministic testing.
// Returns:
//   - needEviction: true if the binding should be evicted (when to evict is indicated by the second return value).
//   - tolerationTime: if needEviction is true, this is the duration to wait before eviction (0 means evict immediately,
//     >0 means evict after this duration).
//     If needEviction is false, a value < 0 indicates the binding should be tolerated indefinitely (no eviction).
//   - error: any error encountered during evaluation.
func (tc *NoExecuteTaintManager) needEvictionWithCurrentTime(clusterName string, annotations map[string]string, currentTime time.Time) (bool, time.Duration, error) {
	placement, err := helper.GetAppliedPlacement(annotations)
	if err != nil {
		return false, -1, err
	}
	// Under normal circumstances, placement will not be empty,
	// but when the default scheduler is not used, coordination problems may occur.
	// Therefore, in order to make the method more robust, add the empty judgement.
	if placement == nil {
		// NOTE: the original message misspelled "ResourceBinding"; fixed here.
		return false, -1, fmt.Errorf("the applied placement for ResourceBinding cannot be empty")
	}

	// Fetch the live Cluster so the current taint set is evaluated.
	cluster := &clusterv1alpha1.Cluster{}
	if err = tc.Client.Get(context.TODO(), types.NamespacedName{Name: clusterName}, cluster); err != nil {
		// The resource may no longer exist, in which case we stop processing.
		if apierrors.IsNotFound(err) {
			return false, -1, nil
		}
		return false, -1, err
	}

	// No NoExecute taints (or feature disabled) means nothing to evict.
	taints := helper.GetNoExecuteTaints(cluster.Spec.Taints)
	if !tc.EnableNoExecuteTaintEviction || len(taints) == 0 {
		return false, -1, nil
	}
	tolerations := placement.ClusterTolerations

	// Any untolerated taint triggers immediate eviction.
	allTolerated, usedTolerations := helper.GetMatchingTolerations(taints, tolerations)
	if !allTolerated {
		return true, 0, nil
	}

	// All taints tolerated: the shortest remaining toleration window decides
	// whether we evict now (0), later (>0), or never (<0).
	minTolerationTime := helper.GetMinTolerationTimeWithCurrentTime(taints, usedTolerations, currentTime)
	if minTolerationTime == 0 {
		return true, 0, nil
	}

	return false, minTolerationTime, nil
}

// extractStateForEviction extracts preserved label state from binding
// for the target cluster using state preservation rules. It returns (nil, nil)
// when no state-preservation rules are configured, and an error when the
// target cluster's status has not been collected yet.
func extractStateForEviction(failoverBehavior *policyv1alpha1.ClusterFailoverBehavior, aggregatedStatus []workv1alpha2.AggregatedStatusItem, cluster string) (map[string]string, error) {
	// Nothing to preserve unless rules are configured.
	if failoverBehavior.StatePreservation == nil || len(failoverBehavior.StatePreservation.Rules) == 0 {
		return nil, nil
	}

	statusItem, found := helper.FindTargetStatusItemByCluster(aggregatedStatus, cluster)
	if !found || statusItem.Status == nil || statusItem.Status.Raw == nil {
		return nil, fmt.Errorf("the application status has not yet been collected from Cluster(%s)", cluster)
	}

	return helper.BuildPreservedLabelState(failoverBehavior.StatePreservation, statusItem.Status.Raw)
}

// SetupWithManager creates a controller and register to controller manager.
// It wires the Cluster watch to Reconcile and registers the manager itself
// as a Runnable so Start is invoked alongside the controller.
func (tc *NoExecuteTaintManager) SetupWithManager(mgr controllerruntime.Manager) error {
	controllerErr := controllerruntime.NewControllerManagedBy(mgr).
		Named(TaintManagerName).
		For(&clusterv1alpha1.Cluster{}).
		WithOptions(controller.Options{
			RateLimiter: ratelimiterflag.DefaultControllerRateLimiter[controllerruntime.Request](tc.RateLimiterOptions),
		}).
		Complete(tc)

	return utilerrors.NewAggregate([]error{controllerErr, mgr.Add(tc)})
}
