package cleanup

import (
	"context"
	"time"

	"github.com/go-logr/logr"
	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
	kyvernov2 "github.com/kyverno/kyverno/api/kyverno/v2"
	"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
	kyvernov2informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v2"
	kyvernov2listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v2"
	"github.com/kyverno/kyverno/pkg/clients/dclient"
	"github.com/kyverno/kyverno/pkg/config"
	"github.com/kyverno/kyverno/pkg/controllers"
	engineapi "github.com/kyverno/kyverno/pkg/engine/api"
	enginecontext "github.com/kyverno/kyverno/pkg/engine/context"
	"github.com/kyverno/kyverno/pkg/engine/context/loaders"
	"github.com/kyverno/kyverno/pkg/engine/factories"
	"github.com/kyverno/kyverno/pkg/engine/jmespath"
	"github.com/kyverno/kyverno/pkg/event"
	"github.com/kyverno/kyverno/pkg/logging"
	"github.com/kyverno/kyverno/pkg/metrics"
	"github.com/kyverno/kyverno/pkg/toggle"
	"github.com/kyverno/kyverno/pkg/utils/conditions"
	controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
	"github.com/kyverno/kyverno/pkg/utils/match"
	"go.uber.org/multierr"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	corev1listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/util/workqueue"
)

// controller reconciles cleanup policies: it watches ClusterCleanupPolicy
// and CleanupPolicy objects and, when a policy's schedule fires, deletes
// the resources the policy matches.
type controller struct {
	// clients
	// client is the dynamic client used to list and delete target resources.
	client        dclient.Interface
	// kyvernoClient is the typed Kyverno client used to update policy status.
	kyvernoClient versioned.Interface

	// listers
	// cpolLister caches cluster-scoped cleanup policies.
	cpolLister kyvernov2listers.ClusterCleanupPolicyLister
	// polLister caches namespaced cleanup policies.
	polLister  kyvernov2listers.CleanupPolicyLister
	// nsLister is used to fetch namespace labels for match/exclude evaluation.
	nsLister   corev1listers.NamespaceLister

	// queue
	// queue holds policy keys awaiting reconciliation (rate limited).
	queue   workqueue.TypedRateLimitingInterface[any]
	// enqueue converts a policy object into a key and adds it to queue.
	enqueue controllerutils.EnqueueFuncT[kyvernov2.CleanupPolicyInterface]

	// config
	configuration config.Configuration
	cmResolver    engineapi.ConfigmapResolver
	eventGen      event.Interface
	jp            jmespath.Interface
	gctxStore     loaders.Store
}

const (
	// maxRetries is how many times a key is retried before the queue gives up on it.
	maxRetries      = 10
	// Workers is the suggested number of concurrent reconcile workers.
	Workers         = 3
	// ControllerName identifies this controller in logs and the workqueue.
	ControllerName  = "cleanup-controller"
	// minRequeueDelay clamps the requeue delay so a past-due next-execution
	// time does not cause an immediate hot-loop.
	minRequeueDelay = 1 * time.Second
)

// NewController builds the cleanup controller and registers informer event
// handlers for both ClusterCleanupPolicy and CleanupPolicy. Add and delete
// events always enqueue the policy; update events are filtered on
// metadata.generation so status-only updates do not trigger a reconcile.
func NewController(
	client dclient.Interface,
	kyvernoClient versioned.Interface,
	cpolInformer kyvernov2informers.ClusterCleanupPolicyInformer,
	polInformer kyvernov2informers.CleanupPolicyInformer,
	nsLister corev1listers.NamespaceLister,
	configuration config.Configuration,
	cmResolver engineapi.ConfigmapResolver,
	jp jmespath.Interface,
	eventGen event.Interface,
	gctxStore loaders.Store,
) controllers.Controller {
	queue := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[any](),
		workqueue.TypedRateLimitingQueueConfig[any]{Name: ControllerName},
	)
	// baseEnqueueFunc turns a policy into a namespace/name key and queues it,
	// logging (but not propagating) enqueue failures.
	keyFunc := controllerutils.MetaNamespaceKeyT[kyvernov2.CleanupPolicyInterface]
	baseEnqueueFunc := controllerutils.LogError(logger, controllerutils.Parse(keyFunc, controllerutils.Queue(queue)))
	// enqueueFunc wraps baseEnqueueFunc with structured logging of the
	// operation (added/updated/deleted) and the policy kind/name/namespace.
	enqueueFunc := func(logger logr.Logger, operation, kind string) controllerutils.EnqueueFuncT[kyvernov2.CleanupPolicyInterface] {
		logger = logger.WithValues("kind", kind, "operation", operation)
		return func(obj kyvernov2.CleanupPolicyInterface) error {
			logger := logger.WithValues("name", obj.GetName())
			if obj.GetNamespace() != "" {
				logger = logger.WithValues("namespace", obj.GetNamespace())
			}
			logger.V(2).Info(operation)
			if err := baseEnqueueFunc(obj); err != nil {
				logger.Error(err, "failed to enqueue object", "obj", obj)
				return err
			}
			return nil
		}
	}
	c := &controller{
		client:        client,
		kyvernoClient: kyvernoClient,
		cpolLister:    cpolInformer.Lister(),
		polLister:     polInformer.Lister(),
		nsLister:      nsLister,
		queue:         queue,
		enqueue:       baseEnqueueFunc,
		configuration: configuration,
		cmResolver:    cmResolver,
		eventGen:      eventGen,
		jp:            jp,
		gctxStore:     gctxStore,
	}
	// Registration failures are logged rather than returned; the controller
	// is still constructed, it just won't receive the affected events.
	if _, err := controllerutils.AddEventHandlersT(
		cpolInformer.Informer(),
		controllerutils.AddFuncT(logger, enqueueFunc(logger, "added", "ClusterCleanupPolicy")),
		// On update, enqueue only when spec changes; skip status-only updates
		func(oldObj, obj kyvernov2.CleanupPolicyInterface) {
			if oldObj.GetGeneration() != obj.GetGeneration() {
				_ = enqueueFunc(logger, "updated", "ClusterCleanupPolicy")(obj)
			}
		},
		controllerutils.DeleteFuncT(logger, enqueueFunc(logger, "deleted", "ClusterCleanupPolicy")),
	); err != nil {
		logger.Error(err, "failed to register event handlers")
	}
	if _, err := controllerutils.AddEventHandlersT(
		polInformer.Informer(),
		controllerutils.AddFuncT(logger, enqueueFunc(logger, "added", "CleanupPolicy")),
		// On update, enqueue only when spec changes; skip status-only updates
		func(oldObj, obj kyvernov2.CleanupPolicyInterface) {
			if oldObj.GetGeneration() != obj.GetGeneration() {
				_ = enqueueFunc(logger, "updated", "CleanupPolicy")(obj)
			}
		},
		controllerutils.DeleteFuncT(logger, enqueueFunc(logger, "deleted", "CleanupPolicy")),
	); err != nil {
		logger.Error(err, "failed to register event handlers")
	}
	return c
}

// Run starts the given number of reconcile workers and blocks until ctx is
// cancelled. Failed keys are retried up to maxRetries times.
func (c *controller) Run(ctx context.Context, workers int) {
	controllerutils.Run(ctx, logger.V(3), ControllerName, time.Second, c.queue, workers, maxRetries, c.reconcile)
}

// getPolicy resolves a cleanup policy from the informer caches. An empty
// namespace selects a cluster-scoped ClusterCleanupPolicy; otherwise the
// namespaced CleanupPolicy with the given name is returned.
//
// The explicit `return nil, err` on failure is deliberate: returning the
// lister result directly would wrap a typed nil pointer in a non-nil
// interface value.
func (c *controller) getPolicy(namespace, name string) (kyvernov2.CleanupPolicyInterface, error) {
	if namespace == "" {
		cpolicy, err := c.cpolLister.Get(name)
		if err != nil {
			return nil, err
		}
		return cpolicy, nil
	}
	policy, err := c.polLister.CleanupPolicies(namespace).Get(name)
	if err != nil {
		return nil, err
	}
	return policy, nil
}

// cleanup evaluates a single cleanup policy: it lists every kind matched by
// the policy, filters resources through config resourceFilters, Kyverno
// ownership protection, namespace checks, match/exclude clauses and optional
// conditions, and deletes the resources that qualify. Per-resource errors are
// collected and combined into the returned error so one failure does not stop
// the rest of the pass.
func (c *controller) cleanup(ctx context.Context, logger logr.Logger, policy kyvernov2.CleanupPolicyInterface) error {
	metrics := metrics.GetCleanupMetrics()

	spec := policy.GetSpec()
	kinds := sets.New(spec.MatchResources.GetKinds()...)
	debug := logger.V(4)
	var errs []error
	deleteOptions := metav1.DeleteOptions{
		PropagationPolicy: spec.DeletionPropagationPolicy,
	}
	// Build the engine context once and load the policy-level context entries
	// (configmaps, global context, ...) before iterating kinds.
	enginectx := enginecontext.NewContext(c.jp)
	ctxFactory := factories.DefaultContextLoaderFactory(c.cmResolver, factories.WithGlobalContextStore(c.gctxStore))
	loader := ctxFactory(nil, kyvernov1.Rule{})
	if err := loader.Load(
		ctx,
		c.jp,
		c.client,
		nil,
		spec.Context,
		enginectx,
	); err != nil {
		return err
	}
	for kind := range kinds {
		debug := debug.WithValues("kind", kind)
		debug.Info("processing...")
		// An empty policy namespace (cluster policy) lists across all namespaces.
		list, err := c.client.ListResource(ctx, "", kind, policy.GetNamespace(), nil)
		if err != nil {
			debug.Error(err, "failed to list resources")
			if metrics != nil {
				metrics.RecordCleanupFailure(ctx, kind, policy.GetNamespace(), policy, deleteOptions.PropagationPolicy)
			}
			// Check if this is a recoverable error (permission denied, resource not found, etc.)
			if dclient.IsRecoverableError(err) {
				logger.V(2).Info("skipping resource kind due to access restrictions", "kind", kind, "error", err.Error())
			} else {
				// For non-recoverable errors (connectivity issues, etc.), add to errors slice
				errs = append(errs, err)
			}

			continue
		}

		for i := range list.Items {
			resource := list.Items[i]
			namespace := resource.GetNamespace()
			name := resource.GetName()
			debug := debug.WithValues("name", name, "namespace", namespace)
			gvk := resource.GroupVersionKind()
			// Skip if resource matches resourceFilters from config
			if c.configuration.ToFilter(gvk, resource.GetKind(), namespace, name) {
				debug.Info("skipping resource due to resourceFilters in ConfigMap")
				continue
			}
			// check if the resource is owned by Kyverno
			if controllerutils.IsManagedByKyverno(&resource) && toggle.FromContext(ctx).ProtectManagedResources() {
				continue
			}

			var nsLabels map[string]string
			if namespace != "" {
				ns, err := c.nsLister.Get(namespace)
				if err != nil {
					debug.Error(err, "failed to get namespace labels")
					errs = append(errs, err)
					// BUGFIX: ns is nil on error; calling ns.GetLabels() would
					// panic with a nil pointer dereference, so skip this resource.
					continue
				}
				nsLabels = ns.GetLabels()
			}
			// match namespaces
			if err := match.CheckNamespace(policy.GetNamespace(), resource); err != nil {
				debug.Info("resource namespace didn't match policy namespace", "result", err)
				continue
			}
			// match resource with match/exclude clause
			matched := match.CheckMatchesResources(
				resource,
				spec.MatchResources,
				nsLabels,
				// TODO(eddycharly): we don't have user info here, we should check that
				// we don't have user conditions in the policy rule
				kyvernov2.RequestInfo{},
				resource.GroupVersionKind(),
				"",
			)
			if matched != nil {
				debug.Info("resource/match didn't match", "result", matched)
				continue
			}
			if spec.ExcludeResources != nil {
				excluded := match.CheckMatchesResources(
					resource,
					*spec.ExcludeResources,
					nsLabels,
					// TODO(eddycharly): we don't have user info here, we should check that
					// we don't have user conditions in the policy rule
					kyvernov2.RequestInfo{},
					resource.GroupVersionKind(),
					"",
				)
				// CheckMatchesResources returns nil when the resource matches,
				// so nil here means the exclude clause applies.
				if excluded == nil {
					debug.Info("resource/exclude matched")
					continue
				} else {
					debug.Info("resource/exclude didn't match", "result", excluded)
				}
			}
			// check conditions
			if spec.Conditions != nil {
				// Reset per-resource state, then seed the context with the
				// candidate resource, its namespace and image infos.
				enginectx.Reset()
				if err := enginectx.SetTargetResource(resource.Object); err != nil {
					debug.Error(err, "failed to add resource in context")
					errs = append(errs, err)
					continue
				}
				if err := enginectx.AddNamespace(resource.GetNamespace()); err != nil {
					debug.Error(err, "failed to add namespace in context")
					errs = append(errs, err)
					continue
				}
				if err := enginectx.AddImageInfos(&resource, c.configuration); err != nil {
					debug.Error(err, "failed to add image infos in context")
					errs = append(errs, err)
					continue
				}
				passed, err := conditions.CheckAnyAllConditions(logger, enginectx, *spec.Conditions)
				if err != nil {
					debug.Error(err, "failed to check condition")
					errs = append(errs, err)
					continue
				}
				if !passed {
					debug.Info("conditions did not pass")
					continue
				}
			}
			logger.WithValues("name", name, "namespace", namespace).Info("resource matched, it will be deleted...")
			// Both success and failure emit a policy event and a metric.
			if err := c.client.DeleteResource(ctx, resource.GetAPIVersion(), resource.GetKind(), namespace, name, false, deleteOptions); err != nil {
				if metrics != nil {
					metrics.RecordCleanupFailure(ctx, kind, namespace, policy, deleteOptions.PropagationPolicy)
				}
				debug.Error(err, "failed to delete resource")
				errs = append(errs, err)
				e := event.NewCleanupPolicyEvent(policy, resource, err)
				c.eventGen.Add(e)
			} else {
				if metrics != nil {
					metrics.RecordDeletedObject(ctx, kind, namespace, policy, deleteOptions.PropagationPolicy)
				}
				debug.Info("resource deleted")
				e := event.NewCleanupPolicyEvent(policy, resource, nil)
				c.eventGen.Add(e)
			}
		}
	}
	return multierr.Combine(errs...)
}

// reconcile processes one queued policy key: if the policy's execution time
// has passed it runs the cleanup, records the execution in status, and
// computes the next run; in either case the key is requeued for the next
// execution with a clamped minimum delay.
func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, namespace, name string) error {
	policy, err := c.getPolicy(namespace, name)
	if err != nil {
		// A missing policy means it was deleted; nothing left to do.
		if apierrors.IsNotFound(err) {
			return nil
		}
		logger.Error(err, "unable to get the policy from policy informer")
		return err
	}

	executionTime, err := policy.GetExecutionTime()
	if err != nil {
		logger.Error(err, "failed to get the policy execution time")
		return err
	}
	// By default the requeue targets the pending execution time; it is
	// recomputed below only when a cleanup pass actually runs.
	next := executionTime
	if time.Now().After(*executionTime) {
		if err := c.cleanup(ctx, logger, policy); err != nil {
			return err
		}
		if err := c.updateCleanupPolicyStatus(ctx, policy, namespace, time.Now()); err != nil {
			logger.Error(err, "failed to update the cleanup policy status")
			return err
		}
		next, err = policy.GetNextExecutionTime(time.Now())
		if err != nil {
			logger.Error(err, "failed to get the policy next execution time")
			return err
		}
	}

	// Clamp the delay to a sane minimum so a past-due or immediate next
	// execution time cannot produce a hot-loop.
	delay := time.Until(*next)
	if delay <= 0 {
		delay = minRequeueDelay
	}
	c.queue.AddAfter(key, delay)
	return nil
}

// updateCleanupPolicyStatus records lastExecution on the policy's status
// subresource, dispatching on the concrete policy type. Implementations of
// CleanupPolicyInterface other than the two known types are silently ignored.
//
// Renamed locals/params (Go calls are positional, so this is backward
// compatible): the parameter previously named `time` shadowed the time
// package, and the local previously named `new` shadowed the predeclared
// identifier.
func (c *controller) updateCleanupPolicyStatus(ctx context.Context, policy kyvernov2.CleanupPolicyInterface, namespace string, lastExecution time.Time) error {
	switch obj := policy.(type) {
	case *kyvernov2.ClusterCleanupPolicy:
		// Work on a deep copy so the shared informer cache is never mutated.
		latest := obj.DeepCopy()
		latest.Status.LastExecutionTime = metav1.NewTime(lastExecution)

		updated, err := c.kyvernoClient.KyvernoV2().ClusterCleanupPolicies().UpdateStatus(ctx, latest, metav1.UpdateOptions{})
		if err != nil {
			return err
		}
		logging.V(3).Info("updated cluster cleanup policy status", "name", policy.GetName(), "status", updated.Status)
	case *kyvernov2.CleanupPolicy:
		latest := obj.DeepCopy()
		latest.Status.LastExecutionTime = metav1.NewTime(lastExecution)

		updated, err := c.kyvernoClient.KyvernoV2().CleanupPolicies(namespace).UpdateStatus(ctx, latest, metav1.UpdateOptions{})
		if err != nil {
			return err
		}
		logging.V(3).Info("updated cleanup policy status", "name", policy.GetName(), "namespace", policy.GetNamespace(), "status", updated.Status)
	}
	return nil
}
