// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
//
// SPDX-License-Identifier: Apache-2.0

package postgrescluster

import (
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/client-go/tools/record"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/crunchydata/postgres-operator/internal/collector"
	"github.com/crunchydata/postgres-operator/internal/config"
	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
	"github.com/crunchydata/postgres-operator/internal/initialize"
	"github.com/crunchydata/postgres-operator/internal/kubernetes"
	"github.com/crunchydata/postgres-operator/internal/logging"
	"github.com/crunchydata/postgres-operator/internal/naming"
	"github.com/crunchydata/postgres-operator/internal/pki"
	"github.com/crunchydata/postgres-operator/internal/postgres"
	"github.com/crunchydata/postgres-operator/internal/tracing"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

// controllerName identifies this controller as the field manager of its writes
// and as the source of the Kubernetes Events it emits.
const controllerName = naming.ControllerPostgresCluster

// Reconciler holds resources for the PostgresCluster reconciler
type Reconciler struct {
	// PodExec executes a command in a container of a running Pod, connecting
	// the provided streams to its stdin, stdout, and stderr.
	PodExec func(
		ctx context.Context, namespace, pod, container string,
		stdin io.Reader, stdout, stderr io.Writer, command ...string,
	) error

	// Reader provides read access to the Kubernetes API.
	Reader interface {
		Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error
		List(context.Context, client.ObjectList, ...client.ListOption) error
	}
	// Writer provides write access to the Kubernetes API.
	Writer interface {
		Delete(context.Context, client.Object, ...client.DeleteOption) error
		DeleteAllOf(context.Context, client.Object, ...client.DeleteAllOfOption) error
		Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error
		Update(context.Context, client.Object, ...client.UpdateOption) error
	}
	// StatusWriter patches the status subresource of API objects.
	StatusWriter interface {
		Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error
	}

	// Recorder emits Kubernetes Events attributed to this controller.
	Recorder record.EventRecorder
}

// +kubebuilder:rbac:groups="",resources="events",verbs={create,patch}
// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/status",verbs={patch}

// Reconcile drives the observed state of cluster and its dependent Kubernetes
// resources toward the state described by its spec, patching the cluster
// status with whatever progress was made. Steps run in a strict order; most
// are skipped as soon as one returns an error.
func (r *Reconciler) Reconcile(
	ctx context.Context, cluster *v1beta1.PostgresCluster) (reconcile.Result, error,
) {
	ctx, span := tracing.Start(ctx, "reconcile-postgrescluster")
	log := logging.FromContext(ctx)
	defer span.End()

	// Set any defaults that may not have been stored in the API. No DeepCopy
	// is necessary because controller-runtime makes a copy before returning
	// from its cache.
	cluster.Default()

	// TODO(openshift): Separate this into more specific detections elsewhere.
	if cluster.Spec.OpenShift == nil {
		cluster.Spec.OpenShift = initialize.Bool(kubernetes.IsOpenShift(ctx))
	}

	// Keep a copy of cluster prior to any manipulations.
	before := cluster.DeepCopy()

	// NOTE(cbandy): When a namespace is deleted, objects owned by a
	// PostgresCluster may be deleted before the PostgresCluster is deleted.
	// When this happens, any attempt to reconcile those objects is rejected
	// as Forbidden: "unable to create new content in namespace … because it is
	// being terminated".

	// Check for and handle deletion of cluster. Return early if it is being
	// deleted or there was an error.
	if result, err := r.handleDelete(ctx, cluster); err != nil {
		log.Error(err, "deleting")
		return runtime.ErrorWithBackoff(tracing.Escape(span, err))

	} else if result != nil {
		if log := log.V(1); log.Enabled() {
			log.Info("deleting", "result", fmt.Sprintf("%+v", *result))
		}
		return *result, nil
	}

	// Perform initial validation on a cluster
	// TODO: Move this to a defaulting (mutating admission) webhook
	// to leverage regular validation.

	// verify all needed image values are defined
	if err := config.VerifyImageValues(cluster); err != nil {
		// warning event with missing image information
		r.Recorder.Event(cluster, corev1.EventTypeWarning, "MissingRequiredImage",
			err.Error())
		// specifically allow reconciliation if the cluster is shutdown to
		// facilitate upgrades, otherwise return
		if !initialize.FromPointer(cluster.Spec.Shutdown) {
			return runtime.ErrorWithBackoff(tracing.Escape(span, err))
		}
	}
	// Issue Warning Event if postgres version is EOL according to PostgreSQL:
	// https://www.postgresql.org/support/versioning/
	currentTime := time.Now()
	if postgres.ReleaseIsFinal(cluster.Spec.PostgresVersion, currentTime) {
		r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "EndOfLifePostgresVersion",
			"The last minor version of Postgres %[1]v has been released."+
				" PG %[1]v will no longer receive updates. We recommend upgrading."+
				" See https://www.postgresql.org/support/versioning",
			cluster.Spec.PostgresVersion)
	}

	if cluster.Spec.Standby != nil &&
		cluster.Spec.Standby.Enabled &&
		cluster.Spec.Standby.Host == "" &&
		cluster.Spec.Standby.RepoName == "" {
		// When a standby cluster is requested but a repoName or host is not provided
		// the cluster will be created as a non-standby. Reject any clusters with
		// this configuration and provide an event
		path := field.NewPath("spec", "standby")
		err := field.Invalid(path, cluster.Name, "Standby requires a host or repoName to be enabled")
		r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidStandbyConfiguration", err.Error())
		return runtime.ErrorWithBackoff(tracing.Escape(span, err))
	}

	// Resources and observations produced and consumed by the reconciliation
	// steps below. err threads through the whole chain: each step runs only
	// while every prior step has succeeded.
	var (
		clusterConfigMap             *corev1.ConfigMap
		clusterReplicationSecret     *corev1.Secret
		clusterPodService            *corev1.Service
		clusterVolumes               []*corev1.PersistentVolumeClaim
		instanceServiceAccount       *corev1.ServiceAccount
		instances                    *observedInstances
		patroniLeaderService         *corev1.Service
		primaryCertificate           *corev1.SecretProjection
		primaryService               *corev1.Service
		replicaService               *corev1.Service
		rootCA                       *pki.RootCertificateAuthority
		monitoringSecret             *corev1.Secret
		exporterQueriesConfig        *corev1.ConfigMap
		exporterWebConfig            *corev1.ConfigMap
		err                          error
		backupsSpecFound             bool
		backupsReconciliationAllowed bool
		dedicatedSnapshotPVC         *corev1.PersistentVolumeClaim
	)

	// patchClusterStatus sends the accumulated status changes, if any, to the
	// API server. It is called on every return path below.
	patchClusterStatus := func() error {
		if !equality.Semantic.DeepEqual(before.Status, cluster.Status) {
			// NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track
			// managed fields on the status subresource: https://issue.k8s.io/88901
			if err := r.StatusWriter.Patch(ctx, cluster, client.MergeFrom(before)); err != nil {
				log.Error(err, "patching cluster status")
				return err
			}
			log.V(1).Info("patched cluster status")
		}
		return nil
	}

	// if the cluster is paused, set a condition and return
	if cluster.Spec.Paused != nil && *cluster.Spec.Paused {
		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
			Type:    v1beta1.PostgresClusterProgressing,
			Status:  metav1.ConditionFalse,
			Reason:  "Paused",
			Message: "No spec changes will be applied and no other statuses will be updated.",

			ObservedGeneration: cluster.GetGeneration(),
		})
		return runtime.ErrorWithBackoff(tracing.Escape(span, patchClusterStatus()))
	} else {
		meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing)
	}

	// NOTE: err is still nil at this point; the "if err == nil" form keeps the
	// step uniform with the chain that follows.
	if err == nil {
		backupsSpecFound, backupsReconciliationAllowed, err = r.BackupsEnabled(ctx, cluster)

		// If we cannot reconcile because the backup reconciliation is paused, set a condition and exit
		if !backupsReconciliationAllowed {
			meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
				Type:   v1beta1.PostgresClusterProgressing,
				Status: metav1.ConditionFalse,
				Reason: "Paused",
				Message: "Reconciliation is paused: please fill in spec.backups " +
					"or add the postgres-operator.crunchydata.com/authorizeBackupRemoval " +
					"annotation to authorize backup removal.",

				ObservedGeneration: cluster.GetGeneration(),
			})
			return runtime.ErrorWithBackoff(tracing.Escape(span, patchClusterStatus()))
		} else {
			meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing)
		}
	}

	pgHBAs := r.generatePostgresHBAs(ctx, cluster)
	pgParameters := r.generatePostgresParameters(ctx, cluster, backupsSpecFound)

	otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, pgParameters)

	if err == nil {
		rootCA, err = r.reconcileRootCertificate(ctx, cluster)
	}

	if err == nil {
		// Since any existing data directories must be moved prior to bootstrapping the
		// cluster, further reconciliation will not occur until the directory move Jobs
		// (if configured) have completed. Func reconcileDirMoveJobs() will therefore
		// return a bool indicating that the controller should return early while any
		// required Jobs are running, after which it will indicate that an early
		// return is no longer needed, and reconciliation can proceed normally.
		returnEarly, err := r.reconcileDirMoveJobs(ctx, cluster)
		if err != nil || returnEarly {
			return runtime.ErrorWithBackoff(tracing.Escape(span,
				errors.Join(err, patchClusterStatus())))
		}
	}
	if err == nil {
		clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster)
	}
	if err == nil {
		clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, clusterVolumes)
	}
	if err == nil {
		instances, err = r.observeInstances(ctx, cluster)
	}

	// result accumulates any requeue requests from the remaining steps.
	result := reconcile.Result{}

	if err == nil {
		var requeue time.Duration
		if requeue, err = r.reconcilePatroniStatus(ctx, cluster, instances); err == nil && requeue > 0 {
			result.RequeueAfter = requeue
		}
	}
	if err == nil {
		err = r.reconcilePatroniSwitchover(ctx, cluster, instances)
	}
	// reconcile the Pod service before reconciling any data source in case it is necessary
	// to start Pods during data source reconciliation that require network connections (e.g.
	// if it is necessary to start a dedicated repo host to bootstrap a new cluster using its
	// own existing backups).
	if err == nil {
		clusterPodService, err = r.reconcileClusterPodService(ctx, cluster)
	}
	// reconcile the RBAC resources before reconciling any data source in case
	// restore/move Job pods require the ServiceAccount to access any data source.
	// e.g., we are restoring from an S3 source using an IAM for access
	// - https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html
	if err == nil {
		instanceServiceAccount, err = r.reconcileRBACResources(ctx, cluster)
	}
	// First handle reconciling any data source configured for the PostgresCluster.  This includes
	// reconciling the data source defined to bootstrap a new cluster, as well as a reconciling
	// a data source to perform restore in-place and re-bootstrap the cluster.
	if err == nil {
		// Since the PostgreSQL data source needs to be populated prior to bootstrapping the
		// cluster, further reconciliation will not occur until the data source (if configured) is
		// initialized.  Func reconcileDataSource() will therefore return a bool indicating that
		// the controller should return early while data initialization is in progress, after
		// which it will indicate that an early return is no longer needed, and reconciliation
		// can proceed normally.
		returnEarly, err := r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA, backupsSpecFound)
		if err != nil || returnEarly {
			return runtime.ErrorWithBackoff(tracing.Escape(span, errors.Join(err, patchClusterStatus())))
		}
	}
	if err == nil {
		clusterConfigMap, err = r.reconcileClusterConfigMap(ctx, cluster, pgHBAs, pgParameters)
	}
	if err == nil {
		clusterReplicationSecret, err = r.reconcileReplicationSecret(ctx, cluster, rootCA)
	}
	if err == nil {
		patroniLeaderService, err = r.reconcilePatroniLeaderLease(ctx, cluster)
	}
	if err == nil {
		primaryService, err = r.reconcileClusterPrimaryService(ctx, cluster, patroniLeaderService)
	}
	if err == nil {
		replicaService, err = r.reconcileClusterReplicaService(ctx, cluster)
	}
	if err == nil {
		primaryCertificate, err = r.reconcileClusterCertificate(ctx, rootCA, cluster, primaryService, replicaService)
	}
	if err == nil {
		err = r.reconcilePatroniDistributedConfiguration(ctx, cluster)
	}
	if err == nil {
		err = r.reconcilePatroniDynamicConfiguration(ctx, cluster, instances, pgHBAs, pgParameters)
	}
	if err == nil {
		monitoringSecret, err = r.reconcileMonitoringSecret(ctx, cluster)
	}
	if err == nil {
		exporterQueriesConfig, err = r.reconcileExporterQueriesConfig(ctx, cluster)
	}
	if err == nil {
		exporterWebConfig, err = r.reconcileExporterWebConfig(ctx, cluster)
	}
	if err == nil {
		err = r.reconcileInstanceSets(
			ctx, cluster, clusterConfigMap, clusterReplicationSecret, rootCA,
			clusterPodService, instanceServiceAccount, instances, patroniLeaderService,
			primaryCertificate, clusterVolumes, exporterQueriesConfig, exporterWebConfig,
			backupsSpecFound, otelConfig, pgParameters,
		)
	}

	if err == nil {
		err = r.reconcilePostgresDatabases(ctx, cluster, instances)
	}
	if err == nil {
		err = r.reconcilePostgresUsers(ctx, cluster, instances)
	}

	if err == nil {
		// Merge any requeue requested by pgBackRest into result. A later
		// RequeueAfter intentionally replaces the one from Patroni above.
		var next reconcile.Result
		if next, err = r.reconcilePGBackRest(ctx, cluster,
			instances, rootCA, backupsSpecFound); err == nil && !next.IsZero() {
			result.Requeue = result.Requeue || next.Requeue
			if next.RequeueAfter > 0 {
				result.RequeueAfter = next.RequeueAfter
			}
		}
	}
	if err == nil {
		dedicatedSnapshotPVC, err = r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes)
	}
	if err == nil {
		err = r.reconcileVolumeSnapshots(ctx, cluster, dedicatedSnapshotPVC)
	}
	if err == nil {
		err = r.reconcilePGBouncer(ctx, cluster, instances, primaryCertificate, rootCA)
	}
	if err == nil {
		err = r.reconcilePGMonitorExporter(ctx, cluster, instances, monitoringSecret)
	}
	if err == nil {
		err = r.reconcileDatabaseInitSQL(ctx, cluster, instances)
	}
	if err == nil {
		err = r.reconcilePGAdmin(ctx, cluster)
	}
	if err == nil {
		// This is after [Reconciler.rolloutInstances] to ensure that recreating
		// Pods takes precedence.
		err = r.handlePatroniRestarts(ctx, cluster, instances)
	}

	// at this point everything reconciled successfully, and we can update the
	// observedGeneration
	cluster.Status.ObservedGeneration = cluster.GetGeneration()

	log.V(1).Info("reconciled cluster")

	return result, tracing.Escape(span, errors.Join(err, patchClusterStatus()))
}

// deleteControlled safely deletes object when it is controlled by cluster.
// Objects controlled by something else (or by nothing) are left untouched.
// The delete carries UID and ResourceVersion preconditions so that only the
// exact object observed here can be removed.
func (r *Reconciler) deleteControlled(
	ctx context.Context, cluster *v1beta1.PostgresCluster, object client.Object,
) error {
	// Not ours to delete; do nothing.
	if !metav1.IsControlledBy(object, cluster) {
		return nil
	}

	uid := object.GetUID()
	version := object.GetResourceVersion()

	return r.Writer.Delete(ctx, object, client.Preconditions{
		UID:             &uid,
		ResourceVersion: &version,
	})
}

// The owner reference created by controllerutil.SetControllerReference blocks
// deletion. The OwnerReferencesPermissionEnforcement plugin requires that the
// creator of such a reference have either "delete" permission on the owner or
// "update" permission on the owner's "finalizers" subresource.
// - https://docs.k8s.io/reference/access-authn-authz/admission-controllers/
// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/finalizers",verbs={update}

// setControllerReference sets owner as a Controller OwnerReference on controlled.
// Only one OwnerReference can be a controller, so it returns an error if another
// is already set. The reference also blocks deletion of owner until controlled
// is gone; see the RBAC note above.
func (r *Reconciler) setControllerReference(
	owner *v1beta1.PostgresCluster, controlled client.Object,
) error {
	return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme)
}

// setOwnerReference sets an OwnerReference on the object without setting the
// owner as a controller. This allows for multiple OwnerReferences on an object.
// NOTE(review): the second parameter is named "controlled" to mirror
// setControllerReference, but this reference is deliberately non-controlling.
func (r *Reconciler) setOwnerReference(
	owner *v1beta1.PostgresCluster, controlled client.Object,
) error {
	return controllerutil.SetOwnerReference(owner, controlled, runtime.Scheme)
}

// +kubebuilder:rbac:groups="",resources="configmaps",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="endpoints",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="secrets",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="services",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={get,list,watch}
// +kubebuilder:rbac:groups="apps",resources="deployments",verbs={get,list,watch}
// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={get,list,watch}
// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={get,list,watch}
// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={get,list,watch}
// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={get,list,watch}
// +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={get,list,watch}
// +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={get,list,watch}
// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch}

// ManagedReconciler creates a [Reconciler] and adds it to m. Any error from
// creating the Pod executor is joined with the controller builder's result so
// both are reported to the caller.
func ManagedReconciler(m manager.Manager) error {
	exec, err := runtime.NewPodExecutor(m.GetConfig())

	// NOTE: previously named "kubernetes", which shadowed the imported
	// internal/kubernetes package; renamed to avoid the hazard.
	// client.WithFieldOwner attributes every write to this controller for
	// server-side apply field management.
	cc := client.WithFieldOwner(m.GetClient(), controllerName)
	recorder := m.GetEventRecorderFor(controllerName)

	reconciler := &Reconciler{
		PodExec:      exec,
		Reader:       cc,
		Recorder:     recorder,
		StatusWriter: cc.Status(),
		Writer:       cc,
	}

	// Watch the PostgresCluster API and every kind of object the reconciler
	// creates and owns, plus Pods and all StatefulSets via custom handlers.
	return errors.Join(err, builder.ControllerManagedBy(m).
		For(&v1beta1.PostgresCluster{}).
		Owns(&corev1.ConfigMap{}).
		Owns(&corev1.Endpoints{}).
		Owns(&corev1.PersistentVolumeClaim{}).
		Owns(&corev1.Secret{}).
		Owns(&corev1.Service{}).
		Owns(&corev1.ServiceAccount{}).
		Owns(&appsv1.Deployment{}).
		Owns(&appsv1.StatefulSet{}).
		Owns(&batchv1.Job{}).
		Owns(&rbacv1.Role{}).
		Owns(&rbacv1.RoleBinding{}).
		Owns(&batchv1.CronJob{}).
		Owns(&policyv1.PodDisruptionBudget{}).
		Watches(&corev1.Pod{}, reconciler.watchPods()).
		Watches(&appsv1.StatefulSet{},
			reconciler.controllerRefHandlerFuncs()). // watch all StatefulSets
		Complete(reconcile.AsReconciler(cc, reconciler)))
}
