// Copyright 2025 Supabase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package manager

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"

	"github.com/multigres/multigres/go/mterrors"
	clustermetadatapb "github.com/multigres/multigres/go/pb/clustermetadata"
	mtrpcpb "github.com/multigres/multigres/go/pb/mtrpc"
	multipoolermanagerdatapb "github.com/multigres/multigres/go/pb/multipoolermanagerdata"
)

// WaitForLSN waits for PostgreSQL server to reach a specific LSN position
func (pm *MultiPoolerManager) WaitForLSN(ctx context.Context, targetLsn string) error {
	if err := pm.checkReady(); err != nil {
		return err
	}
	pm.logger.InfoContext(ctx, "WaitForLSN called", "target_lsn", targetLsn)

	// Only a REPLICA pooler running in recovery mode may wait on replay progress.
	if err := pm.checkReplicaGuardrails(ctx); err != nil {
		return err
	}

	// Poll the standby's replay position until it catches up to the target
	// LSN or the caller's context expires.
	pm.logger.InfoContext(ctx, "Waiting for standby to reach target LSN", "target_lsn", targetLsn)

	const pollInterval = 100 * time.Millisecond
	ticker := time.NewTicker(pollInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			pm.logger.ErrorContext(ctx, "WaitForLSN context cancelled or timed out",
				"target_lsn", targetLsn,
				"error", ctx.Err())
			return mterrors.Wrap(ctx.Err(), "context cancelled or timed out while waiting for LSN")

		case <-ticker.C:
			// Has the standby replayed WAL up to the target yet?
			caughtUp, checkErr := pm.checkLSNReached(ctx, targetLsn)
			if checkErr != nil {
				pm.logger.ErrorContext(ctx, "Failed to check replay LSN", "error", checkErr)
				return checkErr
			}
			if caughtUp {
				pm.logger.InfoContext(ctx, "Standby reached target LSN", "target_lsn", targetLsn)
				return nil
			}
		}
	}
}

// SetPrimaryConnInfo sets the primary connection info for a standby server
func (pm *MultiPoolerManager) SetPrimaryConnInfo(ctx context.Context, host string, port int32, stopReplicationBefore, startReplicationAfter bool, currentTerm int64, force bool) error {
	if err := pm.checkReady(); err != nil {
		return err
	}

	// Only one mutating action may run at a time; hold the action lock for
	// the remainder of this call.
	var err error
	if ctx, err = pm.actionLock.Acquire(ctx, "SetPrimaryConnInfo"); err != nil {
		return err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "SetPrimaryConnInfo called",
		"host", host,
		"port", port,
		"stop_replication_before", stopReplicationBefore,
		"start_replication_after", startReplicationAfter,
		"current_term", currentTerm,
		"force", force)

	// Enforce consensus-term rules before touching any replication state.
	if err = pm.validateAndUpdateTerm(ctx, currentTerm, force); err != nil {
		return err
	}

	// Delegate to the variant that assumes the action lock is already held.
	return pm.setPrimaryConnInfoLocked(ctx, host, port, stopReplicationBefore, startReplicationAfter)
}

// setPrimaryConnInfoLocked sets the primary connection info for a standby server.
// This function assumes the action lock is already held by the caller.
//
// Sequence: verify REPLICA guardrails, optionally pause WAL replay, write
// primary_conninfo via ALTER SYSTEM, reload the PostgreSQL configuration so
// the setting takes effect, then optionally reconnect and resume WAL replay.
func (pm *MultiPoolerManager) setPrimaryConnInfoLocked(ctx context.Context, host string, port int32, stopReplicationBefore, startReplicationAfter bool) error {
	if err := AssertActionLockHeld(ctx); err != nil {
		return err
	}

	if err := pm.checkReady(); err != nil {
		return err
	}

	// Guardrail: Check pooler type - only REPLICA poolers can set primary_conninfo
	if err := pm.checkPoolerType(clustermetadatapb.PoolerType_REPLICA, "SetPrimaryConnInfo"); err != nil {
		return err
	}

	// Guardrail: Check if the PostgreSQL instance is in recovery (standby mode)
	isPrimary, err := pm.isPrimary(ctx)
	if err != nil {
		pm.logger.ErrorContext(ctx, "Failed to check if instance is in recovery", "error", err)
		return mterrors.Wrap(err, "failed to check recovery status")
	}

	if isPrimary {
		pm.logger.ErrorContext(ctx, "SetPrimaryConnInfo called on non-standby instance", "service_id", pm.serviceID.String())
		return mterrors.New(mtrpcpb.Code_FAILED_PRECONDITION,
			fmt.Sprintf("operation not allowed: the PostgreSQL instance is not in standby mode (service_id: %s)", pm.serviceID.String()))
	}

	// Optionally stop replication before making changes. Only WAL replay is
	// paused (REPLAY_ONLY); the WAL receiver keeps streaming. The second
	// argument (wait=false) makes this a non-blocking pause request.
	if stopReplicationBefore {
		_, err := pm.pauseReplication(ctx, multipoolermanagerdatapb.ReplicationPauseMode_REPLICATION_PAUSE_MODE_REPLAY_ONLY, false)
		if err != nil {
			return err
		}
	}

	// Build primary_conninfo connection string
	// Format: host=<host> port=<port> user=<user> application_name=<name>
	// NOTE(review): the user field is populated with the pooler's database
	// name below — presumably the replication role is named after the
	// database; confirm against the provisioning code.
	pm.mu.Lock()
	database := pm.multipooler.Database
	pm.mu.Unlock()

	// Generate application name using the shared helper
	appName := generateApplicationName(pm.serviceID)
	connInfo := fmt.Sprintf("host=%s port=%d user=%s application_name=%s",
		host, port, database, appName)

	// Set primary_conninfo using ALTER SYSTEM
	if err = pm.setPrimaryConnInfo(ctx, connInfo); err != nil {
		return err
	}

	// Reload PostgreSQL configuration to apply changes
	if err = pm.reloadPostgresConfig(ctx); err != nil {
		return err
	}

	// Optionally start replication after making changes.
	// Note: If replication was already running when calling SetPrimaryConnInfo,
	// even if we don't set startReplicationAfter to true, replication will be running.
	if startReplicationAfter {
		// Re-establish this manager's own DB connection before resuming replay.
		// NOTE(review): the log message says "after restart", but only a config
		// reload happens above — confirm whether a restart can occur here.
		if err := pm.connectDB(); err != nil {
			pm.logger.ErrorContext(ctx, "Failed to reconnect to database after restart", "error", err)
			return mterrors.Wrap(err, "failed to reconnect to database")
		}

		pm.logger.InfoContext(ctx, "Starting replication after setting primary_conninfo")
		if err := pm.resumeWALReplay(ctx); err != nil {
			return err
		}
	}

	pm.logger.InfoContext(ctx, "SetPrimaryConnInfo completed successfully")
	return nil
}

// StartReplication starts WAL replay on standby (calls pg_wal_replay_resume)
func (pm *MultiPoolerManager) StartReplication(ctx context.Context) error {
	if err := pm.checkReady(); err != nil {
		return err
	}

	// Serialize with other mutating operations via the action lock.
	var err error
	if ctx, err = pm.actionLock.Acquire(ctx, "StartReplication"); err != nil {
		return err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "StartReplication called")

	// Only a REPLICA pooler in recovery mode may resume WAL replay.
	if err = pm.checkReplicaGuardrails(ctx); err != nil {
		return err
	}

	// Resume replay on the standby.
	if err = pm.resumeWALReplay(ctx); err != nil {
		return err
	}

	pm.logger.InfoContext(ctx, "StartReplication completed successfully")
	return nil
}

// StopReplication stops replication based on the specified mode
func (pm *MultiPoolerManager) StopReplication(ctx context.Context, mode multipoolermanagerdatapb.ReplicationPauseMode, wait bool) error {
	if err := pm.checkReady(); err != nil {
		return err
	}

	// Take the action lock so this mutation cannot interleave with others.
	var err error
	if ctx, err = pm.actionLock.Acquire(ctx, "StopReplication"); err != nil {
		return err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "StopReplication called", "mode", mode, "wait", wait)

	// Only a REPLICA pooler in recovery mode may pause replication.
	if err = pm.checkReplicaGuardrails(ctx); err != nil {
		return err
	}

	// The status returned by pauseReplication is not needed here.
	if _, err = pm.pauseReplication(ctx, mode, wait); err != nil {
		return err
	}

	pm.logger.InfoContext(ctx, "StopReplication completed successfully")
	return nil
}

// StandbyReplicationStatus gets the current replication status of the standby
func (pm *MultiPoolerManager) StandbyReplicationStatus(ctx context.Context) (*multipoolermanagerdatapb.StandbyReplicationStatus, error) {
	if err := pm.checkReady(); err != nil {
		return nil, err
	}
	pm.logger.InfoContext(ctx, "StandbyReplicationStatus called")

	// Only meaningful on a REPLICA pooler that is in recovery.
	if guardErr := pm.checkReplicaGuardrails(ctx); guardErr != nil {
		return nil, guardErr
	}

	// Fetch all replication status fields in one go.
	status, queryErr := pm.queryReplicationStatus(ctx)
	if queryErr != nil {
		pm.logger.ErrorContext(ctx, "Failed to get replication status", "error", queryErr)
		return nil, queryErr
	}

	pm.logger.InfoContext(ctx, "StandbyReplicationStatus completed",
		"last_replay_lsn", status.LastReplayLsn,
		"last_receive_lsn", status.LastReceiveLsn,
		"is_paused", status.IsWalReplayPaused,
		"pause_state", status.WalReplayPauseState,
		"primary_conn_info", status.PrimaryConnInfo)

	return status, nil
}

// Status gets unified status that works for both PRIMARY and REPLICA poolers
// The multipooler returns information based on what type it believes itself to be.
//
// It cross-checks the pooler type recorded in topology against the actual
// PostgreSQL role (primary vs. standby) and returns MT13002 on a mismatch;
// otherwise it delegates to PrimaryStatus or StandbyReplicationStatus.
func (pm *MultiPoolerManager) Status(ctx context.Context) (*multipoolermanagerdatapb.Status, error) {
	if err := pm.checkReady(); err != nil {
		return nil, err
	}
	pm.logger.InfoContext(ctx, "Status called")

	// Get pooler type from topology (cached record guarded by pm.mu)
	pm.mu.Lock()
	poolerType := pm.multipooler.MultiPooler.Type
	pm.mu.Unlock()

	// Check actual PostgreSQL role
	isPrimary, err := pm.isPrimary(ctx)
	if err != nil {
		pm.logger.ErrorContext(ctx, "Failed to check PostgreSQL role", "error", err)
		return nil, err
	}

	// Verify consistency between topology and PostgreSQL state
	expectedPrimary := (poolerType == clustermetadatapb.PoolerType_PRIMARY)
	if expectedPrimary != isPrimary {
		pm.logger.ErrorContext(ctx, "Mismatch between pooler type and PostgreSQL role",
			"pooler_type", poolerType,
			"is_primary_in_pg", isPrimary)
		pgRole := "standby"
		if isPrimary {
			pgRole = "primary"
		}
		return nil, mterrors.MT13002(poolerType.String(), pgRole).Err
	}

	poolerStatus := &multipoolermanagerdatapb.Status{
		PoolerType: poolerType,
	}

	// Based on actual PostgreSQL role, populate appropriate status
	if isPrimary {
		// Acting as primary - get primary status
		primaryStatus, err := pm.PrimaryStatus(ctx)
		if err != nil {
			pm.logger.ErrorContext(ctx, "Failed to get primary status", "error", err)
			return nil, err
		}
		poolerStatus.PrimaryStatus = primaryStatus
		// Fix: completion logs previously said "ReplicationStatus completed",
		// a leftover from an earlier method name; log under the correct name.
		pm.logger.InfoContext(ctx, "Status completed (acting as primary)",
			"pooler_type", poolerType,
			"lsn", primaryStatus.Lsn)
	} else {
		// Acting as standby - get replication status
		replStatus, err := pm.StandbyReplicationStatus(ctx)
		if err != nil {
			pm.logger.ErrorContext(ctx, "Failed to get standby replication status", "error", err)
			return nil, err
		}
		poolerStatus.ReplicationStatus = replStatus
		pm.logger.InfoContext(ctx, "Status completed (acting as standby)",
			"pooler_type", poolerType,
			"last_replay_lsn", replStatus.LastReplayLsn)
	}

	return poolerStatus, nil
}

// ResetReplication resets the standby's connection to its primary by clearing primary_conninfo
// and reloading PostgreSQL configuration. This effectively disconnects the replica from the primary
// and prevents it from acknowledging commits, making it unavailable for synchronous replication
// until reconfigured.
func (pm *MultiPoolerManager) ResetReplication(ctx context.Context) error {
	if err := pm.checkReady(); err != nil {
		return err
	}

	// Serialize with other mutating operations via the action lock.
	var err error
	if ctx, err = pm.actionLock.Acquire(ctx, "ResetReplication"); err != nil {
		return err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "ResetReplication called")

	// Only a REPLICA pooler in recovery mode may be reset.
	if err = pm.checkReplicaGuardrails(ctx); err != nil {
		return err
	}

	// Pause the receiver (clears primary_conninfo) and block until the WAL
	// receiver has actually disconnected.
	if _, err = pm.pauseReplication(ctx, multipoolermanagerdatapb.ReplicationPauseMode_REPLICATION_PAUSE_MODE_RECEIVER_ONLY, true /* wait */); err != nil {
		return err
	}

	pm.logger.InfoContext(ctx, "ResetReplication completed successfully - standby disconnected from primary")
	return nil
}

// ConfigureSynchronousReplication configures PostgreSQL synchronous replication settings
func (pm *MultiPoolerManager) ConfigureSynchronousReplication(ctx context.Context, synchronousCommit multipoolermanagerdatapb.SynchronousCommitLevel, synchronousMethod multipoolermanagerdatapb.SynchronousMethod, numSync int32, standbyIDs []*clustermetadatapb.ID, reloadConfig bool) error {
	if err := pm.checkReady(); err != nil {
		return err
	}

	// Serialize with other mutating operations.
	var err error
	if ctx, err = pm.actionLock.Acquire(ctx, "ConfigureSynchronousReplication"); err != nil {
		return err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "ConfigureSynchronousReplication called",
		"synchronous_commit", synchronousCommit,
		"synchronous_method", synchronousMethod,
		"num_sync", numSync,
		"standby_ids", standbyIDs,
		"reload_config", reloadConfig)

	// Reject malformed parameters before touching any state.
	if err = validateSyncReplicationParams(numSync, standbyIDs); err != nil {
		return err
	}

	// Only a PRIMARY (not in recovery) may change sync replication settings.
	if err = pm.checkPrimaryGuardrails(ctx); err != nil {
		return err
	}

	// Apply synchronous_commit first, then synchronous_standby_names.
	if err = pm.setSynchronousCommit(ctx, synchronousCommit); err != nil {
		return err
	}
	if err = pm.setSynchronousStandbyNames(ctx, synchronousMethod, numSync, standbyIDs); err != nil {
		return err
	}

	// Reload so the new settings take effect, when the caller asked for it.
	if reloadConfig {
		if err = pm.reloadPostgresConfig(ctx); err != nil {
			return err
		}
	}

	pm.logger.InfoContext(ctx, "ConfigureSynchronousReplication completed successfully")
	return nil
}

// UpdateSynchronousStandbyList updates PostgreSQL synchronous_standby_names by adding,
// removing, or replacing members. It is idempotent and only valid when synchronous
// replication is already configured.
//
// Flow: validate inputs and PRIMARY guardrails, parse the current
// synchronous_standby_names into a structured config, apply the requested
// set operation (ADD / REMOVE / REPLACE), and rewrite the setting only when
// the rendered value actually changes.
func (pm *MultiPoolerManager) UpdateSynchronousStandbyList(ctx context.Context, operation multipoolermanagerdatapb.StandbyUpdateOperation, standbyIDs []*clustermetadatapb.ID, reloadConfig bool, consensusTerm int64, force bool) error {
	if err := pm.checkReady(); err != nil {
		return err
	}

	// Hold the action lock so this read-modify-write of the standby list
	// cannot interleave with other mutations.
	ctx, err := pm.actionLock.Acquire(ctx, "UpdateSynchronousStandbyList")
	if err != nil {
		return err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "UpdateSynchronousStandbyList called",
		"operation", operation,
		"standby_ids", standbyIDs,
		"reload_config", reloadConfig,
		"consensus_term", consensusTerm,
		"force", force)

	// === Validation ===
	// TODO: We need to validate consensus term here.
	// We should check if the request is a valid term.
	// If it's a newer term and probably we need to demote
	// ourself. But details yet to be implemented

	// Validate operation
	if operation == multipoolermanagerdatapb.StandbyUpdateOperation_STANDBY_UPDATE_OPERATION_UNSPECIFIED {
		return mterrors.New(mtrpcpb.Code_INVALID_ARGUMENT, "operation must be specified")
	}

	// Validate standby IDs using the shared validation function
	if err = validateStandbyIDs(standbyIDs); err != nil {
		return err
	}

	// Check PRIMARY guardrails (pooler type and non-recovery mode)
	if err = pm.checkPrimaryGuardrails(ctx); err != nil {
		return err
	}

	// === Parse Current Configuration ===

	// Get current synchronous replication configuration
	syncConfig, err := pm.getSynchronousReplicationConfig(ctx)
	if err != nil {
		return err
	}

	// Check if synchronous replication is configured; this RPC only edits an
	// existing configuration, it never creates one.
	if len(syncConfig.StandbyIds) == 0 {
		pm.logger.ErrorContext(ctx, "UpdateSynchronousStandbyList requires synchronous replication to be configured")
		return mterrors.New(mtrpcpb.Code_FAILED_PRECONDITION,
			"synchronous replication is not configured - use ConfigureSynchronousReplication first")
	}

	// Build the current value string for comparison (used for the idempotency
	// check below).
	currentValue, err := buildSynchronousStandbyNamesValue(syncConfig.SynchronousMethod, syncConfig.NumSync, syncConfig.StandbyIds)
	if err != nil {
		return err
	}

	pm.logger.InfoContext(ctx, "Current synchronous_standby_names", "value", currentValue)

	// === Apply Operation ===

	// Apply the requested operation using the current standby list
	var updatedStandbys []*clustermetadatapb.ID
	switch operation {
	case multipoolermanagerdatapb.StandbyUpdateOperation_STANDBY_UPDATE_OPERATION_ADD:
		updatedStandbys = applyAddOperation(syncConfig.StandbyIds, standbyIDs)

	case multipoolermanagerdatapb.StandbyUpdateOperation_STANDBY_UPDATE_OPERATION_REMOVE:
		updatedStandbys = applyRemoveOperation(syncConfig.StandbyIds, standbyIDs)

	case multipoolermanagerdatapb.StandbyUpdateOperation_STANDBY_UPDATE_OPERATION_REPLACE:
		updatedStandbys = applyReplaceOperation(standbyIDs)

	default:
		// Unreachable for known enum values (UNSPECIFIED was rejected above);
		// guards against new enum members added to the proto.
		return mterrors.New(mtrpcpb.Code_INVALID_ARGUMENT,
			fmt.Sprintf("unsupported operation: %s", operation.String()))
	}

	// Validate that the final list is not empty
	if len(updatedStandbys) == 0 {
		return mterrors.New(mtrpcpb.Code_INVALID_ARGUMENT,
			"resulting standby list cannot be empty after operation")
	}

	// === Build and Apply New Configuration ===

	// Build new synchronous_standby_names value using shared helper
	newValue, err := buildSynchronousStandbyNamesValue(syncConfig.SynchronousMethod, syncConfig.NumSync, updatedStandbys)
	if err != nil {
		return err
	}

	// Check if there are any changes (idempotent)
	if currentValue == newValue {
		pm.logger.InfoContext(ctx, "No changes needed - configuration already matches desired state")
		return nil
	}

	pm.logger.InfoContext(ctx, "Updating synchronous_standby_names",
		"old_value", currentValue,
		"new_value", newValue)

	// Apply the setting using shared helper
	if err = applySynchronousStandbyNames(ctx, pm.db, pm.logger, newValue); err != nil {
		return err
	}

	// Reload configuration if requested
	if reloadConfig {
		if err := pm.reloadPostgresConfig(ctx); err != nil {
			return err
		}
	}

	pm.logger.InfoContext(ctx, "UpdateSynchronousStandbyList completed successfully")
	return nil
}

// PrimaryStatus gets the status of the leader server
func (pm *MultiPoolerManager) PrimaryStatus(ctx context.Context) (*multipoolermanagerdatapb.PrimaryStatus, error) {
	if err := pm.checkReady(); err != nil {
		return nil, err
	}

	pm.logger.InfoContext(ctx, "PrimaryStatus called")

	// Only meaningful on a PRIMARY that is not in recovery.
	if err := pm.checkPrimaryGuardrails(ctx); err != nil {
		return nil, err
	}

	// Current write position. checkPrimaryGuardrails already proved this
	// instance is acting as a primary, so the LSN query is safe.
	lsn, err := pm.getPrimaryLSN(ctx)
	if err != nil {
		return nil, err
	}

	// Connected followers come from pg_stat_replication.
	followers, err := pm.getConnectedFollowerIDs(ctx)
	if err != nil {
		return nil, err
	}

	// Current synchronous replication configuration.
	syncConfig, err := pm.getSynchronousReplicationConfig(ctx)
	if err != nil {
		return nil, err
	}

	// Guardrails passed, so from the PG perspective this server is a PRIMARY
	// and should be ready to serve traffic.
	status := &multipoolermanagerdatapb.PrimaryStatus{
		Lsn:                   lsn,
		Ready:                 true,
		ConnectedFollowers:    followers,
		SyncReplicationConfig: syncConfig,
	}

	pm.logger.InfoContext(ctx, "PrimaryStatus completed", "lsn", lsn, "followers_count", len(followers))
	return status, nil
}

// PrimaryPosition gets the current LSN position of the leader
func (pm *MultiPoolerManager) PrimaryPosition(ctx context.Context) (string, error) {
	if err := pm.checkReady(); err != nil {
		return "", err
	}

	pm.logger.InfoContext(ctx, "PrimaryPosition called")

	// This query only makes sense on a PRIMARY that is not in recovery.
	if guardErr := pm.checkPrimaryGuardrails(ctx); guardErr != nil {
		return "", guardErr
	}

	// Report the primary's current write LSN.
	return pm.getPrimaryLSN(ctx)
}

// StopReplicationAndGetStatus stops PostgreSQL replication (replay and/or receiver based on mode) and returns the status
func (pm *MultiPoolerManager) StopReplicationAndGetStatus(ctx context.Context, mode multipoolermanagerdatapb.ReplicationPauseMode, wait bool) (*multipoolermanagerdatapb.StandbyReplicationStatus, error) {
	if err := pm.checkReady(); err != nil {
		return nil, err
	}

	// Hold the action lock for the duration of the mutation.
	var err error
	if ctx, err = pm.actionLock.Acquire(ctx, "StopReplicationAndGetStatus"); err != nil {
		return nil, err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "StopReplicationAndGetStatus called", "mode", mode, "wait", wait)

	// Only a REPLICA pooler in recovery mode may pause replication.
	if err = pm.checkReplicaGuardrails(ctx); err != nil {
		return nil, err
	}

	// Pause according to mode and capture the resulting replication status.
	status, err := pm.pauseReplication(ctx, mode, wait)
	if err != nil {
		return nil, err
	}

	pm.logger.InfoContext(ctx, "StopReplicationAndGetStatus completed",
		"last_replay_lsn", status.LastReplayLsn,
		"last_receive_lsn", status.LastReceiveLsn,
		"is_paused", status.IsWalReplayPaused,
		"pause_state", status.WalReplayPauseState,
		"primary_conn_info", status.PrimaryConnInfo)

	return status, nil
}

// ChangeType changes the pooler type (PRIMARY/REPLICA)
func (pm *MultiPoolerManager) ChangeType(ctx context.Context, poolerType string) error {
	if err := pm.checkReady(); err != nil {
		return err
	}

	// Only one mutating action may run at a time.
	var err error
	if ctx, err = pm.actionLock.Acquire(ctx, "ChangeType"); err != nil {
		return err
	}
	defer pm.actionLock.Release(ctx)

	// Map the requested string to a concrete pooler type.
	// TODO: For now allow to change type to PRIMARY, this is to make it easier
	// to perform tests while we are still developing HA. Once, we have multiorch
	// fully implemented, we shouldn't allow to change the type to Primary.
	// This would happen organically as part of Promote workflow.
	var newType clustermetadatapb.PoolerType
	switch poolerType {
	case "PRIMARY":
		newType = clustermetadatapb.PoolerType_PRIMARY
	case "REPLICA":
		newType = clustermetadatapb.PoolerType_REPLICA
	default:
		return mterrors.New(mtrpcpb.Code_INVALID_ARGUMENT,
			fmt.Sprintf("invalid pooler type: %s, must be PRIMARY or REPLICA", poolerType))
	}

	pm.logger.InfoContext(ctx, "ChangeType called", "pooler_type", poolerType, "service_id", pm.serviceID.String())

	// Persist the new type in topology first.
	updatedMultipooler, updateErr := pm.topoClient.UpdateMultiPoolerFields(ctx, pm.serviceID, func(mp *clustermetadatapb.MultiPooler) error {
		mp.Type = newType
		return nil
	})
	if updateErr != nil {
		pm.logger.ErrorContext(ctx, "Failed to update pooler type in topology", "error", updateErr, "service_id", pm.serviceID.String())
		return mterrors.Wrap(updateErr, "failed to update pooler type in topology")
	}

	// Then refresh the locally cached record under the state mutex.
	pm.mu.Lock()
	defer pm.mu.Unlock()
	pm.multipooler.MultiPooler = updatedMultipooler
	pm.updateCachedMultipooler()
	pm.logger.InfoContext(ctx, "Pooler type updated successfully", "new_type", poolerType, "service_id", pm.serviceID.String())

	return nil
}

// State returns the current manager status and error information
func (pm *MultiPoolerManager) State(ctx context.Context) (*multipoolermanagerdatapb.StateResponse, error) {
	// Snapshot manager state under the mutex; no readiness check is needed
	// since this reports the readiness state itself.
	pm.mu.Lock()
	defer pm.mu.Unlock()

	resp := &multipoolermanagerdatapb.StateResponse{
		State: string(pm.state),
	}
	if pm.stateError != nil {
		resp.ErrorMessage = pm.stateError.Error()
	}
	return resp, nil
}

// GetFollowers gets the list of follower servers with detailed replication status
func (pm *MultiPoolerManager) GetFollowers(ctx context.Context) (*multipoolermanagerdatapb.GetFollowersResponse, error) {
	if err := pm.checkReady(); err != nil {
		return nil, err
	}

	pm.logger.InfoContext(ctx, "GetFollowers called")

	// Followers only exist on a PRIMARY.
	if err := pm.checkPrimaryGuardrails(ctx); err != nil {
		return nil, err
	}

	// The configured standby set comes from the synchronous replication config...
	syncConfig, err := pm.getSynchronousReplicationConfig(ctx)
	if err != nil {
		return nil, err
	}

	// ...while live connection details come from pg_stat_replication.
	connectedMap, err := pm.queryFollowerReplicationStats(ctx)
	if err != nil {
		return nil, err
	}

	// Merge the two: one entry per configured standby, with stats attached
	// only when the standby is currently connected.
	followers := make([]*multipoolermanagerdatapb.FollowerInfo, 0, len(syncConfig.StandbyIds))
	for _, standbyID := range syncConfig.StandbyIds {
		name := generateApplicationName(standbyID)
		info := &multipoolermanagerdatapb.FollowerInfo{
			FollowerId:      standbyID,
			ApplicationName: name,
		}
		stats, connected := connectedMap[name]
		info.IsConnected = connected
		if connected {
			info.ReplicationStats = stats
		}
		// ReplicationStats stays nil for disconnected followers.
		followers = append(followers, info)
	}

	pm.logger.InfoContext(ctx, "GetFollowers completed",
		"total_configured", len(followers),
		"connected_count", len(connectedMap))

	return &multipoolermanagerdatapb.GetFollowersResponse{
		Followers:  followers,
		SyncConfig: syncConfig,
	}, nil
}

// Demote demotes the current primary server
// This can be called for any of the following use cases:
// - By orchestrator when fixing a broken shard.
// - When performing a Planned demotion.
// - When receiving a SIGTERM and the pooler needs to shutdown.
//
// The operation is idempotent: if the server is already serving read-only,
// recorded as a REPLICA in topology, and read-only in PostgreSQL, it returns
// early with WasAlreadyDemoted=true. Otherwise it transitions to read-only
// serving, drains and checkpoints, terminates remaining write connections,
// captures the final LSN, restarts PostgreSQL as a standby, clears sync
// replication settings (best-effort), and updates topology.
func (pm *MultiPoolerManager) Demote(ctx context.Context, consensusTerm int64, drainTimeout time.Duration, force bool) (*multipoolermanagerdatapb.DemoteResponse, error) {
	if err := pm.checkReady(); err != nil {
		return nil, err
	}

	// Acquire the action lock to ensure only one mutation runs at a time
	ctx, err := pm.actionLock.Acquire(ctx, "Demote")
	if err != nil {
		return nil, err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "Demote called",
		"consensus_term", consensusTerm,
		"drain_timeout", drainTimeout,
		"force", force)

	// === Validation & State Check ===

	// Demote is an operational cleanup, not a leadership change.
	// Accept if term >= currentTerm to ensure the request isn't stale.
	// Equal or higher terms are safe.
	// Note: we still update the term, as this may arrive after a leader
	// appointment that this (now old) primary missed due to a network partition.
	if err = pm.validateAndUpdateTerm(ctx, consensusTerm, force); err != nil {
		return nil, err
	}

	// Guard rail: Demote can only be called on a PRIMARY
	if err := pm.checkPrimaryGuardrails(ctx); err != nil {
		return nil, err
	}

	// Check current demotion state
	state, err := pm.checkDemotionState(ctx)
	if err != nil {
		return nil, err
	}

	// If everything is already complete, return early (fully idempotent)
	if state.isServingReadOnly && state.isReplicaInTopology && state.isReadOnly {
		pm.logger.InfoContext(ctx, "Demotion already complete (idempotent)",
			"lsn", state.finalLSN)
		return &multipoolermanagerdatapb.DemoteResponse{
			WasAlreadyDemoted:     true,
			ConsensusTerm:         consensusTerm,
			LsnPosition:           state.finalLSN,
			ConnectionsTerminated: 0,
		}, nil
	}

	// Transition to Read-Only Serving
	// For now, this is not that useful as we have to restart
	// the server anyways to make it a standby.
	// However, we are setting the hooks to make sure that
	// we can make the primary readonly first,
	// drain write connections and then transition it
	// as a replica without restarting postgres
	if err := pm.setServingReadOnly(ctx, state); err != nil {
		return nil, err
	}

	// Drain & Checkpoint (Parallel)

	if err := pm.drainAndCheckpoint(ctx, drainTimeout); err != nil {
		return nil, err
	}

	// Terminate Remaining Write Connections

	connectionsTerminated, err := pm.terminateWriteConnections(ctx)
	if err != nil {
		// Log but don't fail - connections will eventually timeout
		pm.logger.WarnContext(ctx, "Failed to terminate write connections", "error", err)
	}

	// Capture State & Make PostgreSQL Read-Only
	// The final LSN must be read BEFORE the restart-as-standby below, so it
	// reflects the last position this server accepted writes at.
	finalLSN, err := pm.getPrimaryLSN(ctx)
	if err != nil {
		pm.logger.ErrorContext(ctx, "Failed to capture final LSN", "error", err)
		return nil, err
	}

	if err := pm.restartPostgresAsStandby(ctx, state); err != nil {
		return nil, err
	}

	// Reset Synchronous Replication Configuration
	// Now that the server is read-only, it's safe to clear sync replication settings
	// This ensures we don't have a window where writes could be accepted with incorrect replication config
	if err := pm.resetSynchronousReplication(ctx); err != nil {
		// Log but don't fail - this is cleanup
		pm.logger.WarnContext(ctx, "Failed to reset synchronous replication configuration", "error", err)
	}

	// Update Topology

	if err := pm.updateTopologyAfterDemotion(ctx, state); err != nil {
		return nil, err
	}

	pm.logger.InfoContext(ctx, "Demote completed successfully",
		"final_lsn", finalLSN,
		"consensus_term", consensusTerm,
		"connections_terminated", connectionsTerminated)

	return &multipoolermanagerdatapb.DemoteResponse{
		WasAlreadyDemoted:     false,
		ConsensusTerm:         consensusTerm,
		LsnPosition:           finalLSN,
		ConnectionsTerminated: connectionsTerminated,
	}, nil
}

// UndoDemote undoes a demotion
func (pm *MultiPoolerManager) UndoDemote(ctx context.Context) error {
	if err := pm.checkReady(); err != nil {
		return err
	}

	// Take the action lock even though the body is unimplemented, so the
	// call still serializes with other mutations.
	var err error
	if ctx, err = pm.actionLock.Acquire(ctx, "UndoDemote"); err != nil {
		return err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "UndoDemote called")
	return mterrors.New(mtrpcpb.Code_UNIMPLEMENTED, "method UndoDemote not implemented")
}

// Promote promotes a standby to primary
// This is called during the Propagate stage of generalized consensus to safely
// transition a standby to primary and reconfigure replication.
// This operation is fully idempotent - it checks what steps are already complete
// and only executes the missing steps.
//
// Parameters:
//   - consensusTerm: term this promotion runs under; must exactly match the
//     locally stored term (strict equality, no automatic updates). The force
//     flag is forwarded to the validation — presumably it relaxes the check;
//     confirm against validateTermExactMatch.
//   - expectedLSN: LSN the standby is expected to have reached; validated only
//     when PostgreSQL has not been promoted yet.
//   - syncReplicationConfig: desired synchronous replication configuration,
//     applied (if needed) after the PostgreSQL promotion and topology update.
//   - force: when topology already shows PRIMARY but the PostgreSQL/sync-repl
//     state is inconsistent, force=true re-runs the missing steps instead of
//     failing with FAILED_PRECONDITION.
//
// Returns a PromoteResponse carrying the final LSN, whether the node was
// already fully primary before this call, and the consensus term; or an error
// if any required step fails.
func (pm *MultiPoolerManager) Promote(ctx context.Context, consensusTerm int64, expectedLSN string, syncReplicationConfig *multipoolermanagerdatapb.ConfigureSynchronousReplicationRequest, force bool) (*multipoolermanagerdatapb.PromoteResponse, error) {
	if err := pm.checkReady(); err != nil {
		return nil, err
	}

	// Acquire the action lock to ensure only one mutation runs at a time
	ctx, err := pm.actionLock.Acquire(ctx, "Promote")
	if err != nil {
		return nil, err
	}
	defer pm.actionLock.Release(ctx)

	pm.logger.InfoContext(ctx, "Promote called",
		"consensus_term", consensusTerm,
		"expected_lsn", expectedLSN,
		"force", force)

	// Validation & Readiness

	// Validate term - strict equality, no automatic updates
	if err = pm.validateTermExactMatch(ctx, consensusTerm, force); err != nil {
		return nil, err
	}

	// Check current promotion state to determine what needs to be done
	// (topology role, PostgreSQL role, sync replication match, current LSN).
	state, err := pm.checkPromotionState(ctx, syncReplicationConfig)
	if err != nil {
		return nil, err
	}

	// Guard rail: Check topology type and validate state consistency
	// If topology is PRIMARY, verify everything is in expected state (idempotency check)
	// If topology is REPLICA, proceed with promotion
	if state.isPrimaryInTopology {
		// Topology shows PRIMARY - validate that everything is consistent
		pm.logger.InfoContext(ctx, "Promote called but topology already shows PRIMARY - validating state consistency")

		// Check if everything is in expected state
		if state.isPrimaryInPostgres && state.syncReplicationMatches {
			// Everything is consistent and complete - idempotent success
			pm.logger.InfoContext(ctx, "Promotion already complete and consistent (idempotent)",
				"lsn", state.currentLSN)
			return &multipoolermanagerdatapb.PromoteResponse{
				LsnPosition:       state.currentLSN,
				WasAlreadyPrimary: true,
				ConsensusTerm:     consensusTerm,
			}, nil
		}

		// Inconsistent state detected
		pm.logger.ErrorContext(ctx, "Inconsistent state detected - topology is PRIMARY but state is incomplete",
			"is_primary_in_postgres", state.isPrimaryInPostgres,
			"sync_replication_matches", state.syncReplicationMatches,
			"force", force)

		if !force {
			// Without force flag, require manual intervention
			return nil, mterrors.New(mtrpcpb.Code_FAILED_PRECONDITION,
				fmt.Sprintf("inconsistent state: topology is PRIMARY but PostgreSQL state doesn't match (pg_primary=%v, sync_matches=%v). Manual intervention required or use force=true.",
					state.isPrimaryInPostgres, state.syncReplicationMatches))
		}

		// With force flag, attempt to fix the inconsistency by completing missing steps
		pm.logger.WarnContext(ctx, "Force flag set - attempting to fix inconsistent state by completing missing steps")
		// Fall through to execute missing promotion steps below
	}

	// If PostgreSQL is not promoted yet, validate expected LSN before promotion
	// (once promoted, the LSN check is meaningless and is skipped).
	if !state.isPrimaryInPostgres {
		if err := pm.validateExpectedLSN(ctx, expectedLSN); err != nil {
			return nil, err
		}
	}

	// Execute missing steps
	// NOTE(review): each helper below appears to be a no-op when its step is
	// already complete (per `state`) — confirm against the helper implementations.

	// Promote PostgreSQL if needed
	if err := pm.promoteStandbyToPrimary(ctx, state); err != nil {
		return nil, err
	}

	// Update topology if needed
	if err := pm.updateTopologyAfterPromotion(ctx, state); err != nil {
		return nil, err
	}

	// Configure sync replication if needed
	if err := pm.configureReplicationAfterPromotion(ctx, state, syncReplicationConfig); err != nil {
		return nil, err
	}

	// TODO: Populate consensus metadata tables.

	// Get final LSN position
	finalLSN, err := pm.getPrimaryLSN(ctx)
	if err != nil {
		pm.logger.ErrorContext(ctx, "Failed to get final LSN", "error", err)
		return nil, err
	}

	pm.logger.InfoContext(ctx, "Promote completed successfully",
		"final_lsn", finalLSN,
		"consensus_term", consensusTerm,
		"was_already_primary", state.isPrimaryInPostgres)

	// WasAlreadyPrimary reflects the state captured BEFORE any steps ran:
	// true only if PostgreSQL, topology, and sync replication were all
	// already in the promoted state when this call started.
	return &multipoolermanagerdatapb.PromoteResponse{
		LsnPosition:       finalLSN,
		WasAlreadyPrimary: state.isPrimaryInPostgres && state.isPrimaryInTopology && state.syncReplicationMatches,
		ConsensusTerm:     consensusTerm,
	}, nil
}

// SetTerm sets the consensus term information to local disk.
//
// The consensus state is created lazily on first use; the term is then
// persisted to disk and the in-memory copy updated atomically by
// SetTermDirectly.
func (pm *MultiPoolerManager) SetTerm(ctx context.Context, term *multipoolermanagerdatapb.ConsensusTerm) error {
	if readyErr := pm.checkReady(); readyErr != nil {
		return readyErr
	}

	// Serialize with other mutating operations via the action lock.
	lockedCtx, err := pm.actionLock.Acquire(ctx, "SetTerm")
	if err != nil {
		return err
	}
	ctx = lockedCtx
	defer pm.actionLock.Release(ctx)

	termNumber := term.GetTermNumber()
	pm.logger.InfoContext(ctx, "SetTerm called", "current_term", termNumber)

	// Lazily initialize the consensus state under the manager mutex,
	// then release the mutex before doing disk I/O.
	pm.mu.Lock()
	if pm.consensusState == nil {
		pm.consensusState = NewConsensusState(pm.config.PoolerDir, pm.serviceID)
	}
	cs := pm.consensusState
	pm.mu.Unlock()

	// Persist to disk and update the in-memory term in one step.
	if saveErr := cs.SetTermDirectly(ctx, term); saveErr != nil {
		pm.logger.ErrorContext(ctx, "Failed to save consensus term", "error", saveErr)
		return mterrors.Wrap(saveErr, "failed to set consensus term")
	}

	pm.logger.InfoContext(ctx, "SetTerm completed successfully", "current_term", termNumber)
	return nil
}

// CreateDurabilityPolicy creates a new durability policy in the local database.
// Used by MultiOrch to initialize policies via gRPC instead of direct database connection.
//
// Validation and database failures are reported in the response body
// (Success=false with ErrorMessage) rather than as gRPC errors; only a
// not-ready manager returns a non-nil error.
func (pm *MultiPoolerManager) CreateDurabilityPolicy(ctx context.Context, req *multipoolermanagerdatapb.CreateDurabilityPolicyRequest) (*multipoolermanagerdatapb.CreateDurabilityPolicyResponse, error) {
	if err := pm.checkReady(); err != nil {
		return nil, err
	}

	// Use the generated nil-safe getters: a nil request then degrades to
	// empty values (caught by validation below) instead of panicking on a
	// direct field dereference.
	policyName := req.GetPolicyName()
	quorumRule := req.GetQuorumRule()

	pm.logger.InfoContext(ctx, "CreateDurabilityPolicy called", "policy_name", policyName)

	// Validate inputs
	if policyName == "" {
		return &multipoolermanagerdatapb.CreateDurabilityPolicyResponse{
			Success:      false,
			ErrorMessage: "policy_name is required",
		}, nil
	}

	if quorumRule == nil {
		return &multipoolermanagerdatapb.CreateDurabilityPolicyResponse{
			Success:      false,
			ErrorMessage: "quorum_rule is required",
		}, nil
	}

	// Check that we have a database connection
	if pm.db == nil {
		return &multipoolermanagerdatapb.CreateDurabilityPolicyResponse{
			Success:      false,
			ErrorMessage: "database connection not available",
		}, nil
	}

	// Marshal the quorum rule to JSON using protojson
	marshaler := protojson.MarshalOptions{
		UseEnumNumbers: true, // Encode enums as numbers, not strings
	}
	quorumRuleJSON, err := marshaler.Marshal(quorumRule)
	if err != nil {
		return &multipoolermanagerdatapb.CreateDurabilityPolicyResponse{
			Success:      false,
			ErrorMessage: fmt.Sprintf("failed to marshal quorum rule: %v", err),
		}, nil
	}

	// Insert the policy into the durability_policy table using helper function
	if err := InsertDurabilityPolicy(ctx, pm.db, policyName, quorumRuleJSON); err != nil {
		pm.logger.ErrorContext(ctx, "Failed to insert durability policy", "error", err)
		return &multipoolermanagerdatapb.CreateDurabilityPolicyResponse{
			Success:      false,
			ErrorMessage: err.Error(),
		}, nil
	}

	pm.logger.InfoContext(ctx, "Successfully created durability policy",
		"policy_name", policyName,
		"quorum_type", quorumRule.QuorumType,
		"required_count", quorumRule.RequiredCount)

	return &multipoolermanagerdatapb.CreateDurabilityPolicyResponse{
		Success: true,
	}, nil
}
