use actix_web::{
    HttpRequest, HttpResponse, Responder, ResponseError, delete, get,
    http::{StatusCode, header::ContentType},
    post,
    web::{Data, Json, Path},
};
use etl_postgres::replication::{
    TableLookupError, get_table_names_from_table_ids, health, lag, state,
};
use etl_postgres::types::TableId;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::ops::DerefMut;
use thiserror::Error;
use utoipa::ToSchema;

use crate::configs::encryption::EncryptionKey;
use crate::configs::pipeline::{FullApiPipelineConfig, PartialApiPipelineConfig};
use crate::db;
use crate::db::destinations::{DestinationsDbError, destination_exists};
use crate::db::images::ImagesDbError;
use crate::db::pipelines::{MAX_PIPELINES_PER_TENANT, PipelinesDbError, read_pipeline_components};
use crate::db::replicators::ReplicatorsDbError;
use crate::db::sources::{SourcesDbError, source_exists};
use crate::feature_flags::get_max_pipelines_per_tenant;
use crate::k8s::core::{
    create_k8s_object_prefix, create_or_update_pipeline_resources_in_k8s,
    delete_pipeline_resources_in_k8s,
};
use crate::k8s::{K8sClient, K8sError};
use crate::routes::{
    ErrorMessage, TenantIdError, connect_to_source_database_with_defaults, extract_tenant_id,
};
use crate::utils::parse_docker_image_tag;
use crate::{config::ApiConfig, k8s::PodStatus};

/// Errors produced by the pipeline HTTP handlers.
///
/// The `#[error]` strings are the client-facing messages. `to_message`
/// additionally redacts database variants before they reach clients, and the
/// `ResponseError` impl maps each variant to an HTTP status code.
#[derive(Debug, Error)]
pub enum PipelineError {
    #[error("The pipeline with id {0} was not found")]
    PipelineNotFound(i64),

    #[error("The source with id {0} was not found")]
    SourceNotFound(i64),

    #[error("The destination with id {0} was not found")]
    DestinationNotFound(i64),

    #[error("The replicator with pipeline id {0} was not found")]
    ReplicatorNotFound(i64),

    #[error("The image with replicator id {0} was not found")]
    ImageNotFound(i64),

    #[error("No default image was found")]
    NoDefaultImageFound,

    #[error(transparent)]
    TenantId(#[from] TenantIdError),

    #[error("The table replication state is not present")]
    MissingTableReplicationState,

    #[error("The table replication state is not valid: {0}")]
    InvalidTableReplicationState(serde_json::Error),

    #[error("The table state is not rollbackable: {0}")]
    NotRollbackable(String),

    #[error("The ETL table state has not been initialized first")]
    EtlStateNotInitialized,

    #[error("invalid destination config")]
    InvalidConfig(#[from] serde_json::Error),

    #[error("A K8s error occurred: {0}")]
    K8s(#[from] K8sError),

    #[error(transparent)]
    SourcesDb(#[from] SourcesDbError),

    #[error(transparent)]
    DestinationsDb(#[from] DestinationsDbError),

    #[error(transparent)]
    // Deliberately no `#[from]` here: the manual `From<PipelinesDbError>` impl
    // below intercepts unique-constraint violations and turns them into
    // `DuplicatePipeline` instead.
    PipelinesDb(PipelinesDbError),

    #[error(transparent)]
    ReplicatorsDb(#[from] ReplicatorsDbError),

    #[error(transparent)]
    ImagesDb(#[from] ImagesDbError),

    #[error("The trusted root certs config was not found")]
    TrustedRootCertsConfigMissing,

    #[error("A pipeline already exists for this source and destination combination")]
    DuplicatePipeline,

    #[error("The specified image id {0} does not match the default image id")]
    ImageIdNotDefault(i64),

    #[error("The maximum number of pipelines ({limit}) has been reached for this project")]
    PipelineLimitReached { limit: i64 },

    #[error("There was an error while looking up table information in the source database: {0}")]
    TableLookup(#[from] TableLookupError),

    #[error("Database error: {0}")]
    Database(#[from] sqlx::Error),

    #[error("Could not load app environment")]
    MissingEnvironment,
}

/// Converts database-layer pipeline errors into API errors, translating a
/// unique-constraint violation into the dedicated `DuplicatePipeline` variant
/// so callers receive a 409 instead of a generic database error.
impl From<PipelinesDbError> for PipelineError {
    fn from(e: PipelinesDbError) -> Self {
        if let PipelinesDbError::Database(err) = &e {
            if db::utils::is_unique_constraint_violation_error(err) {
                return Self::DuplicatePipeline;
            }
        }
        Self::PipelinesDb(e)
    }
}

impl PipelineError {
    /// Builds the message that is sent back to API clients.
    ///
    /// Raw database errors are redacted to a generic message so internal
    /// details are never exposed; every other variant's `Display` text is
    /// considered safe to return as-is.
    fn to_message(&self) -> String {
        let is_raw_database_error = matches!(
            self,
            PipelineError::SourcesDb(SourcesDbError::Database(_))
                | PipelineError::DestinationsDb(DestinationsDbError::Database(_))
                | PipelineError::PipelinesDb(PipelinesDbError::Database(_))
                | PipelineError::ReplicatorsDb(ReplicatorsDbError::Database(_))
                | PipelineError::ImagesDb(ImagesDbError::Database(_))
                | PipelineError::Database(_)
        );

        if is_raw_database_error {
            // Do not expose internal database details in error messages.
            "internal server error".to_string()
        } else {
            self.to_string()
        }
    }
}

/// Maps each [`PipelineError`] variant to an HTTP status code and renders the
/// JSON error body returned to clients.
impl ResponseError for PipelineError {
    fn status_code(&self) -> StatusCode {
        match self {
            // Internal failures the client cannot act on: infrastructure,
            // database, serialization and configuration problems.
            PipelineError::InvalidConfig(_)
            | PipelineError::ReplicatorNotFound(_)
            | PipelineError::ImageNotFound(_)
            | PipelineError::NoDefaultImageFound
            | PipelineError::SourcesDb(_)
            | PipelineError::DestinationsDb(_)
            | PipelineError::PipelinesDb(_)
            | PipelineError::ReplicatorsDb(_)
            | PipelineError::ImagesDb(_)
            | PipelineError::K8s(_)
            | PipelineError::TrustedRootCertsConfigMissing
            | PipelineError::Database(_)
            | PipelineError::TableLookup(_)
            | PipelineError::InvalidTableReplicationState(_)
            | PipelineError::MissingEnvironment
            | PipelineError::MissingTableReplicationState => StatusCode::INTERNAL_SERVER_ERROR,
            // Missing or mismatched entities requested by the client.
            // NOTE(review): `EtlStateNotInitialized` and `ImageIdNotDefault`
            // are grouped under 404 — confirm 400/409 was not intended here.
            PipelineError::PipelineNotFound(_)
            | PipelineError::EtlStateNotInitialized
            | PipelineError::ImageIdNotDefault(_)
            | PipelineError::DestinationNotFound(_)
            | PipelineError::SourceNotFound(_) => StatusCode::NOT_FOUND,
            PipelineError::TenantId(_) | PipelineError::NotRollbackable(_) => {
                StatusCode::BAD_REQUEST
            }
            PipelineError::DuplicatePipeline => StatusCode::CONFLICT,
            PipelineError::PipelineLimitReached { .. } => StatusCode::UNPROCESSABLE_ENTITY,
        }
    }

    // Renders the redacted message (see `to_message`) as a JSON body with the
    // status code chosen above.
    fn error_response(&self) -> HttpResponse {
        let error_message = ErrorMessage {
            error: self.to_message(),
        };
        let body =
            serde_json::to_string(&error_message).expect("failed to serialize error message");
        HttpResponse::build(self.status_code())
            .insert_header(ContentType::json())
            .body(body)
    }
}

/// Request body for creating a new pipeline.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct CreatePipelineRequest {
    /// Id of an existing source owned by the tenant.
    #[schema(example = 1, required = true)]
    pub source_id: i64,
    /// Id of an existing destination owned by the tenant.
    #[schema(example = 1, required = true)]
    pub destination_id: i64,
    /// Full pipeline configuration.
    #[schema(required = true)]
    pub config: FullApiPipelineConfig,
}

/// Response returned after successfully creating a pipeline.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct CreatePipelineResponse {
    /// Id of the newly created pipeline.
    #[schema(example = 1)]
    pub id: i64,
}

/// Request body for replacing a pipeline's source, destination and config.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct UpdatePipelineRequest {
    /// Id of an existing source owned by the tenant.
    #[schema(example = 1, required = true)]
    pub source_id: i64,
    /// Id of an existing destination owned by the tenant.
    #[schema(example = 1, required = true)]
    pub destination_id: i64,
    /// Full replacement pipeline configuration.
    #[schema(required = true)]
    pub config: FullApiPipelineConfig,
}

/// Request body for partially updating a pipeline's configuration.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct UpdatePipelineConfigRequest {
    /// Partial configuration; only the provided fields are changed.
    #[schema(required = true)]
    pub config: PartialApiPipelineConfig,
}

/// Response carrying the resulting full configuration after a config update.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct UpdatePipelineConfigResponse {
    // The complete, post-update pipeline configuration.
    pub config: FullApiPipelineConfig,
}

/// API representation of a single pipeline.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ReadPipelineResponse {
    /// Id of the pipeline.
    #[schema(example = 1)]
    pub id: i64,
    /// Tenant that owns the pipeline.
    #[schema(example = "abczjjlmfsijwrlnwatw")]
    pub tenant_id: String,
    /// Id of the source the pipeline reads from.
    #[schema(example = 1)]
    pub source_id: i64,
    /// Display name of the source.
    #[schema(example = "My Postgres Source")]
    pub source_name: String,
    /// Id of the destination the pipeline writes to.
    #[schema(example = 1)]
    pub destination_id: i64,
    /// Display name of the destination.
    #[schema(example = "My BigQuery Destination")]
    pub destination_name: String,
    /// Id of the replicator that executes this pipeline.
    #[schema(example = 1)]
    pub replicator_id: i64,
    // Full pipeline configuration as stored.
    pub config: FullApiPipelineConfig,
}

/// Response listing all pipelines for a tenant.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ReadPipelinesResponse {
    // All pipelines owned by the requesting tenant.
    pub pipelines: Vec<ReadPipelineResponse>,
}

/// Request body for switching a pipeline to a different version.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct UpdatePipelineVersionRequest {
    /// Id of the version to apply.
    // NOTE(review): presumably an image id (see `PipelineVersion.id`) —
    // confirm against the handler that consumes this request.
    #[schema(example = 1, required = true)]
    pub version_id: i64,
}

/// Response describing the runtime status of a pipeline's replicator.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct GetPipelineStatusResponse {
    /// Id of the pipeline the status refers to.
    #[schema(example = 1)]
    pub pipeline_id: i64,
    // Status derived from the replicator pod's state in K8s.
    pub status: PipelineStatus,
}

/// UI-friendly representation of table replication state.
///
/// Produced from the internal `state::TableReplicationState` via the `From`
/// impl below, which collapses several internal states into fewer UI states.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "name")]
pub enum SimpleTableReplicationState {
    /// Replication has been initialized but copying has not started.
    Queued,
    /// The initial table copy is in progress.
    CopyingTable,
    /// The initial table copy has finished.
    CopiedTable,
    /// The table is being kept up to date from the WAL (covers both the
    /// internal `SyncDone` and `Ready` states).
    FollowingWal,
    /// Replication failed; carries the reason and how/whether it will retry.
    Error {
        reason: String,
        #[serde(skip_serializing_if = "Option::is_none")]
        solution: Option<String>,
        retry_policy: SimpleRetryPolicy,
    },
}

/// Simplified retry policy for UI display.
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "policy")]
pub enum SimpleRetryPolicy {
    /// The error will not be retried.
    NoRetry,
    /// A retry requires manual intervention.
    ManualRetry,
    /// A retry is scheduled automatically.
    TimedRetry {
        /// RFC 3339 timestamp of the next retry attempt.
        #[schema(example = "2023-12-25T12:00:00Z")]
        next_retry: String,
    },
}

impl From<state::TableReplicationState> for SimpleTableReplicationState {
    fn from(state: state::TableReplicationState) -> Self {
        match state {
            state::TableReplicationState::Init => SimpleTableReplicationState::Queued,
            state::TableReplicationState::DataSync => SimpleTableReplicationState::CopyingTable,
            state::TableReplicationState::FinishedCopy => SimpleTableReplicationState::CopiedTable,
            state::TableReplicationState::SyncDone { .. } => {
                SimpleTableReplicationState::FollowingWal
            }
            state::TableReplicationState::Ready => SimpleTableReplicationState::FollowingWal,
            state::TableReplicationState::Errored {
                reason,
                solution,
                retry_policy,
            } => {
                let simple_retry_policy = match retry_policy {
                    state::RetryPolicy::NoRetry => SimpleRetryPolicy::NoRetry,
                    state::RetryPolicy::ManualRetry => SimpleRetryPolicy::ManualRetry,
                    state::RetryPolicy::TimedRetry { next_retry } => {
                        SimpleRetryPolicy::TimedRetry {
                            next_retry: next_retry.to_rfc3339(),
                        }
                    }
                };

                SimpleTableReplicationState::Error {
                    reason,
                    solution,
                    retry_policy: simple_retry_policy,
                }
            }
        }
    }
}

/// Replication status of a single table within a pipeline.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct TableReplicationStatus {
    /// OID of the table in the source database.
    #[schema(example = 1)]
    pub table_id: u32,
    /// Fully qualified table name.
    #[schema(example = "public.users")]
    pub table_name: String,
    // Current UI-friendly replication state of the table.
    pub state: SimpleTableReplicationState,
    /// Lag metrics of the table's sync slot, when available.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[schema(nullable = true)]
    pub table_sync_lag: Option<SlotLagMetricsResponse>,
}

/// Lag metrics reported for replication slots.
///
/// Converted from the internal [`lag::SlotLagMetrics`] type; see the `From`
/// impl below.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct SlotLagMetricsResponse {
    /// Bytes between the current WAL location and the slot restart LSN.
    #[schema(example = 1024)]
    pub restart_lsn_bytes: i64,
    /// Bytes between the current WAL location and the confirmed flush LSN.
    #[schema(example = 2048)]
    pub confirmed_flush_lsn_bytes: i64,
    /// How many bytes of WAL are still safe to build up before the limit of the slot is reached.
    #[schema(example = 8192)]
    pub safe_wal_size_bytes: i64,
    /// Write lag expressed in milliseconds.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[schema(example = 1500, nullable = true)]
    pub write_lag: Option<i64>,
    /// Flush lag expressed in milliseconds.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[schema(example = 1200, nullable = true)]
    pub flush_lag: Option<i64>,
}

/// Converts internal slot lag metrics into the API response shape; the
/// internal `*_ms` fields are exposed without the suffix (units stay ms).
impl From<lag::SlotLagMetrics> for SlotLagMetricsResponse {
    fn from(metrics: lag::SlotLagMetrics) -> Self {
        Self {
            restart_lsn_bytes: metrics.restart_lsn_bytes,
            confirmed_flush_lsn_bytes: metrics.confirmed_flush_lsn_bytes,
            safe_wal_size_bytes: metrics.safe_wal_size_bytes,
            write_lag: metrics.write_lag_ms,
            flush_lag: metrics.flush_lag_ms,
        }
    }
}

/// Response describing the replication status of every table in a pipeline.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct GetPipelineReplicationStatusResponse {
    /// Id of the pipeline the statuses refer to.
    #[schema(example = 1)]
    pub pipeline_id: i64,
    /// Lag metrics of the pipeline's apply slot, when available.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[schema(nullable = true)]
    pub apply_lag: Option<SlotLagMetricsResponse>,
    // Per-table replication state and lag.
    pub table_statuses: Vec<TableReplicationStatus>,
}

/// Scope of a table replication state rollback.
// NOTE(review): the exact semantics of `Individual` vs `Full` are defined by
// the rollback handler, which is not visible in this file — confirm there.
#[derive(Debug, Copy, Clone, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
pub enum RollbackType {
    Individual,
    Full,
}

/// Request body for rolling back the replication state of a table.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct RollbackTableStateRequest {
    /// OID of the table whose state should be rolled back.
    #[schema(example = 1)]
    pub table_id: u32,
    // Scope of the rollback to perform.
    pub rollback_type: RollbackType,
}

/// Response returned after a table state rollback.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct RollbackTableStateResponse {
    /// Id of the pipeline the table belongs to.
    #[schema(example = 1)]
    pub pipeline_id: i64,
    /// OID of the table that was rolled back.
    #[schema(example = 1)]
    pub table_id: u32,
    // Replication state of the table after the rollback.
    pub new_state: SimpleTableReplicationState,
}

/// Runtime status of a pipeline, mirroring the replicator pod's status
/// (see the `From<PodStatus>` impl below).
#[derive(Debug, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "name")]
pub enum PipelineStatus {
    Stopped,
    Starting,
    Started,
    Stopping,
    Failed,
    Unknown,
}

/// One-to-one mapping from a K8s pod status to the API pipeline status.
impl From<PodStatus> for PipelineStatus {
    fn from(value: PodStatus) -> Self {
        match value {
            PodStatus::Stopped => PipelineStatus::Stopped,
            PodStatus::Starting => PipelineStatus::Starting,
            PodStatus::Started => PipelineStatus::Started,
            PodStatus::Stopping => PipelineStatus::Stopping,
            PodStatus::Failed => PipelineStatus::Failed,
            PodStatus::Unknown => PipelineStatus::Unknown,
        }
    }
}

/// A replicator image version as exposed by the API.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct PipelineVersion {
    /// Id of the underlying image.
    #[schema(example = 1)]
    pub id: i64,
    /// Version string parsed from the docker image tag.
    #[schema(example = "1.2.3")]
    pub name: String,
}

/// Response describing the pipeline's current version and, when the default
/// image differs from the current one, the version it could upgrade to.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct GetPipelineVersionResponse {
    /// Id of the pipeline the versions refer to.
    #[schema(example = 1)]
    pub pipeline_id: i64,
    // Version currently used by the pipeline's replicator.
    pub version: PipelineVersion,
    /// Present only when the default image differs from the current one.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub new_version: Option<PipelineVersion>,
}

/// Creates a pipeline for the tenant after validating that the referenced
/// source and destination exist and that the per-tenant pipeline limit has
/// not been reached.
///
/// Errors map to: `SourceNotFound`/`DestinationNotFound` → 404,
/// `PipelineLimitReached` → 422, `DuplicatePipeline` → 409 (unique-constraint
/// violation on insert), `NoDefaultImageFound` and database errors → 500.
#[utoipa::path(
    summary = "Create a pipeline",
    description = "Creates a pipeline linking a source to a destination.",
    request_body = CreatePipelineRequest,
    params(
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline created successfully", body = CreatePipelineResponse),
        (status = 400, description = "Bad request", body = ErrorMessage),
        (status = 404, description = "Source or destination not found", body = ErrorMessage),
        (status = 409, description = "A pipeline already exists for this source and destination", body = ErrorMessage),
        (status = 422, description = "The maximum number of pipelines has been reached", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage),
    ),
    tag = "Pipelines"
)]
#[post("/pipelines")]
pub async fn create_pipeline(
    req: HttpRequest,
    pool: Data<PgPool>,
    pipeline: Json<CreatePipelineRequest>,
    feature_flags_client: Option<Data<configcat::Client>>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline = pipeline.into_inner();

    // Run validation and insert in one transaction so the existence and limit
    // checks stay consistent with the insert.
    let mut txn = pool.begin().await?;
    if !source_exists(txn.deref_mut(), tenant_id, pipeline.source_id).await? {
        return Err(PipelineError::SourceNotFound(pipeline.source_id));
    }

    if !destination_exists(txn.deref_mut(), tenant_id, pipeline.destination_id).await? {
        return Err(PipelineError::DestinationNotFound(pipeline.destination_id));
    }

    // The limit may be overridden per tenant via feature flags; fall back to
    // the static default otherwise.
    let max_pipelines = get_max_pipelines_per_tenant(
        feature_flags_client.as_ref(),
        tenant_id,
        MAX_PIPELINES_PER_TENANT,
    )
    .await;
    let pipeline_count =
        db::pipelines::count_pipelines_for_tenant(txn.deref_mut(), tenant_id).await?;
    if pipeline_count >= max_pipelines {
        return Err(PipelineError::PipelineLimitReached {
            limit: max_pipelines,
        });
    }

    // New pipelines always start on the default replicator image.
    let image = db::images::read_default_image(txn.deref_mut())
        .await?
        .ok_or(PipelineError::NoDefaultImageFound)?;

    let id = db::pipelines::create_pipeline(
        &mut txn,
        tenant_id,
        pipeline.source_id,
        pipeline.destination_id,
        image.id,
        pipeline.config,
    )
    .await?;
    txn.commit().await?;

    let response = CreatePipelineResponse { id };

    Ok(Json(response))
}

/// Returns a single pipeline by id, scoped to the requesting tenant.
#[utoipa::path(
    summary = "Retrieve a pipeline",
    description = "Returns a pipeline by ID for the given tenant.",
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline retrieved successfully", body = ReadPipelineResponse),
        (status = 404, description = "Pipeline not found", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[get("/pipelines/{pipeline_id}")]
pub async fn read_pipeline(
    req: HttpRequest,
    pool: Data<PgPool>,
    pipeline_id: Path<i64>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();

    // A missing pipeline maps to a 404 via `PipelineNotFound`.
    let pipeline = match db::pipelines::read_pipeline(&**pool, tenant_id, pipeline_id).await? {
        Some(pipeline) => pipeline,
        None => return Err(PipelineError::PipelineNotFound(pipeline_id)),
    };

    let response = ReadPipelineResponse {
        id: pipeline.id,
        tenant_id: pipeline.tenant_id,
        source_id: pipeline.source_id,
        source_name: pipeline.source_name,
        destination_id: pipeline.destination_id,
        destination_name: pipeline.destination_name,
        replicator_id: pipeline.replicator_id,
        config: pipeline.config.into(),
    };

    Ok(Json(response))
}

/// Replaces a pipeline's source, destination and configuration in a single
/// transaction, validating the new references first.
#[utoipa::path(
    summary = "Update a pipeline",
    description = "Updates a pipeline's source, destination, or configuration.",
    request_body = UpdatePipelineRequest,
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline updated successfully"),
        (status = 404, description = "Pipeline not found", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
// Forcing {pipeline_id} to be all digits by appending :\\d+
// to avoid this route clashing with /pipelines/stop
#[post("/pipelines/{pipeline_id:\\d+}")]
pub async fn update_pipeline(
    req: HttpRequest,
    pool: Data<PgPool>,
    pipeline_id: Path<i64>,
    pipeline: Json<UpdatePipelineRequest>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();
    let pipeline = pipeline.into_inner();

    let mut txn = pool.begin().await?;
    // Reject dangling references before touching the pipeline row.
    if !source_exists(txn.deref_mut(), tenant_id, pipeline.source_id).await? {
        return Err(PipelineError::SourceNotFound(pipeline.source_id));
    }

    if !destination_exists(txn.deref_mut(), tenant_id, pipeline.destination_id).await? {
        return Err(PipelineError::DestinationNotFound(pipeline.destination_id));
    }

    // `update_pipeline` returns `None` when no row matched the id/tenant,
    // which maps to a 404.
    db::pipelines::update_pipeline(
        txn.deref_mut(),
        tenant_id,
        pipeline_id,
        pipeline.source_id,
        pipeline.destination_id,
        pipeline.config,
    )
    .await?
    .ok_or(PipelineError::PipelineNotFound(pipeline_id))?;
    txn.commit().await?;

    Ok(HttpResponse::Ok().finish())
}

/// Deletes a pipeline and its dependent resources.
#[utoipa::path(
    summary = "Delete a pipeline",
    description = "Deletes a pipeline and its associated resources.",
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline deleted successfully"),
        (status = 404, description = "Pipeline not found", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[delete("/pipelines/{pipeline_id}")]
pub async fn delete_pipeline(
    req: HttpRequest,
    pool: Data<PgPool>,
    encryption_key: Data<EncryptionKey>,
    pipeline_id: Path<i64>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();

    let mut txn = pool.begin().await?;

    let pipeline = db::pipelines::read_pipeline(txn.deref_mut(), tenant_id, pipeline_id)
        .await?
        .ok_or(PipelineError::PipelineNotFound(pipeline_id))?;

    // The source (with decrypted config) is needed so the cascading delete
    // can also clean up state associated with the source.
    let source = db::sources::read_source(
        txn.deref_mut(),
        tenant_id,
        pipeline.source_id,
        &encryption_key,
    )
    .await?
    .ok_or(PipelineError::SourceNotFound(pipeline.source_id))?;

    // NOTE(review): `txn` is moved into the call, so the cascading delete
    // presumably commits (or rolls back) internally — confirm in its impl.
    db::pipelines::delete_pipeline_cascading(txn, tenant_id, &pipeline, &source, None).await?;

    Ok(HttpResponse::Ok().finish())
}

#[utoipa::path(
    summary = "List pipelines",
    description = "Returns all pipelines for the specified tenant.",
    params(
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipelines listed successfully", body = ReadPipelinesResponse),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[get("/pipelines")]
pub async fn read_all_pipelines(
    req: HttpRequest,
    pool: Data<PgPool>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;

    let mut pipelines = vec![];
    for pipeline in db::pipelines::read_all_pipelines(&**pool, tenant_id).await? {
        let pipeline = ReadPipelineResponse {
            id: pipeline.id,
            tenant_id: pipeline.tenant_id,
            source_id: pipeline.source_id,
            source_name: pipeline.source_name,
            destination_id: pipeline.destination_id,
            destination_name: pipeline.destination_name,
            replicator_id: pipeline.replicator_id,
            config: pipeline.config.into(),
        };
        pipelines.push(pipeline);
    }

    let response = ReadPipelinesResponse { pipelines };

    Ok(Json(response))
}

/// Starts a pipeline by creating (or updating) its replicator resources in K8s.
#[utoipa::path(
    summary = "Start a pipeline",
    description = "Starts the pipeline by deploying its replicator.",
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline started successfully"),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[post("/pipelines/{pipeline_id}/start")]
pub async fn start_pipeline(
    req: HttpRequest,
    pool: Data<PgPool>,
    encryption_key: Data<EncryptionKey>,
    k8s_client: Data<dyn K8sClient>,
    api_config: Data<ApiConfig>,
    pipeline_id: Path<i64>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();
    let k8s_client = k8s_client.into_inner();

    // Load everything needed to deploy the replicator (pipeline, replicator,
    // image, decrypted source and destination) in one transaction.
    let mut txn = pool.begin().await?;
    let (pipeline, replicator, image, source, destination) =
        read_pipeline_components(&mut txn, tenant_id, pipeline_id, &encryption_key).await?;

    // We update the pipeline in K8s.
    create_or_update_pipeline_resources_in_k8s(
        k8s_client.as_ref(),
        tenant_id,
        pipeline,
        replicator,
        image,
        source,
        destination,
        api_config.supabase_api_url.as_deref(),
    )
    .await?;
    txn.commit().await?;

    Ok(HttpResponse::Ok().finish())
}

/// Stops a pipeline by deleting its replicator resources from K8s.
#[utoipa::path(
    summary = "Stop a pipeline",
    description = "Stops the pipeline by terminating its replicator.",
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline stopped successfully"),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[post("/pipelines/{pipeline_id}/stop")]
pub async fn stop_pipeline(
    req: HttpRequest,
    pool: Data<PgPool>,
    k8s_client: Data<dyn K8sClient>,
    pipeline_id: Path<i64>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();
    let k8s_client = k8s_client.into_inner();

    // NOTE(review): a transaction is opened although only a read happens
    // before the K8s call — presumably to keep the replicator row stable
    // while tearing down resources; confirm the intent.
    let mut txn = pool.begin().await?;
    let replicator =
        db::replicators::read_replicator_by_pipeline_id(txn.deref_mut(), tenant_id, pipeline_id)
            .await?
            .ok_or(PipelineError::ReplicatorNotFound(pipeline_id))?;

    delete_pipeline_resources_in_k8s(k8s_client.as_ref(), tenant_id, replicator).await?;
    txn.commit().await?;

    Ok(HttpResponse::Ok().finish())
}

/// Stops every pipeline of the tenant by deleting each replicator's K8s
/// resources in turn.
#[utoipa::path(
    summary = "Stop all pipelines",
    description = "Stops all pipelines for the specified tenant.",
    params(
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "All pipelines stopped successfully"),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[post("/pipelines/stop")]
pub async fn stop_all_pipelines(
    req: HttpRequest,
    pool: Data<PgPool>,
    k8s_client: Data<dyn K8sClient>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let k8s_client = k8s_client.into_inner();

    let mut txn = pool.begin().await?;
    let replicators = db::replicators::read_replicators(txn.deref_mut(), tenant_id).await?;
    // Stops sequentially; the first K8s failure aborts the remaining ones.
    for replicator in replicators {
        delete_pipeline_resources_in_k8s(k8s_client.as_ref(), tenant_id, replicator).await?;
    }
    txn.commit().await?;

    Ok(HttpResponse::Ok().finish())
}

/// Returns the version currently used by the pipeline's replicator and, when
/// the default image differs, the version it could be upgraded to.
#[utoipa::path(
    summary = "Get pipeline version",
    description = "Returns the current version for the pipeline and an optional new default version.",
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline version retrieved successfully", body = GetPipelineVersionResponse),
        (status = 404, description = "Pipeline not found", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[get("/pipelines/{pipeline_id}/version")]
pub async fn get_pipeline_version(
    req: HttpRequest,
    pool: Data<PgPool>,
    pipeline_id: Path<i64>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();

    // All three reads happen in one transaction for a consistent snapshot.
    let mut txn = pool.begin().await?;

    let replicator =
        db::replicators::read_replicator_by_pipeline_id(txn.deref_mut(), tenant_id, pipeline_id)
            .await?
            .ok_or(PipelineError::ReplicatorNotFound(pipeline_id))?;

    let current_image = db::images::read_image_by_replicator_id(txn.deref_mut(), replicator.id)
        .await?
        .ok_or(PipelineError::ImageNotFound(replicator.id))?;

    let default_image = db::images::read_default_image(txn.deref_mut()).await?;

    txn.commit().await?;

    let current_version = PipelineVersion {
        id: current_image.id,
        name: parse_docker_image_tag(&current_image.name),
    };

    // Only advertise a new version when a default image exists and it is not
    // the one already in use.
    let new_version = match default_image {
        Some(default_image) if default_image.id != current_image.id => Some(PipelineVersion {
            id: default_image.id,
            name: parse_docker_image_tag(&default_image.name),
        }),
        _ => None,
    };

    let response = GetPipelineVersionResponse {
        pipeline_id,
        version: current_version,
        new_version,
    };

    Ok(Json(response))
}

/// Reports the status of the pipeline's replicator pod, translated into the
/// API's [`PipelineStatus`].
#[utoipa::path(
    summary = "Check pipeline status",
    description = "Returns the current status of the pipeline's replicator.",
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline status retrieved successfully", body = GetPipelineStatusResponse),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[get("/pipelines/{pipeline_id}/status")]
pub async fn get_pipeline_status(
    req: HttpRequest,
    pool: Data<PgPool>,
    k8s_client: Data<dyn K8sClient>,
    pipeline_id: Path<i64>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();
    let k8s_client = k8s_client.into_inner();

    let replicator =
        db::replicators::read_replicator_by_pipeline_id(&**pool, tenant_id, pipeline_id)
            .await?
            .ok_or(PipelineError::ReplicatorNotFound(pipeline_id))?;

    // K8s objects are named by a tenant/replicator-derived prefix.
    let prefix = create_k8s_object_prefix(tenant_id, replicator.id);

    let pod_status = k8s_client.get_replicator_pod_status(&prefix).await?;
    // PodStatus -> PipelineStatus via the From impl above.
    let status = pod_status.into();

    let response = GetPipelineStatusResponse {
        pipeline_id,
        status,
    };

    Ok(Json(response))
}

#[utoipa::path(
    summary = "Get replication status",
    description = "Returns the replication status for all tables in the pipeline.",
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Replication status retrieved successfully", body = GetPipelineReplicationStatusResponse),
        (status = 404, description = "Pipeline not found", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
#[get("/pipelines/{pipeline_id}/replication-status")]
pub async fn get_pipeline_replication_status(
    req: HttpRequest,
    pool: Data<PgPool>,
    encryption_key: Data<EncryptionKey>,
    pipeline_id: Path<i64>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();

    let mut txn = pool.begin().await?;

    // Read the pipeline to ensure it exists and get the source configuration
    let pipeline = db::pipelines::read_pipeline(txn.deref_mut(), tenant_id, pipeline_id)
        .await?
        .ok_or(PipelineError::PipelineNotFound(pipeline_id))?;

    // Get the source configuration
    let source = db::sources::read_source(
        txn.deref_mut(),
        tenant_id,
        pipeline.source_id,
        &encryption_key,
    )
    .await?
    .ok_or(PipelineError::SourceNotFound(pipeline.source_id))?;

    txn.commit().await?;

    // Connect to the source database to read the necessary state
    let source_pool =
        connect_to_source_database_with_defaults(&source.config.into_connection_config()).await?;

    // Ensure ETL tables exist in the source DB
    if !health::etl_tables_present(&source_pool).await? {
        return Err(PipelineError::EtlStateNotInitialized);
    }

    // Fetch replication state for all tables in this pipeline
    let state_rows = state::get_table_replication_state_rows(&source_pool, pipeline_id).await?;
    let mut lag_metrics = lag::get_pipeline_lag_metrics(&source_pool, pipeline_id as u64).await?;
    let apply_lag = lag_metrics.apply.map(Into::into);

    // Collect all table IDs and fetch their names in a single batch query
    let table_ids: Vec<TableId> = state_rows
        .iter()
        .map(|row| TableId::new(row.table_id.0))
        .collect();
    let table_names = get_table_names_from_table_ids(&source_pool, &table_ids).await?;

    // Convert database states to UI-friendly format
    let mut tables: Vec<TableReplicationStatus> = Vec::new();
    for row in state_rows {
        let table_id = TableId::new(row.table_id.0);
        let table_name = table_names
            .get(&table_id)
            .ok_or(TableLookupError::TableNotFound(table_id))?;

        // Extract the metadata row from the database
        let table_replication_state = row
            .deserialize_metadata()
            .map_err(PipelineError::InvalidTableReplicationState)?
            .ok_or(PipelineError::MissingTableReplicationState)?;

        tables.push(TableReplicationStatus {
            table_id: table_id.into_inner(),
            table_name: table_name.to_string(),
            state: table_replication_state.into(),
            table_sync_lag: lag_metrics.table_sync.remove(&table_id).map(Into::into),
        });
    }

    let response = GetPipelineReplicationStatusResponse {
        pipeline_id,
        apply_lag,
        table_statuses: tables,
    };

    Ok(Json(response))
}

#[utoipa::path(
    summary = "Roll back table state",
    description = "Rolls back the replication state of a specific table in the pipeline.",
    request_body = RollbackTableStateRequest,
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Table state rolled back successfully", body = RollbackTableStateResponse),
        (status = 400, description = "Bad request – state not rollbackable", body = ErrorMessage),
        (status = 404, description = "Pipeline or table not found", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
// Rolls back the replication state of a single table in a pipeline.
//
// Flow: resolve the pipeline and source from the API database, connect to the
// source database, verify the table's current state supports a manual retry,
// then either step back one state (`Individual`) or reset the state entirely
// (`Full`), returning the resulting state to the caller.
#[post("/pipelines/{pipeline_id}/rollback-table-state")]
pub async fn rollback_table_state(
    req: HttpRequest,
    pool: Data<PgPool>,
    encryption_key: Data<EncryptionKey>,
    pipeline_id: Path<i64>,
    rollback_request: Json<RollbackTableStateRequest>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();
    let table_id = rollback_request.table_id;
    let rollback_type = rollback_request.rollback_type;

    // This transaction only resolves metadata from the API database; it is
    // committed before any work happens against the source database.
    let mut txn = pool.begin().await?;

    // Read the pipeline to ensure it exists and get the source configuration
    let pipeline = db::pipelines::read_pipeline(txn.deref_mut(), tenant_id, pipeline_id)
        .await?
        .ok_or(PipelineError::PipelineNotFound(pipeline_id))?;

    // Get the source configuration
    let source = db::sources::read_source(
        txn.deref_mut(),
        tenant_id,
        pipeline.source_id,
        &encryption_key,
    )
    .await?
    .ok_or(PipelineError::SourceNotFound(pipeline.source_id))?;

    txn.commit().await?;

    // Connect to the source database to perform rollback
    let source_pool =
        connect_to_source_database_with_defaults(&source.config.into_connection_config()).await?;

    // Ensure ETL tables exist in the source DB
    if !health::etl_tables_present(&source_pool).await? {
        return Err(PipelineError::EtlStateNotInitialized);
    }

    // First, check current state to ensure it's rollbackable (manual retry policy).
    // A table with no state row for this pipeline surfaces as a missing-state error.
    let state_rows = state::get_table_replication_state_rows(&source_pool, pipeline_id).await?;
    let current_row = state_rows
        .into_iter()
        .find(|row| row.table_id.0 == table_id)
        .ok_or(PipelineError::MissingTableReplicationState)?;

    // Check if the current state is rollbackable (has ManualRetry policy)
    let current_state = current_row
        .deserialize_metadata()
        .map_err(PipelineError::InvalidTableReplicationState)?
        .ok_or(PipelineError::MissingTableReplicationState)?;
    if !current_state.supports_manual_retry() {
        return Err(PipelineError::NotRollbackable(
            "Only manual retry errors can be rolled back".to_string(),
        ));
    }

    let new_state_row = match rollback_type {
        RollbackType::Individual => {
            // Step back to the previous recorded state; `None` means there is
            // no earlier state to return to, which we report as not rollbackable.
            let Some(new_state_row) = state::rollback_replication_state(
                &source_pool,
                pipeline_id,
                TableId::new(table_id),
            )
            .await?
            else {
                return Err(PipelineError::NotRollbackable(
                    "No previous state to rollback to".to_string(),
                ));
            };

            new_state_row
        }
        RollbackType::Full => {
            // Reset the table's replication state from scratch.
            state::reset_replication_state(&source_pool, pipeline_id, TableId::new(table_id))
                .await?
        }
    };

    // We extract the state from the metadata of the row
    let new_state = new_state_row
        .deserialize_metadata()
        .map_err(PipelineError::InvalidTableReplicationState)?
        .ok_or(PipelineError::MissingTableReplicationState)?;

    let response = RollbackTableStateResponse {
        pipeline_id,
        table_id,
        new_state: new_state.into(),
    };

    Ok(Json(response))
}

#[utoipa::path(
    summary = "Update pipeline version",
    description = "Updates the pipeline's version while preserving its state.",
    request_body = UpdatePipelineVersionRequest,
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline version updated successfully"),
        (status = 400, description = "Bad request or pipeline not running", body = ErrorMessage),
        (status = 404, description = "Pipeline or version not found", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
// Moves a pipeline's replicator to the current default image.
//
// The client must echo the default image's id; a mismatch is treated as a race
// (the default changed underneath the client) and rejected. The K8s rollout is
// only performed when the image *name* actually changes.
#[post("/pipelines/{pipeline_id}/version")]
pub async fn update_pipeline_version(
    req: HttpRequest,
    pool: Data<PgPool>,
    encryption_key: Data<EncryptionKey>,
    k8s_client: Data<dyn K8sClient>,
    api_config: Data<ApiConfig>,
    pipeline_id: Path<i64>,
    update_request: Json<UpdatePipelineVersionRequest>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();
    let update_request = update_request.into_inner();
    let k8s_client = k8s_client.into_inner();

    // Load the pipeline, replicator, current image, source, and destination in
    // one transaction so the version check and update see a consistent view.
    let mut txn = pool.begin().await?;
    let (pipeline, replicator, current_image, source, destination) =
        read_pipeline_components(&mut txn, tenant_id, pipeline_id, &encryption_key).await?;

    // Only allow updating to the current default image. The client must provide the version id and
    // it must match the default version id. If it does not, we consider this a race condition and we
    // fail the update.
    let default_image = db::images::read_default_image(txn.deref_mut())
        .await?
        .ok_or(PipelineError::NoDefaultImageFound)?;

    if update_request.version_id != default_image.id {
        return Err(PipelineError::ImageIdNotDefault(update_request.version_id));
    }

    let target_image = default_image;

    // If the image ids are different, we change the database entry.
    if target_image.id != current_image.id {
        db::replicators::update_replicator_image(
            txn.deref_mut(),
            tenant_id,
            replicator.id,
            target_image.id,
        )
        .await?
        .ok_or(PipelineError::ReplicatorNotFound(pipeline_id))?;
    }

    // If the images have equal name, we don't care about their id from the K8S perspective, so we
    // won't update any resources.
    if target_image.name == current_image.name {
        txn.commit().await?;

        return Ok(HttpResponse::Ok().finish());
    }

    // We update the pipeline in K8s if client is available.
    // Note: this runs *before* the commit on purpose — if the K8s update fails,
    // the `?` drops the transaction and the database change is rolled back.
    create_or_update_pipeline_resources_in_k8s(
        k8s_client.as_ref(),
        tenant_id,
        pipeline,
        replicator,
        target_image,
        source,
        destination,
        api_config.supabase_api_url.as_deref(),
    )
    .await?;
    txn.commit().await?;

    Ok(HttpResponse::Ok().finish())
}

#[utoipa::path(
    summary = "Update pipeline config",
    description = "Updates the pipeline's configuration while preserving its running state.",
    context_path = "/v1",
    request_body = UpdatePipelineConfigRequest,
    params(
        ("pipeline_id" = i64, Path, description = "Unique ID of the pipeline"),
        ("tenant_id" = String, Header, description = "Tenant ID used to scope the request")
    ),
    responses(
        (status = 200, description = "Pipeline configuration updated successfully", body = UpdatePipelineConfigResponse),
        (status = 400, description = "Bad request or pipeline not running", body = ErrorMessage),
        (status = 404, description = "Pipeline not found", body = ErrorMessage),
        (status = 500, description = "Internal server error", body = ErrorMessage)
    ),
    tag = "Pipelines"
)]
// Persists a new configuration for an existing pipeline and echoes the stored
// configuration back to the caller.
#[post("/pipelines/{pipeline_id}/update-config")]
pub async fn update_pipeline_config(
    req: HttpRequest,
    pool: Data<PgPool>,
    pipeline_id: Path<i64>,
    update_request: Json<UpdatePipelineConfigRequest>,
) -> Result<impl Responder, PipelineError> {
    let tenant_id = extract_tenant_id(&req)?;
    let pipeline_id = pipeline_id.into_inner();
    let update_request = update_request.into_inner();

    let mut txn = pool.begin().await?;

    // A missing row means no pipeline with this id exists for the tenant.
    let stored_config = db::pipelines::update_pipeline_config(
        &mut txn,
        tenant_id,
        pipeline_id,
        update_request.config,
    )
    .await?
    .ok_or(PipelineError::PipelineNotFound(pipeline_id))?;

    txn.commit().await?;

    Ok(Json(UpdatePipelineConfigResponse {
        config: stored_config.into(),
    }))
}
