/*
Copyright 2024-2025 The Spice.ai OSS Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#![allow(clippy::needless_for_each)]

use crate::datafusion::DataFusion;
use crate::datafusion::request_context_extension::DataFusionContextExtension;
use crate::model::ModelContextLayer;
use crate::request::DatabricksAuthExtension;
use crate::{search::search_engine, status::RuntimeStatus};

use crate::Runtime;
use crate::config;
#[cfg(feature = "openapi")]
use crate::http::v1::{
    Format,
    datasets::{DatasetFilter, DatasetQueryParams},
};
use runtime_request_context::{Protocol, RequestContext};

#[cfg(feature = "mcp")]
use crate::tools::mcp::server::RuntimeServer;
use app::App;
use axum::{extract::State, routing::patch};
use http::header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE};
use opentelemetry::KeyValue;
#[cfg(feature = "mcp")]
use rmcp::transport::SseServer;
#[cfg(feature = "mcp")]
use rmcp::transport::sse_server::SseServerConfig;
use spicepod::component::runtime::CorsConfig;
use std::sync::Arc;
use tokio::sync::RwLock;

#[cfg(feature = "openapi")]
use utoipa::{
    OpenApi,
    openapi::{HttpMethod, path::Operation},
};

#[cfg(feature = "dev")]
use utoipa_swagger_ui::SwaggerUi;

use super::{metrics, v1};

use axum::{
    Extension,
    body::Body,
    extract::MatchedPath,
    http::{HeaderValue, Method, Request},
    middleware::{self, Next},
    response::IntoResponse,
    routing::{Router, get, post},
};
use runtime_auth::layer::http::AuthLayer;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, Any, CorsLayer};
use tower_http::limit::RequestBodyLimitLayer;

/// Root `OpenAPI` specification for the runtime's HTTP API, generated by utoipa.
///
/// Handlers listed under `paths(...)` must be annotated with `#[utoipa::path]`
/// at their definition site; MCP SSE endpoints are registered dynamically and
/// are therefore appended manually in `get_api_doc` instead of being listed here.
#[cfg(feature = "openapi")]
#[derive(OpenApi)]
#[openapi(
    servers(
        (url = "http://localhost:8090", description = "Local development server. Configure with `--http`."),
    ),
    security(
        ("api_key" = [])
    ),
    paths(
        // Order here will be preserved in sidebar at https://spiceai.org/docs/api/http/runtime.
        v1::query::post,
        v1::datasets::get,
        v1::datasets::acceleration,
        v1::datasets::refresh,
        v1::catalogs::get,
        v1::iceberg::get_config,
        v1::iceberg::get_namespaces,
        v1::iceberg::head_namespace,
        v1::ready::get,
        v1::status::get,
        v1::spicepods::get,
        v1::embeddings::post,
        v1::search::post,
        v1::chat::post,
        v1::models::get,
        v1::nsql::post,
        v1::eval::list,
        v1::eval::post,
        v1::inference::get,
        v1::inference::post,
        v1::tools::list,
        v1::tools::post,
        v1::packages::generate,
    ),

    components(schemas(DatasetQueryParams, DatasetFilter, Format)) // These schemas, for some reason, weren't getting picked up.
)]
pub(crate) struct ApiDoc;

/// Returns the `OpenAPI` documentation for the HTTP API. Adds MCP endpoints if the feature is enabled.
///
/// The MCP SSE routes are registered dynamically by `SseServer` (see `routes`),
/// so they cannot be discovered via `#[utoipa::path]` and are appended to the
/// generated spec manually here.
#[cfg(feature = "openapi")]
#[must_use]
pub fn get_api_doc() -> utoipa::openapi::OpenApi {
    use utoipa::openapi::{
        Required,
        path::{Parameter, ParameterIn},
    };

    let mut openapi = ApiDoc::openapi();

    #[cfg(feature = "mcp")]
    {
        openapi.paths.add_path_operation(
            "/v1/mcp/sse",
            vec![HttpMethod::Get],
            Operation::builder()
                // Descriptive, unique id — the previous "operation_id" was a placeholder.
                .operation_id(Some("mcp_sse_connect"))
                .tag("mcp")
                .summary(Some("Establish an MCP SSE Connection"))
                .description(Some(
                    "Initiates a Server-Sent Events (SSE) connection using the Model Context Protocol (MCP) to interact with Spice tools.\n\n
             Once connected, clients can send messages via `POST /v1/mcp/sse` and receive responses through this SSE stream.",
                ))
                .build(),
        );
        openapi.paths.add_path_operation(
            "/v1/mcp/sse",
            vec![HttpMethod::Post],
            Operation::builder()
                .operation_id(Some("mcp_event"))
                .tag("mcp")
                .summary(Some("Send message to MCP server"))
                .description(Some(
                    "Send message to the MCP endpoint, for a given session.",
                ))
                .parameter(
                    Parameter::builder()
                        .name("sessionId")
                        .parameter_in(ParameterIn::Query)
                        .required(Required::True)
                        .build(),
                )
                .response(
                    "202",
                    utoipa::openapi::ResponseBuilder::new()
                        .description("Message accepted. Response will stream via SSE.")
                        .build(),
                )
                .response(
                    "404",
                    utoipa::openapi::ResponseBuilder::new()
                        .description(
                            "Session not found. No active session for the given `session_id`.",
                        )
                        .build(),
                )
                .response(
                    "413",
                    utoipa::openapi::ResponseBuilder::new()
                        .description("Payload too large. Maximum allowed size is 4MB.")
                        .build(),
                )
                .response(
                    "500",
                    utoipa::openapi::ResponseBuilder::new()
                        .description("Internal server error. An unexpected issue occurred.")
                        .build(),
                )
                .build(),
        );
    }
    openapi
}

// Request body size limits to prevent DoS attacks (all limits use binary units: MiB = 1024 * 1024 bytes)
// Applied at three levels:
// 1. DEFAULT_REQUEST_BODY_LIMIT (128 MiB) - for all authenticated endpoints (queries, chat, embeddings, evals)
//    Applied as a route layer to the entire authenticated router to allow reasonable payload sizes for SQL INSERT operations and LLM requests
// 2. MCP_REQUEST_BODY_LIMIT (32 MiB) - for Model Context Protocol (MCP) endpoints
//    Applied to /v1/mcp/sse routes to support MCP message payloads while preventing excessive memory usage
// 3. HEALTH_REQUEST_BODY_LIMIT (128 KiB) - strict limit for unauthenticated endpoints (health checks, ready checks)
//    Applied to unauthenticated routes to prevent DoS via health check endpoints
/// Body limit for authenticated endpoints (queries, chat, embeddings, evals).
const DEFAULT_REQUEST_BODY_LIMIT: usize = 128 * 1024 * 1024; // 128 MiB
/// Body limit for Model Context Protocol (MCP) message endpoints.
#[cfg(feature = "mcp")]
const MCP_REQUEST_BODY_LIMIT: usize = 32 * 1024 * 1024; // 32 MiB
/// Strict body limit for unauthenticated health/ready endpoints.
const HEALTH_REQUEST_BODY_LIMIT: usize = 128 * 1024; // 128 KiB

/// Assembles the full HTTP router for the runtime.
///
/// Routes are built in two groups:
/// - an **authenticated** router (SQL, datasets, iceberg, and — behind their
///   feature flags — model and MCP endpoints) that receives the optional
///   `auth_layer` and a 128 MiB request-body limit;
/// - an **unauthenticated** router (`/health`, `/v1/ready`) with a strict
///   128 KiB body limit.
///
/// Shutdown checking, per-request metrics, the shared `App` extension and the
/// CORS layer are applied to the merged router so they cover every route.
/// Layer ordering matters: `route_layer`s (body limits, auth) run per matched
/// route, and the outermost layers (shutdown check, metrics, CORS) wrap all of
/// them.
#[expect(clippy::too_many_lines)]
pub(crate) fn routes(
    rt: &Arc<Runtime>,
    config: Arc<config::Config>,
    search: Arc<search_engine::SearchEngine>,
    auth_layer: Option<AuthLayer>,
    cors_config: &CorsConfig,
) -> Router {
    let mut authenticated_router = Router::new()
        .route("/v1/sql", post(v1::query::post).layer(ModelContextLayer))
        .route("/v1/status", get(v1::status::get))
        .route("/v1/catalogs", get(v1::catalogs::get))
        .route("/v1/datasets", get(v1::datasets::get))
        .route(
            "/v1/datasets/{name}/acceleration/refresh",
            post(v1::datasets::refresh),
        )
        .route(
            "/v1/datasets/{name}/acceleration",
            patch(v1::datasets::acceleration),
        )
        .route("/v1/spicepods", get(v1::spicepods::get))
        .route("/v1/packages/generate", post(v1::packages::generate));

    // Iceberg REST catalog endpoints (config, namespaces, tables).
    let iceberg_router = Router::new()
        .route("/v1/config", get(v1::iceberg::get_config))
        .route("/v1/namespaces", get(v1::iceberg::get_namespaces))
        .route(
            "/v1/namespaces/{namespace}",
            get(v1::iceberg::get_namespace).head(v1::iceberg::head_namespace),
        )
        .route(
            "/v1/namespaces/{namespace}/tables",
            get(v1::iceberg::list_tables),
        )
        .route(
            "/v1/namespaces/{namespace}/tables/{table}",
            get(v1::iceberg::tables::get).head(v1::iceberg::tables::head),
        );

    authenticated_router = authenticated_router.merge(iceberg_router);

    // Enable Swagger UI & OpenAPI JSON for dev.
    #[cfg(feature = "dev")]
    {
        authenticated_router = authenticated_router
            .merge(SwaggerUi::new("/docs").url("/docs/openapi.json", get_api_doc()));
    }

    // `cfg!` is a compile-time boolean, so both branches must compile;
    // model/LLM routes are only registered when the "models" feature is on.
    if cfg!(feature = "models") {
        authenticated_router = authenticated_router
            .route("/v1/models", get(v1::models::get))
            .route("/v1/models/{name}/predict", get(v1::inference::get))
            .route("/v1/predict", post(v1::inference::post))
            .route("/v1/nsql", post(v1::nsql::post).layer(ModelContextLayer))
            .route(
                "/v1/chat/completions",
                post(v1::chat::post).layer(ModelContextLayer),
            )
            .route(
                "/v1/responses",
                post(v1::responses::post).layer(ModelContextLayer),
            )
            .route("/v1/embeddings", post(v1::embeddings::post))
            .route("/v1/search", post(v1::search::post))
            .route("/v1/tools", get(v1::tools::list))
            .route("/v1/tools/{*name}", post(v1::tools::post))
            // Deprecated singular alias for /v1/tools/{name}; same handler. Prefer /v1/tools/{name}.
            .route("/v1/tool/{name}", post(v1::tools::post))
            .route(
                "/v1/evals/{name}",
                post(v1::eval::post).layer(ModelContextLayer),
            )
            .route("/v1/evals", get(v1::eval::list))
            .route("/v1/workers", get(v1::workers::get))
            .layer(Extension(Arc::clone(&rt.completion_llms)))
            .layer(Extension(Arc::clone(&rt.models)))
            .layer(Extension(Arc::clone(&rt.eval_scorers)))
            .layer(Extension(search))
            .layer(Extension(Arc::clone(&rt.embeds)))
            .layer(Extension(Arc::clone(&rt.workers)))
            .layer(Extension(Arc::clone(&rt.responses_llms)));
    }

    #[cfg(feature = "mcp")]
    {
        // Both SSE (GET) and message posting (POST) share the /v1/mcp/sse path.
        let (sse_server, mcp_router) = SseServer::new(SseServerConfig {
            bind: config.http_bind_address,
            sse_path: "/v1/mcp/sse".to_string(),
            post_path: "/v1/mcp/sse".to_string(),
            ct: tokio_util::sync::CancellationToken::new(),
            sse_keep_alive: None,
        });

        let runtime_arc = Arc::clone(rt);
        // NOTE(review): the cancellation token is intentionally dropped here, so the
        // MCP service lives for the lifetime of the process — confirm that is desired.
        let _cancellation_token =
            sse_server.with_service(move || RuntimeServer::from(&runtime_arc));

        // Apply MCP-specific request body limit before merging
        tracing::debug!(
            "MCP request body size limit set to {} bytes",
            MCP_REQUEST_BODY_LIMIT
        );
        let mcp_router = mcp_router.route_layer(RequestBodyLimitLayer::new(MCP_REQUEST_BODY_LIMIT));
        authenticated_router = mcp_router.merge(authenticated_router);
    }

    authenticated_router = authenticated_router
        .layer(Extension(Arc::clone(rt)))
        .layer(Extension(rt.metrics_endpoint))
        .layer(Extension(config));

    // Apply request body size limit to prevent DoS attacks via unbounded request payloads
    // This must be applied as a route layer before auth
    authenticated_router =
        authenticated_router.route_layer(RequestBodyLimitLayer::new(DEFAULT_REQUEST_BODY_LIMIT));

    // If we have an auth layer, add it to the authenticated router
    if let Some(auth_layer) = auth_layer {
        tracing::info!("Enabled API key authentication on HTTP routes");
        authenticated_router = authenticated_router.route_layer(auth_layer);
    }

    // Health/ready endpoints stay unauthenticated but get a strict body limit.
    let unauthenticated_router = Router::new()
        .route("/health", get(|| async { "ok\n" }))
        .route("/v1/ready", get(v1::ready::get))
        .layer(Extension(Arc::clone(&rt.status)))
        .route_layer(RequestBodyLimitLayer::new(HEALTH_REQUEST_BODY_LIMIT));

    // Outermost layers apply to every route: shutdown gate, metrics, App, CORS.
    unauthenticated_router
        .merge(authenticated_router)
        .route_layer(middleware::from_fn_with_state(rt.status(), check_shutdown))
        .route_layer(middleware::from_fn_with_state(
            Arc::clone(&rt.df),
            track_metrics,
        ))
        .layer(Extension(Arc::clone(&rt.app)))
        .layer(cors_layer(cors_config))
}

async fn track_metrics(
    State(df): State<Arc<DataFusion>>,
    Extension(app): Extension<Arc<RwLock<Option<Arc<App>>>>>,
    headers: http::HeaderMap,
    req: Request<Body>,
    next: Next,
) -> impl IntoResponse {
    let app_lock = app.read().await;
    let app = app_lock.as_ref().map(Arc::clone);
    let mut request_context_builder = RequestContext::builder(Protocol::Http)
        .with_app_opt(app_lock.as_ref().map(Arc::clone))
        .from_headers(&headers);

    if let Some(ext) = DatabricksAuthExtension::from_headers(&app, &Some(Arc::clone(&df)), &headers)
    {
        request_context_builder = ext.add_from_headers(request_context_builder, &headers);
    }
    let request_context = Arc::new(
        request_context_builder
            .with_extension(DataFusionContextExtension::new(Arc::clone(&df)))
            .build(),
    );

    let request_dimensions = request_context.to_dimensions();

    let start = Instant::now();
    let path = if let Some(matched_path) = req.extensions().get::<MatchedPath>() {
        matched_path.as_str().to_owned()
    } else {
        req.uri().path().to_owned()
    };
    let method = req.method().clone();

    let response = Arc::clone(&request_context)
        .scope(async move {
            request_context.load_extensions().await;
            next.run(req).await
        })
        .await;

    let latency_ms = start.elapsed().as_secs_f64() * 1000.0;
    let status = response.status().as_u16().to_string();

    let mut labels = vec![
        KeyValue::new("method", method.to_string()),
        KeyValue::new("path", path),
        KeyValue::new("status", status),
    ];

    labels.extend(request_dimensions.into_iter());

    metrics::REQUESTS_TOTAL.add(1, &labels);
    metrics::REQUESTS.add(1, &labels);
    metrics::REQUESTS_DURATION_MS.record(latency_ms, &labels);

    response
}

/// Builds the CORS layer from the spicepod runtime configuration.
///
/// Returns a disabled (no-op) layer when CORS is not enabled. When enabled,
/// a literal `"*"` in `allowed_origins` permits any origin; otherwise only
/// origins that parse as valid header values are allowed (invalid entries are
/// silently skipped).
fn cors_layer(cors_config: &CorsConfig) -> CorsLayer {
    // By default, the layer is disabled unless .allow* methods are called.
    let cors = CorsLayer::new();

    if !cors_config.enabled {
        return cors;
    }

    // Compare as &str to avoid allocating a String just for the wildcard check.
    let allowed_origins: AllowOrigin = if cors_config.allowed_origins.iter().any(|o| o == "*") {
        Any.into()
    } else {
        cors_config
            .allowed_origins
            .iter()
            .filter_map(|o| HeaderValue::try_from(o).ok())
            .collect::<Vec<HeaderValue>>()
            .into()
    };

    tracing::info!(
        target: "runtime::http",
        "CORS (Cross-Origin Resource Sharing) enabled on HTTP endpoint for allowed origins: {:?}",
        cors_config.allowed_origins
    );

    cors.allow_methods([Method::GET, Method::POST, Method::PATCH, Method::OPTIONS])
        .allow_headers([ACCEPT, CONTENT_TYPE, AUTHORIZATION])
        .allow_origin(allowed_origins)
}

/// Middleware that rejects requests with `503 Service Unavailable` once the
/// runtime has begun shutting down. `/health` is exempt so liveness probes
/// keep succeeding while shutdown is in progress.
async fn check_shutdown(
    State(status): State<Arc<RuntimeStatus>>,
    req: axum::http::Request<Body>,
    next: Next,
) -> impl IntoResponse {
    let bypasses_check = req.uri().path() == "/health";

    if !bypasses_check && status.is_shutdown() {
        (
            http::StatusCode::SERVICE_UNAVAILABLE,
            "Runtime is shutting down",
        )
            .into_response()
    } else {
        next.run(req).await
    }
}
