#![expect(rustdoc::private_doc_tests)]

use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;

use arrow::array::RecordBatchReader;
use arrow::datatypes::Schema;
use arrow::pyarrow::PyArrowType;
use datafusion::catalog::TableProvider;
use datafusion_ffi::table_provider::FFI_TableProvider;
use pyo3::exceptions::{PyTypeError, PyValueError};
use pyo3::prelude::PyAnyMethods as _;
use pyo3::types::{PyCapsule, PyDict, PyTuple};
use pyo3::{Bound, Py, PyAny, PyRef, PyResult, Python, pyclass, pymethods};
use re_chunk::ComponentIdentifier;
use re_chunk_store::{QueryExpression, SparseFillStrategy, ViewContentsSelector};
use re_datafusion::DataframeQueryTableProvider;
use re_log_types::{AbsoluteTimeRange, EntityPath, EntityPathFilter};
#[cfg(feature = "perf_telemetry")]
use re_perf_telemetry::extract_trace_context_from_contextvar;
use re_sdk::ComponentDescriptor;
use re_sorbet::ColumnDescriptor;
use tracing::instrument;

#[cfg(feature = "perf_telemetry")]
use crate::catalog::trace_context::with_trace_span;
use crate::catalog::{PyDatasetEntryInternal, to_py_err};
use crate::utils::{get_tokio_runtime, wait_for_future};

/// View into a remote dataset acting as DataFusion table provider.
#[pyclass(name = "DataframeQueryView", module = "rerun_bindings.rerun_bindings")] // NOLINT: ignore[py-cls-eq] non-trivial implementation
pub struct PyDataframeQueryView {
    /// The dataset entry this view reads from.
    dataset: Py<PyDatasetEntryInternal>,

    /// The query applied when this view is materialized as a table provider.
    ///
    /// Filter methods on the view return a copy with a mutated expression.
    query_expression: QueryExpression,

    /// Limit the query to these segment ids.
    ///
    /// If empty, use the whole dataset.
    segment_ids: Vec<String>,
}

impl PyDataframeQueryView {
    /// Build a view over `dataset`, resolving `contents` against the dataset's schema.
    ///
    /// When `index` is `None` the view is "static only": it contains only static
    /// columns and returns a single row per segment.
    #[instrument(skip(dataset, contents, py))]
    pub fn new(
        dataset: Py<PyDatasetEntryInternal>,
        index: Option<String>,
        contents: Py<PyAny>,
        include_semantically_empty_columns: bool,
        include_tombstone_columns: bool,
        py: Python<'_>,
    ) -> PyResult<Self> {
        // We get the schema from the store since we need it to resolve our columns
        // TODO(jleibs): This is way too slow -- maybe we cache it somewhere?
        let schema = PyDatasetEntryInternal::fetch_arrow_schema(&dataset.borrow(py))?;

        // TODO(jleibs): Check schema for the index name

        let view_contents = extract_contents_expr(contents.bind(py), &schema)?;

        // No index ("static only") implies:
        // - we include only static columns in the contents
        // - we only return one row per segment, with the static data
        let include_static_columns = if index.is_none() {
            re_chunk_store::StaticColumnSelection::StaticOnly
        } else {
            re_chunk_store::StaticColumnSelection::Both
        };

        let query_expression = QueryExpression {
            view_contents: Some(view_contents),
            include_semantically_empty_columns,
            include_tombstone_columns,
            include_static_columns,
            filtered_index: index.map(Into::into),
            filtered_index_range: None,
            filtered_index_values: None,
            using_index_values: None,
            filtered_is_not_null: None,
            sparse_fill_strategy: SparseFillStrategy::None,
            selection: None,
        };

        Ok(Self {
            dataset,
            query_expression,
            segment_ids: Vec::new(),
        })
    }

    /// Copy this view, applying `mutation_fn` to the copy's query expression.
    ///
    /// The original view is left untouched; this backs all the `filter_*` methods.
    fn clone_with_new_query(
        &self,
        py: Python<'_>,
        mutation_fn: impl FnOnce(&mut QueryExpression),
    ) -> Self {
        let mut query_expression = self.query_expression.clone();
        mutation_fn(&mut query_expression);

        Self {
            dataset: self.dataset.clone_ref(py),
            query_expression,
            segment_ids: self.segment_ids.clone(),
        }
    }
}

#[pymethods]
impl PyDataframeQueryView {
    /// Filter by one or more segment ids. All segment ids are included if not specified.
    ///
    /// Parameters
    /// ----------
    /// segment_id : str
    ///     The first segment id to include.
    /// *args : str
    ///     Any additional segment ids to include.
    ///
    /// Returns
    /// -------
    /// DataframeQueryView
    ///     A new view restricted to the given segment ids.
    ///
    ///     The original view will not be modified.
    #[pyo3(signature = (segment_id, *args))]
    fn filter_segment_id<'py>(
        &self,
        py: Python<'py>,
        segment_id: String,
        args: &Bound<'py, PyTuple>,
    ) -> PyResult<Self> {
        // Collect the mandatory first id plus any extra ids passed as varargs.
        let mut segment_ids = vec![segment_id];

        for i in 0..args.len()? {
            let item = args.get_item(i)?;
            segment_ids.push(item.extract()?);
        }

        Ok(Self {
            dataset: self.dataset.clone_ref(py),
            query_expression: self.query_expression.clone(),
            segment_ids,
        })
    }

    /// Filter the view to only include data between the given index sequence numbers.
    ///
    /// This range is inclusive and will contain both the value at the start and the value at the end.
    ///
    /// The view must be of a sequential index type to use this method.
    ///
    /// Parameters
    /// ----------
    /// start : int
    ///     The inclusive start of the range.
    /// end : int
    ///     The inclusive end of the range.
    ///
    /// Returns
    /// -------
    /// RecordingView
    ///     A new view containing only the data within the specified range.
    ///
    ///     The original view will not be modified.
    fn filter_range_sequence(&self, py: Python<'_>, start: i64, end: i64) -> PyResult<Self> {
        // TODO(emilk): it would be nice to add a check here that the index type is indeed a sequence.
        if self.query_expression.filtered_index.is_none() {
            return Err(PyValueError::new_err(
                "Specify an index to filter on first.".to_owned(),
            ));
        }

        // Out-of-range values are clamped to the legal range rather than erroring.
        let start = if let Ok(seq) = re_chunk::TimeInt::try_from(start) {
            seq
        } else {
            re_log::error!(
                illegal_value = start,
                new_value = re_chunk::TimeInt::MIN.as_i64(),
                "filter_range_sequence() called with illegal value - clamped to minimum legal value"
            );
            re_chunk::TimeInt::MIN
        };

        let end = if let Ok(seq) = re_chunk::TimeInt::try_from(end) {
            seq
        } else {
            re_log::error!(
                illegal_value = end,
                new_value = re_chunk::TimeInt::MAX.as_i64(),
                "filter_range_sequence() called with illegal value - clamped to maximum legal value"
            );
            re_chunk::TimeInt::MAX
        };

        let resolved = AbsoluteTimeRange::new(start, end);

        Ok(self.clone_with_new_query(py, |query_expression| {
            query_expression.filtered_index_range = Some(resolved);
        }))
    }

    /// Filter the view to only include data between the given index values expressed as seconds.
    ///
    /// This range is inclusive and will contain both the value at the start and the value at the end.
    ///
    /// The view must be of a temporal index type to use this method.
    ///
    /// Parameters
    /// ----------
    /// start : int
    ///     The inclusive start of the range.
    /// end : int
    ///     The inclusive end of the range.
    ///
    /// Returns
    /// -------
    /// RecordingView
    ///     A new view containing only the data within the specified range.
    ///
    ///     The original view will not be modified.
    fn filter_range_secs(&self, py: Python<'_>, start: f64, end: f64) -> PyResult<Self> {
        // TODO(emilk): it would be nice to add a check here that the index type is indeed temporal
        if self.query_expression.filtered_index.is_none() {
            return Err(PyValueError::new_err(
                "Specify an index to filter on first.".to_owned(),
            ));
        }

        let start = re_log_types::Timestamp::from_secs_since_epoch(start);
        let end = re_log_types::Timestamp::from_secs_since_epoch(end);

        let resolved = AbsoluteTimeRange::new(start, end);

        Ok(self.clone_with_new_query(py, |query_expression| {
            query_expression.filtered_index_range = Some(resolved);
        }))
    }

    /// Filter the view to only include data between the given index values expressed as nanoseconds.
    ///
    /// This range is inclusive and will contain both the value at the start and the value at the end.
    ///
    /// The view must be of a temporal index type to use this method.
    ///
    /// Parameters
    /// ----------
    /// start : int
    ///     The inclusive start of the range.
    /// end : int
    ///     The inclusive end of the range.
    ///
    /// Returns
    /// -------
    /// RecordingView
    ///     A new view containing only the data within the specified range.
    ///
    ///     The original view will not be modified.
    fn filter_range_nanos(&self, py: Python<'_>, start: i64, end: i64) -> PyResult<Self> {
        // TODO(emilk): it would be nice to add a check here that the index type is indeed temporal.
        if self.query_expression.filtered_index.is_none() {
            return Err(PyValueError::new_err(
                "Specify an index to filter on first.".to_owned(),
            ));
        }

        let start = re_log_types::Timestamp::from_nanos_since_epoch(start);
        let end = re_log_types::Timestamp::from_nanos_since_epoch(end);

        let resolved = AbsoluteTimeRange::new(start, end);

        Ok(self.clone_with_new_query(py, |query_expression| {
            query_expression.filtered_index_range = Some(resolved);
        }))
    }

    /// Filter the view to only include data at the provided index values.
    ///
    /// The index values returned will be the intersection between the provided values and the
    /// original index values.
    ///
    /// This requires index values to be a precise match. Index values in Rerun are
    /// represented as i64 sequence counts or nanoseconds. This API does not expose an interface
    /// in floating point seconds, as the numerical conversion would risk false mismatches.
    ///
    /// Parameters
    /// ----------
    /// values : IndexValuesLike
    ///     The index values to filter by.
    ///
    /// Returns
    /// -------
    /// RecordingView
    ///     A new view containing only the data at the specified index values.
    ///
    ///     The original view will not be modified.
    fn filter_index_values(
        &self,
        py: Python<'_>,
        values: crate::dataframe::IndexValuesLike<'_>,
    ) -> PyResult<Self> {
        let values = values.to_index_values()?;

        Ok(self.clone_with_new_query(py, |query_expression| {
            query_expression.filtered_index_values = Some(values);
        }))
    }

    /// Filter the view to only include rows where the given component column is not null.
    ///
    /// This corresponds to rows for index values where this component was provided to Rerun explicitly
    /// via `.log()` or `.send_columns()`.
    ///
    /// Parameters
    /// ----------
    /// column : AnyComponentColumn
    ///     The component column to filter by.
    ///
    /// Returns
    /// -------
    /// RecordingView
    ///     A new view containing only the data where the specified component column is not null.
    ///
    ///     The original view will not be modified.
    fn filter_is_not_null(
        &self,
        py: Python<'_>,
        column: crate::dataframe::AnyComponentColumn,
    ) -> PyResult<Self> {
        let column = column.into_selector()?;

        Ok(self.clone_with_new_query(py, |query_expression| {
            query_expression.filtered_is_not_null = Some(column);
        }))
    }

    /// Create a new view that contains the provided index values.
    ///
    /// If they exist in the original data they are selected, otherwise empty rows are added to the view.
    ///
    /// The output view will always have the same number of rows as the provided values, even if
    /// those rows are empty. Use with [`.fill_latest_at()`][rerun.dataframe.RecordingView.fill_latest_at]
    /// to populate these rows with the most recent data.
    ///
    /// Parameters
    /// ----------
    /// values : IndexValuesLike
    ///     The index values to use.
    ///
    /// Returns
    /// -------
    /// RecordingView
    ///     A new view containing the provided index values.
    ///
    ///     The original view will not be modified.
    fn using_index_values(
        &self,
        py: Python<'_>,
        values: crate::dataframe::IndexValuesLike<'_>,
    ) -> PyResult<Self> {
        let values = values.to_index_values()?;

        Ok(self.clone_with_new_query(py, |query_expression| {
            query_expression.using_index_values = Some(values);
        }))
    }

    /// Populate any null values in a row with the latest valid data according to the index.
    ///
    /// Returns
    /// -------
    /// RecordingView
    ///     A new view with the null values filled in.
    ///
    ///     The original view will not be modified.
    fn fill_latest_at(&self, py: Python<'_>) -> Self {
        self.clone_with_new_query(py, |query_expression| {
            query_expression.sparse_fill_strategy = SparseFillStrategy::LatestAtGlobal;
        })
    }

    /// Returns a DataFusion table provider capsule.
    ///
    /// This implements the `__datafusion_table_provider__` protocol so the view can be
    /// registered directly with a DataFusion `SessionContext` from Python.
    #[instrument(skip_all)]
    fn __datafusion_table_provider__<'py>(
        self_: PyRef<'py, Self>,
        py: Python<'py>,
    ) -> PyResult<Bound<'py, PyCapsule>> {
        let provider = self_.as_table_provider(py)?;

        let capsule_name = cr"datafusion_table_provider".into();

        // The FFI provider needs a tokio runtime handle to drive async execution.
        let runtime = get_tokio_runtime().handle().clone();
        let provider = FFI_TableProvider::new(provider, true, Some(runtime));

        PyCapsule::new(py, provider, Some(capsule_name))
    }

    /// Convert this view to a [`pyarrow.RecordBatchReader`][].
    #[instrument(skip_all)]
    fn to_arrow_reader<'py>(
        self_: PyRef<'py, Self>,
        py: Python<'py>,
    ) -> PyResult<Bound<'py, PyAny>> {
        let df = Self::df(self_)?;

        // A DataFusion DataFrame implements the Arrow stream protocol, so pyarrow
        // can consume it directly.
        py.import("pyarrow")?
            .getattr("RecordBatchReader")?
            .call_method1("from_stream", (df,))
    }

    /// Register this view to the global DataFusion context and return a DataFrame.
    #[instrument(skip_all)]
    fn df(self_: PyRef<'_, Self>) -> PyResult<Bound<'_, PyAny>> {
        let py = self_.py();

        // TODO(zehiko) this will go away asap as we're enabling perf telemetry by default
        #[cfg(feature = "perf_telemetry")]
        {
            with_trace_span!(py, "df", {
                let dataset = self_.dataset.borrow(py);
                let client = dataset.client().borrow(py);
                let ctx = client.ctx(py)?;
                let ctx = ctx.bind(py);

                // Release the borrows before calling back into Python, which may
                // re-enter this object.
                drop(client);
                drop(dataset);

                let df = ctx.call_method1("read_table", (self_,))?;
                Ok(df)
            })
        }
        #[cfg(not(feature = "perf_telemetry"))]
        {
            let dataset = self_.dataset.borrow(py);
            let client = dataset.client().borrow(py);
            let ctx = client.ctx(py)?;
            let ctx = ctx.bind(py);

            // Release the borrows before calling back into Python, which may
            // re-enter this object.
            drop(client);
            drop(dataset);

            let df = ctx.call_method1("read_table", (self_,))?;
            Ok(df)
        }
    }

    /// Get the relevant chunk_ids for this view.
    #[instrument(skip_all)]
    fn get_chunk_ids<'py>(
        self_: PyRef<'py, Self>,
        py: Python<'py>,
    ) -> PyResult<PyArrowType<Box<dyn RecordBatchReader + Send>>> {
        let dataset = self_.dataset.borrow(py);
        let dataset_id = dataset.entry_id();
        let connection = dataset.client().borrow(py).connection().clone();

        // Fetch relevant chunks
        connection.get_chunk_ids_for_dataframe_query(
            py,
            dataset_id,
            &self_.query_expression,
            self_.segment_ids.as_slice(),
        )
    }

    pub fn __str__(&self, py: Python<'_>) -> String {
        let dataset_str = PyDatasetEntryInternal::__str__(self.dataset.borrow(py));
        let query_expr_str = format!("{:#?}", self.query_expression);

        let dataset_line = indent::indent_all_by(1, format!("dataset={dataset_str},"));
        let query_line = indent::indent_all_by(1, format!("query_expression={query_expr_str},"));
        let segment_line = indent::indent_all_by(1, format!("segment_ids={:?}", self.segment_ids));

        format!("DataframeQueryView(\n{dataset_line}\n{query_line}\n{segment_line}\n)")
    }
}

impl PyDataframeQueryView {
    /// Build a DataFusion [`TableProvider`] that executes this view's query against the
    /// remote dataset.
    ///
    /// Blocks the current thread on the async provider construction via `wait_for_future`.
    fn as_table_provider(&self, py: Python<'_>) -> PyResult<Arc<dyn TableProvider>> {
        let dataset = self.dataset.borrow(py);
        let dataset_id = dataset.entry_id();
        let connection = dataset.client().borrow(py).connection().clone();

        // Capture trace context to propagate into async query execution
        #[cfg(all(feature = "perf_telemetry", not(target_arch = "wasm32")))]
        let trace_headers_opt = {
            let trace_headers = extract_trace_context_from_contextvar(py);
            // An empty `traceparent` means there is no active span to propagate.
            if trace_headers.traceparent.is_empty() {
                None
            } else {
                Some(trace_headers)
            }
        };
        // Without telemetry (or on wasm) there is nothing to propagate.
        #[cfg(not(all(feature = "perf_telemetry", not(target_arch = "wasm32"))))]
        let trace_headers_opt = None;

        wait_for_future(py, async move {
            DataframeQueryTableProvider::new(
                connection.origin().clone(),
                connection.connection_registry().clone(),
                dataset_id,
                &self.query_expression,
                &self.segment_ids,
                // NOTE(review): the trace-headers parameter only exists on non-wasm
                // builds of `DataframeQueryTableProvider::new` — keep the cfg in sync.
                #[cfg(not(target_arch = "wasm32"))]
                trace_headers_opt,
            )
            .await
        })
        .map(|p| Arc::new(p) as Arc<dyn TableProvider>)
        .map_err(to_py_err)
    }
}

/// Convert a `ViewContentsLike` into a `ViewContentsSelector`.
///
/// ```python
/// ViewContentsLike = Union[str, Dict[str, Union[str, Sequence[str]]]]
/// ```
///
/// We can't do this with the normal `FromPyObject` mechanisms because we want access to the
/// `QueryEngine` to resolve the entity paths.
///
/// # Errors
///
/// Returns `PyTypeError` if `expr` is neither a `str` nor a `dict` (or a dict value is
/// neither a `str` nor a sequence of `str`), and `PyValueError` if a path expression
/// fails to parse.
fn extract_contents_expr(
    expr: &Bound<'_, PyAny>,
    schema: &Schema,
) -> PyResult<re_chunk_store::ViewContentsSelector> {
    // Collect the component columns of the schema; other column kinds (e.g. indices)
    // are irrelevant for content selection. Fields that fail to convert are skipped.
    let component_descriptors = schema
        .fields()
        .iter()
        .filter_map(|field| ColumnDescriptor::try_from_arrow_field(None, field.as_ref()).ok())
        .filter_map(|descriptor| {
            if let ColumnDescriptor::Component(component) = descriptor {
                Some(component)
            } else {
                None
            }
        })
        .collect::<Vec<_>>();

    let mut known_components = BTreeMap::<EntityPath, BTreeSet<ComponentDescriptor>>::new();

    for component in &component_descriptors {
        // We need to resolve the component type to the best one in the schema
        // (e.g. "color" -> "rerun.color")
        known_components
            .entry(component.entity_path.clone())
            .or_default()
            .insert(component.into());
    }

    if let Ok(expr) = expr.extract::<String>() {
        // `str`: a single path expression selecting all components of matching entities.

        let path_filter =
                EntityPathFilter::parse_strict(&expr)
                    .map_err(|err| {
                        PyValueError::new_err(format!(
                            "Could not interpret `contents` as a ViewContentsLike. Failed to parse {expr}: {err}.",
                        ))
                    })?.resolve_without_substitutions();

        // Iterate every entity path in the schema; `None` selects all components.

        let contents = known_components
            .keys()
            .filter(|p| path_filter.matches(p))
            .map(|p| (p.clone(), None))
            .collect();

        Ok(contents)
    } else if let Ok(dict) = expr.downcast::<PyDict>() {
        // `Dict[str, Union[str, Sequence[str]]]`: path expression -> component selection.

        let mut contents = ViewContentsSelector::default();

        for (key, value) in dict {
            let key = key.extract::<String>().map_err(|_err| {
                    PyTypeError::new_err(
                        format!("Could not interpret `contents` as a ViewContentsLike. Key: {key} is not a path expression."),
                    )
                })?;

            let path_filter = EntityPathFilter::parse_strict(&key).map_err(|err| {
                    PyValueError::new_err(format!(
                        "Could not interpret `contents` as a ViewContentsLike. Failed to parse {key}: {err}.",
                    ))
                })?.resolve_without_substitutions();

            // The value is either a single component name or a sequence of them.
            let component_strs: BTreeSet<ComponentIdentifier> = if let Ok(component) =
                value.extract::<String>()
            {
                std::iter::once(component.into()).collect()
            } else if let Ok(components) = value.extract::<Vec<String>>() {
                components.into_iter().map(Into::into).collect()
            } else {
                return Err(PyTypeError::new_err(format!(
                    "Could not interpret `contents` as a ViewContentsLike. Value: {value} is not a `str` or Sequence[str]."
                )));
            };

            // Every schema entity matching this key's filter gets this component set.
            let mut key_contents = known_components
                .keys()
                .filter(|p| path_filter.matches(p))
                .map(|entity_path| (entity_path.clone(), Some(component_strs.clone())))
                .collect();

            contents.append(&mut key_contents);
        }

        Ok(contents)
    } else {
        Err(PyTypeError::new_err(
            "Could not interpret `contents` as a ViewContentsLike. Top-level type must be a string or a dictionary.",
        ))
    }
}
