use super::partition::Partition;
use crate::catalog::metrics::TableMetrics;
use crate::CatalogChunk;
use data_types::partition_metadata::{PartitionAddr, PartitionSummary};
use data_types::timestamp::TimestampRange;
use hashbrown::HashMap;
use schema::{
    builder::SchemaBuilder,
    merge::{Error as SchemaMergerError, SchemaMerger},
    Schema,
};
use std::{ops::Deref, result::Result, sync::Arc};
use time::TimeProvider;
use tracker::{RwLock, RwLockReadGuard, RwLockWriteGuard};

/// A `Table` is a collection of `Partition` each of which is a collection of `Chunk`
#[derive(Debug)]
pub struct Table {
    /// Database name
    db_name: Arc<str>,

    /// Table name
    table_name: Arc<str>,

    /// Partitions of this table; the key is the partition key.
    partitions: HashMap<Arc<str>, Arc<RwLock<Partition>>>,

    /// Table metrics
    metrics: Arc<TableMetrics>,

    /// Table-wide schema.
    ///
    /// Notes on the type:
    /// - the outer `Arc<RwLock<...>>` is so that we can reference the locked schema w/o a lifetime to the table
    /// - the inner `Arc<Schema>` is a schema that we don't need to copy when moving it around the query stack
    schema: Arc<RwLock<Arc<Schema>>>,

    /// Time provider; handed to every newly created [`Partition`].
    time_provider: Arc<dyn TimeProvider>,
}

impl Table {
    /// Create a new table catalog object.
    ///
    /// This function is not pub because `Table`s should be
    /// created using the interfaces on [`Catalog`](crate::catalog::Catalog) and not
    /// instantiated directly.
    pub(super) fn new(
        db_name: Arc<str>,
        table_name: Arc<str>,
        metrics: TableMetrics,
        time_provider: Arc<dyn TimeProvider>,
    ) -> Self {
        // Start from a measurement-only (column-free) schema; columns are merged in later as
        // writes arrive.
        let mut schema_builder = SchemaBuilder::new();
        schema_builder.measurement(table_name.as_ref());
        let empty_schema = schema_builder.build().expect("cannot build empty schema");
        let schema = Arc::new(metrics.new_table_lock(Arc::new(empty_schema)));

        Self {
            db_name,
            table_name,
            partitions: Default::default(),
            metrics: Arc::new(metrics),
            schema,
            time_provider,
        }
    }

    /// Returns the partition with the given key, if any.
    pub fn partition(&self, partition_key: impl AsRef<str>) -> Option<&Arc<RwLock<Partition>>> {
        self.partitions.get(partition_key.as_ref())
    }

    /// Iterates over all partitions of this table.
    pub fn partitions(&self) -> impl Iterator<Item = &Arc<RwLock<Partition>>> + '_ {
        self.partitions.values()
    }

    /// Returns the partition with the given key, creating an empty one first if it does not exist
    /// yet.
    pub fn get_or_create_partition(
        &mut self,
        partition_key: impl AsRef<str>,
    ) -> &Arc<RwLock<Partition>> {
        // Destructure `self` so the closure below can borrow the individual fields while
        // `partitions` is mutably borrowed by the raw-entry API.
        let Self {
            db_name,
            table_name,
            partitions,
            metrics,
            time_provider,
            ..
        } = self;

        let (_, partition) = partitions
            .raw_entry_mut()
            .from_key(partition_key.as_ref())
            .or_insert_with(|| {
                let key: Arc<str> = Arc::from(partition_key.as_ref());
                let addr = PartitionAddr {
                    db_name: Arc::clone(db_name),
                    table_name: Arc::clone(table_name),
                    partition_key: Arc::clone(&key),
                };
                let new_partition = Partition::new(
                    addr,
                    metrics.new_partition_metrics(),
                    Arc::clone(time_provider),
                );
                (key, Arc::new(metrics.new_partition_lock(new_partition)))
            });
        partition
    }

    /// Iterates over the keys of all partitions of this table.
    pub fn partition_keys(&self) -> impl Iterator<Item = &Arc<str>> + '_ {
        self.partitions.keys()
    }

    /// Iterates over the summaries of those partitions that have one.
    pub fn partition_summaries(&self) -> impl Iterator<Item = PartitionSummary> + '_ {
        self.partitions.values().filter_map(|p| p.read().summary())
    }

    /// Calls `map` with every chunk and returns a collection of the results
    ///
    /// If `partition_key` is `Some` restricts to chunks in that partition.
    /// If `timestamp_range` is `Some` restricts to chunks with timestamps overlapping that range.
    pub fn filtered_chunks<F, C>(
        &self,
        partition_key: Option<&str>,
        timestamp_range: Option<TimestampRange>,
        map: F,
    ) -> Vec<C>
    where
        F: Fn(&CatalogChunk) -> C + Copy,
    {
        // Either a single partition (if a key was given) or all of them.
        let partitions = match partition_key {
            Some(key) => itertools::Either::Left(self.partition(key).into_iter()),
            None => itertools::Either::Right(self.partitions()),
        };

        let mut results = Vec::with_capacity(partitions.size_hint().1.unwrap_or_default());
        for partition in partitions {
            let partition = partition.read();
            for chunk in partition.chunks() {
                let chunk = chunk.read();
                // Skip chunks that definitely fall outside the requested time range. Chunks
                // without timestamp statistics are always kept.
                if let (Some(range), Some(min_max)) = (timestamp_range, chunk.timestamp_min_max()) {
                    if !min_max.overlaps(range) {
                        continue;
                    }
                }
                results.push(map(&chunk));
            }
        }
        results
    }

    /// Returns the name of this table.
    pub fn name(&self) -> &Arc<str> {
        &self.table_name
    }

    /// Returns the metrics of this table.
    pub fn metrics(&self) -> &Arc<TableMetrics> {
        &self.metrics
    }

    /// Returns the (locked) table-wide schema.
    pub fn schema(&self) -> &Arc<RwLock<Arc<Schema>>> {
        &self.schema
    }
}

/// Inner state of [`TableSchemaUpsertHandle`] that depends if the schema will be changed during the write operation or
/// not.
#[derive(Debug)]
enum TableSchemaUpsertHandleInner<'a> {
    /// Schema will not be changed.
    NoChange {
        /// Read guard held until commit/drop so the schema cannot change underneath the pending write.
        table_schema_read: RwLockReadGuard<'a, Arc<Schema>>,
    },

    /// Schema might change (if write to mutable buffer is successful).
    MightChange {
        /// Write guard under which `merged_schema` will be installed on commit.
        table_schema_write: RwLockWriteGuard<'a, Arc<Schema>>,
        /// Result of merging the current table schema with the incoming write's schema.
        merged_schema: Schema,
    },
}

/// Handle that can be used to modify the table-wide [schema](Schema) during new writes.
///
/// Created via [`TableSchemaUpsertHandle::new`]; holds the relevant schema lock until
/// [`commit`](TableSchemaUpsertHandle::commit) is called or the handle is dropped.
#[derive(Debug)]
pub struct TableSchemaUpsertHandle<'a> {
    // See the enum for the two possible lock states (read-only vs. pending write).
    inner: TableSchemaUpsertHandleInner<'a>,
}

impl<'a> TableSchemaUpsertHandle<'a> {
    /// Prepare a schema upsert for a write described by `new_schema`.
    ///
    /// Takes a read lock if the merged schema is unchanged, otherwise upgrades to a write lock
    /// (with a lock gap, see below). Returns an error if `new_schema` cannot be merged with the
    /// current table schema (e.g. a column re-appears with a conflicting type).
    pub(crate) fn new(
        table_schema: &'a RwLock<Arc<Schema>>,
        new_schema: &Schema,
    ) -> Result<Self, SchemaMergerError> {
        // Be optimistic and only get a read lock. It is rather rare that the schema will change when new data arrives
        // and we do NOT want to serialize all writes on a single lock.
        let table_schema_read = table_schema.read();

        // Let's see if we can merge the new schema with the existing one (this may or may not result in any schema
        // change).
        let merged_schema = Self::try_merge(&table_schema_read, new_schema)?;

        // Now check if this would actually change the schema:
        if &merged_schema == table_schema_read.deref().deref() {
            // Optimism paid off and we get away with the read lock.
            Ok(Self {
                inner: TableSchemaUpsertHandleInner::NoChange { table_schema_read },
            })
        } else {
            // Schema changed, so we need a real write lock. To do that, we must first drop the read lock.
            drop(table_schema_read);

            // !!! Here we have a lock-gap !!!

            // Re-lock with write permissions.
            let table_schema_write = table_schema.write();

            // During the above lock gap, the schema might have changed again, so we need to perform the merge again.
            // This may also lead to a failure now, e.g. when adding a column that was added with a different type
            // during the lock gap.
            let merged_schema = Self::try_merge(&table_schema_write, new_schema)?;

            Ok(Self {
                inner: TableSchemaUpsertHandleInner::MightChange {
                    table_schema_write,
                    merged_schema,
                },
            })
        }
    }

    /// Try to merge schema.
    ///
    /// This will also sort the columns!
    fn try_merge(schema1: &Schema, schema2: &Schema) -> Result<Schema, SchemaMergerError> {
        Ok(SchemaMerger::new().merge(schema1)?.merge(schema2)?.build())
    }

    /// Commit potential schema change.
    ///
    /// Consumes the handle, installing the merged schema (if any) and releasing the lock.
    pub(crate) fn commit(self) {
        match self.inner {
            TableSchemaUpsertHandleInner::NoChange { table_schema_read } => {
                // Nothing to do since there was no schema change queued. Just drop the read guard.
                drop(table_schema_read);
            }
            TableSchemaUpsertHandleInner::MightChange {
                mut table_schema_write,
                merged_schema,
            } => {
                // Commit new schema and drop write guard.
                *table_schema_write = Arc::new(merged_schema);
                drop(table_schema_write);
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use schema::{InfluxColumnType, InfluxFieldType};

    use super::*;

    /// Build a schema for measurement `m1` consisting solely of the given tag columns.
    fn tag_schema(tags: &[&str]) -> Schema {
        let mut builder = SchemaBuilder::new();
        builder.measurement("m1");
        for tag in tags {
            builder.influx_column(tag, InfluxColumnType::Tag);
        }
        builder.build().unwrap()
    }

    /// Assert that upserting `write_schema` takes the `NoChange` path and that the stored schema
    /// equals `expected` both before and after committing.
    fn assert_no_change(
        table_schema: &RwLock<Arc<Schema>>,
        write_schema: &Schema,
        expected: &Schema,
    ) {
        let handle = TableSchemaUpsertHandle::new(table_schema, write_schema).unwrap();
        assert!(matches!(
            handle.inner,
            TableSchemaUpsertHandleInner::NoChange { .. }
        ));
        assert_eq!(table_schema.read().deref().deref(), expected);
        handle.commit();
        assert_eq!(table_schema.read().deref().deref(), expected);
    }

    #[test]
    fn test_handle_no_change() {
        let table_schema_orig = tag_schema(&["tag1", "tag2"]);
        let table_schema = RwLock::new(Arc::new(table_schema_orig.clone()));

        // writing with the same schema must not trigger a change
        assert_no_change(
            &table_schema,
            &tag_schema(&["tag1", "tag2"]),
            &table_schema_orig,
        );

        // writing with different column order must not trigger a change
        assert_no_change(
            &table_schema,
            &tag_schema(&["tag2", "tag1"]),
            &table_schema_orig,
        );

        // writing with a column subset must not trigger a change
        assert_no_change(&table_schema, &tag_schema(&["tag1"]), &table_schema_orig);
    }

    #[test]
    fn test_handle_might_change() {
        let table_schema = RwLock::new(Arc::new(tag_schema(&["tag1", "tag2"])));

        // a new column ("tag3") must queue a schema change
        let new_schema = tag_schema(&["tag1", "tag3"]);
        let handle = TableSchemaUpsertHandle::new(&table_schema, &new_schema).unwrap();
        assert!(matches!(
            handle.inner,
            TableSchemaUpsertHandleInner::MightChange { .. }
        ));

        // cannot read while lock is held
        assert!(table_schema.try_read().is_none());

        // committing installs the merged (sorted, superset) schema
        handle.commit();
        assert_eq!(
            table_schema.read().deref().deref(),
            &tag_schema(&["tag1", "tag2", "tag3"])
        );
    }

    #[test]
    fn test_handle_error() {
        let table_schema_orig = tag_schema(&["tag1", "tag2"]);
        let table_schema = RwLock::new(Arc::new(table_schema_orig.clone()));

        // "tag2" re-appears as a string field -> the merge must fail
        let mut builder = SchemaBuilder::new();
        builder.measurement("m1");
        builder.influx_column("tag1", InfluxColumnType::Tag);
        builder.influx_column("tag2", InfluxColumnType::Field(InfluxFieldType::String));
        let conflicting = builder.build().unwrap();
        assert!(TableSchemaUpsertHandle::new(&table_schema, &conflicting).is_err());

        // schema did not change
        assert_eq!(table_schema.read().deref().deref(), &table_schema_orig);
    }
}
