pub(crate) mod cleaner;
pub mod edit;
pub(crate) mod error;
pub(crate) mod set;
pub(crate) mod timestamp;

use std::{
    ops::Bound,
    sync::{
        atomic::{AtomicU32, Ordering},
        Arc,
    },
};

use flume::Sender;
use fusio::DynFs;
use parquet::arrow::ProjectionMask;
use tracing::error;

use crate::{
    context::Context,
    fs::{manager::StoreManager, FileId, FileType},
    ondisk::sstable::SsTable,
    option::Order,
    record::{Record, Schema},
    scope::Scope,
    stream::{level::LevelStream, record_batch::RecordBatchEntry, ScanStream},
    version::{
        cleaner::CleanTag,
        edit::VersionEdit,
        error::VersionError,
        timestamp::{Timestamp, TsRef},
    },
    DbOption, ParquetLru,
};

/// Number of levels in the LSM tree (level 0 plus six sorted levels).
pub const MAX_LEVEL: usize = 7;

/// Shared, immutable handle to a [`Version`] snapshot.
pub type VersionRef<R> = Arc<Version<R>>;

/// Access to the globally shared transaction timestamp counter.
pub trait TransactionTs {
    /// Returns the current timestamp without modifying it.
    fn load_ts(&self) -> Timestamp;

    /// Atomically advances the counter by one and returns the new timestamp.
    fn increase_ts(&self) -> Timestamp;
}

/// Tracks the current metadata of the `DB`
///
/// A `Version` is a snapshot of the on-disk layout: which SSTables exist at
/// each level and the key range (`Scope`) each one covers. Dropping a
/// `Version` notifies the cleaner (see the `Drop` impl) so obsolete files can
/// be reclaimed.
#[derive(Debug)]
pub struct Version<R>
where
    R: Record,
{
    // Timestamp at creation
    ts: Timestamp,
    // Holds the SSTable file ids and their min/max values for every level
    pub level_slice: [Vec<Scope<<R::Schema as Schema>::Key>>; MAX_LEVEL],
    // Used on drop to send a `CleanTag::Clean` for this version's `ts`
    clean_sender: Sender<CleanTag>,
    // Database configuration shared across versions
    option: Arc<DbOption>,
    // Globally shared transaction timestamp counter (see `TransactionTs`)
    timestamp: Arc<AtomicU32>,
    // NOTE(review): presumably the recovery-log length at snapshot time;
    // `to_edits` emits 0 instead of this value — confirm intent with callers.
    log_length: u32,
}

impl<R> Version<R>
where
    R: Record,
{
    #[cfg(test)]
    #[allow(unused)]
    /// Creates a new, empty `Version` for testing.
    ///
    /// All levels start empty, the creation timestamp is 0, and the log
    /// length is 0.
    pub(crate) fn new(
        option: Arc<DbOption>,
        clean_sender: Sender<CleanTag>,
        timestamp: Arc<AtomicU32>,
    ) -> Self {
        Version {
            ts: Timestamp::from(0),
            level_slice: [const { Vec::new() }; MAX_LEVEL],
            clean_sender,
            // `option` is moved in and not used afterwards, so move it
            // directly instead of bumping and dropping the `Arc` refcount.
            option,
            timestamp,
            log_length: 0,
        }
    }

    /// Returns the database options this version was created with.
    pub(crate) fn option(&self) -> &Arc<DbOption> {
        &self.option
    }
}

// Handles Timestamp operations for `Version`
impl<R> TransactionTs for Version<R>
where
    R: Record,
{
    fn load_ts(&self) -> Timestamp {
        self.timestamp.load(Ordering::Acquire).into()
    }

    fn increase_ts(&self) -> Timestamp {
        (self.timestamp.fetch_add(1, Ordering::Release) + 1).into()
    }
}

impl<R> Clone for Version<R>
where
    R: Record,
{
    /// Deep-copies the per-level scope lists; the remaining fields are either
    /// `Copy` or cheap reference-counted/channel handles.
    fn clone(&self) -> Self {
        Self {
            ts: self.ts,
            level_slice: self.level_slice.clone(),
            clean_sender: self.clean_sender.clone(),
            option: self.option.clone(),
            timestamp: self.timestamp.clone(),
            log_length: self.log_length,
        }
    }
}

impl<R> Version<R>
where
    R: Record,
{
    /// Queries for 'get' operations
    ///
    /// Level-0 tables may overlap, so every scope containing the key is
    /// probed, newest first. Levels 1+ are located via a single binary search
    /// per level. The first matching entry wins; `None` means the key is in
    /// no on-disk table of this version.
    pub(crate) async fn query(
        &self,
        manager: &StoreManager,
        key: &TsRef<<R::Schema as Schema>::Key>,
        projection_mask: ProjectionMask,
        parquet_lru: ParquetLru,
        pk_indices: &[usize],
    ) -> Result<Option<RecordBatchEntry<R>>, VersionError> {
        // Each level may be mapped to its own filesystem; fall back to the
        // base path when no per-level path is configured.
        let level_0_path = self
            .option
            .level_fs_path(0)
            .unwrap_or(&self.option.base_path);
        let level_0_fs = manager.get_fs(level_0_path);

        // For level 0, check if the scope contains the key. If found we do a query into the level
        // (`.rev()` visits newer tables first so fresher values shadow older ones).
        for scope in self.level_slice[0].iter().rev() {
            if !scope.contains(key.value()) {
                continue;
            }
            if let Some(entry) = self
                .table_query(
                    level_0_fs,
                    key,
                    0,
                    scope.gen,
                    projection_mask.clone(),
                    parquet_lru.clone(),
                    pk_indices,
                )
                .await?
            {
                return Ok(Some(entry));
            }
        }

        // For level 1+, a binary search is done on the level to find the key before querying on it
        for (i, sort_runs) in self.level_slice[1..MAX_LEVEL].iter().enumerate() {
            if sort_runs.is_empty() {
                continue;
            }
            let level = i + 1;
            let level_path = self
                .option
                .level_fs_path(level)
                .unwrap_or(&self.option.base_path);
            let level_fs = manager.get_fs(level_path);

            // `scope_search` returns the single candidate scope; verify the
            // key actually lies in its min/max bounds before opening a file.
            let index = Self::scope_search(key.value(), sort_runs);
            if !sort_runs[index].contains(key.value()) {
                continue;
            }
            if let Some(entry) = self
                .table_query(
                    level_fs,
                    key,
                    level,
                    sort_runs[index].gen,
                    projection_mask.clone(),
                    parquet_lru.clone(),
                    pk_indices,
                )
                .await?
            {
                return Ok(Some(entry));
            }
        }

        Ok(None)
    }

    // Opens the file by `FileId` and does a get operation on the SsTable.
    // I/O failures surface as `VersionError::Fusio`, Parquet read failures as
    // `VersionError::Parquet`.
    #[allow(clippy::too_many_arguments)]
    async fn table_query(
        &self,
        store: &Arc<dyn DynFs>,
        key: &TsRef<<R::Schema as Schema>::Key>,
        level: usize,
        gen: FileId,
        projection_mask: ProjectionMask,
        parquet_lru: ParquetLru,
        pk_indices: &[usize],
    ) -> Result<Option<RecordBatchEntry<R>>, VersionError> {
        let file = store
            .open_options(
                &self.option.table_path(gen, level),
                FileType::Parquet.open_options(true),
            )
            .await
            .map_err(VersionError::Fusio)?;
        SsTable::<R>::open(parquet_lru, gen, file)
            .await?
            .get(key, projection_mask, pk_indices)
            .await
            .map_err(VersionError::Parquet)
    }

    /// Perform binary search on a level using the key
    ///
    /// Searches on each scope's `min`. An exact hit returns that scope's
    /// index; otherwise the insertion point minus one (saturating at 0) is
    /// returned, i.e. the last scope whose `min` is below the key. The caller
    /// must still check `contains` — the key may fall in a gap between scopes.
    pub fn scope_search(
        key: &<R::Schema as Schema>::Key,
        level: &[Scope<<R::Schema as Schema>::Key>],
    ) -> usize {
        level
            .binary_search_by(|scope| scope.min.cmp(key))
            .unwrap_or_else(|index| index.saturating_sub(1))
    }

    /// Returns the length of a level
    pub fn tables_len(&self, level: usize) -> usize {
        self.level_slice[level].len()
    }

    /// Checks all levels and pushes all data scans that fall in the range
    ///
    /// Level 0 contributes one `ScanStream::SsTable` per table meeting the
    /// range; each non-empty level 1+ contributes a single `ScanStream::Level`
    /// over the `[start, end]` run of matching scopes.
    #[allow(clippy::too_many_arguments)]
    pub(crate) async fn streams<'streams>(
        &self,
        ctx: &Context<R>,
        streams: &mut Vec<ScanStream<'streams, R>>,
        range: (
            Bound<&'streams <R::Schema as Schema>::Key>,
            Bound<&'streams <R::Schema as Schema>::Key>,
        ),
        ts: Timestamp,
        limit: Option<usize>,
        projection_mask: ProjectionMask,
        order: Option<Order>,
        pk_indices: &'streams [usize],
    ) -> Result<(), VersionError> {
        let level_0_path = self
            .option
            .level_fs_path(0)
            .unwrap_or(&self.option.base_path);
        let level_0_fs = ctx.manager.get_fs(level_0_path);

        // Level 0: open every table whose scope intersects the range.
        for scope in self.level_slice[0].iter() {
            if !scope.meets_range(range) {
                continue;
            }
            let file = level_0_fs
                .open_options(
                    &self.option.table_path(scope.gen, 0),
                    FileType::Parquet.open_options(true),
                )
                .await
                .map_err(VersionError::Fusio)?;
            let table = SsTable::open(ctx.parquet_lru.clone(), scope.gen, file).await?;

            streams.push(ScanStream::SsTable {
                inner: table
                    .scan(range, ts, limit, projection_mask.clone(), order, pk_indices)
                    .await
                    .map_err(VersionError::Parquet)?,
            })
        }

        for (i, scopes) in self.level_slice[1..].iter().enumerate() {
            if scopes.is_empty() {
                continue;
            }
            let level_path = self
                .option
                .level_fs_path(i + 1)
                .unwrap_or(&self.option.base_path);
            let level_fs = ctx.manager.get_fs(level_path);

            // Record the first and last scope indexes that meet the range so
            // the level can be scanned as one contiguous run.
            let (mut start, mut end) = (None, None);

            for (idx, scope) in scopes.iter().enumerate() {
                if scope.meets_range(range) {
                    if start.is_none() {
                        start = Some(idx);
                    }
                    end = Some(idx);
                }
            }
            if start.is_none() {
                continue;
            }

            streams.push(ScanStream::Level {
                // SAFETY: `start` (and therefore `end`) is `Some` here — at
                // least one scope met the range — so the unwraps cannot panic.
                inner: LevelStream::new(
                    self,
                    i + 1,
                    start.unwrap(),
                    end.unwrap(),
                    range,
                    ts,
                    limit,
                    projection_mask.clone(),
                    level_fs.clone(),
                    ctx.parquet_lru.clone(),
                    order,
                    pk_indices,
                )
                .unwrap(),
            });
        }
        Ok(())
    }

    // Uses the changes made in the `level_slice` and adds the corresponding edits:
    // one `Add` per live scope, followed by the latest timestamp and log length.
    // NOTE(review): emits `NewLogLength { len: 0 }` rather than
    // `self.log_length` — presumably an intentional reset when the version set
    // is re-logged; confirm against the edit-replay code.
    pub(crate) fn to_edits(&self) -> Vec<VersionEdit<<R::Schema as Schema>::Key>> {
        let mut edits = Vec::new();

        for (level, scopes) in self.level_slice.iter().enumerate() {
            for scope in scopes {
                edits.push(VersionEdit::Add {
                    level: level as u8,
                    scope: scope.clone(),
                })
            }
        }
        edits.push(VersionEdit::LatestTimeStamp { ts: self.load_ts() });
        edits.push(VersionEdit::NewLogLength { len: 0 });
        edits
    }
}

impl<R> Drop for Version<R>
where
    R: Record,
{
    fn drop(&mut self) {
        if let Err(err) = self.clean_sender.send(CleanTag::Clean { ts: self.ts }) {
            error!("[Version Drop Error]: {}", err)
        }
    }
}
