use std::{
    collections::HashSet,
    path::{Path, PathBuf},
    sync::Arc,
};

use rocksdb::{ColumnFamily, ColumnFamilyDescriptor, DB, IteratorMode, Options};

use crate::{
    app::{
        make_chunk_file_name, make_file_metadata_path_name,
        store::{PendingDownloadRecord, PublishedFileRecord, Store},
    },
    error::FileStoreError,
};

/// RocksDB-backed implementation of the file [`Store`].
///
/// The [`DB`] handle is wrapped in an [`Arc`], so cloning a `RocksDb` is cheap
/// and every clone shares the same open database.
#[derive(Debug, Clone)]
pub struct RocksDb {
    // Shared handle to the opened database; see `RocksDb::new`.
    db: Arc<DB>,
}

/// Column family holding CBOR-serialized `PublishedFileRecord`s, keyed by file id.
const PUBLISHED_FILE_COLUMN_FAMILY_NAME: &str = "published_files";
/// Column family holding CBOR-serialized `PendingDownloadRecord`s, keyed by file id.
const PENDING_DOWNLOAD_COLUMN_FAMILY_NAME: &str = "pending_downloads";
// NOTE(review): apparently reserved for a future download-progress column family.
// const DOWNLOAD_PROGRESSES_COLUMN_FAMILY_NAME: &str = "download_progresses";

impl RocksDb {
    /// Opens (or creates) the database under `folder`, ensuring both the
    /// published-files and pending-downloads column families exist.
    ///
    /// # Errors
    /// Returns [`FileStoreError::RocksDbError`] if the database cannot be opened.
    pub fn new(folder: impl AsRef<Path>) -> Result<Self, FileStoreError> {
        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.create_missing_column_families(true);
        let published_files_cfs =
            ColumnFamilyDescriptor::new(PUBLISHED_FILE_COLUMN_FAMILY_NAME, opts.clone());
        let pending_downloads_cfs =
            ColumnFamilyDescriptor::new(PENDING_DOWNLOAD_COLUMN_FAMILY_NAME, opts.clone());
        let db = DB::open_cf_descriptors(
            &opts,
            folder,
            [published_files_cfs, pending_downloads_cfs],
        )
        .map_err(FileStoreError::RocksDbError)?;
        Ok(Self { db: Arc::new(db) })
    }

    /// Resolves a column-family handle by name.
    ///
    /// # Errors
    /// Returns [`FileStoreError::ColumnFamilyMissing`] if the family was not
    /// opened with the database.
    fn column_family(&self, family_name: &str) -> Result<&ColumnFamily, FileStoreError> {
        self.db
            .cf_handle(family_name)
            .ok_or_else(|| FileStoreError::ColumnFamilyMissing(family_name.to_string()))
    }

    /// Inserts (or overwrites) `key -> value` in the given column family.
    fn add_record<K, V>(&self, family_name: &str, key: K, value: V) -> Result<(), FileStoreError>
    where
        K: AsRef<[u8]>,
        V: AsRef<[u8]>,
    {
        let cf = self.column_family(family_name)?;
        self.db
            .put_cf(cf, &key, &value)
            .map_err(FileStoreError::RocksDbError)
    }

    /// Fetches and deserializes a single record from a column family.
    ///
    /// Returns `Ok(None)` when the key is absent.
    ///
    /// # Errors
    /// A stored value that fails CBOR deserialization is reported as
    /// [`FileStoreError::CborError`] instead of being silently treated as
    /// missing (the previous behavior hid data corruption behind `Ok(None)`).
    fn fetch_one<K, V>(&self, family_name: &str, key: K) -> Result<Option<V>, FileStoreError>
    where
        K: AsRef<[u8]>,
        V: TryFrom<Vec<u8>, Error = serde_cbor::Error>,
    {
        let cf = self.column_family(family_name)?;
        self.db
            .get_cf(cf, key)
            .map_err(FileStoreError::RocksDbError)?
            // `get_cf` already yields an owned Vec<u8>; the old `to_vec()`
            // call cloned the buffer a second time for no benefit.
            .map(|bytes| bytes.try_into().map_err(FileStoreError::CborError))
            .transpose()
    }

    /// Iterates over every record in a column family, applying `filter` when
    /// one is given (`None` keeps everything).
    ///
    /// NOTE(review): entries that fail iteration or CBOR deserialization are
    /// silently skipped here; callers never observe per-record errors. This
    /// matches the original behavior for bulk scans.
    fn fetch_all<V, F>(
        &self,
        family_name: &str,
        filter: Option<F>,
    ) -> Result<impl Iterator<Item = V>, FileStoreError>
    where
        V: TryFrom<Vec<u8>, Error = serde_cbor::Error>,
        F: Fn(&V) -> bool + 'static + Send + Sync,
    {
        let cf = self.column_family(family_name)?;
        // Normalize the optional filter into a single boxed callable.
        let keep: Box<dyn Fn(&V) -> bool + Send + Sync> = match filter {
            Some(closure) => Box::new(closure),
            None => Box::new(|_| true),
        };
        let iter = self
            .db
            .iterator_cf(cf, IteratorMode::Start)
            .filter_map(move |entry| {
                let (_, value) = entry.ok()?;
                // `into_vec` converts the boxed slice without copying,
                // unlike the previous `to_vec()`.
                let record: V = value.into_vec().try_into().ok()?;
                keep(&record).then_some(record)
            });

        Ok(iter)
    }

    /// Deletes `key` from the given column family (a no-op if the key is absent).
    fn remove_record<K>(&self, family_name: &str, key: K) -> Result<(), FileStoreError>
    where
        K: AsRef<[u8]>,
    {
        let cf = self.column_family(family_name)?;
        self.db
            .delete_cf(cf, key)
            .map_err(FileStoreError::RocksDbError)
    }
}

impl Store for RocksDb {
    fn add_published_file_record(&self, record: PublishedFileRecord) -> Result<(), FileStoreError> {
        let key = record.key();
        let value: Vec<u8> = record.try_into().map_err(FileStoreError::CborError)?;
        self.add_record(PUBLISHED_FILE_COLUMN_FAMILY_NAME, key, value)?;
        Ok(())
    }

    fn published_file_exists(&self, file_id: u64) -> Result<bool, FileStoreError> {
        let cf = self.column_family(PUBLISHED_FILE_COLUMN_FAMILY_NAME)?;
        let file_id_key = file_id.to_be_bytes();
        Ok(self.db.key_may_exist_cf(cf, file_id_key))
    }

    fn published_file_metadata_path(&self, file_id: u64) -> Result<PathBuf, FileStoreError> {
        let key = file_id.to_be_bytes().to_vec();
        let result = self
            .fetch_one::<Vec<u8>, PublishedFileRecord>(PUBLISHED_FILE_COLUMN_FAMILY_NAME, key)?
            .ok_or(FileStoreError::PublishedFileNotFound(file_id))?;

        Ok(make_file_metadata_path_name(&result.chunks_directory))
    }

    fn published_file_chunks_dir(&self, file_id: u64) -> Result<PathBuf, FileStoreError> {
        let key = file_id.to_be_bytes().to_vec();
        let result = self
            .fetch_one::<Vec<u8>, PublishedFileRecord>(PUBLISHED_FILE_COLUMN_FAMILY_NAME, key)?
            .ok_or(FileStoreError::PublishedFileNotFound(file_id))?;

        Ok(result.chunks_directory)
    }

    fn fetch_published_file_chunk_path(
        &self,
        file_id: u64,
        chunk_id: usize,
    ) -> Result<Option<PathBuf>, FileStoreError> {
        self.published_file_chunks_dir(file_id)
            .map(|chunks_dir| chunks_dir.join(make_chunk_file_name(chunk_id)))
            .map(Some)
    }

    fn fetch_all_published_files(
        &self,
    ) -> Result<impl Iterator<Item = PublishedFileRecord> + Send + Sync, FileStoreError> {
        self.fetch_all::<PublishedFileRecord, Box<dyn Fn(&PublishedFileRecord) -> bool + Send + Sync>>(
            PUBLISHED_FILE_COLUMN_FAMILY_NAME,
            None,
        )
    }

    fn fetch_all_public_published_files(
        &self,
    ) -> Result<impl Iterator<Item = PublishedFileRecord> + Send + Sync, FileStoreError> {
        let filter = |v: &PublishedFileRecord| v.public;
        self.fetch_all::<PublishedFileRecord, _>(PUBLISHED_FILE_COLUMN_FAMILY_NAME, Some(filter))
    }

    fn add_pending_download(&self, record: PendingDownloadRecord) -> Result<(), FileStoreError> {
        let key = record.key();
        let value: Vec<u8> = record.try_into().map_err(FileStoreError::CborError)?;
        self.add_record(PENDING_DOWNLOAD_COLUMN_FAMILY_NAME, key, value)?;
        Ok(())
    }

    fn fetch_all_pending_downloads(
        &self,
    ) -> Result<impl Iterator<Item = PendingDownloadRecord> + Send, FileStoreError> {
        self.fetch_all::<
            PendingDownloadRecord,
            Box<dyn Fn(&PendingDownloadRecord) -> bool + Send + Sync>,
        >(PENDING_DOWNLOAD_COLUMN_FAMILY_NAME, None)
    }

    fn add_downloaded_chunk_to_pending_download(
        &self,
        file_id: u64,
        chunk_id: usize,
    ) -> Result<(), FileStoreError> {
        let key = file_id.to_be_bytes().to_vec();
        if let Some(mut record) = self
            .fetch_one::<Vec<u8>, PendingDownloadRecord>(PENDING_DOWNLOAD_COLUMN_FAMILY_NAME, key)?
        {
            record.downloaded_chunks.insert(chunk_id);
            self.add_pending_download(record)?
        };
        Err(FileStoreError::ChunkNotFound(chunk_id))
    }

    fn already_downloaded_chunks_in_pending_download(
        &self,
        file_id: u64,
    ) -> Result<HashSet<usize>, FileStoreError> {
        let key = file_id.to_be_bytes().to_vec();
        if let Some(record) = self
            .fetch_one::<Vec<u8>, PendingDownloadRecord>(PENDING_DOWNLOAD_COLUMN_FAMILY_NAME, key)?
        {
            Ok(record.downloaded_chunks)
        } else {
            Ok(HashSet::new())
        }
    }

    fn check_downloaded_chunks_in_pending_download(
        &self,
        file_id: u64,
        chunk_id: usize,
    ) -> Result<bool, FileStoreError> {
        let key = file_id.to_be_bytes().to_vec();
        if let Some(record) = self
            .fetch_one::<Vec<u8>, PendingDownloadRecord>(PENDING_DOWNLOAD_COLUMN_FAMILY_NAME, key)?
        {
            Ok(record.downloaded_chunks.contains(&chunk_id))
        } else {
            Ok(false)
        }
    }

    fn fetch_pending_downloaded_chunk_path(
        &self,
        file_id: u64,
        chunk_id: usize,
    ) -> Result<Option<PathBuf>, FileStoreError> {
        let key = file_id.to_be_bytes().to_vec();
        if let Some(record) = self
            .fetch_one::<Vec<u8>, PendingDownloadRecord>(PENDING_DOWNLOAD_COLUMN_FAMILY_NAME, key)?
        {
            let chunk_dir = record.download_path;
            Ok(Some(chunk_dir.join(make_chunk_file_name(chunk_id))))
        } else {
            Ok(None)
        }
    }

    fn remove_pending_download(&self, file_id: u64) -> Result<(), FileStoreError> {
        let key = file_id.to_be_bytes().to_vec();
        self.remove_record(PENDING_DOWNLOAD_COLUMN_FAMILY_NAME, &key)?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    // Fix: the original was `#[tokio::test] async fn` but contained no
    // `.await`; a plain synchronous test avoids spinning up an async runtime.
    #[test]
    fn test_published_file_record() -> anyhow::Result<()> {
        let temp_dir = tempdir()?;
        let store = RocksDb::new(temp_dir.path())?;
        let record = PublishedFileRecord::new(
            1,
            "test.txt".to_string(),
            temp_dir.path().to_path_buf(),
            true,
        );
        // 1. add published file record
        let record_key = record.key();
        store.add_published_file_record(record.clone())?;

        // 2. check published file exists
        let exists = store.published_file_exists(1)?;
        assert!(exists);

        // Verify the raw stored bytes round-trip back to the original record.
        let cf = store.column_family(PUBLISHED_FILE_COLUMN_FAMILY_NAME)?;
        let value = store.db.get_cf(cf, record_key)?;
        let ret: Vec<u8> = record.clone().try_into()?;
        assert_eq!(value, Some(ret));
        let value = value.unwrap();
        let get_record: PublishedFileRecord = value.try_into()?;
        assert_eq!(get_record, record);

        // 3. check metadata file path
        let metadata_path = store.published_file_metadata_path(1)?;
        assert_eq!(metadata_path, make_file_metadata_path_name(temp_dir.path()));

        // 4. fetch all published files
        let files = store.fetch_all_published_files()?;
        let files: Vec<PublishedFileRecord> = files.collect();
        assert_eq!(files.len(), 1);
        assert_eq!(files[0], record);

        // 5. fetch all public published files
        let public_files = store.fetch_all_public_published_files()?;
        let public_files: Vec<PublishedFileRecord> = public_files.collect();
        assert_eq!(public_files.len(), 1);
        assert_eq!(public_files[0], record);

        Ok(())
    }
}
