use keys::VddKey;
pub use rust_rocksdb;
use rust_rocksdb::statistics::StatsLevel;
use rust_rocksdb::{
    ColumnFamilyDescriptor, MultiThreaded, OptimisticTransactionDB, Options,
    WriteBatchWithTransaction,
};
use std::sync::LazyLock;
use visit_dd_core::i18n_text::E_ROCKSDB;
use visit_dd_core::proj::APP_CONFIG;
use visit_dd_core::{extract_u64_from_key, i18n, Astr};

// Storage sub-modules: roughly one module per persisted entity, plus the
// SQL-backed implementations (`sqlx`) and the `subscribe` module.
pub mod admin_db;
pub mod cache;
pub mod field_db;
pub mod gateway_db;
// Key types (`VddKey`) used throughout this module; crate-internal only.
pub(crate) mod keys;
pub mod log_db;
pub mod message_db;
pub mod model_db;
pub mod query_db;
pub mod record_db;
pub mod record_rule_db;
pub mod record_version_db;
pub mod sqlx;
pub mod subscribe;
#[cfg(test)]
mod test;
pub mod user_db;
pub mod value_db;
pub mod visit_db;

/// Tears down the RocksDB store at `APP_CONFIG.db_dir`.
///
/// NOTE(review): `OptimisticTransactionDB::destroy` deletes the database
/// files on disk — this is far more than a "disconnect". Confirm callers
/// expect the on-disk data to be wiped; if not, renaming or reworking this
/// is warranted.
///
/// # Panics
/// Panics with the localized `E_ROCKSDB` message if destruction fails
/// (e.g. the database is still open elsewhere).
pub fn disconnect_db() {
    OptimisticTransactionDB::<MultiThreaded>::destroy(
        &Options::default(),
        APP_CONFIG.db_dir.as_ref(),
    )
    .expect(i18n!(E_ROCKSDB).as_ref());
}

/// Shared RocksDB options, built once on first access and reused for the
/// database handle and every column family descriptor.
static DB_OPT: LazyLock<Options> = LazyLock::new(|| {
    let mut opts = Options::default();
    // Create the database and any missing column families on first open.
    opts.create_if_missing(true);
    opts.create_missing_column_families(true);
    // Background-job parallelism and WAL retention window (seconds).
    opts.increase_parallelism(4);
    opts.set_wal_ttl_seconds(120);
    opts.set_bloom_locality(4);
    // Collect the full statistics set; see `get_rocks_statistics`.
    opts.enable_statistics();
    opts.set_statistics_level(StatsLevel::All);
    opts
});

// Column family names registered when `ROCKSDB` is opened.
pub const CF_META: &str = "META";
pub const CF_FV: &str = "FV";
pub const CF_LOG: &str = "LOG";
pub const CF_UNIQUE: &str = "UNIQUE";
pub const CF_CACHE: &str = "CACHE";
// Recycle-bin column family; the `move_to_trash*` helpers copy deleted
// values here before removing them from the live store.
pub const CF_TRASH: &str = "TRASH";

pub static ROCKSDB: LazyLock<OptimisticTransactionDB> = LazyLock::new(|| {
    let o = LazyLock::force(&DB_OPT);
    let cf_meta = ColumnFamilyDescriptor::new(CF_META, o.clone());
    let cf_fv = ColumnFamilyDescriptor::new(CF_FV, o.clone());
    let cf_log = ColumnFamilyDescriptor::new(CF_LOG, o.clone());
    let cf_unique = ColumnFamilyDescriptor::new(CF_UNIQUE, o.clone());
    let cf_cache = ColumnFamilyDescriptor::new(CF_CACHE, o.clone());
    let cf_trash = ColumnFamilyDescriptor::new(CF_TRASH, o.clone());
    let db = OptimisticTransactionDB::open_cf_descriptors(
        &o,
        APP_CONFIG.db_dir.as_ref(),
        vec![cf_meta, cf_fv, cf_log, cf_unique, cf_cache, cf_trash],
    )
    .expect(i18n!(E_ROCKSDB).as_ref());
    db
});

/// Placeholder for dumping RocksDB statistics.
///
/// NOTE(review): currently a no-op — the statistics dump is commented out.
/// `DB_OPT` enables `StatsLevel::All`, so statistics ARE being collected
/// and paid for at runtime; either wire this function up or drop the
/// statistics options to avoid the overhead.
pub async fn get_rocks_statistics() {
    // DB_OPT.get_statistics().map(|s| println!("{}", s));
}

/// Deletes the first (smallest-id) child under a node.
///
/// Because RocksDB's ordering of the children is not relied upon here, the
/// whole prefix range is scanned and the minimum id is selected.
///
/// Bug fix: the original comparison (`node_id > id`) tracked the LARGEST
/// id, contradicting the documented intent ("select the smallest id").
/// The scan now tracks the minimum via `Option<u64>`; when no child exists
/// nothing is queued (the original would queue a harmless delete of
/// `"{head}0"`).
///
/// # Panics
/// Panics if the key head does not end with `/`, or on iterator/UTF-8
/// errors.
fn delete_first_child(batch: &mut WriteBatchWithTransaction<true>, vk: &VddKey) {
    let key_head = vk.to_string();
    assert!(key_head.ends_with('/'), "key head must end with /");
    let iter = match vk.db_name() {
        None => ROCKSDB.prefix_iterator(&key_head),
        Some(cf) => ROCKSDB.prefix_iterator_cf(cf, &key_head),
    };
    // Track the smallest child id seen within the prefix range.
    let mut first_id: Option<u64> = None;
    for entry in iter {
        let (k, _) = entry.unwrap();
        let key_string = String::from_utf8(k.to_vec()).unwrap();
        if key_string.starts_with(&key_head) {
            let node_id = extract_u64_from_key(&key_string);
            if first_id.map_or(true, |current| node_id < current) {
                first_id = Some(node_id);
            }
        }
    }
    if let Some(id) = first_id {
        batch.delete(format!("{}{}", key_head, id))
    }
}

/// Deletes a key and its sub-keys whose stored value equals `value`
/// (prefix match on the key string, exact match on the value bytes).
///
/// NOTE(review): deletes are queued against the default column family even
/// when the iteration ran over a named CF — confirm this asymmetry is
/// intentional (compare `move_to_trash_by_key_head`, which uses
/// `delete_cf`).
fn delete_by_value(batch: &mut WriteBatchWithTransaction<true>, vk: &VddKey, value: &[u8]) {
    let k = &vk.to_string();
    let iter = match vk.db_name() {
        None => ROCKSDB.prefix_iterator(k),
        Some(cf) => ROCKSDB.prefix_iterator_cf(cf, k),
    };
    let mut hit = false;
    for item in iter {
        let (rk, rv) = item.unwrap();
        let k_str = String::from_utf8(rk.to_vec()).unwrap();
        if k_str.starts_with(k) {
            hit = true;
            // Compare as slices instead of allocating a fresh Vec per entry
            // (the original used `rv.to_vec() == value`).
            if rv.as_ref() == value {
                batch.delete(k_str);
            }
        } else if hit {
            // Keys are sorted: once past the prefix range, nothing more can
            // match — same early exit as `load_by_key_head`.
            break;
        }
    }
}

/// Deletes a key and all of its sub-keys (prefix match on the key string).
///
/// NOTE(review): when `kh.db_name()` names a column family the iteration
/// runs over that CF, but the delete is queued against the default CF via
/// `batch.delete` — compare `move_to_trash_by_key_head`, which uses
/// `delete_cf`. Confirm this asymmetry is intentional.
pub fn delete_by_key_head(batch: &mut WriteBatchWithTransaction<true>, kh: &VddKey) {
    let k = &kh.to_string();
    let iter = match kh.db_name() {
        None => ROCKSDB.prefix_iterator(k),
        Some(cf) => ROCKSDB.prefix_iterator_cf(cf, k),
    };
    // Early exit once the sorted iteration leaves the prefix range,
    // mirroring `load_by_key_head`.
    let mut hit = false;
    for x in iter {
        let (key, _) = x.unwrap();
        let key = String::from_utf8(key.to_vec()).unwrap();
        if key.starts_with(k) {
            hit = true;
            batch.delete(key);
        } else if hit {
            break;
        }
    }
}

/// Deletes a key and all of its sub-keys from the trash column family
/// (prefix match on the key string).
///
/// NOTE(review): the iteration happens in `CF_TRASH`, but the deletes are
/// queued with `batch.delete` against the default column family — confirm
/// whether `batch.delete_cf(&cf, …)` was intended here.
pub fn delete_by_key_head_in_trash(batch: &mut WriteBatchWithTransaction<true>, kh: &VddKey) {
    let k = &kh.to_string();
    let cf = ROCKSDB.cf_handle(CF_TRASH).unwrap();
    let iter = ROCKSDB.prefix_iterator_cf(cf, k);
    // Early exit once the sorted iteration leaves the prefix range,
    // mirroring `load_by_key_head`.
    let mut hit = false;
    for x in iter {
        let (key, _) = x.unwrap();
        let key = String::from_utf8(key.to_vec()).unwrap();
        if key.starts_with(k) {
            hit = true;
            batch.delete(key);
        } else if hit {
            break;
        }
    }
}

/// Loads all values whose key starts with `vk`'s string form.
pub(crate) fn load_by_key_head(vk: &VddKey) -> Vec<Box<[u8]>> {
    let prefix = &vk.to_string();
    let iter = match vk.db_name() {
        None => ROCKSDB.prefix_iterator(prefix),
        Some(cf) => ROCKSDB.prefix_iterator_cf(cf, prefix),
    };
    let mut values = Vec::<Box<[u8]>>::new();
    let mut in_range = false;
    for entry in iter {
        let (raw_key, raw_value) = entry.unwrap();
        let key = String::from_utf8(raw_key.to_vec()).unwrap();
        if key.starts_with(prefix) {
            in_range = true;
            values.push(raw_value);
        } else if in_range {
            // Keys are sorted: once past the prefix range we are done.
            break;
        }
    }
    values
}

/// Loads all key/value pairs whose key starts with `vk`'s string form.
pub(crate) fn load_by_key_head_with_key(vk: &VddKey) -> Vec<(Astr, Box<[u8]>)> {
    let prefix = &vk.to_string();
    let iter = match vk.db_name() {
        None => ROCKSDB.prefix_iterator(prefix),
        Some(cf) => ROCKSDB.prefix_iterator_cf(cf, prefix),
    };
    let mut pairs = Vec::<(Astr, Box<[u8]>)>::new();
    let mut in_range = false;
    for entry in iter {
        let (raw_key, raw_value) = entry.unwrap();
        let key = String::from_utf8(raw_key.to_vec()).unwrap();
        if key.starts_with(prefix) {
            in_range = true;
            pairs.push((key.into(), raw_value));
        } else if in_range {
            // Keys are sorted: once past the prefix range we are done.
            break;
        }
    }
    pairs
}

/// Counts entries under `vk`'s key prefix whose value satisfies `filter`.
///
/// NOTE(review): the `Fn(&Box<[u8]>)` bound is kept for caller
/// compatibility, though `Fn(&[u8])` would be more idiomatic
/// (clippy `borrowed_box`).
pub(crate) fn count_in_key_head<F>(vk: &VddKey, filter: F) -> usize
where
    F: Fn(&Box<[u8]>) -> bool,
{
    let key_head = &vk.to_string();
    let iter = match vk.db_name() {
        None => ROCKSDB.prefix_iterator(key_head),
        Some(cf) => ROCKSDB.prefix_iterator_cf(cf, key_head),
    };
    let mut count = 0;
    // Early exit once the sorted iteration leaves the prefix range,
    // mirroring `load_by_key_head` (the original scanned to the end).
    let mut hit = false;
    for x in iter {
        let (k, v) = x.unwrap();
        let key_string = String::from_utf8(k.to_vec()).unwrap();
        if key_string.starts_with(key_head) {
            hit = true;
            if filter(&v) {
                count += 1;
            }
        } else if hit {
            break;
        }
    }
    count
}

/// Moves a key's data into the trash (recycle-bin) column family and
/// deletes the original entry.
///
/// If `buffer` is supplied it is stored in the trash directly; otherwise
/// the current value is read from the live store (from `vk`'s CF when
/// named, else the default CF). When no live value exists, nothing is
/// copied to trash but the delete is still queued.
///
/// # Panics
/// Panics with the localized `E_ROCKSDB` message when the read fails, and
/// on a missing `CF_TRASH` handle.
fn move_to_trash(
    batch: &mut WriteBatchWithTransaction<true>,
    vk: &VddKey,
    buffer: Option<Vec<u8>>,
) {
    let k = &vk.to_string();
    let cf_trash = ROCKSDB.cf_handle(CF_TRASH).unwrap();
    // Resolve the payload to preserve: a caller-provided buffer wins,
    // otherwise read the current value. (Flattened from the original
    // nested `.map(|r| r.map(...))` chain, whose result was discarded.)
    let payload = match buffer {
        Some(bytes) => Some(bytes),
        None => match vk.db_name() {
            Some(cf) => ROCKSDB.get_cf(cf, k),
            None => ROCKSDB.get(k),
        }
        .expect(i18n!(E_ROCKSDB).as_ref()),
    };
    if let Some(bytes) = payload {
        batch.put_cf(&cf_trash, k, bytes);
    }
    // Remove the original entry from its column family (or the default CF).
    match vk.db_name() {
        Some(cf) => {
            batch.delete_cf(cf, k);
        }
        None => {
            batch.delete(k);
        }
    }
}

/// Moves every entry under `vk`'s key prefix into the trash column family
/// and deletes the originals from their source column family.
fn move_to_trash_by_key_head(batch: &mut WriteBatchWithTransaction<true>, vk: &VddKey) {
    let prefix = &vk.to_string();
    let iter = match vk.db_name() {
        None => ROCKSDB.prefix_iterator(prefix),
        Some(cf) => ROCKSDB.prefix_iterator_cf(cf, prefix),
    };
    let trash_cf = ROCKSDB.cf_handle(CF_TRASH).unwrap();
    let mut matched = false;
    for entry in iter {
        let (raw_key, raw_value) = entry.unwrap();
        let key = String::from_utf8(raw_key.to_vec()).unwrap();
        if !key.starts_with(prefix) {
            // Keys are sorted: once past the prefix range we are done.
            if matched {
                break;
            }
            continue;
        }
        matched = true;
        // Copy into trash, then remove from the source column family
        // (or the default CF when none is named).
        batch.put_cf(&trash_cf, &key, raw_value);
        match vk.db_name() {
            Some(cf) => {
                batch.delete_cf(cf, &key);
            }
            None => {
                batch.delete(&key);
            }
        }
    }
}

/// Dispatches a call to either the embedded RocksDB implementation or a
/// SQL (`sqlx`) implementation, depending on `APP_CONFIG.meta_store`.
///
/// * No `meta_store` configured → `$rdb_fn` is invoked directly (note:
///   NOT awaited — the RocksDB path is synchronous here).
/// * `meta_store` present → the matching pool is fetched from the cache
///   and `$sqlx_fn` is awaited. The Postgres arm only exists when the
///   `postgres` feature is enabled.
///
/// The second rule forwards any extra arguments to both functions.
///
/// NOTE(review): must be expanded inside an async context when a
/// `meta_store` is configured; the call site must also have `APP_CONFIG`
/// and `DataStoreEngine` in scope.
#[macro_export]
macro_rules! run_by_store {
    ($rdb_fn:expr, $sqlx_fn:expr) => {
        match &APP_CONFIG.meta_store {
            None => $rdb_fn(),
            Some(ds) => match ds.store_engine {
                #[cfg(feature = "postgres")]
                DataStoreEngine::Postgres => {
                    $sqlx_fn(&crate::cache::get_pg_pool(ds.as_ref()).await).await
                }
                DataStoreEngine::Mysql => {
                    $sqlx_fn(&crate::cache::get_mysql_pool(ds.as_ref()).await).await
                }
            },
        }
    };
    // Variadic form: forwards `$content` arguments to the chosen function.
    ($rdb_fn:expr, $sqlx_fn:expr, $( $content:expr ), *) => {
        match &APP_CONFIG.meta_store {
            None => $rdb_fn($( $content ), *),
            Some(ds) => match ds.store_engine {
                #[cfg(feature = "postgres")]
                DataStoreEngine::Postgres => {
                    $sqlx_fn(&crate::cache::get_pg_pool(ds.as_ref()).await, $( $content ), *).await
                }
                DataStoreEngine::Mysql => {
                    $sqlx_fn(&crate::cache::get_mysql_pool(ds.as_ref()).await, $( $content ), *).await
                }
            },
        }
    };
}

/// Like `run_by_store!`, but for call sites that only make sense with a
/// SQL backend: dispatches `$sqlx_fn` against the configured pool and
/// panics if no `meta_store` is configured (i.e. the macro was invoked on
/// the RocksDB-only path by mistake).
///
/// NOTE(review): must be expanded inside an async context; the call site
/// must have `APP_CONFIG` and `DataStoreEngine` in scope. The Postgres arm
/// only exists when the `postgres` feature is enabled.
#[macro_export]
macro_rules! sqlx_run_by_store {
    ($sqlx_fn:expr) => {
        match &APP_CONFIG.meta_store {
            Some(ds) => match ds.store_engine {
                #[cfg(feature = "postgres")]
                DataStoreEngine::Postgres => {
                    $sqlx_fn(&crate::cache::get_pg_pool(ds.as_ref()).await).await
                }
                DataStoreEngine::Mysql => {
                    $sqlx_fn(&crate::cache::get_mysql_pool(ds.as_ref()).await).await
                }
            },
            None => {panic!("宏调用错误")},
        }
    };
    // Variadic form: forwards `$content` arguments to `$sqlx_fn`.
    ($sqlx_fn:expr, $( $content:expr ), *) => {
        match &APP_CONFIG.meta_store {
            Some(ds) => match ds.store_engine {
                #[cfg(feature = "postgres")]
                DataStoreEngine::Postgres => {
                    $sqlx_fn(&crate::cache::get_pg_pool(ds.as_ref()).await, $( $content ), *).await
                }
                DataStoreEngine::Mysql => {
                    $sqlx_fn(&crate::cache::get_mysql_pool(ds.as_ref()).await, $( $content ), *).await
                }
            },
            None => {panic!("宏调用错误")},
        }
    };
}
