use crate::keys::VddKey;
use crate::{load_by_key_head_with_key, model_db, ROCKSDB};
use index::reload_index;
use rust_rocksdb::WriteBatchWithTransaction;
use sqlx::MySqlPool;
#[cfg(feature = "postgres")]
use sqlx::PgPool;
use std::collections::HashMap;
use std::sync::{Arc, LazyLock, RwLock};
use visit_dd_core::field::{VddField, VddFieldType};
use visit_dd_core::i18n_text::E_ROCKSDB;
use visit_dd_core::model::VddSchema;
use visit_dd_core::proj::DataSourceTrait;
use visit_dd_core::{extract_u64_from_key, i18n, Ares, Astr, RuntimeCache, EMPTY_FID};

/// Maintain the label/name cache: stage a label write for `id` into `batch`.
/// The entry is persisted when the caller commits the batch.
pub(crate) fn update_label_cache(
    batch: &mut WriteBatchWithTransaction<true>,
    id: u64,
    label: &str,
) {
    let key = VddKey::CacheLabel(id);
    batch.put_cf(&key.db_name().unwrap(), key.to_string(), label);
}

///
pub(crate) fn get_label_cache() -> HashMap<u64, Astr> {
    let mut r = HashMap::<u64, Astr>::new();
    load_by_key_head_with_key(&VddKey::CacheLabel(0))
        .iter()
        .for_each(|x| {
            let id = extract_u64_from_key(&x.0);
            let label = String::from_utf8(x.1.to_vec()).unwrap();
            r.insert(id, label.into());
        });
    r
}

mod index {
    use rust_rocksdb::WriteBatchWithTransaction;
    use serde_json::json;
    use visit_dd_core::Ares;

    use crate::ROCKSDB;
    use std::{
        collections::{hash_map::Entry, HashMap},
        sync::{LazyLock, RwLock},
    };

    use super::IndexChange;

    /// In-memory read cache layered over the indexes persisted in RocksDB.
    static INDEX_CACHE: LazyLock<RwLock<HashMap<String, Vec<u64>>>> =
        LazyLock::new(|| RwLock::new(HashMap::new()));

    /// Cache-only lookup; returns `None` on a miss (or a poisoned lock).
    pub(super) fn get_index(key: &str) -> Option<Vec<u64>> {
        match INDEX_CACHE.read() {
            Ok(map) => map.get(key).cloned(),
            Err(_) => None,
        }
    }

    /// Load the latest index for `key` from RocksDB and refresh the cache.
    /// A missing or unreadable entry yields an empty index.
    pub(super) fn reload_index(key: &str) -> Vec<u64> {
        let index = match ROCKSDB.get(key) {
            Ok(Some(buffer)) => serde_json::from_slice::<Vec<u64>>(&buffer).unwrap(),
            _ => Vec::new(),
        };
        INDEX_CACHE
            .write()
            .unwrap()
            .insert(key.to_string(), index.clone());
        index
    }

    /// Apply a batch of index changes: compute the net result per key, persist
    /// it in a single RocksDB write batch, and only then publish it to the
    /// cache — so a failed write can no longer leave the cache ahead of the
    /// database.
    pub(super) fn change_index(index_changes: &[IndexChange]) -> Ares {
        let mut cache = INDEX_CACHE.write().unwrap();
        // Net result of this batch. The shared cache must not be mutated while
        // applying: later changes to a key must see earlier ones from the same
        // batch, but the cache itself is only updated once the write succeeds.
        let mut new_index_map = HashMap::<String, Vec<u64>>::new();
        for change in index_changes {
            // Seed the working copy once per key: pending batch value first,
            // then the cache, then RocksDB, finally an empty index.
            let indexs = match new_index_map.entry(change.key.to_string()) {
                Entry::Occupied(slot) => slot.into_mut(),
                Entry::Vacant(slot) => {
                    let seed = match cache.get(slot.key()) {
                        Some(cached) => cached.clone(),
                        None => match ROCKSDB.get(slot.key()) {
                            Ok(Some(buffer)) => {
                                serde_json::from_slice::<Vec<u64>>(&buffer).unwrap()
                            }
                            _ => Vec::new(),
                        },
                    };
                    slot.insert(seed)
                }
            };
            match change.a {
                visit_dd_core::AC_DELETE => {
                    indexs.retain(|item| *item != change.data);
                }
                // Any other action code is an insert; keep ids unique while
                // preserving insertion order (callers rely on Vec ordering).
                _ => {
                    if !indexs.contains(&change.data) {
                        indexs.push(change.data);
                    }
                }
            }
        }
        // Persist first …
        let mut batch = WriteBatchWithTransaction::<true>::default();
        for (k, v) in new_index_map.iter() {
            batch.put(k, json!(v).to_string());
        }
        if let Err(e) = ROCKSDB.write(batch) {
            return Err(e.to_string().into());
        }
        // … and only then update the cache (moves the map, no clone needed).
        cache.extend(new_index_map);
        Ok(())
    }
}

/// Read one index. A `Vec` is used rather than a `HashSet` because a
/// `HashSet` does not guarantee ordering, and the order of ids matters.
pub(crate) fn get_index(key: &str) -> Vec<u64> {
    index::get_index(key).unwrap_or_else(|| reload_index(key))
}

/// One pending mutation to a persisted index.
#[derive(Debug)]
pub struct IndexChange {
    // Key of the index being changed.
    pub key: VddKey,
    // Action code: `visit_dd_core::AC_DELETE` removes `data` from the index,
    // any other value inserts it (see `index::change_index`).
    pub a: u8,
    // The record id being added to or removed from the index.
    pub data: u64,
}

impl IndexChange {
    pub(crate) fn new(key: VddKey, a: u8, data: u64) -> Self {
        Self { key, a, data }
    }
}

/// Apply a batch of index changes: persists the net result to RocksDB and
/// refreshes the in-memory index cache.
pub(crate) fn apply_index_change(index_changes: &[IndexChange]) -> Ares {
    index::change_index(index_changes)
}

pub(crate) static RECORD_COUNT_CACHE: LazyLock<RwLock<HashMap<u64, RuntimeCache<usize>>>> =
    LazyLock::new(|| RwLock::new(HashMap::<u64, RuntimeCache<usize>>::new()));

// Shared Postgres connection pools, keyed by connection URL.
#[cfg(feature = "postgres")]
static PG_POOLS: LazyLock<tokio::sync::RwLock<HashMap<Astr, PgPool>>> =
    LazyLock::new(|| tokio::sync::RwLock::new(HashMap::<Astr, PgPool>::new()));

/// Get (or lazily create) the shared Postgres pool for `store`'s connection URL.
///
/// # Panics
/// Panics if the first connection attempt for a URL fails.
#[cfg(feature = "postgres")]
pub(crate) async fn get_pg_pool(store: &impl DataSourceTrait) -> PgPool {
    let url = store.to_url();
    let mut pool_map = PG_POOLS.write().await;
    // Single lookup; the original did contains_key + get and also cloned the
    // `Option<&PgPool>` before unwrapping, which was a no-op.
    if let Some(pool) = pool_map.get(&url) {
        return pool.clone();
    }
    // NOTE(review): the write lock is held across the connect await — this
    // serializes first connections but prevents duplicate pools per URL.
    let pool = PgPool::connect(&url).await.unwrap();
    pool_map.insert(url, pool.clone());
    pool
}

// Shared MySQL connection pools, keyed by connection URL.
static MYSQL_POOLS: LazyLock<tokio::sync::RwLock<HashMap<Astr, MySqlPool>>> =
    LazyLock::new(|| tokio::sync::RwLock::new(HashMap::<Astr, MySqlPool>::new()));

/// Get (or lazily create) the shared MySQL pool for `store`'s connection URL.
///
/// # Panics
/// Panics if the first connection attempt for a URL fails.
pub(crate) async fn get_mysql_pool(store: &impl DataSourceTrait) -> MySqlPool {
    let url = store.to_url();
    let mut pool_map = MYSQL_POOLS.write().await;
    // Single lookup; the original did contains_key + get and also cloned the
    // `Option<&MySqlPool>` before unwrapping, which was a no-op.
    if let Some(pool) = pool_map.get(&url) {
        return pool.clone();
    }
    tracing::info!("{}", &url);
    // NOTE(review): the write lock is held across the connect await — this
    // serializes first connections but prevents duplicate pools per URL.
    let pool = MySqlPool::connect(&url).await.unwrap();
    pool_map.insert(url, pool.clone());
    pool
}

/// Cache of model schema definitions, keyed by model id.
static SCHEMA_MAP: LazyLock<tokio::sync::RwLock<HashMap<u64, Arc<VddSchema>>>> =
    LazyLock::new(|| tokio::sync::RwLock::new(HashMap::<u64, Arc<VddSchema>>::new()));

/// Get a model schema from the cache; on a miss, try loading it from the
/// database once and cache the result.
///
/// # Errors
/// Returns `Err("wrong model id")` when the schema is neither cached nor
/// loadable from the database.
pub async fn get_schema(model_id: u64) -> Result<Arc<VddSchema>, Astr> {
    // Fast path: return straight from the cache (one lock acquisition instead
    // of the original is_some() probe followed by a second read).
    if let Some(schema) = SCHEMA_MAP.read().await.get(&model_id) {
        return Ok(schema.clone());
    }
    // Miss: load once; a load failure is deliberately swallowed here because
    // the final lookup below decides success.
    if let Ok(schema) = model_db::m_schema(model_id).await {
        SCHEMA_MAP.write().await.insert(model_id, schema);
    }
    // Re-read: the entry may have been inserted by us or by a concurrent
    // caller (matching the original's tolerance of that race).
    SCHEMA_MAP
        .read()
        .await
        .get(&model_id)
        .cloned()
        .ok_or_else(|| "wrong model id".into())
}

/// Forcibly invalidate the cached schema definition for `model_id`.
pub(crate) async fn invalidate_schema(model_id: u64) {
    SCHEMA_MAP.write().await.remove(&model_id);
}

/// Produce the next value for an auto-increment column, persisting the new
/// high-water mark in RocksDB under the field's cache key.
///
/// # Panics
/// Panics when the field is not an auto-increment int field, when the
/// configured limit would be exceeded, or on RocksDB errors.
pub(crate) fn fetch_auto_increment_id(field: &VddField) -> i64 {
    let vk = VddKey::CacheAutoIncreaseKeyId(field.id.clone());
    let stored = ROCKSDB
        .get_cf(&vk.db_name().unwrap(), vk.to_string())
        .unwrap();
    let VddFieldType::FtAutoIncreaseInt { start, step, limit } = field.field_type.as_ref()
    else {
        panic!("field rule is not fit for auto increase id field");
    };
    // Seed one step below `start` so the first issued value equals `start`.
    let previous = match stored {
        None => *start - *step as i64,
        Some(bytes) => i64::from_le_bytes(bytes.as_slice().try_into().unwrap()),
    };
    let next = previous + *step as i64;
    // `step` may be negative, so the limit is a floor or a ceiling accordingly.
    if (*step > 0 && next > *limit) || (*step < 0 && next < *limit) {
        panic!("touched id limit");
    }
    ROCKSDB
        .put_cf(&vk.db_name().unwrap(), vk.to_string(), next.to_le_bytes())
        .unwrap();
    next
}

/// Get the next system-provided, auto-incrementing global primary-key value.
///
/// # Panics
/// Panics when the id space is exhausted (`u64::MAX` is never issued) or on
/// RocksDB errors.
pub(crate) fn fetch_system_auto_increment_id() -> u64 {
    let vk = VddKey::CacheAutoIncreaseKeyId(EMPTY_FID);
    let stored = ROCKSDB
        .get_cf(&vk.db_name().unwrap(), vk.to_string())
        .unwrap();
    let current = match stored {
        None => 0u64,
        Some(v) => u64::from_le_bytes(v.as_slice().try_into().unwrap()),
    };
    // The original incremented before checking, so a stored u64::MAX would
    // have wrapped (debug-panic with the wrong message / silent wrap in
    // release). checked_add closes that hole; the explicit MAX check keeps
    // the original refusal to ever issue u64::MAX itself.
    let next = current.checked_add(1).expect("touched id limit");
    if next == u64::MAX {
        panic!("touched id limit");
    }
    ROCKSDB
        .put_cf(&vk.db_name().unwrap(), vk.to_string(), next.to_le_bytes())
        .expect(i18n!(E_ROCKSDB).as_ref());
    next
}
