use std::{
    collections::HashMap,
    sync::{Arc, LazyLock},
};

use chrono::Local;
use rust_rocksdb::WriteBatchWithTransaction;
use visit_dd_core::{
    alog,
    id_factory::ID_FACTORY,
    model::{DataStoreEngine, VddModel},
    proj::{VddLogPart, APP_CONFIG},
    record::{RecordChangeEvent, RecordSubscriber},
    Ares, Astr, FieldId, RecordId, UserId, AC_ADD, AC_DELETE, AC_EDIT, CACHE_LISTENER,
};

use crate::{
    cache::{self, get_mysql_pool, IndexChange},
    keys::VddKey,
    log_db, run_by_store,
    sqlx::SqlxDefineTrait,
    sqlx_run_by_store, ROCKSDB,
};

#[cfg(feature = "postgres")]
use crate::cache::get_pg_pool;

/// 字段值变化情况
/// 字段值变化情况
/// Snapshot of one field's value transition on a record (before/after pair).
/// NOTE(review): the name has a typo — "Filed" should be "Field" — kept as-is
/// because the type is `pub(crate)` and may be referenced outside this file.
pub(crate) struct FiledValueChange {
    // id of the field whose value changed
    pub fid: FieldId,
    // serialized value before the change
    pub old_value: Astr,
    // serialized value after the change
    pub new_value: Astr,
}

/// Process-lifetime cache listeners used by [`get_parquet_refresh_flag`] to
/// decide when a model's parquet file needs regenerating; keyed by model id.
/// Wrapped in an async `RwLock` because it is accessed from async tasks.
static PARQUET_LISTENERS: LazyLock<tokio::sync::RwLock<HashMap<u64, RecordSubscriber>>> =
    LazyLock::new(|| tokio::sync::RwLock::new(HashMap::<u64, RecordSubscriber>::new()));

/// 探查一个模型的parquet文件是否需要更新 本地存储和远端存储的逻辑不同
/// 探查一个模型的parquet文件是否需要更新 本地存储和远端存储的逻辑不同
///
/// Checks whether a model's parquet file needs refreshing:
/// - first call for a model: registers a process-lifetime cache listener and
///   reports `true` (initial generation);
/// - remote store (`model.store` is `Some`): refresh once the listener is
///   older than one hour;
/// - local store (`None`): refresh when record-change events arrived since
///   the listener's timestamp, then reset the listener.
pub async fn get_parquet_refresh_flag(model: &VddModel) -> bool {
    let mut listeners = PARQUET_LISTENERS.write().await;
    if let Some(sc) = listeners.get_mut(&model.id) {
        if model.store.is_some() {
            // BUG FIX: the original computed `sc.cts - now`, which is never
            // positive for a past timestamp, so the hourly refresh never
            // fired. Elapsed time is `now - sc.cts`.
            let stale = Local::now()
                .naive_local()
                .signed_duration_since(sc.cts)
                .num_seconds()
                > 3600;
            if stale {
                // Restart the hourly window; otherwise every subsequent call
                // after the first hour would keep reporting a refresh.
                sc.reset();
            }
            stale
        } else {
            let events = select_events(sc);
            sc.reset();
            !events.is_empty()
        }
    } else {
        listeners.insert(model.id, RecordSubscriber::new(model.id, CACHE_LISTENER));
        true
    }
}

/// Persists a record subscription to the configured backend (RocksDB when no
/// external meta store is set, otherwise the SQL store), writes an audit-log
/// entry, and applies the resulting index changes to the cache.
pub async fn sc_save(sc: &RecordSubscriber) -> Ares {
    // 系统监听缓存不能存储，其生命周期应与进程匹配
    // (the system cache listener is never persisted; it lives with the process)
    if sc.subscriber == CACHE_LISTENER {
        return Ok(());
    }
    let mut batch = WriteBatchWithTransaction::<true>::default();
    let mut index_changes = Vec::<IndexChange>::new();
    if APP_CONFIG.meta_store.is_none() {
        impl_rdb::sc_save(&mut batch, sc);
    } else {
        sqlx_run_by_store!(impl_sqlx::sc_save, sc)?
    }
    log_db::save(
        &mut batch,
        &mut index_changes,
        &alog!(
            VddLogPart::LpAction(sc.subscriber.clone(), "订阅数据更新".into()),
            VddLogPart::LpModel(sc.model_id),
        ),
    );
    // Flattened the original `is_ok()` check into plain `?` propagation:
    // cache index changes are only applied after a successful batch write.
    let write_res: Ares = ROCKSDB.write(batch).map_err(|e| e.to_string().into());
    write_res?;
    cache::apply_index_change(&index_changes)?;
    Ok(())
}

/// Removes a user's subscription to a model from the configured backend,
/// writes an audit-log entry, and applies the resulting index changes.
pub async fn sc_delete(model_id: u64, subscriber: &UserId) -> Ares {
    let mut batch = WriteBatchWithTransaction::<true>::default();
    let mut index_changes = Vec::<IndexChange>::new();
    // `if let` replaces the original `is_none()` test followed by
    // `clone().unwrap()` — one decision point instead of two.
    if let Some(ds) = APP_CONFIG.meta_store.clone() {
        match ds.store_engine {
            #[cfg(feature = "postgres")]
            DataStoreEngine::Postgres => {
                let pool = get_pg_pool(ds.as_ref()).await;
                // BUG FIX: was `.unwrap()`, panicking on any storage error even
                // though this fn returns `Ares` — propagate the error instead.
                pool.delete_bounds(VddKey::BoundModelAndSubscribe(model_id, subscriber.clone()))
                    .await?;
            }
            DataStoreEngine::Mysql => {
                let pool = get_mysql_pool(ds.as_ref()).await;
                // BUG FIX: propagate instead of panicking (see above).
                pool.delete_bounds(VddKey::BoundModelAndSubscribe(model_id, subscriber.clone()))
                    .await?;
            }
        };
    } else {
        let vk = VddKey::BoundModelAndSubscribe(model_id, subscriber.clone());
        batch.delete_cf(vk.db_name().unwrap(), vk.to_string());
    };
    log_db::save(
        &mut batch,
        &mut index_changes,
        &alog!(
            VddLogPart::LpAction(subscriber.clone(), "取消订阅数据更新".into()),
            VddLogPart::LpModel(model_id),
        ),
    );
    let ares = ROCKSDB.write(batch).map_err(|e| e.to_string().into());
    if ares.is_ok() {
        cache::apply_index_change(&index_changes)?;
    }
    ares
}

/// Loads one user's subscription to a model, dispatching to the RocksDB or
/// SQL implementation depending on the configured meta store.
/// Returns an error when the subscription does not exist or loading fails.
pub async fn sc_load(model_id: u64, subscriber: &UserId) -> Result<RecordSubscriber, Astr> {
    run_by_store!(impl_rdb::sc_load, impl_sqlx::sc_load, model_id, subscriber)
}

/// Loads all subscriptions registered on the given model, dispatching to the
/// RocksDB or SQL implementation depending on the configured meta store.
pub async fn sc_load_in_model(model_id: u64) -> Vec<Arc<RecordSubscriber>> {
    run_by_store!(
        impl_rdb::sc_load_in_model,
        impl_sqlx::sc_load_in_model,
        model_id
    )
}

/// 获取一批模型的订阅客户端个数
/// 获取一批模型的订阅客户端个数
/// Returns, for each id in `model_ids` (same order), the number of
/// subscribers bound to that model.
pub async fn sc_count_in_models(model_ids: &[u64]) -> Vec<usize> {
    run_by_store!(
        impl_rdb::count_subscription,
        impl_sqlx::sc_counts,
        model_ids
    )
}

/// Records a change event for a record: stores the serialized event in the
/// write batch and queues an index entry linking the model to the event.
pub fn send_change_event(
    batch: &mut WriteBatchWithTransaction<true>,
    index_changes: &mut Vec<IndexChange>,
    model_id: u64,
    record_id: &RecordId,
    action: u8,
) {
    // Allocate a fresh event id and stamp the event with the local time.
    let event_id = ID_FACTORY.lock().unwrap().lazy_generate();
    let change = RecordChangeEvent {
        id: event_id,
        ts: Local::now().naive_local(),
        action,
        rid: record_id.clone(),
    };
    // Persist the event entity in the batch, keyed by its id.
    let entity_key = VddKey::EntityRecordChangeEvent(event_id);
    let payload = serde_json::to_vec(&change).unwrap();
    batch.put_cf(entity_key.db_name().unwrap(), entity_key.to_string(), payload);
    // Queue the model → event index addition for the cache layer.
    let index_key = VddKey::IndexModel2Rce(model_id);
    index_changes.push(IndexChange::new(index_key, AC_ADD, event_id));
}

/// Returns all record-change events of the subscriber's model whose timestamp
/// is at or after the subscriber's cursor (`cts`). Events that fail to load
/// from RocksDB are skipped silently, matching the previous behavior.
pub fn select_events(subscribe: &RecordSubscriber) -> Vec<Arc<RecordChangeEvent>> {
    let vk = VddKey::IndexModel2Rce(subscribe.model_id);
    cache::get_index(&vk.to_string())
        .iter()
        .filter_map(|id| {
            let ek = VddKey::EntityRecordChangeEvent(*id);
            // `.ok()??` drops both storage errors and missing keys.
            let bytes = ROCKSDB.get_cf(ek.db_name().unwrap(), ek.to_string()).ok()??;
            let event = serde_json::from_slice::<RecordChangeEvent>(&bytes).unwrap();
            (event.ts >= subscribe.cts).then(|| Arc::new(event))
        })
        .collect()
}

/// 当一笔记录被恢复时，将删除操作事件从库中删除
/// 当一笔记录被恢复时，将删除操作事件从库中删除
/// When a record is restored, remove its delete event from storage and queue
/// the matching index removal. If several delete events exist for the record,
/// the one encountered last in the model's index wins.
pub fn on_record_restore(
    batch: &mut WriteBatchWithTransaction<true>,
    index_changes: &mut Vec<IndexChange>,
    model_id: u64,
    record_id: &RecordId,
) {
    let index_key = VddKey::IndexModel2Rce(model_id);
    let event_ids = cache::get_index(&index_key.to_string());
    // 0 doubles as the "not found" sentinel, as in the original logic.
    let mut latest_delete = 0u64;
    for id in event_ids.iter() {
        let ek = VddKey::EntityRecordChangeEvent(*id);
        if let Ok(Some(bytes)) = ROCKSDB.get_cf(ek.db_name().unwrap(), ek.to_string()) {
            let event = serde_json::from_slice::<RecordChangeEvent>(&bytes).unwrap();
            if event.action == AC_DELETE && &event.rid == record_id {
                latest_delete = event.id;
            }
        }
    }
    if latest_delete > 0 {
        index_changes.push(IndexChange {
            key: index_key,
            a: AC_DELETE,
            data: latest_delete,
        });
        let entity_key = VddKey::EntityRecordChangeEvent(latest_delete);
        batch.delete_cf(entity_key.db_name().unwrap(), entity_key.to_string());
    }
}

/// Summarizes a subscriber's pending change events as counts of
/// (additions, edits, deletions), in that order.
pub fn overview_subscription(subscribe: &RecordSubscriber) -> Result<(usize, usize, usize), Astr> {
    let mut added = 0usize;
    let mut edited = 0usize;
    let mut deleted = 0usize;
    for event in select_events(subscribe) {
        // Guards (rather than const patterns) keep this valid whether the
        // AC_* items are consts or statics.
        match event.action {
            a if a == AC_ADD => added += 1,
            a if a == AC_EDIT => edited += 1,
            a if a == AC_DELETE => deleted += 1,
            _ => {}
        }
    }
    Ok((added, edited, deleted))
}

/// Resets a user's subscription cursor (discarding its pending change-event
/// window) and persists the updated subscriber.
pub async fn reset_subscription(model_id: u64, subscriber: &UserId) -> Ares {
    let mut r_sub = sc_load(model_id, subscriber).await?;
    r_sub.reset();
    sc_save(&r_sub).await
}

/// RocksDB-backed subscription persistence, used when no external meta store
/// is configured.
mod impl_rdb {
    use crate::{count_in_key_head, keys::VddKey, load_by_key_head, ROCKSDB};
    use rust_rocksdb::WriteBatchWithTransaction;
    use serde_json::json;
    use std::sync::Arc;
    use visit_dd_core::{record::RecordSubscriber, Astr, UserId, EMPTY_UID};

    /// Serializes the subscriber as JSON under its model/subscriber bound key.
    pub(super) fn sc_save(batch: &mut WriteBatchWithTransaction<true>, sc: &RecordSubscriber) {
        let vk = VddKey::BoundModelAndSubscribe(sc.model_id, sc.subscriber.clone());
        batch.put_cf(vk.db_name().unwrap(), vk.to_string(), json!(sc).to_string());
    }

    /// Loads one subscription; errors when it is absent or the read fails.
    pub(super) fn sc_load(model_id: u64, subscriber: &UserId) -> Result<RecordSubscriber, Astr> {
        let vk = VddKey::BoundModelAndSubscribe(model_id, subscriber.clone());
        // BUG FIX: the original handled `Ok(Some(_))` and otherwise called
        // `unwrap_err()`, which panics on `Ok(None)` (key absent without a
        // storage error). Handle all three outcomes explicitly; the
        // "not found" message matches the sqlx implementation.
        match ROCKSDB.get_cf(vk.db_name().unwrap(), vk.to_string()) {
            Ok(Some(vu8)) => Ok(serde_json::from_slice::<RecordSubscriber>(&vu8).unwrap()),
            Ok(None) => Err("没有找到此用户对此模型的订阅".into()),
            Err(e) => Err(e.to_string().into()),
        }
    }

    /// Loads every subscription stored under the model's key prefix.
    pub(super) fn sc_load_in_model(model_id: u64) -> Vec<Arc<RecordSubscriber>> {
        let kh = VddKey::BoundModelAndSubscribe(model_id, EMPTY_UID);
        load_by_key_head(&kh)
            .iter()
            .map(|x| {
                let r_sub = serde_json::from_slice::<RecordSubscriber>(x).unwrap();
                Arc::new(r_sub)
            })
            .collect::<Vec<Arc<RecordSubscriber>>>()
    }

    /// 计算一批模型的订阅数
    /// Counts subscriptions per model by scanning each model's key prefix;
    /// the result order mirrors `model_ids`.
    pub(super) fn count_subscription(model_ids: &[u64]) -> Vec<usize> {
        let mut res = Vec::<usize>::with_capacity(model_ids.len());
        for model_id in model_ids {
            let vk = VddKey::BoundModelAndSubscribe(*model_id, EMPTY_UID);
            let count = count_in_key_head(&vk, |_| true);
            res.push(count);
        }
        res
    }
}

/// SQL-backed (MySQL/Postgres via sqlx) subscription persistence, used when a
/// meta store is configured.
mod impl_sqlx {
    use std::sync::Arc;

    use visit_dd_core::{record::RecordSubscriber, Ares, Astr, UserId, EMPTY_UID};

    use crate::{keys::VddKey, sqlx::SqlxDefineTrait};

    /// Serializes the subscriber as JSON under its model/subscriber bound key.
    pub(super) async fn sc_save(pool: &impl SqlxDefineTrait, sc: &RecordSubscriber) -> Ares {
        pool.save_bound(
            VddKey::BoundModelAndSubscribe(sc.model_id, sc.subscriber.clone()),
            &serde_json::to_string(sc).unwrap(),
        )
        .await
    }

    /// Loads one subscription by scanning the model's bound rows for the
    /// matching subscriber; errors when none matches.
    pub(super) async fn sc_load(
        pool: &impl SqlxDefineTrait,
        model_id: u64,
        subscriber: &UserId,
    ) -> Result<RecordSubscriber, Astr> {
        let bounds = pool
            .select_bounds(VddKey::BoundModelAndSubscribe(model_id, subscriber.clone()))
            .await;
        // `find` + `ok_or_else` replaces the original mutable Option dance;
        // like the original, deserialization stops at the first match.
        bounds
            .iter()
            .map(|x| serde_json::from_str::<RecordSubscriber>(x).unwrap())
            .find(|sc| &sc.subscriber == subscriber)
            .ok_or_else(|| "没有找到此用户对此模型的订阅".into())
    }

    /// Loads every subscription stored under the model's bound key.
    pub(super) async fn sc_load_in_model(
        pool: &impl SqlxDefineTrait,
        model_id: u64,
    ) -> Vec<Arc<RecordSubscriber>> {
        let bounds = pool
            .select_bounds(VddKey::BoundModelAndSubscribe(model_id, EMPTY_UID))
            .await;
        bounds
            .iter()
            .map(|x| {
                let sc = serde_json::from_str::<RecordSubscriber>(x).unwrap();
                Arc::new(sc)
            })
            .collect::<Vec<Arc<RecordSubscriber>>>()
    }

    /// 获取一批模型的订阅计数
    /// Counts subscriptions per model with one grouped query; models missing
    /// from the result map count as 0. The result order mirrors `model_ids`.
    pub(super) async fn sc_counts(pool: &impl SqlxDefineTrait, model_ids: &[u64]) -> Vec<usize> {
        let bts = model_ids
            .iter()
            .map(|x| VddKey::BoundModelAndSubscribe(*x, EMPTY_UID))
            .collect::<Vec<VddKey>>();
        let count_map = pool.count_bounds_grouped(&bts).await;
        model_ids
            .iter()
            .map(|x| count_map.get(x).copied().unwrap_or(0))
            .collect::<Vec<usize>>()
    }
}
