#[cfg(feature = "postgres")]
use crate::cache::get_pg_pool;
use crate::cache::{get_mysql_pool, IndexChange};
use crate::keys::VddKey;
use crate::sqlx::SqlxDefineTrait;
use crate::value_db::VddValueDdTrait;
use crate::{
    cache, load_by_key_head_with_key, log_db, model_db, record_rule_db, run_by_store, value_db,
    CF_UNIQUE, ROCKSDB,
};
use crate::{count_in_key_head, delete_by_key_head_in_trash};
use rust_rocksdb::WriteBatchWithTransaction;
use serde_json::json;
use std::collections::HashMap;
use std::sync::Arc;
use visit_dd_core::field::VddFieldType::FtComputed;
use visit_dd_core::field::{ComputeAction, VddField, VddFieldRef, VddFieldType};
use visit_dd_core::model::DataStoreEngine;
use visit_dd_core::proj::{VddLogPart, APP_CONFIG};
use visit_dd_core::value::VddValue;
use visit_dd_core::{alog, extract_u64_from_key, i18n, Ares, Astr, FieldId, UserId, EMPTY_RID};

pub trait FieldDbTrait {
    /// Validates that a computed field's compute action is well-formed with
    /// respect to its sibling fields (`brothers`); see the impl for the rules.
    fn check_501(&self, brothers: &[VddFieldRef])
        -> impl std::future::Future<Output = Ares> + Send;
    /// Returns the number of values currently stored for this field.
    fn count_values(&self) -> usize;
}

/// Returns the shared type of `fields` when every field has the same
/// `VddFieldType` variant, otherwise `None`.
///
/// Variants are compared through their `u16` discriminant, so two fields of
/// the same variant with different payloads still count as the same type.
/// An empty slice yields `None` (the previous version unconditionally called
/// `fields.first().unwrap()` and panicked on empty input).
pub(crate) fn is_field_same_type(fields: &[VddFieldRef]) -> Option<Arc<VddFieldType>> {
    let first = fields.first()?;
    let ft: u16 = first.field_type.as_ref().into();
    fields[1..]
        .iter()
        .all(|f| Into::<u16>::into(f.field_type.as_ref()) == ft)
        .then(|| first.field_type.clone())
}

impl FieldDbTrait for VddField {
    /// Validates the compute-action configuration of a computed field.
    ///
    /// Only fields of type `FtComputed` are checked; all other types pass.
    ///
    /// Errors:
    /// - 1108: a computed field must not be multi-valued.
    /// - 1110: `CaConsumeCountByModel` — the consuming model has no `FtModel`
    ///   field that points back at this model and this field (or a brother).
    /// - 1112: max/min/sum/avg actions need at least two source fields.
    /// - 1113: the source fields are not all of the same type.
    async fn check_501(&self, brothers: &[VddFieldRef]) -> Ares {
        if let FtComputed(action) = self.field_type.as_ref() {
            if self.multi {
                return Err(i18n!(1108));
            }
            match action.as_ref() {
                ComputeAction::CaConsumeCountByModel(consume_m_id) => {
                    // Verify that this field's model is really referenced by
                    // another field in the consuming model.
                    let consumer_schema = cache::get_schema(*consume_m_id).await?;
                    // Look up the consumer field that names this model/field as provider.
                    let consume_field =
                        consumer_schema.get_field_by_provider(self.model_id, Some(self.id.clone()));
                    if consume_field.is_none() {
                        return Err(i18n!(1110));
                    } else {
                        let consume_field = consume_field.unwrap();
                        match consume_field.field_type.as_ref() {
                            VddFieldType::FtModel {
                                provider,
                                display_field,
                            } => {
                                // The consumer's provider must be this field's model,
                                // and its display field must be this field or one of
                                // its brothers. Note `&&` binds tighter than `||`,
                                // so this reads: provider mismatch OR (display field
                                // is neither self NOR any brother).
                                if *provider != self.model_id
                                    || display_field != &self.id
                                        && brothers
                                            .iter()
                                            .find(|x| &x.id == display_field)
                                            .is_none()
                                {
                                    return Err(i18n!(1110));
                                }
                            }
                            _ => {
                                return Err(i18n!(1110));
                            }
                        }
                    }
                }
                ComputeAction::CaValueMaxInRecord(f_ids)
                | ComputeAction::CaValueMinInRecord(f_ids) => {
                    if f_ids.len() < 2 {
                        return Err(i18n!(1112));
                    }
                    let fields = f_select_by_ids(f_ids).await;
                    let field_type = is_field_same_type(&fields);
                    if field_type.is_none() {
                        return Err(i18n!(1113));
                    }
                }
                ComputeAction::CaValueSumInRecord(f_ids)
                | ComputeAction::CaValueAvgInRecord(f_ids) => {
                    if f_ids.len() < 2 {
                        return Err(i18n!(1112));
                    }
                    let fields = f_select_by_ids(f_ids).await;
                    let field_type = is_field_same_type(&fields);
                    if field_type.is_none() {
                        return Err(i18n!(1113));
                    }
                    // Sum/avg additionally require a numeric field type.
                    let field_type = field_type.unwrap();
                    match field_type.as_ref() {
                        VddFieldType::FtInt { .. } | VddFieldType::FtFloat { .. } => {}
                        _ => {
                            return Err("暂时只支持数字类型的运算".into());
                        }
                    }
                }
                _ => {}
            }
        }
        Ok(())
    }

    /// Counts stored values by scanning the `BoundFieldValue(field_id, *)`
    /// key prefix with an always-true filter.
    fn count_values(&self) -> usize {
        let key_head = VddKey::BoundFieldValue(self.id.clone(), EMPTY_RID);
        count_in_key_head(&key_head, |_| true)
    }
}

/// Loads one field definition from the active meta store (RocksDB or SQL).
pub async fn f_load(field_id: &FieldId) -> VddFieldRef {
    run_by_store!(impl_rdb::f_load, impl_sqlx::f_load, field_id)
}

/// Persists a single field definition and writes an audit log entry.
///
/// The definition goes to RocksDB when no external meta store is configured,
/// otherwise to the configured SQL engine (Postgres/MySQL). The label-cache
/// update and the log entry are always committed through the RocksDB batch.
/// On success the model's schema cache is invalidated and the collected index
/// changes are applied.
pub async fn f_save(field: &VddField, operator: &UserId, is_update: bool) -> Ares {
    let mut batch = WriteBatchWithTransaction::<true>::default();
    let mut index_changes = Vec::<IndexChange>::new();
    if APP_CONFIG.meta_store.is_none() {
        impl_rdb::f_save(&mut batch, &mut index_changes, field)
    } else {
        let ds = APP_CONFIG.meta_store.clone().unwrap();
        match ds.store_engine {
            #[cfg(feature = "postgres")]
            DataStoreEngine::Postgres => {
                impl_sqlx::f_save(&get_pg_pool(ds.as_ref()).await, field, is_update).await?
            }
            DataStoreEngine::Mysql => {
                impl_sqlx::f_save(&get_mysql_pool(ds.as_ref()).await, field, is_update).await?
            }
        };
    }
    // NOTE(review): on the RocksDB path impl_rdb::f_save has already queued
    // this same label-cache update — presumably a harmless duplicate put of
    // the same key/value; confirm.
    cache::update_label_cache(&mut batch, field.id.0, &field.label);
    log_db::save(
        &mut batch,
        &mut index_changes,
        &alog!(
            VddLogPart::LpAction(operator.clone(), "保存字段配置".into()),
            VddLogPart::LpModel(field.model_id),
            VddLogPart::LpField(field.id.clone()),
        ),
    );
    let ares = ROCKSDB.write(batch).map_err(|e| e.to_string().into());
    if ares.is_ok() {
        cache::invalidate_schema(field.model_id).await;
        cache::apply_index_change(&index_changes)?;
    }
    ares
}

/// Deletes a field definition together with everything attached to it:
/// its stored values, the record rules that reference it, plus an audit log
/// entry. On success the model's schema cache is invalidated.
///
/// The definition itself is removed via RocksDB (moved to trash) or the
/// configured SQL engine; values, rules and the log entry always go through
/// the RocksDB batch.
pub async fn f_delete(field: &VddField, operator: &UserId) -> Ares {
    let mut batch = WriteBatchWithTransaction::<true>::default();
    let mut index_changes = Vec::<IndexChange>::new();
    if APP_CONFIG.meta_store.is_none() {
        impl_rdb::f_delete(
            &mut batch,
            &mut index_changes,
            field.model_id,
            field.id.clone(),
        )
    } else {
        let ds = APP_CONFIG.meta_store.clone().unwrap();
        match ds.store_engine {
            #[cfg(feature = "postgres")]
            DataStoreEngine::Postgres => {
                get_pg_pool(ds.as_ref())
                    .await
                    .f_delete(field.model_id, field.id.clone())
                    .await?
            }
            DataStoreEngine::Mysql => {
                get_mysql_pool(ds.as_ref())
                    .await
                    .f_delete(field.model_id, field.id.clone())
                    .await?
            }
        };
    };
    // Field values
    value_db::fv_delete_all(&mut batch, &field.id);
    // Record rules that reference this field
    record_rule_db::on_delete_field(&mut batch, field.model_id, &field.id);
    // Audit log
    log_db::save(
        &mut batch,
        &mut index_changes,
        &alog!(
            VddLogPart::LpAction(operator.clone(), "删除字段配置".into()),
            VddLogPart::LpModel(field.model_id),
            VddLogPart::LpField(field.id.clone()),
        ),
    );
    let ares = ROCKSDB.write(batch).map_err(|e| e.to_string().into());
    if ares.is_ok() {
        // cache::on_fv_cleared(&field.id);
        cache::invalidate_schema(field.model_id).await;
        cache::apply_index_change(&index_changes)?;
    }
    ares
}

/// Validates a field definition against its sibling fields in the same model,
/// dispatching to the store-specific implementation.
pub async fn f_check_with_brothers(field: &VddField, other_fields: &[VddFieldRef]) -> Ares {
    run_by_store!(
        impl_rdb::f_check_with_brothers,
        impl_sqlx::f_check_with_brothers,
        field,
        other_fields
    )
}

/// Persists several field definitions at once (bulk import), logging one
/// audit entry per field.
///
/// All fields are expected to belong to the same model: the schema cache is
/// invalidated for `fields[0].model_id` only.
pub async fn f_save_some(fields: &[VddFieldRef], operator: &UserId) -> Ares {
    // Nothing to persist for an empty slice — this also guards the
    // `fields[0]` access below, which previously panicked on empty input.
    if fields.is_empty() {
        return Ok(());
    }
    let mut batch = WriteBatchWithTransaction::<true>::default();
    let mut index_changes = Vec::<IndexChange>::new();
    if APP_CONFIG.meta_store.is_none() {
        impl_rdb::f_save_some(&mut batch, &mut index_changes, fields)
    } else {
        let ds = APP_CONFIG.meta_store.clone().unwrap();
        match ds.store_engine {
            #[cfg(feature = "postgres")]
            DataStoreEngine::Postgres => {
                impl_sqlx::f_save_some(&get_pg_pool(ds.as_ref()).await, fields).await?
            }
            DataStoreEngine::Mysql => {
                impl_sqlx::f_save_some(&get_mysql_pool(ds.as_ref()).await, fields).await?
            }
        };
    };
    for x in fields.iter() {
        cache::update_label_cache(&mut batch, x.id.0, &x.label);
        log_db::save(
            &mut batch,
            &mut index_changes,
            &alog!(
                VddLogPart::LpAction(operator.clone(), "导入字段".into()),
                VddLogPart::LpModel(x.model_id),
                VddLogPart::LpField(x.id.clone()),
            ),
        );
    }
    let ares = ROCKSDB.write(batch).map_err(|e| e.to_string().into());
    if ares.is_ok() {
        cache::invalidate_schema(fields[0].model_id).await;
        cache::apply_index_change(&index_changes)?
    }
    ares
}

/// Returns the tagged fields of a model keyed by their tag code.
/// Tag codes are resolved first via `model_db::m_select_tags_to_map`.
pub async fn f_select_tagged_in_model(model_id: u64) -> HashMap<Astr, VddFieldRef> {
    let tag_map = model_db::m_select_tags_to_map(true, true, Some(model_id)).await;
    run_by_store!(
        impl_rdb::f_select_tagged_in_model,
        impl_sqlx::f_select_tagged_in_model,
        model_id,
        tag_map
    )
}

/// Removes every field definition of a model and writes one audit log entry.
/// On success the model's schema cache is invalidated.
///
/// NOTE(review): unlike `f_delete`, this does not remove the fields' stored
/// values or record rules — confirm whether callers clean those up separately.
pub async fn f_clear_in_model(model_id: u64, operator: &UserId) -> Ares {
    let mut batch = WriteBatchWithTransaction::<true>::default();
    let mut index_changes = Vec::<IndexChange>::new();
    if APP_CONFIG.meta_store.is_none() {
        let field_ids = impl_rdb::f_select_field_ids(model_id);
        for field_id in field_ids {
            impl_rdb::f_delete(&mut batch, &mut index_changes, model_id, field_id);
        }
    } else {
        let ds = APP_CONFIG.meta_store.clone().unwrap();
        match ds.store_engine {
            #[cfg(feature = "postgres")]
            DataStoreEngine::Postgres => {
                impl_sqlx::f_clear_in_model(&get_pg_pool(ds.as_ref()).await, model_id).await?
            }
            DataStoreEngine::Mysql => {
                impl_sqlx::f_clear_in_model(&get_mysql_pool(ds.as_ref()).await, model_id).await?
            }
        };
    }
    log_db::save(
        &mut batch,
        &mut index_changes,
        &alog!(
            VddLogPart::LpAction(operator.clone(), "清空字段".into()),
            VddLogPart::LpModel(model_id),
        ),
    );
    let ares = ROCKSDB.write(batch).map_err(|e| e.to_string().into());
    if ares.is_ok() {
        cache::invalidate_schema(model_id).await;
        cache::apply_index_change(&index_changes)?
    }
    ares
}

/// Returns the ids of all fields belonging to a model.
pub async fn f_select_field_ids(model_id: u64) -> Vec<FieldId> {
    run_by_store!(
        impl_rdb::f_select_field_ids,
        impl_sqlx::f_select_field_ids,
        model_id
    )
}

/// Returns all field definitions of a model, sorted by `display_order`
/// (both backends sort before returning).
pub async fn f_select_by_model(model_id: u64) -> Vec<VddFieldRef> {
    run_by_store!(
        impl_rdb::f_select_by_model,
        impl_sqlx::f_select_by_model,
        model_id
    )
}

/// Loads the field definitions for the given ids from the active meta store.
pub async fn f_select_by_ids(field_ids: &[FieldId]) -> Vec<VddFieldRef> {
    run_by_store!(
        impl_rdb::f_select_by_ids,
        impl_sqlx::f_select_by_ids,
        field_ids
    )
}

/// Permanently deletes a field from the trash, together with its trashed
/// values. With a SQL meta store the definition is dropped from SQL while
/// the trashed values are still removed from the RocksDB trash column family.
pub async fn f_drop_from_trash(fid: FieldId) -> Ares {
    match APP_CONFIG.meta_store.as_ref() {
        None => impl_rdb::drop(fid),
        Some(ds) => {
            match ds.store_engine {
                #[cfg(feature = "postgres")]
                DataStoreEngine::Postgres => {
                    impl_sqlx::f_drop_from_trash(&get_pg_pool(ds.as_ref()).await, &fid).await?
                }
                DataStoreEngine::Mysql => {
                    impl_sqlx::f_drop_from_trash(&get_mysql_pool(ds.as_ref()).await, &fid).await?
                }
            }
            let mut batch = WriteBatchWithTransaction::<true>::default();
            delete_by_key_head_in_trash(
                &mut batch,
                &VddKey::BoundFieldValue(fid.clone(), EMPTY_RID),
            );
            ROCKSDB.write(batch).map_err(|e| e.to_string().into())
        }
    }
}

/// Before a field is switched to "unique": validates every existing value of
/// the field against the uniqueness constraint, and queues the unique-index
/// entries into `batch` as it goes.
///
/// Returns `Err` with one line per conflicting value when any existing value
/// already has a unique-index entry; the batch entries queued for the
/// non-conflicting values are harmless because the caller only writes the
/// batch on `Ok`.
pub fn fit_unique_on_exist_records(
    batch: &mut WriteBatchWithTransaction<true>,
    field: &VddField,
) -> Result<(), Astr> {
    let key_head = VddKey::BoundFieldValue(field.id.clone(), EMPTY_RID);
    // The unique-index column family handle is loop-invariant; fetch it once
    // instead of once per stored value as before.
    let cf_u = ROCKSDB.cf_handle(CF_UNIQUE).unwrap();
    let mut err_msg = String::new();
    load_by_key_head_with_key(&key_head).iter().for_each(|x| {
        let v = serde_json::from_slice::<VddValue>(&x.1).unwrap();
        let uk = v.get_unique_key(&field.id);
        if ROCKSDB.get_cf(&cf_u, &uk).unwrap().is_some() {
            err_msg
                .push_str(format!("字段值 {} 不满足唯一性约束\n", v.get_simple_display()).as_ref());
        } else {
            // Map the unique key to the owning record id.
            let rec_id = extract_u64_from_key(&x.0);
            batch.put_cf(&cf_u, &uk, rec_id.to_le_bytes());
        }
    });
    if err_msg.is_empty() {
        Ok(())
    } else {
        Err(err_msg.into())
    }
}

/// When a field is changed to a string type: rewrites every existing value of
/// the field as a `VddValue::S101` string built from its simple display form.
pub fn fit_to_string_on_exist_records(batch: &mut WriteBatchWithTransaction<true>, fid: &FieldId) {
    let key_head = VddKey::BoundFieldValue(fid.clone(), EMPTY_RID);
    load_by_key_head_with_key(&key_head).iter().for_each(|x| {
        // Deserialize straight from the stored bytes; the previous
        // String::from_utf8(...) + from_str round-trip allocated an
        // intermediate String for no benefit (matches the from_slice
        // style used in fit_unique_on_exist_records).
        let v = serde_json::from_slice::<VddValue>(&x.1).unwrap();
        let new_fv = VddValue::S101(v.get_simple_display().to_string());
        batch.put_cf(
            key_head.db_name().unwrap(),
            x.0.as_ref(),
            &json!(new_fv).to_string(),
        );
    });
}

/// When a field is switched to multi-valued: rewrites every existing
/// single value of the field as its multi-value form (`s_to_m`), falling
/// back to `VddValue::NULL` when the conversion is not possible.
pub fn fit_s_to_m(batch: &mut WriteBatchWithTransaction<true>, fid: &FieldId) {
    let key_head = VddKey::BoundFieldValue(fid.clone(), EMPTY_RID);
    load_by_key_head_with_key(&key_head).iter().for_each(|x| {
        // Deserialize straight from the stored bytes instead of the previous
        // String::from_utf8 + from_str round-trip (one fewer allocation,
        // consistent with fit_unique_on_exist_records).
        let v = serde_json::from_slice::<VddValue>(&x.1).unwrap();
        let new_fv = v.s_to_m().unwrap_or(VddValue::NULL);
        batch.put_cf(
            key_head.db_name().unwrap(),
            x.0.as_ref(),
            &json!(new_fv).to_string(),
        );
    });
}

/// When a field is switched back to single-valued: rewrites every existing
/// multi value of the field as its single-value form (`m_to_s`), falling
/// back to `VddValue::NULL` when the conversion is not possible.
pub fn fit_m_to_s(batch: &mut WriteBatchWithTransaction<true>, fid: &FieldId) {
    let key_head = VddKey::BoundFieldValue(fid.clone(), EMPTY_RID);
    load_by_key_head_with_key(&key_head).iter().for_each(|x| {
        // Deserialize straight from the stored bytes instead of the previous
        // String::from_utf8 + from_str round-trip (one fewer allocation,
        // consistent with fit_unique_on_exist_records).
        let v = serde_json::from_slice::<VddValue>(&x.1).unwrap();
        let new_fv = v.m_to_s().unwrap_or(VddValue::NULL);
        batch.put_cf(
            key_head.db_name().unwrap(),
            x.0.as_ref(),
            &json!(new_fv).to_string(),
        );
    });
}

/// RocksDB-backed implementation of the field meta store.
pub mod impl_rdb {
    use crate::keys::VddKey;
    use crate::{
        cache, delete_by_key_head_in_trash, delete_by_value, move_to_trash, CF_META, CF_TRASH,
        ROCKSDB,
    };
    use rust_rocksdb::WriteBatchWithTransaction;
    use serde_json::json;
    use std::collections::HashMap;
    use std::sync::Arc;
    use visit_dd_core::field::{VddField, VddFieldRef, VddFieldType, C_FT_MODEL};
    use visit_dd_core::i18n_text::E_ROCKSDB;
    use visit_dd_core::{i18n, Ares, Astr, FieldId, AC_ADD, AC_DELETE, EMPTY_RID};

    /// Loads one field definition from its entity key.
    /// Panics (via `expect`/`unwrap`) if the read fails, the entity is
    /// missing, or its stored JSON cannot be parsed.
    pub(super) fn f_load(field_id: &FieldId) -> VddFieldRef {
        let k = VddKey::EntityField(field_id.clone());
        let v_vec = ROCKSDB
            .get_cf(k.db_name().unwrap(), k.to_string())
            .expect(i18n!(E_ROCKSDB).as_ref());
        let field =
            Arc::new(serde_json::from_slice::<VddField>(v_vec.unwrap().as_slice()).unwrap());
        field
    }

    /// Returns all fields of a model, sorted by `display_order`.
    ///
    /// Field ids come from the model→field index cache; the entities are then
    /// fetched in a single `multi_get_cf` round trip. Entries that are missing
    /// or fail to read are skipped silently.
    pub(crate) fn f_select_by_model(model_id: u64) -> Vec<VddFieldRef> {
        let mut fields = Vec::<VddFieldRef>::new();
        let f_index = cache::get_index(&VddKey::IndexModel2Field(model_id).to_string());
        let cf_meta = ROCKSDB.cf_handle(CF_META).unwrap();
        let keys = f_index
            .iter()
            .map(|f| (cf_meta, VddKey::EntityField(FieldId(*f)).to_string()));
        ROCKSDB.multi_get_cf(keys).iter().for_each(|item| {
            if let Ok(Some(vu8)) = item {
                // let vu8 = item.clone().unwrap().unwrap();
                let field = serde_json::from_slice::<VddField>(vu8).unwrap();
                fields.push(field.into());
            }
        });
        // for x in cache::get_index(&index_key).iter() {
        //     let field = f_load(&FieldId(*x));
        //     fields.push(field);
        // }
        fields.sort_by(|a, b| a.display_order.cmp(&b.display_order));
        fields
    }

    /// Loads the given fields one by one and returns them sorted by
    /// `display_order`.
    pub(crate) fn f_select_by_ids(field_ids: &[FieldId]) -> Vec<VddFieldRef> {
        let mut fields = Vec::<VddFieldRef>::new();
        for fid in field_ids.iter() {
            let field = f_load(fid);
            fields.push(field);
        }
        fields.sort_by(|a, b| a.display_order.cmp(&b.display_order));
        fields
    }

    /// Returns the ids of all fields of a model from the index cache.
    pub(crate) fn f_select_field_ids(model_id: u64) -> Vec<FieldId> {
        cache::get_index(&VddKey::IndexModel2Field(model_id).to_string())
            .iter()
            .map(|x| FieldId(*x))
            .collect::<Vec<FieldId>>()
    }

    /// Queues a field entity write, the model→field index addition, and the
    /// label-cache update into `batch`/`index_changes`.
    pub(crate) fn f_save(
        batch: &mut WriteBatchWithTransaction<true>,
        index_changes: &mut Vec<cache::IndexChange>,
        field: &VddField,
    ) {
        let k = VddKey::EntityField(field.id.clone());
        batch.put_cf(
            k.db_name().unwrap(),
            k.to_string(),
            json!(field).to_string(),
        );
        index_changes.push(cache::IndexChange::new(
            VddKey::IndexModel2Field(field.model_id),
            AC_ADD,
            field.id.0,
        ));
        cache::update_label_cache(batch, field.id.0, &field.label)
    }

    /// Queues removal of a field: its tag entries, its model→field index
    /// entry, and the entity itself (moved to trash rather than hard-deleted).
    pub(super) fn f_delete(
        batch: &mut WriteBatchWithTransaction<true>,
        index_changes: &mut Vec<cache::IndexChange>,
        model_id: u64,
        field_id: FieldId,
    ) {
        // Field tags
        delete_by_value(
            batch,
            &VddKey::UniqueFieldTag("".into(), "".into()),
            &field_id.0.to_le_bytes(),
        );
        // Index
        index_changes.push(cache::IndexChange::new(
            VddKey::IndexModel2Field(model_id),
            AC_DELETE,
            field_id.0,
        ));
        // Entity
        move_to_trash(batch, &VddKey::EntityField(field_id.clone()), None);
    }

    /// Validates a field definition: bool/file fields must not be
    /// multi-valued (1115), model-link fields must not be multi-valued (1117),
    /// must point at a valid provider model/field (1118), and must not link
    /// to another link field (1119).
    pub(super) fn f_check_with_brothers(
        field: &VddField,
        _other_fields: &[VddFieldRef],
    ) -> Result<(), Astr> {
        let mut err_msg = String::new();
        match field.field_type.as_ref() {
            VddFieldType::FtBool | VddFieldType::FtFile(..) => {
                if field.multi {
                    return Err(i18n!(1115));
                }
            }
            VddFieldType::FtModel {
                provider: lm_id,
                display_field: lf_id,
            } => {
                if field.multi {
                    return Err(i18n!(1117));
                }
                // Checks for link-type (FtModel) fields
                if *lm_id < 1 || lf_id.0 < 1 {
                    err_msg.push_str(format!("{}\n", i18n!(1118)).as_ref());
                }
                let link_field = f_load(&lf_id);
                if Into::<u16>::into(link_field.field_type.as_ref()) == C_FT_MODEL {
                    err_msg.push_str(format!("{}\n", i18n!(1119)).as_ref());
                }
            }
            _ => {}
        }
        if err_msg.is_empty() {
            Ok(())
        } else {
            Err(Arc::from(err_msg))
        }
    }

    /// Queues entity writes and index additions for a batch of fields.
    /// NOTE(review): unlike `f_save` this does not update the label cache
    /// here — the caller `f_save_some` does it per field; confirm.
    pub(super) fn f_save_some(
        batch: &mut WriteBatchWithTransaction<true>,
        index_changes: &mut Vec<cache::IndexChange>,
        fields: &[VddFieldRef],
    ) {
        fields.iter().for_each(|x| {
            let cf_meta = ROCKSDB.cf_handle(CF_META).unwrap();
            batch.put_cf(
                cf_meta,
                VddKey::EntityField(x.id.clone()).to_string(),
                json!(&x).to_string(),
            );
            index_changes.push(cache::IndexChange::new(
                VddKey::IndexModel2Field(x.model_id),
                AC_ADD,
                x.id.0,
            ));
        });
    }

    /// Maps tag codes to the model's fields; fields without a tag entry are
    /// logged (warning 1121) and omitted from the result.
    pub(super) fn f_select_tagged_in_model(
        model_id: u64,
        tag_map: HashMap<u64, Astr>,
    ) -> HashMap<Astr, VddFieldRef> {
        let fields = f_select_by_model(model_id);
        let mut field_map = HashMap::<Astr, VddFieldRef>::new();
        for field in fields {
            let field_code = tag_map.get(&field.id.0);
            match field_code {
                Some(field_code) => {
                    field_map.insert(field_code.clone(), field.to_owned());
                }
                None => {
                    tracing::warn!("{}", i18n!(1121, &field.id.0.to_string()).as_ref());
                }
            }
        }
        field_map
    }

    /// Permanently removes a field from the trash, together with its trashed
    /// field values.
    pub(super) fn drop(fid: FieldId) -> Ares {
        let mut batch = WriteBatchWithTransaction::<true>::default();
        let cf_trash = ROCKSDB.cf_handle(CF_TRASH).unwrap();
        let key = VddKey::EntityField(fid.clone()).to_string();
        batch.delete_cf(&cf_trash, &key);
        let key_head = VddKey::BoundFieldValue(fid, EMPTY_RID);
        delete_by_key_head_in_trash(&mut batch, &key_head);
        ROCKSDB.write(batch).map_err(|e| e.to_string().into())
    }
}

/// SQL-backed (sqlx) implementation of the field meta store.
mod impl_sqlx {
    use crate::keys::VddKey;
    use crate::sqlx::SqlxDefineTrait;
    use std::collections::HashMap;
    use std::sync::Arc;
    use visit_dd_core::field::{VddField, VddFieldRef, VddFieldType, C_FT_MODEL};
    use visit_dd_core::{i18n, Ares, Astr, FieldId};

    /// Loads one field definition from the SQL entity table.
    /// Panics if the entity is missing or its JSON fails to parse.
    pub(super) async fn f_load(pool: &impl SqlxDefineTrait, field_id: &FieldId) -> VddFieldRef {
        let sql_r = pool.load_entity(field_id.0).await.unwrap();
        let r = serde_json::from_str::<VddField>(&sql_r).unwrap();
        Arc::new(r)
    }

    /// Returns all fields of a model via the index query, sorted by
    /// `display_order`. Panics if the query fails or JSON fails to parse.
    pub(super) async fn f_select_by_model(
        pool: &impl SqlxDefineTrait,
        model_id: u64,
    ) -> Vec<VddFieldRef> {
        let mut r = pool
            .load_entities_form_index_query(VddKey::IndexModel2Field(model_id), false)
            .await
            .unwrap()
            .iter()
            .map(|x| {
                let f = serde_json::from_str::<VddField>(x).unwrap();
                Arc::new(f)
            })
            .collect::<Vec<VddFieldRef>>();
        r.sort_by(|a, b| a.display_order.cmp(&b.display_order));
        r
    }

    /// Returns the ids of all fields of a model from the SQL index.
    pub(super) async fn f_select_field_ids(
        pool: &impl SqlxDefineTrait,
        model_id: u64,
    ) -> Vec<FieldId> {
        pool.read_index(VddKey::IndexModel2Field(model_id))
            .await
            .iter()
            .map(|x| FieldId(*x as u64))
            .collect::<Vec<FieldId>>()
    }

    /// Loads the given fields one by one.
    /// NOTE(review): unlike `impl_rdb::f_select_by_ids`, the result keeps the
    /// input order (no `display_order` sort) — confirm callers don't rely on
    /// sorted output.
    pub(super) async fn f_select_by_ids(
        pool: &impl SqlxDefineTrait,
        field_ids: &[FieldId],
    ) -> Vec<VddFieldRef> {
        let mut r = Vec::<VddFieldRef>::new();
        for field_id in field_ids.iter() {
            let f = f_load(pool, field_id).await;
            r.push(f);
        }
        r
    }

    /// Updates an existing field entity or inserts a new one depending on
    /// `is_update`.
    pub(super) async fn f_save(
        pool: &impl SqlxDefineTrait,
        field: &VddField,
        is_update: bool,
    ) -> Ares {
        if is_update {
            pool.update_entity(field.id.0, &serde_json::to_string(&field).unwrap())
                .await
        } else {
            pool.f_save_new(field).await
        }
    }

    /// Validates a field definition: bool fields must not be multi-valued
    /// (1115), file fields must not be multi-valued (1116), model-link fields
    /// must not be multi-valued (1117), must point at a valid provider
    /// model/field (1118), and must not link to another link field (1119).
    /// NOTE(review): the RocksDB variant returns 1115 for multi-valued file
    /// fields where this one returns 1116 — confirm which is intended.
    pub(super) async fn f_check_with_brothers(
        pool: &impl SqlxDefineTrait,
        field: &VddField,
        _other_fields: &[VddFieldRef],
    ) -> Ares {
        let mut err_msg = String::new();
        match field.field_type.as_ref() {
            VddFieldType::FtBool => {
                if field.multi {
                    return Err(i18n!(1115));
                }
            }
            VddFieldType::FtFile(..) => {
                if field.multi {
                    return Err(i18n!(1116));
                }
            }
            VddFieldType::FtModel {
                provider: lm_id,
                display_field: lf_id,
            } => {
                if field.multi {
                    return Err(i18n!(1117));
                }
                // Checks for link-type (FtModel) fields
                if *lm_id < 1 || lf_id.0 < 1 {
                    err_msg.push_str(format!("{}\n", i18n!(1118)).as_ref());
                }
                let link_field = f_load(pool, lf_id).await;
                if Into::<u16>::into(link_field.field_type.as_ref()) == C_FT_MODEL {
                    err_msg.push_str(format!("{}\n", i18n!(1119)).as_ref());
                }
            }
            _ => {}
        }
        if err_msg.is_empty() {
            Ok(())
        } else {
            Err(Arc::from(err_msg))
        }
    }

    /// Inserts each field as a new entity, stopping at the first failure.
    pub(super) async fn f_save_some(pool: &impl SqlxDefineTrait, fields: &[VddFieldRef]) -> Ares {
        for field in fields.iter() {
            f_save(pool, field, false).await?;
        }
        Ok(())
    }

    /// Maps tag codes to the model's fields. Tags that match no field are
    /// silently skipped (the RocksDB variant logs a warning instead).
    pub(super) async fn f_select_tagged_in_model(
        pool: &impl SqlxDefineTrait,
        model_id: u64,
        tag_map: HashMap<u64, Astr>,
    ) -> HashMap<Astr, VddFieldRef> {
        let fields = f_select_by_model(pool, model_id).await;
        let mut r = HashMap::<Astr, VddFieldRef>::new();
        tag_map.iter().for_each(|x| {
            fields.iter().find(|y| y.id.0 == *x.0).map(|y| {
                r.insert(x.1.clone(), y.clone());
            });
        });
        r
    }

    /// Deletes all field entities of a model via the index query.
    pub(super) async fn f_clear_in_model(pool: &impl SqlxDefineTrait, model_id: u64) -> Ares {
        pool.delete_entities_from_index_query(VddKey::IndexModel2Field(model_id))
            .await
    }

    /// Permanently deletes the field entity row.
    pub(super) async fn f_drop_from_trash(pool: &impl SqlxDefineTrait, field_id: &FieldId) -> Ares {
        pool.do_delete_entity(field_id.0).await
    }
}
