--- START OF FILE baseApi.py ---

# Import standard library modules.
import functools
import json
import logging
import os
import re
import traceback
import urllib.parse
from inspect import isfunction

# Import third-party libraries.
import jsonschema
import prison  # A library for parsing RISON.
from flask import (
    abort,
    current_app,
    flash,
    jsonify,
    make_response,
    request,
    send_file,
)
from flask.globals import session
# Import compatibility utilities from Flask-AppBuilder.
from flask_appbuilder._compat import as_unicode
# Import action components from Flask-AppBuilder.
from flask_appbuilder.actions import ActionItem, action
# Import the base REST API class from Flask-AppBuilder.
from flask_appbuilder.api import ModelRestApi
# Import constants used for defining API response keys.
from flask_appbuilder.const import (
    API_ADD_COLUMNS_RES_KEY,
    API_ADD_COLUMNS_RIS_KEY,
    API_ADD_TITLE_RES_KEY,
    API_ADD_TITLE_RIS_KEY,
    API_DESCRIPTION_COLUMNS_RES_KEY,
    API_DESCRIPTION_COLUMNS_RIS_KEY,
    API_EDIT_COLUMNS_RES_KEY,
    API_EDIT_COLUMNS_RIS_KEY,
    API_EDIT_TITLE_RES_KEY,
    API_EDIT_TITLE_RIS_KEY,
    API_FILTERS_RES_KEY,
    API_FILTERS_RIS_KEY,
    API_LABEL_COLUMNS_RES_KEY,
    API_LABEL_COLUMNS_RIS_KEY,
    API_LIST_COLUMNS_RES_KEY,
    API_LIST_COLUMNS_RIS_KEY,
    API_LIST_TITLE_RES_KEY,
    API_LIST_TITLE_RIS_KEY,
    API_ORDER_COLUMN_RIS_KEY,
    API_ORDER_COLUMNS_RES_KEY,
    API_ORDER_COLUMNS_RIS_KEY,
    API_ORDER_DIRECTION_RIS_KEY,
    API_PAGE_INDEX_RIS_KEY,
    API_PAGE_SIZE_RIS_KEY,
    API_PERMISSIONS_RIS_KEY,
    API_SELECT_COLUMNS_RIS_KEY,
    API_SHOW_COLUMNS_RES_KEY,
    API_SHOW_COLUMNS_RIS_KEY,
    API_SHOW_TITLE_RES_KEY,
    API_SHOW_TITLE_RIS_KEY,
    API_URI_RIS_KEY,
)
# Import exception classes from Flask-AppBuilder.
from flask_appbuilder.exceptions import FABException, InvalidOrderByColumnFABException
# Import internationalization functions.
from flask_babel import gettext as __, lazy_gettext as _
# Import Marshmallow for object serialization/deserialization and validation.
from marshmallow import ValidationError, validate
# Import Marshmallow-SQLAlchemy fields for handling relationships.
from marshmallow_sqlalchemy.fields import Related, RelatedList
# Import SQLAlchemy components for database interaction.
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.properties import ColumnProperty
from sqlalchemy.orm.relationships import RelationshipProperty
# Import exceptions from Werkzeug.
from werkzeug.exceptions import BadRequest
# Import validators from WTForms.
from wtforms import validators

# Import application-specific components.
from myapp.app import app, db

# Get the application configuration.
# Shortcut to the application configuration.
conf = app.config

# Module-level logger (used instead of the root logger throughout this file).
log = logging.getLogger(__name__)
# Custom RIS keys for the extra sections this module merges into API responses.
API_COLUMNS_INFO_RIS_KEY = 'columns_info'
API_ADD_FIELDSETS_RIS_KEY = 'add_fieldsets'
API_EDIT_FIELDSETS_RIS_KEY = 'edit_fieldsets'
API_SHOW_FIELDSETS_RIS_KEY = 'show_fieldsets'
API_HELP_URL_RIS_KEY = 'help_url'
API_ACTION_RIS_KEY = 'action'
API_ROUTE_RIS_KEY = 'route_base'

API_USER_PERMISSIONS_RIS_KEY = 'user_permissions'
API_RELATED_RIS_KEY = 'related'
API_COLS_WIDTH_RIS_KEY = 'cols_width'
API_EXIST_ADD_ARGS_RIS_KEY = 'exist_add_args'
API_IMPORT_DATA_RIS_KEY = 'import_data'
API_DOWNLOAD_DATA_RIS_KEY = 'download_data'


# This function retrieves a formatted error message.
# Produce the error message reported to API clients on a 500.
def get_error_msg():
    """
    Return the formatted traceback when ``FAB_API_SHOW_STACKTRACE`` is
    enabled in the app config, otherwise a generic message that leaks
    nothing about internals.
    """
    show_stacktrace = current_app.config.get('FAB_API_SHOW_STACKTRACE')
    return traceback.format_exc() if show_stacktrace else 'Fatal error'


# A decorator to safely wrap API methods with a try-except block.
# A decorator that turns unhandled exceptions in API methods into JSON error responses.
def safe(f):
    """
    Wrap an API method in a try/except: a ``BadRequest`` becomes an HTTP 400
    response carrying its message, and any other exception becomes an
    HTTP 500 with a (possibly redacted) error message from get_error_msg().

    :param f: the API method to wrap (bound to a BaseApi-style class)
    :return: the wrapped method, with the original name/docstring preserved
    """

    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except BadRequest as e:
            # Client-side error: report the reason with a 400.
            return self.response_error(400, message=str(e))
        except Exception:
            # Bug fix: log on the module logger instead of the root logger,
            # and let Logger.exception capture the traceback itself.
            log.exception('Unhandled exception in API method %s', f.__name__)
            return self.response_error(500, message=get_error_msg())

    return wrapper


# A decorator factory for parsing RISON or JSON from the request's query string.
# A decorator factory for parsing RISON (or JSON fallback) from the query string.
def rison(schema=None):
    """
    Use this decorator to parse URI *Rison* arguments to
    a python data structure, your method gets the data
    structure on kwargs['rison']. Response is HTTP 400
    if *Rison* is not correct::

        class ExampleApi(BaseApi):
            @expose('/risonjson')
            @rison()
            def rison_json(self, **kwargs):
                return self.response(200, result=kwargs['rison'])

    You can additionally pass a JSON schema to
    validate Rison arguments::

        schema = {'type': 'object', 'properties': {'arg1': {'type': 'integer'}}}


        class ExampleApi(BaseApi):
            @expose('/risonjson')
            @rison(schema)
            def rison_json(self, **kwargs):
                return self.response(200, result=kwargs['rison'])
    """

    def _rison(f):
        # Wrapper: parse the query argument, validate, then delegate.
        def wraps(self, *args, **kwargs):
            raw = request.args.get(API_URI_RIS_KEY, None)
            kwargs['rison'] = {}
            if raw:
                try:
                    kwargs['rison'] = prison.loads(raw)
                except prison.decoder.ParserException:
                    # Rison failed; a JSON-encoded query string may be allowed.
                    if not current_app.config.get('FAB_API_ALLOW_JSON_QS', True):
                        return self.response_error(400, message='Not a valid rison argument')
                    try:
                        decoded = urllib.parse.parse_qs(f'{API_URI_RIS_KEY}={raw}')
                        kwargs['rison'] = json.loads(decoded[API_URI_RIS_KEY][0])
                    except Exception:
                        # Neither rison nor JSON parsed cleanly.
                        return self.response_error(
                            400, message='Not a valid rison/json argument'
                        )
            # Optionally validate the parsed structure against a JSON schema.
            if schema:
                try:
                    jsonschema.validate(instance=kwargs['rison'], schema=schema)
                except jsonschema.ValidationError as e:
                    return self.response_error(400, message=f'Not a valid rison schema {e}')
            return f(self, *args, **kwargs)

        return functools.update_wrapper(wraps, f)

    return _rison


# A decorator to expose a class method as an API endpoint.
# A decorator that registers a class method as an API endpoint.
def expose(url='/', methods=('GET',)):
    """
    Use this decorator to expose API endpoints on your API classes.

    :param url:
        Relative URL for the endpoint
    :param methods:
        Allowed HTTP methods. By default only GET is allowed.
    """

    def wrap(f):
        # Accumulate (url, methods) registrations on the function object;
        # create the list lazily on first use.
        registered = getattr(f, '_urls', None)
        if registered is None:
            registered = []
            f._urls = registered
        registered.append((url, methods))
        return f

    return wrap


# Add extra fields and data into the response body.
# A decorator factory to map a function to a key for dynamically building API responses.
# A decorator factory mapping a merge function to a response key for dynamic responses.
def merge_response_func(func, key):
    """
    Register *func* as the merge function that produces the value stored
    under *key* when the endpoint's response is dynamically assembled.

    Candidate functions must be members of BaseApi children with the
    signature ``def merge_some_function(self, response, rison_args)``.

    :param func: Name of the merge function where the key is allowed
    :param key: The key name for rison selection
    :return: the decorated endpoint function
    """

    def wrap(f):
        # Lazily create the key->function mapping on the endpoint function.
        mappings = getattr(f, '_response_key_func_mappings', None)
        if mappings is None:
            mappings = f._response_key_func_mappings = {}
        mappings[key] = func
        return f

    return wrap


# A helper function to create a standardized JSON response.
def json_response(message, status, result):
    """Build the standard ``{message, status, result}`` JSON envelope."""
    payload = {'message': message, 'status': status, 'result': result}
    return jsonify(payload)


# @pysnooper.snoop(depth=5)
# Exposes url + view function pairs; both the view function and the exposed url may be overridden.
# This is a custom base class for REST APIs in the application, extending Flask-AppBuilder's ModelRestApi.
# This is a custom base class for REST APIs in the application, extending Flask-AppBuilder's ModelRestApi.
class MyappModelRestApi(ModelRestApi):
    # A human-readable title for the model, used for generating UI titles.
    label_title = ''
    # The name of the primary key column for the model.
    primary_key = 'id'
    # Specifies the API type, which can influence behavior.
    api_type = 'json'
    # Allows users to log in through a browser session to access the API.
    allow_browser_login = True
    # A list of base filters to be applied to all queries for this API.
    base_filters = []
    # Default number of items per page for list endpoints.
    page_size = 100
    # Holds the original model object during an operation (e.g., before an update).
    src_item_object = None  # the original model object
    # Holds the JSON representation of the original model object.
    src_item_json = {}  # JSON form of the original model object
    # A function to perform custom permission checks on an item before editing.
    check_edit_permission = None
    # The datamodel instance that interfaces with the database model.
    datamodel = None
    # A function to post-process the list of items before sending the response.
    post_list = None
    # A function to pre-process incoming JSON data before loading.
    pre_json_load = None
    # Extra fields to be added to the add/edit forms.
    edit_form_extra_fields = {}
    add_form_extra_fields = {}
    # Definitions for grouping fields into fieldsets in the UI.
    add_fieldsets = []
    edit_fieldsets = []
    show_fieldsets = []
    # Hooks to run custom logic before the add/update forms are rendered or an item is shown.
    pre_add_get = None
    pre_update_get = None
    # A URL for help documentation related to this API.
    help_url = None
    pre_show = None
    # Default filters to be applied if none are provided in the request.
    default_filter = {}
    # A dictionary of custom actions available for this API.
    actions = {}
    # A hook to run custom logic before listing items.
    pre_list = None
    # Default user permissions for an item (see check_item_permissions).
    user_permissions = {'add': True, 'edit': True, 'delete': True, 'show': True}
    # Custom query filters for related fields in add/edit forms.
    add_form_query_rel_fields = {}
    edit_form_query_rel_fields = {}
    # A list of related views to be included in the API response.
    related_views = []
    # A function to add more information to the API's _info endpoint response.
    add_more_info = None
    # A list of columns whose values should be "remembered" by the client.
    remember_columns = []
    # View-specific overrides for column labels (applied after model-level labels).
    spec_label_columns = {}
    # Base permissions required to access the API's methods.
    base_permissions = ['can_add', 'can_show', 'can_edit', 'can_list', 'can_delete']
    # A dictionary specifying the width of columns in the UI (see _init_cols_width).
    cols_width = {}
    # Flags to enable/disable data import/export features.
    import_data = False
    download_data = False
    # A hook to run custom logic before processing an uploaded file.
    pre_upload = None
    # A function to define relationships between columns.
    set_columns_related = None

    # def pre_list(self,**kargs):
    #     return

    # @pysnooper.snoop()
    # Creates a response that sends a CSV file for download.
    # Build a response that sends a CSV file as a download attachment.
    def csv_response(self, file_path, file_name=None):
        """
        Send the file at *file_path* to the client as a CSV download.

        :param file_path: path of the file on disk
        :param file_name: download name shown to the browser; defaults to the
            basename of *file_path*, with '.csv' appended when missing
        :return: a Flask response with a Content-Disposition attachment header
        """
        response = make_response(send_file(file_path, as_attachment=True, conditional=True))
        if not file_name:
            file_name = os.path.basename(file_path)
        # Bug fix: test the suffix rather than a substring (a name like
        # 'data.csv.bak' previously slipped through unchanged).
        if not file_name.endswith('.csv'):
            file_name = file_name + '.csv'
        # Trigger a download dialog in the browser.
        # Bug fix: dropped the redundant str.format() call on an f-string.
        response.headers['Content-Disposition'] = f'attachment; filename={file_name}'
        return response

    # Build the response body.
    @staticmethod
    def response(code, **kwargs):
        """
        Build a generic JSON HTTP response.

        :param code: HTTP status code (int)
        :param kwargs: data structure serialized as the JSON body (dict)
        :return: HTTP JSON response carrying flashed messages in a header
        """
        # Pop the flashed messages out of the session so they are delivered
        # exactly once, via a custom response header.
        flashes = session.get('_flashes', [])
        session['_flashes'] = []

        # Serialize the payload and attach the status code.
        resp = make_response(jsonify(kwargs), code)

        # Forward flashed messages as a JSON list of [category, message] pairs.
        flash_json = [[f[0], f[1]] for f in flashes]
        resp.headers['api_flashes'] = json.dumps(flash_json)
        resp.headers['Content-Type'] = 'application/json; charset=utf-8'
        return resp

    # This method initializes the titles for various API operations (list, add, edit, show).
    # Initialize the titles used by the list/add/edit/show operations.
    def _init_titles(self):
        """
        Init Titles if not defined, deriving them either from ``label_title``
        or from the prettified model class name.
        """
        # NOTE: intentionally starts the MRO lookup above ModelRestApi,
        # skipping ModelRestApi's own _init_titles implementation.
        super(ModelRestApi, self)._init_titles()
        class_name = self.datamodel.model_name
        # A custom label_title drives all four titles.
        if self.label_title:
            self.list_title = '遍历 ' + self.label_title
            self.add_title = '添加 ' + self.label_title
            self.edit_title = '编辑 ' + self.label_title
            self.show_title = '查看 ' + self.label_title

        # Fall back to auto-generated English titles for anything still unset.
        pretty_name = self._prettify_name(class_name)
        for attr, prefix in (
            ('list_title', 'List '),
            ('add_title', 'Add '),
            ('edit_title', 'Edit '),
            ('show_title', 'Show '),
        ):
            if not getattr(self, attr):
                setattr(self, attr, prefix + pretty_name)
        # The page title defaults to the list title.
        self.title = self.list_title

    # @pysnooper.snoop()
    # This method initializes various properties of the API view.
    # Initialize the view's properties: actions, labels, pk, widths, help url.
    def _init_properties(self):
        """
        Init Properties
        """
        super()._init_properties()

        # Discover methods decorated with @action and register them.
        # (Local renamed from 'action' to avoid shadowing the imported decorator.)
        self.actions = {}
        for attr_name in dir(self):
            candidate = getattr(self, attr_name)
            if hasattr(candidate, '_action'):
                item = ActionItem(*candidate._action, func=candidate)
                self.actions[item.name] = item

        # Merge label definitions: model-level labels first...
        model_obj = self.datamodel.obj
        if getattr(model_obj, 'label_columns', None):
            self.label_columns.update(model_obj.label_columns)
        # ...then view-specific overrides.
        self.label_columns.update(self.spec_label_columns)

        # Resolve the primary key name from the datamodel.
        self.primary_key = self.datamodel.get_pk_name()

        # Infer UI column widths.
        self._init_cols_width()

        # Help url is looked up by table name in the application config.
        if self.datamodel:
            self.help_url = conf.get('HELP_URL', {}).get(model_obj.__tablename__, '')
        else:
            self.help_url = ''

    # This method initializes the Marshmallow schemas for serialization and deserialization.
    # Build the Marshmallow schemas used by the list/add/edit/show endpoints.
    def _init_model_schemas(self):
        """
        Create a Marshmallow schema for each view, unless one was supplied
        explicitly on the class.
        """
        convert = self.model2schemaconverter.convert
        if self.list_model_schema is None:
            self.list_model_schema = convert(self.list_columns)
        if self.add_model_schema is None:
            self.add_model_schema = convert(
                self.add_columns, nested=False, enum_dump_by_name=True
            )
        if self.edit_model_schema is None:
            # The edit schema covers edit/show/list/search columns so that
            # related-field lookups work for all of them.
            merged_columns = set(self.edit_columns)
            merged_columns.update(self.show_columns, self.list_columns, self.search_columns)
            self.edit_model_schema = convert(
                list(merged_columns), nested=False, enum_dump_by_name=True
            )
        if self.show_model_schema is None:
            self.show_model_schema = convert(self.show_columns)

    # @pysnooper.snoop(watch_explode=('value','column_type'))
    # This method automatically determines appropriate UI widths for columns based on their data type.
    # Infer a sensible UI width for every unconfigured list column.
    def _init_cols_width(self):
        """
        Fill ``cols_width`` for list columns the user has not configured,
        choosing a width from the column's SQLAlchemy type: 100 for
        numeric/date/enum, 300 for time/datetime, and the declared length
        (clamped to [100, 500]) for strings.
        """
        narrow_types = ('Integer', 'Float', 'Numeric', 'Date', 'Enum')
        for column in self.datamodel.obj.__table__._columns:
            # Only list columns without an explicit configuration.
            if column.name in self.cols_width or column.name not in self.list_columns:
                continue
            type_name = column.type.__class__.__name__
            if type_name in narrow_types:
                self.cols_width[column.name] = {'type': 'ellip2', 'width': 100}
            elif type_name in ('Time', 'DateTime'):
                # Bug fix: the SQLAlchemy class is spelled 'DateTime'; the
                # old 'Datetime' never matched, so datetime columns fell
                # through to the default width.
                self.cols_width[column.name] = {'type': 'ellip2', 'width': 300}
            elif type_name in ('String', 'Text'):
                width = 100
                length = getattr(column.type, 'length', None)
                if length and length > 100:
                    # Bug fix: length == 500 previously matched neither
                    # boundary check and stayed at 100; clamp to 500 instead.
                    width = min(length, 500)
                self.cols_width[column.name] = {'type': 'ellip2', 'width': width}

        # Any remaining list column gets the default width.
        for attr in self.list_columns:
            self.cols_width.setdefault(attr, {'type': 'ellip2', 'width': 100})

    # Merge the column-width information into the response.
    # A merge function to add column width information to the API response.
    def merge_cols_width(self, response, **kwargs):
        """Expose the column-width configuration under its response key."""
        response.update({API_COLS_WIDTH_RIS_KEY: self.cols_width})

    # Merge the bulk import/download availability flags into the response.
    # A merge function to add flags for import/download operations to the API response.
    def merge_ops_data(self, response, **kwargs):
        """Expose the bulk import/download feature flags to the client."""
        response.update({
            API_IMPORT_DATA_RIS_KEY: self.import_data,
            API_DOWNLOAD_DATA_RIS_KEY: self.download_data,
        })

    # Re-render the add page with carried-over values.
    # @pysnooper.snoop()
    # A merge function to handle pre-filling the add form with existing arguments from the request.
    def merge_exist_add_args(self, response, **kwargs):
        """
        Pre-fill the add-form columns with values supplied in the
        ``exist_add_args`` query argument (a JSON-encoded dict), then apply
        the optional ``set_columns_related`` transformation hook.
        """
        exist_add_args = request.args.get('exist_add_args', '')
        if not exist_add_args:
            return
        exist_add_args = json.loads(exist_add_args)
        # Turn the supplied values into defaults on the add columns.
        response_add_columns = {}
        for column in response[API_ADD_COLUMNS_RIS_KEY]:
            if column['name'] in exist_add_args and exist_add_args[column['name']]:
                column['default'] = exist_add_args[column['name']]
            response_add_columns[column['name']] = column
        # Apply the user-defined column relationship logic, if any.
        if self.set_columns_related:
            try:
                self.set_columns_related(exist_add_args, response_add_columns)
                response[API_ADD_COLUMNS_RIS_KEY] = list(response_add_columns.values())
            except Exception:
                # Bug fix: log the failure (with traceback) instead of
                # printing the bare exception to stdout.
                log.exception('set_columns_related hook failed')

    # Convert columns fields into the JSON "info" structure.
    # @pysnooper.snoop()
    # Converts a dictionary of column field definitions into a list of JSON info objects.
    # Convert WTForms-style column field definitions into frontend info dicts.
    def columnsfield2info(self, columnsfields):
        """
        Convert a mapping of column-name -> field definition into a list of
        JSON-serializable column-info dicts for the frontend.
        """
        infos = []
        for col_name, column_field in columnsfields.items():
            field_kwargs = column_field.kwargs
            # Base information: type name (minus the 'Field' suffix), labels,
            # defaults and validation metadata.
            col_info = {
                'name': col_name,
                'type': column_field.field_class.__name__.replace('Field', ''),
                'description': self.description_columns.get(
                    col_name, field_kwargs.get('description', '')
                ),
                'label': self.label_columns.get(col_name, field_kwargs.get('label', '')),
                'default': field_kwargs.get('default', ''),
                'validators': field_kwargs.get('validators', []),
                'choices': field_kwargs.get('choices', []),
            }
            # Derive UI hints from the widget, when one is configured.
            if 'widget' in field_kwargs:
                widget = field_kwargs['widget']
                col_info['widget'] = (
                    widget.__class__.__name__
                    .replace('Widget', '')
                    .replace('Field', '')
                    .replace('My', '')
                )
                if getattr(widget, 'readonly', False):
                    col_info['disable'] = True
                # A select widget that also accepts free text input.
                if getattr(widget, 'can_input', False):
                    col_info['ui-type'] = 'input-select'
                # Date and date-range pickers.
                if getattr(widget, 'is_date', False):
                    col_info['ui-type'] = 'datePicker'
                if getattr(widget, 'is_date_range', False):
                    col_info['ui-type'] = 'rangePicker'

            # Normalize into the standard UI info shape.
            infos.append(self.make_ui_info(col_info))
        return infos

    # Each user's permissions on the current record; base_permissions applies to all records.
    # This method can be overridden to implement item-specific permission checks.
    def check_item_permissions(self, item):
        """Per-item permission hook; the default grants every operation."""
        self.user_permissions = {perm: True for perm in ('add', 'edit', 'delete', 'show')}

    # A merge function to add the view's base permissions to the response.
    def merge_base_permissions(self, response, **kwargs):
        """Expose the view-level base permissions under their response key."""
        response.update({API_PERMISSIONS_RIS_KEY: self.base_permissions})

    # @pysnooper.snoop()
    # A merge function to add item-specific user permissions to the response.
    def merge_user_permissions(self, response, **kwargs):
        """Expose the item-specific user permissions under their response key."""
        response.update({API_USER_PERMISSIONS_RIS_KEY: self.user_permissions})

    # Fields from add_form_extra_fields should also be retrievable here.
    # @pysnooper.snoop(watch_explode=())
    # A merge function to add detailed information about the 'add' form fields to the response.
    def merge_add_field_info(self, response, **kwargs):
        """Attach detailed add-form field descriptions to the response."""
        _kwargs = kwargs.get('add_columns', {})
        # Honor any custom query restrictions configured for related fields.
        if self.add_form_query_rel_fields:
            self.add_query_rel_fields = self.add_form_query_rel_fields

        response[API_ADD_COLUMNS_RES_KEY] = self._get_fields_info(
            self.add_columns,
            self.add_model_schema,
            self.add_query_rel_fields,
            **_kwargs,
        )

    # @pysnooper.snoop(watch_explode=('edit_columns'))
    # A merge function to add detailed information about the 'edit' form fields to the response.
    def merge_edit_field_info(self, response, **kwargs):
        """Attach detailed edit-form field descriptions to the response."""
        _kwargs = kwargs.get('edit_columns', {})
        # Honor any custom query restrictions configured for related fields.
        if self.edit_form_query_rel_fields:
            self.edit_query_rel_fields = self.edit_form_query_rel_fields
        edit_columns = self._get_fields_info(
            self.edit_columns,
            self.edit_model_schema,
            self.edit_query_rel_fields,
            **_kwargs,
        )
        # Columns flagged with 'retry_info' are rendered read-only.
        for column in edit_columns:
            if column.get('retry_info', False):
                column['disable'] = True
        response[API_EDIT_COLUMNS_RES_KEY] = edit_columns

    # @pysnooper.snoop(watch_explode=('edit_columns'))
    # A merge function to add 'add' form fieldset information to the response.
    def merge_add_fieldsets_info(self, response, **kwargs):
        """Attach add-form fieldset grouping information to the response."""
        # Each fieldset is a (group_name, {'expanded': ..., 'fields': [...]}) pair.
        response[API_ADD_FIELDSETS_RIS_KEY] = [
            {
                'group': group[0],
                'expanded': group[1].get('expanded', True),
                'fields': group[1]['fields'],
            }
            for group in (self.add_fieldsets or [])
        ]

    # @pysnooper.snoop(watch_explode=('edit_columns'))
    # A merge function to add 'edit' form fieldset information to the response.
    def merge_edit_fieldsets_info(self, response, **kwargs):
        """Attach edit-form fieldset grouping information to the response."""
        # Each fieldset is a (group_name, {'expanded': ..., 'fields': [...]}) pair.
        response[API_EDIT_FIELDSETS_RIS_KEY] = [
            {
                'group': group[0],
                'expanded': group[1].get('expanded', True),
                'fields': group[1]['fields'],
            }
            for group in (self.edit_fieldsets or [])
        ]

    # A merge function to add 'show' view fieldset information to the response.
    def merge_show_fieldsets_info(self, response, **kwargs):
        """Attach show-view fieldset grouping information to the response."""
        # Each fieldset is a (group_name, {'expanded': ..., 'fields': [...]}) pair.
        response[API_SHOW_FIELDSETS_RIS_KEY] = [
            {
                'group': group[0],
                'expanded': group[1].get('expanded', True),
                'fields': group[1]['fields'],
            }
            for group in (self.show_fieldsets or [])
        ]

    # @pysnooper.snoop()
    # A merge function to add information about available search filters to the response.
    def merge_search_filters(self, response, **kwargs):
        """Attach, for every searchable column, its filter operators, UI type,
        choices and default value under API_FILTERS_RES_KEY."""
        # Get possible search fields and all possible operations
        search_filters = {}
        dict_filters = self._filters.get_search_filters()
        # For each searchable column, define its available filter operations.
        for col in self.search_columns:
            search_filters[col] = {}
            search_filters[col]['filter'] = [
                {'name': as_unicode(flt.name), 'operator': flt.arg_name}
                for flt in dict_filters[col]
            ]

            # print(col)
            # print(self.datamodel.list_columns)
            # For foreign keys, return all selectable values up front (an
            # on-demand lookup would raise the question of which field to
            # query on).
            # If the column is a foreign key, provide a list of possible values.
            if (
                self.datamodel and self.edit_model_schema
            ):  # edit_model_schema is built from edit_columns; only the edit schema resolves related-object lists
                # ao = self.edit_model_schema.fields
                if col in self.edit_model_schema.fields:
                    field = self.edit_model_schema.fields[col]
                    # print(field)
                    if isinstance(field, Related) or isinstance(field, RelatedList):
                        filter_rel_field = self.edit_query_rel_fields.get(col, [])
                        # Fetch the related objects (capped at 1000) to
                        # populate the filter dropdown.
                        search_filters[col]['count'], search_filters[col]['values'] = (
                            self._get_list_related_field(
                                field, filter_rel_field, page=0, page_size=1000
                            )
                        )
                        # if col in self.datamodel.list_columns:
                        #     search_filters[col]["type"] = self.datamodel.list_columns[col].type

                    # Record the marshmallow field class name as the type,
                    # unless a type was already set above.
                    search_filters[col]['type'] = (
                        field.__class__.__name__
                        if 'type' not in search_filters[col]
                        else search_filters[col]['type']
                    )

            # Users may customize a field's widget/format (e.g. a string
            # rendered and filtered as a menu), so extra-field definitions
            # override the inferred type below.
            # Handle custom field definitions from extra fields.
            if col in self.edit_form_extra_fields:
                column_field = self.edit_form_extra_fields[col]
                column_field_kwargs = column_field.kwargs
                # Derive a short type name, e.g. MySelect2Field -> Select2.
                # aa = column_field
                search_filters[col]['type'] = column_field.field_class.__name__.replace(
                    'Field', ''
                ).replace('My', '')
                search_filters[col]['choices'] = column_field_kwargs.get('choices', [])
                # Choose-or-type widgets become free-input fields when searching.
                # Special handling for certain widget types.
                if (
                    hasattr(column_field_kwargs.get('widget', {}), 'can_input')
                    and column_field_kwargs['widget'].can_input
                ):
                    search_filters[col]['ui-type'] = 'input'
                # Widgets configured to use historical values as choices
                # (conten2choices) are recognized but not handled here yet.
                if (
                    hasattr(column_field_kwargs.get('widget', {}), 'conten2choices')
                    and column_field_kwargs['widget'].conten2choices
                ):
                    pass

            # Standardize UI info and handle default filter values.
            search_filters[col] = self.make_ui_info(search_filters[col])
            # Multi-select fields degrade to single-select when searching.
            if search_filters[col].get('ui-type', '') == 'select2':
                search_filters[col]['ui-type'] = 'select'

            search_filters[col]['default'] = self.default_filter.get(col, '')
        response[API_FILTERS_RES_KEY] = search_filters

    # A merge function to add the 'add' view title to the response.
    def merge_add_title(self, response, **kwargs):
        """Expose the add-form title under its standard response key."""
        response.update({API_ADD_TITLE_RES_KEY: self.add_title})

    # A merge function to add the 'edit' view title to the response.
    def merge_edit_title(self, response, **kwargs):
        """Expose the edit-form title under its standard response key."""
        response.update({API_EDIT_TITLE_RES_KEY: self.edit_title})

    # A generic merge function to add column labels to the response.
    def merge_label_columns(self, response, **kwargs):
        """Attach JSON-serializable column labels for the calling context.

        Pruned (explicitly selected) columns win; otherwise the column set is
        chosen by the 'caller' keyword ('list' or 'show'), falling back to
        label_columns for any other caller.
        """
        pruned = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
        by_caller = {'list': self.list_columns, 'show': self.show_columns}
        columns = pruned or by_caller.get(kwargs.get('caller'), self.label_columns)
        response[API_LABEL_COLUMNS_RES_KEY] = self._label_columns_json(columns)

    # A specific merge function for 'list' view labels.
    def merge_list_label_columns(self, response, **kwargs):
        """Delegate to merge_label_columns for the 'list' context."""
        return self.merge_label_columns(response, caller='list', **kwargs)

    # A specific merge function for 'show' view labels.
    def merge_show_label_columns(self, response, **kwargs):
        """Delegate to merge_label_columns for the 'show' context."""
        return self.merge_label_columns(response, caller='show', **kwargs)

    # @pysnooper.snoop()
    # A merge function to add the list of 'show' columns to the response.
    def merge_show_columns(self, response, **kwargs):
        """Attach the effective 'show' column list: explicitly requested
        columns when present, the configured show_columns otherwise."""
        pruned = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
        response[API_SHOW_COLUMNS_RES_KEY] = pruned or self.show_columns

    # A merge function to add column descriptions to the response.
    def merge_description_columns(self, response, **kwargs):
        """Attach column descriptions, restricted to the requested columns
        when a selection is present, otherwise for all described columns."""
        pruned = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
        cols = pruned or self.description_columns
        response[API_DESCRIPTION_COLUMNS_RES_KEY] = self._description_columns_json(cols)

    # A merge function to add the list of 'list' columns to the response.
    def merge_list_columns(self, response, **kwargs):
        """Attach the effective 'list' column list: explicitly requested
        columns when present, the configured list_columns otherwise."""
        pruned = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
        response[API_LIST_COLUMNS_RES_KEY] = pruned or self.list_columns

    # A merge function to add the list of orderable columns to the response.
    def merge_order_columns(self, response, **kwargs):
        """Attach the orderable columns, intersected with the requested
        selection when one is present (original order preserved)."""
        pruned = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
        if not pruned:
            response[API_ORDER_COLUMNS_RES_KEY] = self.order_columns
            return
        wanted = set(pruned)
        response[API_ORDER_COLUMNS_RES_KEY] = [
            col for col in self.order_columns if col in wanted
        ]

    # @pysnooper.snoop(watch_explode=('aa'))
    # A merge function to add low-level information about model columns (like data type).
    def merge_columns_info(self, response, **kwargs):
        """Attach per-column type metadata introspected from the SQLAlchemy model.

        Plain column attributes report their SQL type string; relationship
        attributes are tagged with the literal type 'Relationship'.
        """
        columns_info = {}
        model = self.datamodel.obj
        for attr in dir(model):
            # getattr with a default replaces the original redundant
            # hasattr-then-getattr pair (two lookups per attribute).
            value = getattr(model, attr, None)
            if type(value) is not InstrumentedAttribute:
                continue
            # Exact-type comparison (preserved from the original, now spelled
            # with 'is') distinguishes the two comparator classes; they are
            # mutually exclusive, hence the elif.
            if type(value.comparator) is ColumnProperty.Comparator:
                columns_info[value.key] = {'type': str(value.comparator.type)}
            elif type(value.comparator) is RelationshipProperty.Comparator:
                columns_info[value.key] = {'type': 'Relationship'}
        response[API_COLUMNS_INFO_RIS_KEY] = columns_info

    # A merge function to add the help URL to the response.
    def merge_help_url_info(self, response, **kwargs):
        """Expose the configured help URL in the response."""
        response.update({API_HELP_URL_RIS_KEY: self.help_url})

    # @pysnooper.snoop(watch_explode='aa')
    # A merge function to add information about available actions to the response.
    def merge_action_info(self, response, **kwargs):
        """Attach metadata for every registered action, keyed by action name."""
        # Local name 'act' avoids shadowing the imported 'action' decorator.
        response[API_ACTION_RIS_KEY] = {
            act.name: {
                'name': act.name,
                'text': act.text,
                'confirmation': act.confirmation,
                'icon': act.icon,
                'multiple': act.multiple,
                'single': act.single,
            }
            for act in self.actions.values()
        }

    # A merge function to add basic route and model info to the response.
    def merge_route_info(self, response, **kwargs):
        """Attach the base route, primary-key name and display title."""
        response[API_ROUTE_RIS_KEY] = '/%s/' % self.route_base.strip('/')
        response['primary_key'] = self.primary_key
        # Fall back to a prettified model name when no explicit title is set.
        title = self.label_title
        if not title:
            title = self._prettify_name(self.datamodel.model_name)
        response['label_title'] = title

    # @pysnooper.snoop(watch_explode=())
    # A merge function to add information about related views' fields to the response.
    def merge_related_field_info(self, response, **kwargs):
        """Attach add-form field metadata for every related view, keyed by the
        lowercase related model name."""
        try:
            add_info = {}
            # Instantiate each related view and collect its add-form fields.
            for view_class in self.related_views or []:
                view = view_class()
                view._init_model_schemas()
                # Prefer the add-form-specific relation filters when configured.
                if view.add_form_query_rel_fields:
                    view.add_query_rel_fields = view.add_form_query_rel_fields
                add_columns = view._get_fields_info(
                    cols=view.add_columns,
                    model_schema=view.add_model_schema,
                    filter_rel_fields=view.add_query_rel_fields,
                    **kwargs,
                )
                add_info[view.datamodel.obj.__name__.lower()] = add_columns
            response[API_RELATED_RIS_KEY] = add_info
        except Exception as e:
            # Best-effort: related-view introspection must never break /_info.
            print(e)

    # A merge function to add the 'list' view title to the response.
    def merge_list_title(self, response, **kwargs):
        """Expose the list-view title under its standard response key."""
        response.update({API_LIST_TITLE_RES_KEY: self.list_title})

    # A merge function to add the 'show' view title to the response.
    def merge_show_title(self, response, **kwargs):
        """Expose the show-view title under its standard response key.

        Uses the *_RES_KEY constant for consistency with the other
        merge_*_title functions; Flask-AppBuilder defines the RIS alias as
        equal to the RES key, so the emitted key string is unchanged.
        """
        response[API_SHOW_TITLE_RES_KEY] = self.show_title

    # A merge function for adding miscellaneous extra information via a custom hook.
    def merge_more_info(self, response, **kwargs):
        """Invoke the optional add_more_info hook, swallowing its errors so a
        faulty hook cannot break the metadata endpoint."""
        hook = self.add_more_info
        if not hook:
            return
        try:
            hook(response, **kwargs)
        except Exception as e:
            # Best-effort hook: failures are printed, not raised.
            print(e)

    # A helper function to create a standardized error response.
    def response_error(self, code, message='error', status=1, result=None):
        """Build the standard error payload and delegate to self.response.

        A falsy result (including the None default) is normalized to {}.
        """
        payload = {
            'result': result or {},
            'status': status,
            'message': message,
        }
        return self.response(code, **payload)

    # The main metadata endpoint, which provides all necessary information for the UI.
    @expose('/_info', methods=['GET'])
    # Decorators that chain together the merge functions to build the response.
    # Each merge_response_func registers a (merge function, response key) pair
    # which set_response_key_mappings later replays inside api_info.
    @merge_response_func(merge_more_info, 'more_info')
    @merge_response_func(merge_ops_data, API_IMPORT_DATA_RIS_KEY)
    @merge_response_func(merge_exist_add_args, API_EXIST_ADD_ARGS_RIS_KEY)
    @merge_response_func(merge_cols_width, API_COLS_WIDTH_RIS_KEY)
    @merge_response_func(merge_base_permissions, API_PERMISSIONS_RIS_KEY)
    @merge_response_func(merge_user_permissions, API_USER_PERMISSIONS_RIS_KEY)
    @merge_response_func(merge_add_field_info, API_ADD_COLUMNS_RIS_KEY)
    @merge_response_func(merge_edit_field_info, API_EDIT_COLUMNS_RIS_KEY)
    @merge_response_func(merge_add_fieldsets_info, API_ADD_FIELDSETS_RIS_KEY)
    @merge_response_func(merge_edit_fieldsets_info, API_EDIT_FIELDSETS_RIS_KEY)
    @merge_response_func(merge_show_fieldsets_info, API_SHOW_FIELDSETS_RIS_KEY)
    @merge_response_func(merge_search_filters, API_FILTERS_RIS_KEY)
    @merge_response_func(merge_show_label_columns, API_LABEL_COLUMNS_RIS_KEY)
    @merge_response_func(merge_show_columns, API_SHOW_COLUMNS_RIS_KEY)
    @merge_response_func(merge_list_label_columns, API_LABEL_COLUMNS_RIS_KEY)
    @merge_response_func(merge_list_columns, API_LIST_COLUMNS_RIS_KEY)
    @merge_response_func(merge_list_title, API_LIST_TITLE_RIS_KEY)
    @merge_response_func(merge_show_title, API_SHOW_TITLE_RIS_KEY)
    @merge_response_func(merge_add_title, API_ADD_TITLE_RIS_KEY)
    @merge_response_func(merge_edit_title, API_EDIT_TITLE_RIS_KEY)
    @merge_response_func(merge_description_columns, API_DESCRIPTION_COLUMNS_RIS_KEY)
    @merge_response_func(merge_order_columns, API_ORDER_COLUMNS_RIS_KEY)
    @merge_response_func(merge_columns_info, API_COLUMNS_INFO_RIS_KEY)
    @merge_response_func(merge_help_url_info, API_HELP_URL_RIS_KEY)
    @merge_response_func(merge_action_info, API_ACTION_RIS_KEY)
    @merge_response_func(merge_route_info, API_ROUTE_RIS_KEY)
    @merge_response_func(merge_related_field_info, API_RELATED_RIS_KEY)
    def api_info(self, **kwargs):
        """Serve /_info: aggregate all UI metadata via the registered merge functions.

        Runs pre-update/permission hooks when a primary key is supplied
        (edit context), or the pre-add hook otherwise (add context), then
        builds the response from every decorator-registered merge function.
        """
        _response = {}
        # Combine arguments from RISON and the query string.
        _args = kwargs.get('rison', {})
        _args.update(request.args)
        # Check if an ID is provided to determine if this is for an edit or add context.
        # NOTE(review): the local 'id' shadows the builtin of the same name.
        id = _args.get(self.primary_key, '')
        if id:
            # Edit context: run pre-update and permission-checking hooks,
            # each best-effort (errors printed, not raised).
            item = self.datamodel.get(id)
            if item and self.pre_update_get:
                try:
                    self.pre_update_get(item)
                except Exception as e:
                    print(e)
            if item and self.check_item_permissions:
                try:
                    self.check_item_permissions(item)
                except Exception as e:
                    print(e)
        elif self.pre_add_get:
            # Add context: run the pre-add hook, also best-effort.
            try:
                self.pre_add_get()
            except Exception as e:
                print(e)

        # Build the response by calling the mapped merge functions.
        self.set_response_key_mappings(_response, self.api_info, _args, **_args)
        return self.response(200, **_response)

    # API endpoint to get a single item by its primary key.
    @expose('/<int:pk>', methods=['GET'])
    # @pysnooper.snoop(depth=4)
    def api_get(self, pk, **kwargs):
        """Serve GET /<pk>: return one record, optionally pruned to the
        requested columns, ordered to match show_columns."""
        # Run the pre-show hook if defined.
        if self.pre_show:
            src_item_object = self.datamodel.get(pk, self._base_filters)
            self.pre_show(src_item_object)

        # from flask_appbuilder.models.sqla.interface import SQLAInterface
        # Retrieve the item from the database.
        item = self.datamodel.get(pk, self._base_filters)
        if not item:
            # Return a 404 error if not found.
            return self.response_error(404, 'Not found')

        _response = {}
        # Parse request arguments (RISON plus an optional JSON 'form_data' blob).
        _args = kwargs.get('rison', {})
        if 'form_data' in request.args:
            _args.update(json.loads(request.args.get('form_data')))

        # Handle requests for a specific set of columns (restricted to show_columns).
        select_cols = _args.get(API_SELECT_COLUMNS_RIS_KEY, [])
        _pruned_select_cols = [col for col in select_cols if col in self.show_columns]
        self.set_response_key_mappings(
            _response,
            self.get,
            _args,
            **{API_SELECT_COLUMNS_RIS_KEY: _pruned_select_cols},
        )
        # Use a dynamically generated schema if specific columns were requested.
        if _pruned_select_cols:
            _show_model_schema = self.model2schemaconverter.convert(_pruned_select_cols)
        else:
            _show_model_schema = self.show_model_schema

        # Force a uniform date/time format on the audit columns, if present.
        if _show_model_schema.fields.get('changed_on'):
            _show_model_schema.fields['changed_on'].dateformat = '%Y-%m-%d %H:%M:%S'
        if _show_model_schema.fields.get('created_on'):
            _show_model_schema.fields['created_on'].dateformat = '%Y-%m-%d %H:%M:%S'

        # Serialize the item (.data: pre-3.0 marshmallow MarshalResult API).
        data = _show_model_schema.dump(item, many=False).data
        # Optionally replace serialized related-object dicts with str(attribute).
        if int(_args.get('str_related', 0)):
            for key in data:
                if isinstance(data[key], dict):
                    data[key] = str(getattr(item, key))

        # Order the serialized fields to match show_columns; keys not in
        # show_columns sink to the end (sort index 1000).
        data = sorted(
            data.items(),
            key=lambda kv: self.show_columns.index(kv[0]) if kv[0] in self.show_columns else 1000,
        )
        data = dict(zip([x[0] for x in data], [x[1] for x in data]))

        _response['data'] = data  # item.to_json()
        _response['data'][self.primary_key] = pk

        # Apply a pre-get hook to the response; a truthy return overrides the data.
        back = self.pre_get(_response)
        # Construct the final response payload.
        back_data = {
            'result': back['data'] if back else _response['data'],
            'status': 0,
            'message': 'success',
        }
        return self.response(200, **back_data)

    # API endpoint to get a list of items.
    @expose('/', methods=['GET'])
    # @pysnooper.snoop(watch_explode=('_response','lst'))
    def api_list(self, **kwargs):
        """Serve GET /: return a filtered, ordered, paginated list of records."""
        _response = {}
        # Pre-process the request JSON if a hook is defined.
        if self.pre_json_load:
            req_json = self.pre_json_load(request.json)
        else:
            try:
                req_json = request.json or {}
            except Exception as e:
                print(e)
                req_json = {}

        # Combine arguments from the JSON body and query string.
        _args = req_json or {}
        _args.update(request.args)
        # GET requests that cannot carry a body may pass it in the URL as a
        # JSON-encoded 'form_data' query parameter instead.
        if 'form_data' in request.args:
            _args.update(json.loads(request.args.get('form_data')))

        # Run the pre-list hook.
        if self.pre_list:
            self.pre_list(**_args)

        # handle select columns
        # Handle requests for a specific set of columns (restricted to list_columns).
        select_cols = _args.get(API_SELECT_COLUMNS_RIS_KEY, [])
        _pruned_select_cols = [col for col in select_cols if col in self.list_columns]
        self.set_response_key_mappings(
            _response,
            self.get_list,
            _args,
            **{API_SELECT_COLUMNS_RIS_KEY: _pruned_select_cols},
        )

        # Use a dynamic schema if specific columns were requested.
        if _pruned_select_cols:
            _list_model_schema = self.model2schemaconverter.convert(_pruned_select_cols)
        else:
            _list_model_schema = self.list_model_schema
        # handle filters
        try:
            # Apply filters from the request arguments (each filter's
            # shorthand lives in its arg_name).
            joined_filters = self._handle_filters_args(_args)
        except FABException as e:
            return self.response_error(400, message=str(e))
        # handle base order
        try:
            # Apply ordering from the request arguments.
            order_column, order_direction = self._handle_order_args(_args)
        except InvalidOrderByColumnFABException as e:
            return self.response_error(400, message=str(e))
        # handle pagination
        # Apply pagination from the request arguments.
        page_index, page_size = self._handle_page_args(_args)
        # Make the query
        # Execute the database query.
        query_select_columns = _pruned_select_cols or self.list_columns
        count, lst = self.datamodel.query(
            joined_filters,
            order_column,
            order_direction,
            page=page_index,
            page_size=page_size,
            select_columns=query_select_columns,
        )
        # Apply the post-list hook to the results.
        if self.post_list:
            lst = self.post_list(lst)
            # post_list may filter items out, so the count must be refreshed.
            count = len(lst) if lst else 0

        # pks = self.datamodel.get_keys(lst)
        # import marshmallow.schema
        # for item in lst:
        #     if self.datamodel.is_relation(item)
        # aa =
        # item.project = 'aaa'
        # Force a uniform date/time format on the audit columns, if present.
        if _list_model_schema.fields.get('changed_on'):
            _list_model_schema.fields['changed_on'].dateformat = '%Y-%m-%d %H:%M:%S'
        if _list_model_schema.fields.get('created_on'):
            _list_model_schema.fields['created_on'].dateformat = '%Y-%m-%d %H:%M:%S'

        # Serialize the list (.data: pre-3.0 marshmallow MarshalResult API).
        data = _list_model_schema.dump(lst, many=True).data

        # Optionally replace serialized related-object dicts with str(attribute).
        if int(_args.get('str_related', 0)):
            for index in range(len(data)):
                for key in data[index]:
                    if isinstance(data[index][key], dict):
                        data[index][key] = str(getattr(lst[index], key))

        # Build the response payload.
        _response['data'] = data  # [item.to_json() for item in lst]
        # _response["ids"] = pks
        _response['count'] = count  # total number of rows matching the query
        for index in range(len(lst)):
            _response['data'][index][self.primary_key] = getattr(lst[index], self.primary_key)

        # Apply the pre-get-list hook, best-effort (errors printed, not raised).
        try:
            self.pre_get_list(_response)
        except Exception as e:
            print(e)

        # Construct the final successful response.
        back_data = {
            'result': _response,  # _response['data']
            'status': 0,
            'message': 'success',
        }
        # print(back_data)
        return self.response(200, **back_data)

    # @pysnooper.snoop()
    # A helper method to convert a JSON dictionary to a model item.
    def json_to_item(self, data):
        """Build a model instance from *data*, capturing any construction error.

        Returns a holder object with a ``data`` attribute (the new model item,
        or the original dict on failure) and, on failure only, an ``errors``
        string describing the exception.
        """
        back = type('Back', (), {})()
        try:
            # Instantiate the model directly from the keyword arguments.
            back.data = self.datamodel.obj(**data)
        except Exception as e:
            # Construction failed: echo the raw payload and record the error.
            back.data = data
            back.errors = str(e)
        return back

    # @expose("/add", methods=["POST"])
    # def add(self):
    # API endpoint to create a new item.
    @expose('/', methods=['POST'])
    # @pysnooper.snoop(watch_explode=('item', 'json_data'))
    def api_add(self):
        self.src_item_json = {}
        # Ensure the request content type is JSON.
        if not request.is_json:
            return self.response_error(400, message='Request is not JSON')
        try:
            # Apply pre-processing hook to the JSON data.
            if self.pre_json_load:
                json_data = self.pre_json_load(request.json)
            else:
                json_data = request.json

            # Load and validate the data using the add schema.
            item = self.add_model_schema.load(json_data)
            # item = self.add_model_schema.load(data)
        except ValidationError as err:
            # Return a 422 Unprocessable Entity error if validation fails.
            return self.response_error(422, message=err.messages)
        # This validates custom Schema with custom validations
        if isinstance(item.data, dict):
            return self.response_error(422, message=item.errors)
        try:
            # Run pre-add hook.
            self.pre_add(item.data)
            # Add the new item to the database.
            self.datamodel.add(item.data, raise_exception=True)
            # Run post-add hook.
            self.post_add(item.data)
            # Serialize the newly created item for the response.
            result_data = self.add_model_schema.dump(item.data, many=False).data
            result_data[self.primary_key] = self.datamodel.get_pk_value(item.data)
            back_data = {'result': result_data, 'status': 0, 'message': 'success'}
            return self.response(
                200,
                **back_data,
            )
        except IntegrityError as e:
            # Handle database integrity errors (e.g., unique constraint violations).
            return self.response_error(422, message=str(e.orig))

    # API endpoint to update an existing item.
    @expose('/<int:pk>', methods=['PUT'])
    # @pysnooper.snoop(watch_explode=('item','data'))
    def api_edit(self, pk):
        """Serve PUT /<pk>: merge the JSON body into the record and save it."""
        # Retrieve the item to be edited.
        item = self.datamodel.get(pk, self._base_filters)
        # Bug fix: the missing-item check must run before item.to_json(),
        # which previously raised AttributeError when the pk did not exist.
        if not item:
            return self.response_error(404, message='Not found')
        self.src_item_json = item.to_json()

        # if self.check_redirect_list_url:
        try:
            # Perform custom permission checks (best-effort on hook failure).
            if self.check_edit_permission:
                has_permission = self.check_edit_permission(item)
                if not has_permission:
                    return json_response(message='no permission to edit', status=1, result={})

        except Exception as e:
            print(e)
            return json_response(message='check edit permission' + str(e), status=1, result={})

        # Ensure the request content type is JSON.
        if not request.is_json:
            return self.response_error(400, message='Request is not JSON')
        try:
            # Pre-process the JSON data if a hook is configured.
            if self.pre_json_load:
                json_data = self.pre_json_load(request.json)
            else:
                json_data = request.json
            # Merge the incoming data with the existing item's data (PATCH-like behavior).
            data = self._merge_update_item(item, json_data)
            # Load and validate the merged data into the item instance.
            item = self.edit_model_schema.load(data, instance=item)
        except ValidationError as err:
            return self.response_error(422, message=err.messages)
        # Custom schema validations surface as a plain dict in item.data
        # (pre-3.0 marshmallow UnmarshalResult API).
        if isinstance(item.data, dict):
            return self.response_error(422, message=item.errors)
        # Run pre-update hook.
        self.pre_update(item.data)

        try:
            # Save the changes to the database.
            self.datamodel.edit(item.data, raise_exception=True)
            # Run post-update hook.
            self.post_update(item.data)
            # Serialize the updated item for the response.
            result = self.edit_model_schema.dump(item.data, many=False).data
            result[self.primary_key] = self.datamodel.get_pk_value(item.data)
            back_data = {'status': 0, 'message': 'success', 'result': result}
            return self.response(
                200,
                **back_data,
            )
        except IntegrityError as e:
            # Handle database integrity errors.
            return self.response_error(422, message=str(e.orig))

    # API endpoint to delete an item.
    @expose('/<int:pk>', methods=['DELETE'])
    # @pysnooper.snoop()
    def api_delete(self, pk):
        """Serve DELETE /<pk>: remove one record, running pre/post hooks."""
        item = self.datamodel.get(pk, self._base_filters)
        if not item:
            return self.response_error(404, message='Not found')
        try:
            # The pre/post hooks bracket the actual deletion.
            self.pre_delete(item)
            self.datamodel.delete(item, raise_exception=True)
            self.post_delete(item)
            payload = {'status': 0, 'message': 'success', 'result': item.to_json()}
            return self.response(200, **payload)
        except IntegrityError as e:
            # Integrity errors, e.g. foreign-key constraint violations.
            return self.response_error(422, message=str(e.orig))

    # API endpoint to execute a single-item action.
    @expose('/action/<string:name>/<int:pk>', methods=['GET'])
    def single_action(self, name, pk):
        """
        Action method to handle actions from a show view.

        Returns a 200 response with status 0 on success, -1 on failure.
        """
        pk = self._deserialize_pk_if_composite(pk)
        # Local name 'act' avoids shadowing the imported 'action' decorator.
        act = self.actions.get(name)
        # Bug fix: an unknown action name previously surfaced as an opaque
        # AttributeError on None.func; fail early with a clear message.
        if act is None:
            back = {'status': -1, 'message': 'unknown action: %s' % name, 'result': {}}
            return self.response(200, **back)
        try:
            # Execute the action on the retrieved item.
            act.func(self.datamodel.get(pk))
            back = {'status': 0, 'result': {}, 'message': 'success'}
            return self.response(200, **back)
        except Exception as e:
            print(e)
            back = {'status': -1, 'message': str(e), 'result': {}}
            return self.response(200, **back)

    # API endpoint to execute a multi-item action.
    @expose('/multi_action/<string:name>', methods=['POST'])
    def multi_action(self, name):
        """
        Action method to handle multiple records selected from a list view.

        Returns a 200 response with status 0 on success, -1 on failure.
        """
        # Robustness: a missing body or 'ids' key no longer raises an
        # unhandled KeyError/TypeError (previously a 500).
        pks = (request.json or {}).get('ids', [])
        # Local name 'act' avoids shadowing the imported 'action' decorator.
        act = self.actions.get(name)
        # Bug fix: an unknown action name previously surfaced as an opaque
        # AttributeError on None.func; fail early with a clear message.
        if act is None:
            back = {'status': -1, 'message': 'unknown action: %s' % name, 'result': {}}
            return self.response(200, **back)
        # Retrieve all items to be acted upon.
        items = [self.datamodel.get(self._deserialize_pk_if_composite(int(pk))) for pk in pks]
        try:
            # Execute the action; a string return value becomes the message.
            back = act.func(items)
            message = back if isinstance(back, str) else 'success'
            back = {'status': 0, 'result': {}, 'message': message}
            return self.response(200, **back)
        except Exception as e:
            print(e)
            back = {'status': -1, 'message': str(e), 'result': {}}
            return self.response(200, **back)

    # API endpoint to download a CSV template for data import.
    @expose('/download_template/', methods=['GET'])
    def download_template(self):
        """Generate and send a CSV import template: the add-column header row
        plus one placeholder data row of 'xx' values."""
        cols = list(self.add_columns)
        demostr = ','.join(cols) + '\n' + ','.join(['xx'] * len(cols))

        # Name the file after the model's table; overwrite any stale copy.
        file_name = self.datamodel.obj.__tablename__
        csv_file = '%s.csv' % file_name
        if os.path.exists(csv_file):
            os.remove(csv_file)
        # Bug fix: use a context manager so the handle is closed even if the
        # write raises (the original leaked the open file on error).
        with open(csv_file, mode='w', encoding='utf-8-sig') as file:
            file.writelines(demostr)
        csv_file = os.path.abspath(csv_file)
        # Send the file as a downloadable response.
        return self.csv_response(csv_file, file_name=file_name)

    # API endpoint to upload and import data from a CSV file.
    @expose('/upload/', methods=['POST'])
    def upload(self):
        """Import rows from an uploaded CSV file into this view's model.

        The first CSV row is the header and must consist of model attribute
        names; each following row becomes one model instance. Rows with a
        wrong column count or with only blank cells are skipped. Each row is
        committed individually so one bad row does not abort the rest.

        :return: JSON response with status and a per-row 'success'/'fail' list
        """
        import csv

        # Uploaded file from the request (werkzeug FileStorage).
        csv_file = request.files.get('csv_file')
        # basename() strips any directory components an attacker could smuggle
        # into the client-supplied filename (path traversal hardening).
        i_path = os.path.basename(csv_file.filename)
        if os.path.exists(i_path):
            os.remove(i_path)
        csv_file.save(i_path)

        header = None
        result = []
        # Context manager closes the reader's file handle (the original leaked it).
        with open(i_path, encoding='utf-8-sig') as fp:
            for line in csv.reader(fp):
                if not header:
                    header = line
                    # Validate the header ONCE, right after reading it, instead
                    # of re-checking it for every data row as the original did.
                    # Every header column must be an attribute of the model.
                    for col_name in header:
                        if not hasattr(self.datamodel.obj, col_name):
                            flash('csv首行header与数据库字段不对应', 'warning')
                            back = {
                                'status': 1,
                                'result': [],
                                'message': 'csv首行header与数据库字段不对应',
                            }
                            return self.response(200, **back)
                    continue

                # Skip rows whose column count does not match the header.
                if len(line) != len(header):
                    continue

                # Skip rows that contain only blank cells.
                if not [one for one in line if one.strip()]:
                    continue

                # Build a {column: value} dict for this row.
                data = dict(zip(header, line))

                try:
                    # Optional hook may transform the row before insertion.
                    if self.pre_upload:
                        data = self.pre_upload(data)
                    model = self.datamodel.obj(**data)
                    self.pre_add(model)
                    db.session.add(model)
                    self.post_add(model)
                    # Commit per row so earlier successes survive later failures.
                    db.session.commit()
                    result.append('success')
                except Exception as e:
                    # Roll back only this row and keep importing the rest.
                    db.session.rollback()
                    logging.error('csv import row failed: %s', e)
                    result.append('fail')

        success_num = result.count('success')
        fail_num = result.count('fail')
        # Flash a summary of the import results.
        flash('成功导入%s行，失败导入%s行' % (success_num, fail_num), 'warning')
        back = {
            'status': 0,
            'message': 'result为上传成功行，共成功%s' % success_num,
            'result': result,
        }
        return self.response(200, **back)

    # API endpoint to download all data from the model as a CSV file.
    @expose('/download/', methods=['GET'])
    def download(self):
        """Dump the model's entire table to <tablename>.csv and send it.

        Resolves the correct database URI (honoring a per-model __bind_key__
        when the app uses multiple binds), reads the whole table via pandas,
        writes it to a CSV next to the app and returns it as a download.
        """
        import pandas
        import sqlalchemy.engine.url as url

        # Determine the database URI, preferring the model's bind if present.
        sqlalchemy_uri = conf.get('SQLALCHEMY_DATABASE_URI', '')
        bind_key = getattr(self.datamodel.obj, '__bind_key__', None)
        if bind_key:
            bind_uri = conf['SQLALCHEMY_BINDS'].get(bind_key, '')
            if bind_uri:
                sqlalchemy_uri = bind_uri

        # Create a SQLAlchemy engine for the resolved URI.
        sql_engine = create_engine(url.make_url(sqlalchemy_uri))
        table_name = self.datamodel.obj.__tablename__
        # table_name comes from the model definition, not user input, so this
        # interpolation is not an injection vector.
        sql = 'select * from %s' % table_name
        logging.info(sql)
        # Execute the query and load the results into a DataFrame.
        results = pandas.read_sql_query(sql, sql_engine)

        # Write the DataFrame to a CSV file, replacing any stale copy.
        csv_file = os.path.abspath('%s.csv' % table_name)
        if os.path.exists(csv_file):
            os.remove(csv_file)
        # index=False drops the row-number column; utf-8-sig keeps Excel happy.
        results.to_csv(csv_file, index=False, sep=',', encoding='utf-8-sig')
        # Send the generated CSV file as a downloadable response.
        return self.csv_response(csv_file, file_name=table_name)

    # A custom action for bulk deletion.
    @action('muldelete', __('Delete'), __('Delete all Really?'), 'fa-trash', single=False)
    def muldelete(self, items):
        """Delete every item in *items*, tracking per-item success and failure.

        :param items: list of model instances to delete
        :return: JSON string with 'success' and 'fail' lists of serialized items
        """
        if not items:
            abort(404)
        deleted, failed = [], []
        for record in items:
            try:
                # Run the pre-delete hook, then queue the deletion.
                self.pre_delete(record)
                db.session.delete(record)
                deleted.append(record.to_json())
            except Exception as e:
                flash(str(e), 'danger')
                failed.append(record.to_json())
        # A single commit flushes every queued deletion together.
        db.session.commit()
        return json.dumps({'success': deleted, 'fail': failed}, indent=4, ensure_ascii=False)

    """
    ------------------------------------------------
                HELPER FUNCTIONS
    ------------------------------------------------
    """

    # A helper to deserialize a primary key, with special handling for composite keys.
    def _deserialize_pk_if_composite(self, pk):
        # A custom object hook for JSON deserialization to handle date/datetime objects.
        def date_deserializer(obj):
            if '_type' not in obj:
                return obj

            from dateutil import parser

            if obj['_type'] == 'datetime':
                return parser.parse(obj['value'])
            elif obj['_type'] == 'date':
                return parser.parse(obj['value']).date()
            return obj

        # If the model has a composite primary key, try to load the PK string as JSON.
        if self.datamodel.is_pk_composite():
            try:
                pk = json.loads(pk, object_hook=date_deserializer)
            except Exception:
                pass
        return pk

    # A helper to parse and sanitize pagination arguments from the request.
    def _handle_page_args(self, rison_args):
        """Extract pagination values from rison arguments and sanitize them.

        Defaults to page 0 and self.page_size, then delegates to
        _sanitize_page_args to enforce FAB_API_MAX_PAGE_SIZE.

        :param rison_args: dict of parsed rison query arguments
        :return: (tuple) page, page_size
        """
        requested_page = rison_args.get(API_PAGE_INDEX_RIS_KEY, 0)
        requested_size = rison_args.get(API_PAGE_SIZE_RIS_KEY, self.page_size)
        return self._sanitize_page_args(requested_page, requested_size)

    # @pysnooper.snoop()
    # A helper to sanitize page and page size values, enforcing maximums.
    def _sanitize_page_args(self, page, page_size):
        _page = page if page and page >= 0 else 0
        _page_size = page_size or self.page_size
        # Get the maximum allowed page size from configuration.
        max_page_size = self.max_page_size or current_app.config.get('FAB_API_MAX_PAGE_SIZE')
        # Accept special -1 to uncap the page size
        if max_page_size == -1:
            if _page_size == -1:
                return None, None
            else:
                return _page, _page_size
        # Enforce the maximum page size.
        if _page_size > max_page_size or _page_size < 1:
            _page_size = max_page_size
        return _page, _page_size

    # A helper to parse ordering arguments from the request.
    def _handle_order_args(self, rison_args):
        """Resolve the (order_column, order_direction) pair from rison args.

        Falls back to self.base_order when no column is requested.

        :param rison_args: dict of parsed rison query arguments
        :raises InvalidOrderByColumnFABException: for columns not in
            self.order_columns
        :return: (order_column, order_direction)
        """
        column = rison_args.get(API_ORDER_COLUMN_RIS_KEY, '')
        direction = rison_args.get(API_ORDER_DIRECTION_RIS_KEY, '')
        if not column:
            # No explicit ordering: use the view's base order if defined.
            return self.base_order if self.base_order else ('', '')
        if column not in self.order_columns:
            raise InvalidOrderByColumnFABException(f'Invalid order by column: {column}')
        return column, direction

    # A helper to parse and apply filter arguments from the request.
    def _handle_filters_args(self, rison_args):
        """Rebuild self._filters from rison args and join with the base filters.

        :param rison_args: dict of parsed rison query arguments
        :return: combined filters (base filters + request filters)
        """
        self._filters.clear_filters()
        requested_filters = rison_args.get(API_FILTERS_RIS_KEY, [])
        self._filters.rest_add_filters(requested_filters)
        return self._filters.get_joined_filters(self._base_filters)

    # @pysnooper.snoop(watch_explode=("column"))
    # A helper to create a JSON-serializable dictionary of column descriptions.
    def _description_columns_json(self, cols=None):
        """
        Prepares dict with col descriptions to be JSON serializable
        """
        ret = {}
        cols = cols or []
        # Filter descriptions to only include the specified columns.
        d = {k: v for (k, v) in self.description_columns.items() if k in cols}
        # Translate and encode the descriptions.
        for key, value in d.items():
            ret[key] = as_unicode(_(value).encode('UTF-8'))

        # Add descriptions from extra form fields.
        edit_form_extra_fields = self.edit_form_extra_fields
        for col in edit_form_extra_fields:
            column = edit_form_extra_fields[col]
            if hasattr(column, 'kwargs') and column.kwargs:
                description = column.kwargs.get('description', '')
                if description:
                    ret[col] = description

        return ret

    # A helper to create a JSON-serializable dictionary of column labels.
    def _label_columns_json(self, cols=None):
        """
        Prepares dict with labels to be JSON serializable
        """
        ret = {}
        # 自动生成的label
        cols = cols or []
        d = {k: v for (k, v) in self.label_columns.items() if k in cols}
        for key, value in d.items():
            ret[key] = as_unicode(_(value).encode('UTF-8'))

        # 全局的label
        # Merge labels from the model definition.
        if hasattr(self.datamodel.obj, 'label_columns') and self.datamodel.obj.label_columns:
            for col in self.datamodel.obj.label_columns:
                ret[col] = self.datamodel.obj.label_columns[col]

        # 本view特定的label
        # Merge labels from the view's general label_columns.
        for col in self.label_columns:
            ret[col] = self.label_columns[col]

        # 本view特定的label
        # Merge labels from the view's specific spec_label_columns.
        for col in self.spec_label_columns:
            ret[col] = self.spec_label_columns[col]

        return ret

    # A helper to standardize and enrich field information for the UI.
    def make_ui_info(self, ret):
        """Normalize a field-info dict for front-end consumption.

        Serializes defaults and validators, expands choices into
        [{'id', 'value'}] lists, and maps field/column types onto a
        'ui-type' widget name (select/input/textArea/radio/...).

        :param ret: field-info dict (mutated in place)
        :return: the same dict, normalized
        """
        # Callables cannot be JSON-serialized, so drop function defaults.
        if ret.get('default', None) and isfunction(ret['default']):
            ret['default'] = None

        # Flatten validator objects (wtforms or marshmallow flavors) into
        # plain dicts of {type, regex/min/max}.
        serialized = []
        for v in ret.get('validators', []):
            val = {'type': v.__class__.__name__}
            v_cls = type(v)
            if v_cls in (validators.Regexp, validate.Regexp):
                val['regex'] = str(v.regex.pattern)
            elif v_cls in (validators.Length, validate.Length):
                val['min'] = v.min
                val['max'] = v.max
            elif v_cls in (validators.NumberRange, validate.Range):
                val['min'] = v.min
                val['max'] = v.max
            serialized.append(val)
        ret['validators'] = serialized

        # Selector-style fields: normalize choices and pick the select widget.
        select_types = (
            'QuerySelect',
            'Select',
            'Related',
            'MySelectMultiple',
            'SelectMultiple',
            'Enum',
        )
        if ret.get('type', '') in select_types:
            choices = ret.get('choices', [])
            values = ret.get('values', [])
            if choices:
                # Convert (id, label) tuples into {'id', 'value'} dicts.
                values = [
                    {'id': choice[0], 'value': choice[1]}
                    for choice in choices
                    if choice and len(choice) == 2
                ]
            ret['values'] = values
            if not ret.get('ui-type', ''):
                ret['ui-type'] = 'select2' if 'SelectMultiple' in ret['type'] else 'select'

        # Plain strings become input or textArea depending on the widget;
        # list/datePicker ui-types are left untouched.
        if ret.get('ui-type', '') not in ['list', 'datePicker']:
            if ret.get('type', '') in ['String']:
                if ret.get('widget', 'BS3Text') == 'BS3Text':
                    ret['ui-type'] = 'input'
                else:
                    ret['ui-type'] = 'textArea'

        # Database text/varchar column types map directly to widgets.
        lowered_type = ret.get('type', '').lower()
        if 'text' in lowered_type:
            ret['ui-type'] = 'textArea'
        if 'varchar' in lowered_type:
            ret['ui-type'] = 'input'

        # Booleans render as a yes/no radio with a coerced boolean default.
        if 'boolean' in lowered_type:
            ret['ui-type'] = 'radio'
            ret['values'] = [
                {'id': True, 'value': 'yes'},
                {'id': False, 'value': 'no'},
            ]
            ret['default'] = True if ret.get('default', 0) else False

        # Defaults of the form ${...} mark a pattern-filled ("match") input.
        default = ret.get('default', None)
        if default and re.match(r'\$\{.*\}', str(default)):
            ret['ui-type'] = 'match-input'

        return ret

    # A detailed helper to get all relevant information about a single Marshmallow field.
    def _get_field_info(self, field, filter_rel_field, page=None, page_size=None):
        """
            Return a dict with field details
            ready to serve as a response

        :param field: marshmallow field
        :param filter_rel_field: filters to apply when listing related values
        :param page: page index when paginating a related field's values
        :param page_size: page size when paginating a related field's values
        :return: dict with field details
        """
        ret = {}
        ret['name'] = field.name

        # Enrich with information from the underlying SQLAlchemy column,
        # when the field maps to a real database column.
        if self.datamodel:
            list_columns = self.datamodel.list_columns  # DB-backed columns only; no FK/relation fields
            if field.name in list_columns:
                column = list_columns[field.name]
                default = column.default
                column_type = column.type
                column_type_str = str(column_type.__class__.__name__)
                # For Enum columns, expose the allowed values.
                if column_type_str == 'Enum':
                    ret['values'] = [{'id': x, 'value': x} for x in column.type.enums]
                ret['type'] = column_type_str
                if default:
                    # column.default is a SQLAlchemy ColumnDefault; .arg is the value.
                    ret['default'] = default.arg

        # 'remember' marks fields whose last value should be cached client-side.
        if field.name in self.remember_columns:
            ret['remember'] = True
        else:
            ret['remember'] = False
        # Translated label and description.
        ret['label'] = _(self.label_columns.get(field.name, ''))
        ret['description'] = _(self.description_columns.get(field.name, ''))
        # Normalize the field's validators into a list.
        if field.validate and isinstance(field.validate, list):
            ret['validators'] = list(field.validate)
        elif field.validate:
            ret['validators'] = [field.validate]
        else:
            ret['validators'] = []

        # For related fields, fetch the list of possible values (paginated)
        # and force a required validator.
        if isinstance(field, Related) or isinstance(field, RelatedList):
            ret['count'], ret['values'] = self._get_list_related_field(
                field, filter_rel_field, page=page, page_size=page_size
            )
            ret['validators'].append(validators.DataRequired())

        # For fields not backed by a DB column, fall back to the Marshmallow
        # field class name as the type.
        ret['type'] = field.__class__.__name__ if 'type' not in ret else ret['type']
        ret['required'] = field.required
        # Custom marshmallow schema fields may lack a 'unique' property.
        ret['unique'] = getattr(field, 'unique', False)

        # Override with information from edit_form_extra_fields, which takes
        # precedence over both the DB column and the schema field.
        if self.edit_form_extra_fields:
            if field.name in self.edit_form_extra_fields:
                column_field = self.edit_form_extra_fields[field.name]
                column_field_kwargs = column_field.kwargs
                ret['type'] = column_field.field_class.__name__.replace('Field', '')
                # Declared columns win; the extra field's kwargs are the fallback.
                ret['description'] = self.description_columns.get(
                    field.name, column_field_kwargs.get('description', '')
                )
                ret['label'] = self.label_columns.get(
                    field.name, column_field_kwargs.get('label', '')
                )
                ret['default'] = column_field_kwargs.get('default', '')
                ret['validators'] = column_field_kwargs.get('validators', ret['validators'])
                ret['choices'] = column_field_kwargs.get('choices', [])
                # Derive UI hints from the widget attached to the extra field.
                if 'widget' in column_field_kwargs:
                    ret['widget'] = (
                        column_field_kwargs['widget']
                        .__class__.__name__.replace('Widget', '')
                        .replace('Field', '')
                        .replace('My', '')
                    )
                    # Read-only widget: disable editing in the UI.
                    if (
                        hasattr(column_field_kwargs['widget'], 'readonly')
                        and column_field_kwargs['widget'].readonly
                    ):
                        ret['disable'] = True
                    # Widget asks the front end to re-fetch the field info.
                    if (
                        hasattr(column_field_kwargs['widget'], 'retry_info')
                        and column_field_kwargs['widget'].retry_info
                    ):
                        ret['retry_info'] = True

                    # Select that also accepts free-form input.
                    if (
                        hasattr(column_field_kwargs['widget'], 'can_input')
                        and column_field_kwargs['widget'].can_input
                    ):
                        ret['ui-type'] = 'input-select'

                    # Single date picker.
                    if (
                        hasattr(column_field_kwargs['widget'], 'is_date')
                        and column_field_kwargs['widget'].is_date
                    ):
                        ret['ui-type'] = 'datePicker'
                    # Date range picker.
                    if (
                        hasattr(column_field_kwargs['widget'], 'is_date_range')
                        and column_field_kwargs['widget'].is_date_range
                    ):
                        ret['ui-type'] = 'rangePicker'

                    # Expandable field: a single column that stores a list of
                    # sub-fields; expose each sub-field's info under 'info'.
                    # (NOTE: 'expand_filed' spelling matches the widget attribute.)
                    if (
                        hasattr(column_field_kwargs['widget'], 'expand_filed')
                        and column_field_kwargs['widget'].expand_filed
                    ):
                        print(field.name)
                        ret['ui-type'] = 'list'
                        ret['info'] = self.columnsfield2info(
                            column_field_kwargs['widget'].expand_filed
                        )

                    # Widget that derives its choices from previously stored
                    # values; no extra handling needed here.
                    if (
                        hasattr(column_field_kwargs['widget'], 'conten2choices')
                        and column_field_kwargs['widget'].conten2choices
                    ):
                        pass

        # Add a required validator when the DB column is NOT NULL.
        columns = [
            column
            for column in self.datamodel.obj.__table__._columns
            if column.name == field.name and hasattr(column, 'nullable') and not column.nullable
        ]
        if columns:
            if 'validators' in ret:
                ret['validators'].append(validators.DataRequired())
            else:
                ret['validators'] = [validators.DataRequired()]

        # Final standardization of the UI info (ui-type, values, defaults...).
        ret = self.make_ui_info(ret)

        return ret

    # @pysnooper.snoop()
    # A helper to get detailed info for a list of columns.
    def _get_fields_info(self, cols, model_schema, filter_rel_fields, **kwargs):
        """
            Returns a dict with fields detail
            from a marshmallow schema

        :param cols: list of columns to show info for
        :param model_schema: Marshmallow model schema
        :param filter_rel_fields: expects add_query_rel_fields or
                                    edit_query_rel_fields
        :param kwargs: Receives all rison arguments for pagination
        :return: dict with all fields details
        """
        ret = []
        # Iterate over the specified columns.
        for col in cols:
            page = page_size = None
            # Get any column-specific arguments (e.g., for paginating related fields).
            col_args = kwargs.get(col, {})
            if col_args:
                page = col_args.get(API_PAGE_INDEX_RIS_KEY, None)
                page_size = col_args.get(API_PAGE_SIZE_RIS_KEY, None)

            page_size = 1000
            # Get the detailed info for the current field.
            ret.append(
                self._get_field_info(
                    model_schema.fields[col],
                    filter_rel_fields.get(col, []),
                    page=page,
                    page_size=page_size,
                )
            )
        return ret

    # A helper to query and retrieve values for a related field (for dropdowns, etc.).
    def _get_list_related_field(self, field, filter_rel_field, page=None, page_size=None):
        """
            Return a list of values for a related field

        :param field: Marshmallow field
        :param filter_rel_field: Filters for the related field
        :param page: The page index
        :param page_size: The page size
        :return: (int, list) total record count and list of dict with id and value
        """
        ret = []
        # Fix: initialize count so non-related fields return (0, []) instead of
        # raising UnboundLocalError at the return statement below.
        count = 0
        if isinstance(field, (Related, RelatedList)):
            # Interface for the related model.
            datamodel = self.datamodel.get_related_interface(field.name)
            filters = datamodel.get_filters(datamodel.get_search_columns_list())
            page, page_size = self._sanitize_page_args(page, page_size)
            # Per-field ordering override, if configured on the view.
            order_field = self.order_rel_fields.get(field.name)
            if order_field:
                order_column, order_direction = order_field
            else:
                order_column, order_direction = '', ''
            # Apply any caller-provided filters.
            if filter_rel_field:
                filters = filters.add_filter_list(filter_rel_field)
            # Query the related model.
            count, values = datamodel.query(
                filters, order_column, order_direction, page=page, page_size=page_size
            )
            # Shape results as [{'id': pk, 'value': str(row)}].
            for value in values:
                ret.append({'id': datamodel.get_pk_value(value), 'value': str(value)})
        return count, ret

    # A helper to merge incoming update data with an existing model item.
    def _merge_update_item(self, model_item, data):
        """
            Merge a model with a python data structure
            This is useful to turn PUT method into a PATCH also
        :param model_item: SQLA Model
        :param data: python data structure
        :return: python data structure
        """
        # Serialize the existing item to a dictionary.
        data_item = self.edit_model_schema.dump(model_item, many=False).data
        # For any field present in the existing item but not in the incoming data,
        # copy the existing value. This makes the PUT behave like a PATCH.
        for _col in data_item:
            if _col not in data.keys():
                data[_col] = data_item[_col]
        return data
