--- START OF FILE view_pipeline_v2.py ---

# Import standard library modules.
import datetime
import json
import logging
import re
import time
import uuid

# Import Flask and Flask-AppBuilder components.
from flask import (
    g,
    request,
)
from flask_appbuilder.models.sqla.interface import SQLAInterface
# Import Marshmallow for data validation and serialization.
from marshmallow import ValidationError
# Import SQLAlchemy for database operations.
from sqlalchemy import and_
from sqlalchemy.exc import IntegrityError
# Import YAML for processing pipeline files.
import yaml

# Import custom application components.
from myapp.apis.base import expose
from myapp.app import app, appbuilder, db
from myapp.const.error import CommonErrorCode
from myapp.const.response import BizError, wrap_response
# Import database models.
from myapp.models.model_job import Pipeline, Workflow
from myapp.models.model_reserve_template import ReserveTemplate
# Import Elasticsearch client for log querying.
from myapp.third.es.es import query
# Import utility functions from other view modules.
from myapp.views.view_pipeline import Pipeline_ModelView_Base, dag_to_pipeline, run_pipeline
from myapp.views.view_workflow import Workflow_ModelView_Base

# Import base classes for API and filtering.
from .base import (
    MyappFilter,
    json_response,
)
from .baseApi import MyappModelRestApi


# Module-level shortcut to the Flask application's configuration mapping.
conf = app.config


# A custom filter for the Pipeline model to apply security and ownership rules.
class Pipeline_Filter(MyappFilter):
    """Query filter restricting Pipeline rows by ownership and naming convention."""

    def apply(self, query, func):
        """Return *query* narrowed to the pipelines the current user may see.

        Admins see every record whose name contains 'pipeline-'; all other
        users additionally must be the record's creator.
        """
        # Collect the caller's role names, lower-cased for comparison.
        roles = {role.name.lower() for role in self.get_user_roles()}
        # Every visible pipeline follows the 'pipeline-' naming convention.
        name_match = self.model.name.contains('pipeline-')
        if 'admin' in roles:
            return query.filter(name_match)
        # Non-admins are limited to records they created themselves.
        return query.filter(and_(self.model.created_by_fk == g.user.id, name_match))


# This class provides the v2 REST API for the Pipeline model.
class Pipeline_ModelView_Api_V2(Pipeline_ModelView_Base, MyappModelRestApi):
    """v2 REST API for creating, running, editing and inspecting Pipelines."""

    # Bind the Pipeline ORM model through the SQLAlchemy interface.
    datamodel = SQLAInterface(Pipeline)
    # Base URL prefix for every endpoint of this API.
    route_base = '/pipeline_modelview_v2/api'
    # Columns returned by the list endpoint.
    list_columns = [
        'name',
        'pipeline_url',
        'id',
        'describe',
        'changed_on',
        'creator',
        'display_name',
        'template_id',
    ]
    # Columns accepted when adding a new pipeline.
    add_columns = ['name', 'describe', 'template_id', 'display_name']
    # Columns accepted when editing a pipeline.
    edit_columns = ['pipeline_url', 'id', 'describe', 'modified', 'creator']
    # Columns returned by the detail/show endpoint.
    show_columns = [
        'describe',
        'pipeline_url',
        'depends_on_past',
        'max_active_runs',
        'parallelism',
        'dag_json',
        'global_env',
        'parameter',
    ]
    # Columns that may be used as search filters.
    search_columns = [
        'id',
        'created_by',
        'name',
        'describe',
        'schedule_type',
        'project',
        'display_name',
        'template_id',
    ]
    # Apply the ownership/naming restrictions to every query of this view.
    base_filters = [['id', Pipeline_Filter, lambda: []]]

    def pre_json_load(self, req_json):
        """Mirror the user-supplied 'name' into 'display_name' before deserialization."""
        if req_json and 'name' in req_json:
            req_json['display_name'] = req_json['name']
        return req_json

    def pre_add(self, item):
        """Validate and normalize a Pipeline record before it is inserted.

        :param item: the Pipeline record about to be persisted
        :raises IntegrityError: when the current user already owns a pipeline
            with the same display name
        """
        # Reject duplicate display names for the current user.
        duplicate = (
            db.session.query(Pipeline)
            .filter_by(display_name=item.name)
            .filter_by(created_by_fk=g.user.id)
            .first()
        )
        if duplicate:
            raise IntegrityError(orig='名称不能重复', params='', statement='')
        # Build a unique internal name. NOTE(review): item.id is normally still
        # None before the insert, so the prefix becomes 'pNone-' — confirm the
        # id prefix is intentional.
        name = f'p{item.id}-taichu-{g.user.username}-{round(time.time() * 1000)}'.replace('_', '-')
        # Fall back to a UUID if the generated name is not a valid identifier.
        if not re.search('^[A-Za-z0-9_-]{1,50}$', name):
            name = str(uuid.uuid4())
        # Default project when none was supplied.
        if not item.project_id:
            item.project_id = 5
        # Keep the user-facing name and switch to the internal one.
        item.display_name = item.name
        item.name = name
        # When creating from a template, copy the template's configuration.
        if item.template_id and item.template_id > 0:
            template = db.session.query(ReserveTemplate).filter_by(id=item.template_id).first()
            # BUGFIX: the original constructed a fresh Pipeline object and
            # rebound the local variable `item`, which never reached the
            # caller, so the template copy silently had no effect. Copy the
            # template's fields onto the record in place instead.
            item.project_id = template.project_id
            item.dag_json = template.dag_json
            item.namespace = template.namespace
            item.global_env = template.global_env
            item.schedule_type = 'once'
            item.cron_time = template.cron_time
            item.pipeline_file = ''
            item.pipeline_argo_id = template.pipeline_argo_id
            item.node_selector = template.node_selector
            item.image_pull_policy = template.image_pull_policy
            item.parallelism = template.parallelism
            item.alert_status = ''
            item.expand = template.expand
            item.parameter = template.parameter
            # Stamp creation/modification times on the copied record.
            item.created_on = datetime.datetime.now()
            item.changed_on = datetime.datetime.now()

    # Custom add endpoint; overrides the default FAB add behavior.
    @expose('/', methods=['POST'])
    @wrap_response
    def api_add(self):
        """Create a pipeline, optionally cloning it from a ReserveTemplate.

        :raises BizError: on schema validation failure or duplicate name
        """
        try:
            # Validate the incoming JSON with the marshmallow schema.
            json_data = request.json
            item = self.add_model_schema.load(json_data)
        except ValidationError:
            raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, '参数缺失')
        # Reject duplicate display names for the current user.
        duplicate = (
            db.session.query(Pipeline)
            .filter_by(display_name=item.data.name)
            .filter_by(created_by_fk=g.user.id)
            .first()
        )
        if duplicate:
            raise BizError(CommonErrorCode.INVALID_INPUT_PARAM, '名称不能重复')
        # Generate a unique internal name.
        name = f'pipeline-taichu-{g.user.username}-{round(time.time() * 1000)}'.replace('_', '-')
        # Fall back to a UUID-based name if the generated one is invalid.
        if not re.search('^[A-Za-z0-9_-]{1,50}$', name):
            name = f'pipeline-{str(uuid.uuid4())}'
        # Default project when none was supplied.
        if not item.data.project_id:
            item.data.project_id = 5
        # Keep the user-facing name and switch to the internal one.
        item.data.display_name = item.data.name
        item.data.name = name
        if item.data.template_id and item.data.template_id > 0:
            # Clone the pipeline referenced by template_id via the shared helper.
            pipeline_instance = Pipeline_ModelView_Base()
            new_pipeline = pipeline_instance.copy_pipeline_internal(item.data.template_id)
            new_pipeline.display_name = item.data.display_name
            new_pipeline.name = name
            db.session.commit()
            return new_pipeline.to_json()
        else:
            # Plain creation without a template.
            self.datamodel.add(item.data)
            # Serialize the newly created record for the response.
            result_data = self.add_model_schema.dump(item.data, many=False).data
            result_data[self.primary_key] = self.datamodel.get_pk_value(item.data)
            return result_data

    # Endpoint that triggers a pipeline run.
    @expose('/run_pipeline/<pipeline_id>', methods=['GET'])
    def run_pipeline(self, pipeline_id, **kwargs):
        """Submit a new workflow run for the given pipeline.

        Cleans up previous runs, syncs their state into the database,
        renders the DAG into a workflow manifest and submits it to the
        cluster.

        :param pipeline_id: primary key of the pipeline to run
        """
        logging.info(f'[Pipeline_ModelView_Api_V2][run_pipeline] pipeline_id: {pipeline_id}')
        logging.info(f'[Pipeline_ModelView_Api_V2][run_pipeline] kwargs: {kwargs}')
        # Retrieve the pipeline from the database.
        pipeline = db.session.query(Pipeline).filter_by(id=pipeline_id).first()

        # Clean up old workflow tasks associated with this pipeline.
        pipeline.delete_old_task()

        time.sleep(1)
        # Current workflow CRDs from the execution engine (e.g. Argo).
        back_crds = pipeline.get_workflow()

        # Mirror the live workflow status back into the application database.
        for crd in back_crds:
            try:
                workflow = db.session.query(Workflow).filter_by(name=crd['name']).first()
                if not workflow:
                    # Unknown workflow: create a database record for it,
                    # deriving the username from the CRD labels.
                    username = ''
                    labels = json.loads(crd['labels'])
                    if 'run-rtx' in labels:
                        username = labels['run-rtx']
                    elif 'upload-rtx' in labels:
                        username = labels['upload-rtx']

                    workflow = Workflow(
                        name=crd['name'],
                        namespace=crd['namespace'],
                        create_time=crd['create_time'],
                        status=crd['status'],
                        annotations=crd['annotations'],
                        labels=crd['labels'],
                        spec=crd['spec'],
                        status_more=crd['status_more'],
                        username=username,
                    )
                    db.session.add(workflow)
                    db.session.commit()
            except Exception as e:
                logging.exception(e)

        # Delete the Kubernetes/Argo resources of all previous runs,
        # including ones that are still running.
        self.delete_bind_crd(back_crds)

        # Render the workflow manifest from the pipeline's DAG structure.
        pipeline.pipeline_file = dag_to_pipeline(pipeline, db.session, kind='dag')
        pipeline_json = yaml.safe_load(pipeline.pipeline_file)
        # Give this specific run a unique resource name.
        pipeline_json['metadata']['name'] = (
            pipeline_json['metadata']['generateName'] + uuid.uuid4().hex[:5]
        )
        pipeline_json['metadata'].pop('generateName', None)

        # Submit the manifest to the cluster for execution.
        logging.info(f'提交任务流: {pipeline.name}')
        crd_name = pipeline_json.get('metadata', {}).get('name', '')
        run_pipeline(pipeline.project.cluster, pipeline_json)
        # Remember the id of the new Argo workflow on the pipeline record.
        pipeline.pipeline_argo_id = crd_name
        db.session.commit()

        # Success response carrying the updated pipeline data.
        back_data = {'status': 0, 'message': 'success', 'data': {'pipeline': pipeline.to_json()}}
        return self.response(200, **back_data)

    # Custom edit endpoint for pipelines.
    @expose('/edit/<int:pk>', methods=['POST'])
    def api_edit(self, pk):
        """Update the editable attributes of one pipeline.

        :param pk: primary key of the pipeline to update
        """
        item = self.datamodel.get(pk, self._base_filters)
        # BUGFIX: check existence before dereferencing — the original called
        # item.to_json() first, turning a missing record into a 500.
        if not item:
            return self.response_error(404, message='Not found')
        self.src_item_json = item.to_json()
        try:
            # Optional per-item permission hook.
            if self.check_edit_permission:
                has_permission = self.check_edit_permission(item)
                if not has_permission:
                    return json_response(message='no permission to edit', status=1, result={})

        except Exception as e:
            # Log with traceback instead of printing to stdout.
            logging.exception(e)
            return json_response(message='check edit permission' + str(e), status=1, result={})

        if not request.is_json:
            return self.response_error(400, message='Request is not JSON')
        try:
            # Pre-process the incoming JSON when the hook is defined.
            if self.pre_json_load:
                json_data = self.pre_json_load(request.json)
            else:
                json_data = request.json
            # Copy request fields onto the ORM object attribute by attribute
            # (deliberately replaces the schema-based loading).
            for k in json_data:
                if hasattr(item, k):
                    setattr(item, k, json_data[k])
        except ValidationError as err:
            return self.response_error(422, message=err.messages)

        try:
            # Persist the updated record.
            self.datamodel.edit(item, raise_exception=True)
            db.session.commit()
            back_data = {'status': 0, 'message': 'success'}
            return self.response(
                200,
                **back_data,
            )
        except IntegrityError as e:
            # Surface unique-constraint violations as a 422.
            return self.response_error(422, message=str(e.orig))

    # Detail endpoint returning one pipeline's full data.
    @expose('/details/<int:id>', methods=['GET'])
    def api_details(self, id):
        """Return the full JSON representation of one pipeline.

        :param id: primary key of the pipeline
        """
        item = self.datamodel.get(id, self._base_filters)
        if item is None:
            return self.response_error(422, message=f'id={id} not exists')
        try:
            result_data = item.to_json()
            back_data = {'result': result_data, 'status': 0, 'message': 'success'}
            return self.response(
                200,
                **back_data,
            )
        except Exception as e:
            logging.error(e)
            # BUGFIX: a generic Exception has no `.orig`; str(e.orig) itself
            # raised AttributeError and masked the real error.
            return self.response_error(422, message=str(e))

    # Save an existing pipeline as a reusable template.
    @expose('/save_to_template/<int:pk>', methods=['POST'])
    @wrap_response
    def save_to_template(self, pk):
        """Copy pipeline *pk* and mark the copy as a preset template."""
        # Copy the pipeline via the shared helper.
        pipeline_instance = Pipeline_ModelView_Base()
        new_pipeline = pipeline_instance.copy_pipeline_internal(pk)
        # Prefix the name to indicate it is a template.
        new_pipeline.display_name = f'预置模板-{new_pipeline.display_name}'
        # template_id == -1 marks the record as a base template itself.
        new_pipeline.template_id = -1
        db.session.commit()
        return ''


# Register the v2 Pipeline API with Flask-AppBuilder so its routes are served.
appbuilder.add_api(Pipeline_ModelView_Api_V2)


# This class provides the REST API for managing reusable pipeline templates.
class Reserve_Template(Pipeline_ModelView_Base, MyappModelRestApi):
    """REST API for managing reusable pipeline templates (ReserveTemplate)."""

    # Bind the ReserveTemplate ORM model through the SQLAlchemy interface.
    datamodel = SQLAInterface(ReserveTemplate)
    # Base URL prefix for every endpoint of this API.
    route_base = '/reserve_pipeline/api'
    # Columns for the list / add / edit / show endpoints.
    list_columns = ['name', 'pipeline_url', 'id', 'describe', 'changed_on']
    add_columns = ['name', 'describe']
    edit_columns = [
        'describe',
        'pipeline_url',
        'depends_on_past',
        'max_active_runs',
        'parallelism',
        'dag_json',
        'global_env',
        'parameter',
    ]
    show_columns = [
        'describe',
        'pipeline_url',
        'depends_on_past',
        'max_active_runs',
        'parallelism',
        'dag_json',
        'global_env',
        'parameter',
    ]

    def pre_add(self, item):
        """Default the project id before a template is inserted."""
        if not item.project_id:
            item.project_id = 5

    # Standard add endpoint for templates.
    @expose('/add', methods=['POST'])
    def api_add(self):
        """Create a new ReserveTemplate from the posted JSON body."""
        self.src_item_json = {}
        # Basic validation and JSON loading.
        if not request.is_json:
            return self.response_error(400, message='Request is not JSON')
        try:
            if self.pre_json_load:
                json_data = self.pre_json_load(request.json)
            else:
                json_data = request.json

            item = self.add_model_schema.load(json_data)
        except ValidationError as err:
            return self.response_error(422, message=err.messages)
        # marshmallow 2.x: a plain dict in .data signals failed custom validation.
        if isinstance(item.data, dict):
            return self.response_error(422, message=item.errors)
        try:
            # Run pre-add logic, persist, run post-add logic, return the result.
            self.pre_add(item.data)
            self.datamodel.add(item.data, raise_exception=True)
            self.post_add(item.data)
            result_data = self.add_model_schema.dump(item.data, many=False).data
            result_data[self.primary_key] = self.datamodel.get_pk_value(item.data)
            back_data = {'result': result_data, 'status': 0, 'message': 'success'}
            return self.response(
                200,
                **back_data,
            )
        except IntegrityError as e:
            return self.response_error(422, message=str(e.orig))

    # Create a new Pipeline instance from an existing template.
    @expose('/add_workflow/<int:id>', methods=['GET'])
    def api_add_workflow(self, id):
        """Instantiate a new Pipeline from the template with the given id.

        :param id: primary key of the ReserveTemplate to copy
        """
        template = self.datamodel.get(id, self._base_filters)
        if not isinstance(template, ReserveTemplate):
            # BUGFIX: corrected the 'not exits' typo in the error message.
            return self.response_error(422, message=f'id={id} not exists')
        try:
            # Build a new Pipeline by copying the template's attributes.
            pipeline = Pipeline(
                name=template.name.replace('_', '-'),
                project_id=template.project_id,
                dag_json=template.dag_json,
                describe=template.describe,
                namespace=template.namespace,
                global_env=template.global_env,
                schedule_type='once',
                cron_time=template.cron_time,
                pipeline_file='',
                pipeline_argo_id=template.pipeline_argo_id,
                node_selector=template.node_selector,
                image_pull_policy=template.image_pull_policy,
                parallelism=template.parallelism,
                alert_status='',
                expand=template.expand,
                parameter=template.parameter,
            )
            # Make the instance name unique per instantiation.
            pipeline.name = pipeline.name.replace('_', '-') + '-' + uuid.uuid4().hex[:4]
            # Stamp creation/modification times.
            pipeline.created_on = datetime.datetime.now()
            pipeline.changed_on = datetime.datetime.now()
            # Persist the new pipeline.
            db.session.add(pipeline)
            db.session.commit()
            back_data = {'status': 0, 'message': 'success'}
            return self.response(
                200,
                **back_data,
            )
        except Exception as e:
            logging.error(e)
            # BUGFIX: a generic Exception has no `.orig`; str(e.orig) itself
            # raised AttributeError and masked the real error.
            return self.response_error(422, message=str(e))

    # Custom edit endpoint for templates.
    @expose('/edit/<int:pk>', methods=['POST'])
    def api_edit(self, pk):
        """Update the editable attributes of one template.

        :param pk: primary key of the template to update
        """
        item = self.datamodel.get(pk, self._base_filters)
        # BUGFIX: check existence before dereferencing — the original called
        # item.to_json() first, turning a missing record into a 500.
        if not item:
            return self.response_error(404, message='Not found')
        self.src_item_json = item.to_json()

        try:
            # Optional per-item permission hook.
            if self.check_edit_permission:
                has_permission = self.check_edit_permission(item)
                if not has_permission:
                    return json_response(message='no permission to edit', status=1, result={})

        except Exception as e:
            # Log with traceback instead of printing to stdout.
            logging.exception(e)
            return json_response(message='check edit permission' + str(e), status=1, result={})

        if not request.is_json:
            return self.response_error(400, message='Request is not JSON')
        try:
            if self.pre_json_load:
                json_data = self.pre_json_load(request.json)
            else:
                json_data = request.json
            # Copy request fields onto the ORM object attribute by attribute
            # (deliberately replaces the schema-based loading).
            for k in json_data:
                if hasattr(item, k):
                    setattr(item, k, json_data[k])
        except ValidationError as err:
            return self.response_error(422, message=err.messages)

        try:
            # Persist the updated record.
            self.datamodel.edit(item, raise_exception=True)
            db.session.commit()
            back_data = {'status': 0, 'message': 'success'}
            return self.response(
                200,
                **back_data,
            )
        except IntegrityError as e:
            # Surface unique-constraint violations as a 422.
            return self.response_error(422, message=str(e.orig))

    # Detail endpoint for templates.
    @expose('/details/<int:id>', methods=['GET'])
    def api_details(self, id):
        """Return the full JSON representation of one template.

        :param id: primary key of the template
        """
        template = self.datamodel.get(id, self._base_filters)
        if template is None:
            return self.response_error(422, message=f'id={id} not exists')
        try:
            result_data = template.to_json()
            back_data = {'result': result_data, 'status': 0, 'message': 'success'}
            return self.response(
                200,
                **back_data,
            )
        except Exception as e:
            logging.error(e)
            # BUGFIX: a generic Exception has no `.orig`; str(e.orig) itself
            # raised AttributeError and masked the real error.
            return self.response_error(422, message=str(e))

    # Delete endpoint for templates.
    # BUGFIX: added the leading slash for consistency with every other route
    # in this file; with Flask's blueprint prefix join it resolves to the same
    # '/reserve_pipeline/api/delete/<id>' URL.
    @expose('/delete/<int:id>', methods=['GET'])
    def api_remove(self, id):
        """Delete the template with the given id.

        :param id: primary key of the template to delete
        """
        item = self.datamodel.get(id, self._base_filters)
        if not item:
            return self.response_error(404, message='Not found')
        try:
            self.datamodel.delete(item, raise_exception=True)
            back_data = {'status': 0, 'message': 'success', 'result': item.to_json()}
            return self.response(200, **back_data)
        except Exception as e:
            return self.response_error(422, message=str(e))


# Register the Reserve Template API with Flask-AppBuilder so its routes are served.
appbuilder.add_api(Reserve_Template)


# This class provides the v2 REST API for the Workflow model (pipeline runs).
class Workflow_ModelView_Api_V2(Workflow_ModelView_Base, MyappModelRestApi):
    """v2 REST API for the Workflow model (individual pipeline runs)."""

    # Bind the Workflow ORM model through the SQLAlchemy interface.
    datamodel = SQLAInterface(Workflow)
    # Base URL prefix for every endpoint of this API.
    route_base = '/workflow_modelview_v2/api'
    # Columns for the list and show endpoints.
    list_columns = [
        'id',
        'create_time',
        'change_time',
        'elapsed_time',
        'final_status',
    ]
    show_columns = [
        'name',
        'namespace',
        'create_time',
        'change_time',
        'status',
        'annotations',
        'labels',
        'spec',
        'status_more',
        'info_json',
        'algorithm',
    ]

    def check(self, **kwargs):
        """Return (ok, message); ok is False when any keyword value is None."""
        for arg, value in kwargs.items():
            if value is None:
                return False, f'expect parameter:{arg}, but get none.'
        return True, 'check success'

    # Detailed status of one workflow run.
    @expose('/detail/<int:workflow_id>', methods=['POST'])
    @wrap_response
    def detail(self, workflow_id):
        """Return a workflow's details, refreshing pod statuses for live runs.

        :param workflow_id: primary key of the workflow
        :raises BizError: when the workflow does not exist
        """
        workflow = db.session.query(Workflow).filter_by(id=workflow_id).first()
        if not workflow:
            raise BizError(CommonErrorCode.NOT_FOUND, '实例不存在')
        # For runs that are still live, sync pod phases from the cluster.
        if workflow.final_status in ['Pending', 'Running']:
            snapshot = json.loads(workflow.snapshot)
            crd = workflow.get_workflow() or {}
            # BUGFIX: default to '{}' — the original default of '' made
            # json.loads raise JSONDecodeError whenever 'status_more' was
            # absent or empty.
            workflow_manifest = json.loads(crd.get('status_more', '{}') or '{}')
            pods = {}
            nodes = workflow_manifest.get('nodes', {})
            # Collect only the Pod-type nodes of the manifest.
            for node_name in nodes:
                if nodes[node_name]['type'] == 'Pod':
                    pods[node_name] = nodes[node_name]
            # Write each pod's phase back into the stored DAG snapshot.
            for pod_name in pods:
                task_name = pods[pod_name]['displayName']
                # The DB task name is the part before '(' — presumably a
                # retry/loop suffix in the display name; verify against the
                # workflow engine's naming.
                db_task_name = task_name[: task_name.index('(')] if '(' in task_name else task_name
                snapshot['dag_json'][db_task_name]['status'] = pods[pod_name]['phase']
            # Persist the refreshed snapshot.
            workflow.snapshot = json.dumps(snapshot, indent=4, ensure_ascii=False)
            db.session.commit()
        return workflow.to_json()

    # Logs of one workflow run.
    @expose('/log/<int:workflow_id>', methods=['POST'])
    def log(self, workflow_id):
        """Query Elasticsearch for the pod logs of one workflow run.

        :param workflow_id: primary key of the workflow
        """
        workflow = db.session.query(Workflow).filter_by(id=workflow_id).first()
        json_data = request.json
        logging.debug(f'[Workflow_ModelView_Api_V2][POST][log] request: {json_data}')
        # Elasticsearch terms identifying this workflow's main containers.
        keyword_list = [
            {'match': {'kubernetes.namespace': 'pipeline'}},
            {'match': {'kubernetes.container.name': 'main'}},
            {'match': {'kubernetes.labels.workflows_argoproj_io/workflow': workflow.name}},
        ]
        # Optional per-task narrowing. BUGFIX: guard against a missing JSON
        # body — 'in None' raised TypeError in the original.
        if json_data and 'task_name' in json_data:
            keyword_list.append({'match': {'kubernetes.labels.task-name': json_data['task_name']}})
        # Search from the workflow creation time, shifted by 8 hours —
        # presumably CST-to-UTC; confirm against the ES ingest timezone.
        start_timestamp = datetime.datetime.strptime(workflow.create_time, '%Y-%m-%d %H:%M:%S')
        start_timestamp -= datetime.timedelta(hours=8)
        result, hit, count = query(
            keyword_list=keyword_list, start_timestamp=start_timestamp, size=10000
        )
        # No hits: return a single placeholder entry instead of an empty list.
        if hit == 0:
            current_time = datetime.datetime.now()
            result = [
                {
                    'id': 'il4lKYgB9OSbaxQge0i6',
                    'message': '未查询到日志',
                    'timestamp': f"{current_time.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]}Z",
                    'sort': [round(current_time.timestamp() * 1000)],
                }
            ]
            hit = 1
        return self.response(
            200, **{'status': 0, 'result': {'data': result, 'total': hit}, 'message': 'success'}
        )

    # Find a workflow by its KFP run-id label.
    @expose('/get', methods=['POST'])
    def get_by_run_id(self):
        """Find a workflow by namespace and its 'pipeline/runid' label."""
        json_data = request.json
        logging.debug(f'[Workflow_ModelView_Api_V2][POST][get_by_run_id] request: {json_data}')

        namespace = json_data.get('namespace', None)
        run_id = json_data.get('run_id', None)

        # Scan all workflows in the namespace for a matching run-id label.
        workflows = db.session.query(Workflow).filter_by(namespace=namespace).all()
        logging.debug(f'workflows len: {len(workflows)} ')
        for workflow in workflows:
            label = json.loads(workflow.labels)
            # Explicit key check: a .get() default of None would spuriously
            # match requests that omit run_id.
            if 'pipeline/runid' in label and label['pipeline/runid'] == run_id:
                back_data = {'status': 0, 'message': 'success', 'result': workflow.to_json()}
                return self.response(200, **back_data)

        # No match found.
        return self.response_error(
            422, message=f"can't found workflow record by namespace:{namespace} run_id:{run_id}"
        )

    # Stop a running workflow.
    @expose('/stop/<int:id>', methods=['GET'])
    def stop(self, id):
        """Delete the underlying cluster resource of one workflow run.

        :param id: primary key of the workflow to stop
        """
        workflow = db.session.query(self.datamodel.obj).filter_by(id=id).first()
        logging.debug(f'[Workflow_ModelView_Api_V2][GET][stop] id: {id}, workflow: {workflow}')
        if not workflow:
            return self.response_error(404, message='Not found')

        try:
            # Delete the underlying Kubernetes/Argo resource.
            self.delete_workflow(workflow)
            back_data = {'status': 0, 'message': 'success', 'result': workflow.to_json()}
            return self.response(200, **back_data)
        except Exception as e:
            logging.exception(e)
            return self.response_error(422, message=str(e))


# Register the v2 Workflow API with Flask-AppBuilder so its routes are served.
appbuilder.add_api(Workflow_ModelView_Api_V2)


# The following is a commented-out section for a Job Template API.
# class Job_Template_ModelView_Api_V2(Job_Template_ModelView_Base, MyappModelRestApi):
#     datamodel = SQLAInterface(Job_Template)
#     route_base = '/job_template_modelview_v2/api'
#     add_columns = [
#         'project',
#         'images',
#         'name',
#         'version',
#         'describe',
#         'workdir',
#         'entrypoint',
#         'volume_mount',
#         'args',
#         'env',
#         'hostAliases',
#         'privileged',
#         'accounts',
#         'expand',
#     ]
#     edit_columns = add_columns
#     # list_columns = ['project','name','version','creator','modified']
#     list_columns = [
#         'project',
#         'name',
#         'version',
#         'describe',
#         'images',
#         'workdir',
#         'entrypoint',
#         'args',
#         'demo',
#         'env',
#         'hostAliases',
#         'privileged',
#         'accounts',
#         'created_by',
#         'changed_by',
#         'created_on',
#         'changed_on',
#         'expand',
#     ]
#     show_columns = [
#         'project',
#         'name',
#         'version',
#         'describe',
#         'images',
#         'workdir',
#         'entrypoint',
#         'args',
#         'demo',
#         'env',
#         'hostAliases',
#         'privileged',
#         'expand',
#     ]
#
#     def post_list(self, items):
#         def sort_expand_index(items, db_session):
#             all = {0: []}
#
#             # 根据不同平台，决定要展示的模版类型
#             if is_private():
#                 show_types = ['common', 'private']
#             else:
#                 show_types = ['common', 'hw-cloud']
#             logging.debug(
#                 f' {show_types}'
#             )
#
#             for item in items:
#                 try:
#                     if item.type in show_types:  # 根据不同平台，显示不同的任务模版
#                         if item.expand:
#                             index = (
#                                 float(json.loads(item.expand).get('index', 0))
#                                 + float(json.loads(item.project.expand).get('index', 0)) * 1000
#                             )
#                             if index:
#                                 if index in all:
#                                     all[index].append(item)
#                                 else:
#                                     all[index] = [item]
#                             else:
#                                 all[0].append(item)
#                         else:
#                             all[0].append(item)
#                     else:
#                         logging.debug(
#                             f'[Job_Template_ModelView_Api_V2][post_list] item: {item}, type:'
#                             f' {item.type}, no show'
#                         )
#                 except Exception as e:
#                     logging.exception(e)
#
#             back = []
#             for index in sorted(all):
#                 back.extend(all[index])
#                 # 当有小数的时候自动转正
#                 # if float(index)!=int(index):
#                 #     pass
#             return back
#
#         return sort_expand_index(items, db.session)
#
#
# appbuilder.add_api(Job_Template_ModelView_Api_V2)
