--- START OF FILE view_pipeline.py ---

# Import standard library modules.
import copy
import datetime
import json
import logging
import time
import urllib.parse
import uuid

# Import components from Flask for handling requests and responses.
from flask import flash, g, make_response, redirect, request
# Import action decorator from Flask-AppBuilder for creating custom actions in views.
from flask_appbuilder.actions import action
# Import various form field widgets from Flask-AppBuilder.
from flask_appbuilder.fieldwidgets import (
    BS3TextAreaFieldWidget,
    BS3TextFieldWidget,
    Select2ManyWidget,
    Select2Widget,
)
# Import utility for converting SQLAlchemy models to forms.
from flask_appbuilder.forms import GeneralModelConverter
# Import the SQLAlchemy data interface.
from flask_appbuilder.models.sqla.interface import SQLAInterface
# Import internationalization functions.
from flask_babel import gettext as __, lazy_gettext as _
# Import SQLAlchemy functions for building queries.
from sqlalchemy import or_
# Import SQLAlchemy exception types.
from sqlalchemy.exc import InvalidRequestError
# Import WTForms fields for creating custom forms.
from wtforms import BooleanField, IntegerField, SelectField, StringField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
# Import WTForms validators.
from wtforms.validators import DataRequired, Length, Regexp
# Import YAML for processing pipeline definitions.
import yaml

# Import application-specific components.
from myapp.apis.base import expose
from myapp.app import app, appbuilder, db, security_manager
from myapp.const.response import BizError, wrap_response
# Import custom form widgets and fields.
from myapp.forms import MyBS3TextAreaFieldWidget, MySelectMultipleField
# Import database models.
from myapp.models.model_job import Pipeline, RunHistory, Workflow
from myapp.models.model_team import Project
# Import the Kubernetes client wrapper.
from myapp.third.k8s import py_k8s
# Import core utility functions.
from myapp.utils import core

# Import business logic for pipeline handling.
from ..biz.pipeline.dag import dag_to_pipeline, run_pipeline
# Import custom error constants.
from ..const.error import CommonErrorCode
# Import base classes and utility functions for views.
from .base import DeleteMixin, MyappFilter, MyappModelView, get_user_roles, json_response
from .baseApi import MyappModelRestApi


# Module-level shortcut to the Flask application's configuration mapping.
conf = app.config


# Row-level security filter applied to every Pipeline query.
class Pipeline_Filter(MyappFilter):
    """Restrict pipeline visibility by role and project membership."""

    def apply(self, query, func):
        """Return *query* scoped to what the current user may see.

        Admins see everything; everyone else only sees pipelines that
        belong to a project they have joined.
        """
        roles = {role.name.lower() for role in self.get_user_roles()}
        if 'admin' in roles:
            return query

        # Non-admins: limit to pipelines inside the user's joined projects.
        project_ids = security_manager.get_join_projects_id(db.session)
        return query.filter(
            or_(
                self.model.project_id.in_(project_ids),
            )
        )


# Base class with configuration shared by the Pipeline UI and API views.
class Pipeline_ModelView_Base:
    """Common Flask-AppBuilder configuration for Pipeline views.

    Holds the column lists, ordering, security filter, and form-field
    overrides used by both the web UI view and the REST API view.
    """

    # Display label for the model (Chinese: "task flow" / pipeline).
    label_title = '任务流'
    # Bind this view to the Pipeline model through the SQLAlchemy interface.
    datamodel = SQLAInterface(Pipeline)
    # URL to redirect to after certain actions, fetched from config.
    check_redirect_list_url = conf.get('MODEL_URLS', {}).get('pipeline', '')

    # Base permissions exposed by this view.
    base_permissions = ['can_show', 'can_edit', 'can_list', 'can_delete', 'can_add']
    # Default sorting: most recently changed first.
    base_order = ('changed_on', 'desc')
    # order_columns = ['id','changed_on']
    # Columns that may be used for sorting.
    order_columns = ['id']

    # Columns shown in the list view.
    list_columns = ['id', 'project', 'pipeline_url', 'creator', 'modified']
    # Per-column width/rendering hints for the list UI.
    cols_width = {
        'id': {'type': 'ellip2', 'width': 100},
        'project': {'type': 'ellip2', 'width': 200},
        'pipeline_url': {'type': 'ellip2', 'width': 500},
        'modified': {'type': 'ellip2', 'width': 150},
    }
    # Columns shown on the add form.
    add_columns = ['project', 'name', 'describe']
    # Columns shown on the edit form.
    edit_columns = [
        'project',
        'name',
        'describe',
        'schedule_type',
        'cron_time',
        'depends_on_past',
        'max_active_runs',
        'expired_limit',
        'parallelism',
        'global_env',
        'alert_status',
        'alert_user',
        'parameter',
        'cronjob_start_time',
    ]
    # Columns shown in the show/detail view.
    show_columns = [
        'project',
        'name',
        'describe',
        'schedule_type',
        'cron_time',
        'depends_on_past',
        'max_active_runs',
        'expired_limit',
        'parallelism',
        'global_env',
        'dag_json',
        'pipeline_file',
        'pipeline_argo_id',
        'version_id',
        'run_id',
        'created_by',
        'changed_by',
        'created_on',
        'changed_on',
        'expand',
        'parameter',
        'alert_status',
        'alert_user',
        'cronjob_start_time',
    ]
    # Columns that are searchable.
    search_columns = ['id', 'created_by', 'name', 'describe', 'schedule_type', 'project']

    # Apply the row-level security filter to every query of this view.
    base_filters = [['id', Pipeline_Filter, lambda: []]]
    # Converter that builds WTForms forms from the SQLAlchemy model.
    conv = GeneralModelConverter(datamodel)

    # Extra form fields that override the auto-generated add-form fields.
    add_form_extra_fields = {
        'name': StringField(
            _(datamodel.obj.lab('name')),
            description='英文名(小写字母、数字、- 组成)，最长50个字符',
            widget=BS3TextFieldWidget(),
            validators=[Regexp(r'^[a-z][a-z0-9\-]*[a-z0-9]$'), Length(1, 54), DataRequired()],
        ),
        'describe': StringField(
            _(datamodel.obj.lab('describe')),
            description='中文描述',
            widget=BS3TextFieldWidget(),
            validators=[DataRequired()],
        ),
        'project': QuerySelectField(
            _(datamodel.obj.lab('project')),
            allow_blank=True,
            widget=Select2Widget(),
        ),
        'dag_json': StringField(
            _(datamodel.obj.lab('dag_json')),
            default='{}',
            widget=MyBS3TextAreaFieldWidget(
                rows=10
            ),  # the widget receives the enclosing field object plus these widget kwargs
        ),
        'namespace': StringField(
            _(datamodel.obj.lab('namespace')),
            description='部署task所在的命名空间(目前无需填写)',
            default='pipeline',
            widget=BS3TextFieldWidget(),
        ),
        'node_selector': StringField(
            _(datamodel.obj.lab('node_selector')),
            description='部署task所在的机器(目前无需填写)',
            widget=BS3TextFieldWidget(),
            default=datamodel.obj.node_selector.default.arg,
        ),
        'image_pull_policy': SelectField(
            _(datamodel.obj.lab('image_pull_policy')),
            description='镜像拉取策略(always为总是拉取远程镜像，IfNotPresent为若本地存在则使用本地镜像)',
            widget=Select2Widget(),
            default='Always',
            choices=[['Always', 'Always'], ['IfNotPresent', 'IfNotPresent']],
        ),
        'depends_on_past': BooleanField(
            _(datamodel.obj.lab('depends_on_past')),
            description='任务运行是否依赖上一次的示例状态',
            default=True,
        ),
        'max_active_runs': IntegerField(
            _(datamodel.obj.lab('max_active_runs')),
            description='当前pipeline可同时运行的任务流实例数目',
            widget=BS3TextFieldWidget(),
            default=1,
            validators=[DataRequired()],
        ),
        'expired_limit': IntegerField(
            _(datamodel.obj.lab('expired_limit')),
            description='定时调度最新实例限制数目，0表示不限制',
            widget=BS3TextFieldWidget(),
            default=1,
            validators=[DataRequired()],
        ),
        'parallelism': IntegerField(
            _(datamodel.obj.lab('parallelism')),
            description='一个任务流实例中可同时运行的task数目',
            widget=BS3TextFieldWidget(),
            default=3,
            validators=[DataRequired()],
        ),
        'global_env': StringField(
            _(datamodel.obj.lab('global_env')),
            description=(
                '公共环境变量会以环境变量的形式传递给每个task，可以配置多个公共环境变量，'
                '每行一个，支持datetime/creator/runner/uuid/pipeline_id等变量'
                ' 例如：USERNAME={{creator}}'
            ),
            widget=BS3TextAreaFieldWidget(),
        ),
        'schedule_type': SelectField(
            _(datamodel.obj.lab('schedule_type')),
            default='once',
            description='调度类型，once仅运行一次，crontab周期运行，crontab配置保存一个小时候后才生效',
            widget=Select2Widget(),
            choices=[['once', 'once'], ['crontab', 'crontab']],
        ),
        'cron_time': StringField(
            _(datamodel.obj.lab('cron_time')),
            description='周期任务的时间设定 * * * * * 表示为 minute hour day month week',
            widget=BS3TextFieldWidget(),
        ),
        'alert_status': MySelectMultipleField(
            label=_(datamodel.obj.lab('alert_status')),
            widget=Select2ManyWidget(),
            choices=[
                [x, x]
                for x in [
                    'Created',
                    'Pending',
                    'Running',
                    'Succeeded',
                    'Failed',
                    'Unknown',
                    'Waiting',
                    'Terminated',
                ]
            ],
            description='选择通知状态',
            validators=[
                Length(0, 400),
            ],
        ),
        'alert_user': StringField(
            label=_(datamodel.obj.lab('alert_user')),
            widget=BS3TextFieldWidget(),
            description='选择通知用户，每个用户使用逗号分隔',
        ),
    }

    # The edit form reuses the same field overrides as the add form.
    edit_form_extra_fields = add_form_extra_fields

    # Only the creator and admins may edit or delete a pipeline.
    def check_edit_permission(self, item):
        """Return True when the current user may edit/delete *item*."""
        roles = {role.name.lower() for role in get_user_roles()}
        # Administrators always pass.
        if 'admin' in roles:
            return True
        # Otherwise the logged-in user must be the record's creator.
        username = g.user.username if g.user else None
        if username and hasattr(item, 'created_by'):
            if item.created_by.username == username:
                return True
        # No permission: surface a warning in the UI.
        flash('just creator can edit/delete ', 'warning')
        return False

    # Validate task relationships and auto-format dag_json.
    # @pysnooper.snoop(watch_explode=('item'))
    def pipeline_args_check(self, item):
        """Validate *item*'s name and dag_json, topologically order the DAG,
        and regenerate the executable pipeline file.

        Raises:
            Exception: if the DAG contains a cycle or an unknown upstream.
        """
        core.validate_str(item.name, 'name')
        # dag_json must always be a valid JSON object string.
        if not item.dag_json:
            item.dag_json = '{}'
        core.validate_json(item.dag_json)

        # Topologically sort the DAG nodes; tasks with no configured upstream
        # become independent roots.
        def order_by_upstream(dag_json):
            """Return *dag_json* in topological order; raise on cycles."""
            order_dag = {}
            tasks_name = list(dag_json.keys())  # may be a partial task set
            i = 0
            while tasks_name:
                i += 1
                if i > 100:  # safety valve — no pipeline has 100 dependency levels
                    break
                # FIX: iterate over a snapshot; removing from the list being
                # iterated used to skip the element following each removal.
                for task_name in list(tasks_name):
                    task_conf = dag_json[task_name]
                    # No config at all -> root task, recorded with empty config.
                    if not task_conf:
                        order_dag[task_name] = {}
                        tasks_name.remove(task_name)
                        continue
                    # No upstream configured -> also a root task.
                    if 'upstream' not in task_conf or not task_conf['upstream']:
                        order_dag[task_name] = {}
                        tasks_name.remove(task_name)
                        continue
                    # Place a task only after all of its upstreams are placed.
                    if all(up in order_dag for up in task_conf['upstream']):
                        order_dag[task_name] = task_conf
                        tasks_name.remove(task_name)
            # FIX: the original compared `list(...).sort() != list(...).sort()`,
            # but list.sort() returns None, so the cycle check never fired and
            # cyclic/unknown-upstream tasks were silently dropped.  Compare
            # sorted copies instead.
            if sorted(dag_json.keys()) != sorted(order_dag.keys()):
                flash('dag pipeline 存在循环或未知上游', category='warning')
                raise Exception('dag pipeline 存在循环或未知上游')
            return order_dag

        # Ensure every task bound to this pipeline appears in the DAG JSON,
        # defaulting to no upstream.
        dag_json = json.loads(item.dag_json)
        tasks = item.get_tasks(db.session)
        if tasks and dag_json:
            for task in tasks:
                if task.name not in dag_json:
                    dag_json[task.name] = {'upstream': []}
        # Store the DAG topologically sorted and pretty-printed.
        item.dag_json = json.dumps(
            order_by_upstream(copy.deepcopy(dag_json)), ensure_ascii=False, indent=4
        )
        # Regenerate the executable workflow (e.g. Argo YAML) for persisted
        # pipelines that have tasks; clear it otherwise.
        if item.id and item.get_tasks():
            item.pipeline_file = dag_to_pipeline(item, db.session)
        else:
            item.pipeline_file = None

    # Merge upstream relationships submitted by the form into dag_json.
    def merge_upstream(self, pipeline):
        """Collect form-submitted 'task.<name>' attributes into dag_json.

        The visual editor posts each task's upstream list as an attribute
        named 'task.<task_name>' on the model instance.
        """
        logging.info(pipeline)

        prefix = 'task.'
        dag_json = {}
        for attr_name in pipeline.__dict__:
            if len(attr_name) > len(prefix) and attr_name[:len(prefix)] == prefix:
                upstream = getattr(pipeline, attr_name)
                dag_json[attr_name[len(prefix):]] = {'upstream': upstream or []}
        # Only overwrite dag_json when the form actually carried task attrs.
        if dag_json:
            pipeline.dag_json = json.dumps(dag_json)

    # Hook that runs before a new pipeline row is inserted.
    def pre_add(self, item):
        """Normalize a new pipeline: default project, sanitized name,
        validated DAG, and initial timestamps/parameters."""
        # Fall back to the 'public' org project when none (or a non-org
        # project) was chosen.
        if not item.project or item.project.type != 'org':
            public_project = (
                db.session.query(Project).filter_by(name='public').filter_by(type='org').first()
            )
            if public_project:
                item.project = public_project

        # k8s-friendly name: '-' instead of '_', lowercase, max 54 chars.
        item.name = item.name.replace('_', '-')[0:54].lower().strip('-')
        # Validate arguments and build the DAG/pipeline file.
        self.pipeline_args_check(item)

        # Stamp creation metadata and defaults.
        now = datetime.datetime.now()
        item.create_datetime = now
        item.change_datetime = now
        item.cronjob_start_time = now.strftime('%Y-%m-%d %H:%M:%S')
        item.parameter = json.dumps({}, indent=4, ensure_ascii=False)

    # Hook that runs before an edited pipeline row is saved.
    def pre_update(self, item):
        """Normalize an edited pipeline: JSON fields, name, merged DAG,
        cron window, and cluster warnings."""
        # Pretty-print (or reset) the 'expand' layout JSON.
        if item.expand:
            core.validate_json(item.expand)
            item.expand = json.dumps(json.loads(item.expand), indent=4, ensure_ascii=False)
        else:
            item.expand = '{}'

        # k8s-friendly name: '-' instead of '_', lowercase, max 54 chars.
        item.name = item.name.replace('_', '-')[0:54].lower()

        # Fold form-submitted upstream relations into dag_json, then
        # re-validate and regenerate the DAG/pipeline file.
        self.merge_upstream(item)
        self.pipeline_args_check(item)
        item.change_datetime = datetime.datetime.now()

        # Pretty-print (or reset) the 'parameter' JSON.
        if item.parameter:
            item.parameter = json.dumps(json.loads(item.parameter), indent=4, ensure_ascii=False)
        else:
            item.parameter = '{}'

        # Reset the cron start window when switching once->crontab or when
        # the cron expression itself changed.
        switched_to_cron = (
            item.schedule_type == 'crontab'
            and self.src_item_json.get('schedule_type') == 'once'
        )
        cron_changed = item.cron_time != self.src_item_json.get('cron_time', '')
        if switched_to_cron or cron_changed:
            item.cronjob_start_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # Warn when a cron job is scheduled on the shared/public cluster.
        if item.schedule_type == 'crontab':
            node_selector = item.project.node_selector
            if not node_selector:
                flash('无法保障公共集群的稳定性，定时任务请选择专门的日更集群项目组', 'warning')
            else:
                org = node_selector.replace('org=', '')
                if not org or org == 'public':
                    flash(
                        '无法保障公共集群的稳定性，定时任务请选择专门的日更集群项目组', 'warning'
                    )

    # Hook that runs on GET requests for the edit view.
    def pre_update_get(self, item):
        """Repair dag_json/expand before the edit form is rendered."""
        item.dag_json = item.fix_dag_json()
        fixed_expand = item.fix_expand()
        item.expand = json.dumps(fixed_expand, indent=4, ensure_ascii=False)
        # Persist the repaired fields immediately.
        db.session.commit()

    # Remove the pipeline's tasks before the pipeline itself is deleted.
    def pre_delete(self, pipeline):
        """Cascade-delete tasks, then soft-deprecate the pipeline record."""
        # Delete every task bound to this pipeline first.
        for task in pipeline.get_tasks():
            db.session.delete(task)
        db.session.commit()

        # Tag the record as deprecated instead of destroying its metadata.
        if '(废弃)' not in pipeline.describe:
            pipeline.describe = pipeline.describe + '(废弃)'
        # Stop any future scheduled runs and clear layout/DAG state.
        pipeline.schedule_type = 'once'
        pipeline.expand = ''
        pipeline.dag_json = '{}'
        db.session.commit()

    # Custom API endpoint to list pipelines created by the current user.
    @expose('/my/list/')
    def my(self):
        """Return the current user's pipelines as a JSON response."""
        try:
            user_id = g.user.id
            # FIX: a falsy user_id previously fell through without a return
            # statement, so the view returned None (an invalid Flask
            # response).  Answer with an explicit empty result instead.
            if not user_id:
                return json_response(message='success', status=0, result=[])
            pipelines = db.session.query(Pipeline).filter_by(created_by_fk=user_id).all()
            back = [pipeline.to_json() for pipeline in pipelines]
            return json_response(message='success', status=0, result=back)
        except Exception as e:
            # Log with traceback instead of printing to stdout.
            logging.exception('listing my pipelines failed')
            return json_response(message=str(e), status=-1, result={})

    # Custom API endpoint to list demo pipelines.
    @expose('/demo/list/')
    def demo(self):
        """Return pipelines flagged as demos (parameter contains
        '"demo": "true"') as a JSON response."""
        try:
            pipelines = (
                db.session.query(Pipeline)
                .filter(Pipeline.parameter.contains('"demo": "true"'))
                .all()
            )
            back = [pipeline.to_json() for pipeline in pipelines]
            return json_response(message='success', status=0, result=back)
        except Exception as e:
            # Log with traceback instead of printing to stdout.
            logging.exception('listing demo pipelines failed')
            return json_response(message=str(e), status=-1, result={})

    # Delete manually-triggered workflow CRDs; cron-launched ones are kept.
    def delete_bind_crd(self, crds):
        """Delete the k8s workflow custom resources in *crds* that were
        started manually.

        Runs recorded in RunHistory were launched by the scheduler and are
        skipped.  Failures on one CRD are logged and do not stop the loop.
        """
        for crd in crds:
            try:
                run_id = json.loads(crd['labels']).get('run-id', '')
                if not run_id:
                    continue
                # Cron-launched runs have a RunHistory row; leave them alone.
                run_history = db.session.query(RunHistory).filter_by(run_id=run_id).first()
                if run_history:
                    continue

                # FIX: db_crd can be None for workflows unknown to our DB;
                # the original accessed `db_crd.pipeline` unguarded and
                # raised AttributeError (swallowed by the broad except).
                db_crd = db.session.query(Workflow).filter_by(name=crd['name']).first()
                pipeline = db_crd.pipeline if db_crd else None
                # Pick the cluster kubeconfig from the pipeline's project,
                # falling back to the default cluster.
                if pipeline:
                    k8s_client = py_k8s.K8s(pipeline.project.cluster.get('KUBECONFIG', ''))
                else:
                    k8s_client = py_k8s.K8s()

                # Delete the workflow custom resource in the cluster.
                k8s_client.delete_workflow(
                    all_crd_info=conf.get('CRD_INFO', {}),
                    namespace=crd['namespace'],
                    run_id=run_id,
                )
                # Reflect the deletion in our database.
                if db_crd:
                    db_crd.status = 'Deleted'
                    db_crd.change_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    db.session.commit()
            except Exception:
                # Log with traceback instead of printing to stdout.
                logging.exception('deleting bound crd failed')

    # Decorator to check pipeline permissions before executing a view function.
    def check_pipeline_perms(user_fun):
        """Allow access when the user is admin or a member of the pipeline's
        project; answer 404 for a missing pipeline_id/row and 403 otherwise."""
        def wraps(*args, **kwargs):
            pipeline_id = int(kwargs.get('pipeline_id', '0'))
            if not pipeline_id:
                response = make_response('pipeline_id not exist')
                response.status_code = 404
                return response

            # Admins have access to every pipeline.
            user_roles = [role.name.lower() for role in g.user.roles]
            if 'admin' in user_roles:
                return user_fun(*args, **kwargs)

            pipeline = db.session.query(Pipeline).filter_by(id=pipeline_id).first()
            # FIX: a missing pipeline row previously raised AttributeError on
            # `pipeline.project`; answer 404 instead.
            if not pipeline:
                response = make_response('pipeline %s not exist' % pipeline_id)
                response.status_code = 404
                return response

            # Members of the pipeline's project have access.
            join_projects_id = security_manager.get_join_projects_id(db.session)
            if pipeline.project.id in join_projects_id:
                return user_fun(*args, **kwargs)

            # Everyone else is forbidden.
            response = make_response('no perms to run pipeline %s' % pipeline_id)
            response.status_code = 403
            return response

        return wraps

    # A class method to allow internal (non-HTTP) code paths to run a pipeline.
    @classmethod
    def run_pipeline_internal(cls, pipeline_id):
        """Trigger a run of the pipeline identified by *pipeline_id*.

        FIX: the original `cls.run_pipeline(pipeline_id)` called the unbound
        instance method with *pipeline_id* bound to ``self`` and no
        ``pipeline_id`` argument, raising TypeError.  Instantiate the view so
        the method is properly bound.
        NOTE(review): assumes the view class is constructible with no
        arguments (standard for Flask-AppBuilder views) — confirm.
        """
        cls().run_pipeline(pipeline_id)

    # Trigger a run of the given pipeline, then redirect to its log view.
    @expose('/run_pipeline/<pipeline_id>', methods=['GET', 'POST'])
    # @check_pipeline_perms
    def run_pipeline(self, pipeline_id, **kwargs):
        """Submit the pipeline identified by *pipeline_id* as a new workflow.

        Steps: clean up old workflow instances, mirror the cluster's workflow
        state into the local DB, delete manually-triggered CRDs, regenerate
        the pipeline definition, submit it, and redirect to the log page.

        NOTE(review): assumes *pipeline_id* matches an existing Pipeline row —
        a missing row would raise AttributeError below; confirm callers.
        """
        logging.info(f'pipeline_id: {pipeline_id}, {kwargs}')
        pipeline = db.session.query(Pipeline).filter_by(id=pipeline_id).first()
        # Clean up old workflow instances for this pipeline.
        pipeline.delete_old_task()
        time.sleep(1)  # presumably lets the deletion settle in the cluster — TODO confirm
        # Fetch current workflow CRDs from the cluster.
        back_crds = pipeline.get_workflow()
        logging.info(f'BACK_CRDS: {back_crds}')

        # Mirror cluster workflows missing locally into the Workflow table.
        for crd in back_crds:
            try:
                workflow = db.session.query(Workflow).filter_by(name=crd['name']).first()
                if not workflow:
                    # Derive the triggering username from the CRD labels.
                    username = ''
                    labels = json.loads(crd['labels'])
                    if 'run-rtx' in labels:
                        username = labels['run-rtx']
                    elif 'upload-rtx' in labels:
                        username = labels['upload-rtx']

                    workflow = Workflow(
                        name=crd['name'],
                        namespace=crd['namespace'],
                        create_time=crd['create_time'],
                        status=crd['status'],
                        display_status=crd['status'],
                        annotations=crd['annotations'],
                        labels=crd['labels'],
                        spec=crd['spec'],
                        status_more=crd['status_more'],
                        username=username,
                    )
                    db.session.add(workflow)
                    db.session.commit()
            except Exception as e:
                print(e)

        # Delete all historical manually-triggered workflows — running ones too.
        self.delete_bind_crd(back_crds)

        # Build the executable workflow definition from the DAG.
        pipeline.pipeline_file = dag_to_pipeline(pipeline, db.session, **kwargs)
        pipeline_json = yaml.safe_load(pipeline.pipeline_file)
        # Turn generateName into a unique concrete name for this run.
        pipeline_json['metadata']['name'] = (
            pipeline_json['metadata']['generateName'] + uuid.uuid4().hex[:5]
        )
        pipeline_json['metadata'].pop('generateName', None)

        # Submit the workflow to the cluster (calls the imported biz helper,
        # which this method name shadows).
        logging.info(f'提交任务流: {pipeline.name}')
        crd_name = run_pipeline(pipeline.project.cluster, pipeline_json)
        # Record the CRD name of the new run.
        pipeline.pipeline_argo_id = crd_name
        db.session.commit()  # persist the update
        # Send the user to the log view for the new run.
        return redirect('/pipeline_modelview/web/log/%s' % pipeline_id)

    # Open the visual DAG editor for a pipeline.
    @expose('/web/<pipeline_id>', methods=['GET'])
    def web(self, pipeline_id):
        """Repair the pipeline's persisted layout fields and redirect to the
        frontend DAG editor."""
        pipeline = db.session.query(Pipeline).filter_by(id=pipeline_id).first()

        # Repair dag_json and the frontend layout before the editor loads.
        pipeline.dag_json = pipeline.fix_dag_json()
        pipeline.expand = json.dumps(
            pipeline.fix_expand(), indent=4, ensure_ascii=False
        )  # fill in missing frontend expand entries
        pipeline.expand = json.dumps(
            pipeline.fix_position(), indent=4, ensure_ascii=False
        )  # re-center nodes in the viewport (overwrites the expand above)

        db.session.commit()
        # FIX: replaced a stray debug `print(pipeline_id)` with logging.
        logging.info('open dag editor for pipeline %s', pipeline_id)
        # Frontend editor URL (to be replaced once FE/BE integration lands).
        url = '/static/appbuilder/vison/index.html?pipeline_id=%s' % pipeline_id
        # Route through the generic external-link handler.
        return redirect('/frontend/showOutLink?url=%s' % urllib.parse.quote(url, safe=''))

    # Show the logs for the pipeline's latest run.
    @expose('/web/log/<pipeline_id>', methods=['GET'])
    def web_log(self, pipeline_id):
        """Embed or link the Kubeflow Pipelines run-detail page."""
        pipeline = db.session.query(Pipeline).filter_by(id=pipeline_id).first()
        # Without a recorded run there is nothing to show.
        if not pipeline.run_id:
            flash('no running instance', 'warning')
            return redirect('/pipeline_modelview/web/%s' % pipeline.id)

        run_url = (
            pipeline.project.cluster.get('PIPELINE_URL')
            + 'runs/details/'
            + pipeline.run_id
        )
        data = {
            'url': run_url,
            'target': 'div.page_f1flacxk:nth-of-type(0)',
            'delay': 500,
            'loading': True,
        }
        # Same-environment clusters embed inline; others open externally.
        if pipeline.project.cluster['NAME'] == conf.get('ENVIRONMENT'):
            return self.render_template('link.html', data=data)
        return self.render_template('external_link.html', data=data)

    # Show the Grafana monitoring dashboard for a pipeline.
    @expose('/web/monitoring/<pipeline_id>', methods=['GET'])
    def web_monitoring(self, pipeline_id):
        """Redirect to the Grafana dashboard for this pipeline."""
        pipeline = db.session.query(Pipeline).filter_by(id=int(pipeline_id)).first()
        # Without a recorded run there is nothing to monitor.
        if not pipeline.run_id:
            flash('no running instance', 'warning')
            return redirect('/pipeline_modelview/web/%s' % pipeline.id)

        grafana_host = pipeline.project.cluster.get('GRAFANA_HOST', '').strip('/')
        return redirect(grafana_host + conf.get('GRAFANA_TASK_PATH') + pipeline.name)

    # Show the Kubernetes pods belonging to a pipeline.
    @expose('/web/pod/<pipeline_id>', methods=['GET'])
    def web_pod(self, pipeline_id):
        """Embed or link the k8s dashboard filtered to this pipeline's pods."""
        pipeline = db.session.query(Pipeline).filter_by(id=pipeline_id).first()
        # Dashboard search pre-filtered by namespace and sanitized name.
        search_path = '#/search?namespace=%s&q=%s' % (
            conf.get('PIPELINE_NAMESPACE'),
            pipeline.name.replace('_', '-').lower(),
        )
        data = {
            'url': pipeline.project.cluster.get('K8S_DASHBOARD_CLUSTER', '') + search_path,
            'target': 'div.kd-chrome-container.kd-bg-background',
            'delay': 500,
            'loading': True,
        }
        # Same-environment clusters embed inline; others open externally.
        if pipeline.project.cluster['NAME'] == conf.get('ENVIRONMENT'):
            return self.render_template('link.html', data=data)
        return self.render_template('external_link.html', data=data)

    # Redirect to the run-history list pre-filtered for one pipeline.
    @expose('/web/runhistory/<pipeline_id>', methods=['GET'])
    def web_runhistory(self, pipeline_id):
        """Redirect to the RunHistory view filtered to *pipeline_id*."""
        filter_arg = urllib.parse.quote(
            json.dumps([{'key': 'pipeline', 'value': pipeline_id}], ensure_ascii=False)
        )
        url = conf.get('MODEL_URLS', {}).get('runhistory', '') + '?filter=' + filter_arg
        # FIX: replaced a stray debug `print(url)` with logging.
        logging.info('redirect to runhistory: %s', url)
        return redirect(url)

    # Redirect to the raw Workflow list pre-filtered for one pipeline.
    @expose('/web/workflow/<pipeline_id>', methods=['GET'])
    def web_workflow(self, pipeline_id):
        """Redirect to the Workflow view filtered by this pipeline's label."""
        filter_arg = urllib.parse.quote(
            json.dumps(
                [{'key': 'labels', 'value': '"pipeline-id": "%s"' % pipeline_id}],
                ensure_ascii=False,
            )
        )
        url = conf.get('MODEL_URLS', {}).get('workflow', '') + '?filter=' + filter_arg
        # FIX: replaced a stray debug `print(url)` with logging.
        logging.info('redirect to workflow: %s', url)
        return redirect(url)

    # Class method for internal use to copy a pipeline.
    # Class method for internal use to copy a pipeline by primary key.
    @classmethod
    def copy_pipeline_internal(cls, pipeline_id):
        """Look up a pipeline by id and clone it via :meth:`copy_db`.

        Raises:
            BizError: when no pipeline with the given id exists.
        """
        source = db.session.query(Pipeline).filter_by(id=pipeline_id).first()
        if source is None:
            raise BizError(
                CommonErrorCode.INTERNAL_ERROR, '系统异常，请联系技术支持或提交反馈信息'
            )
        return cls.copy_db(source)

    # @pysnooper.snoop(watch_explode=('expand'))
    # Static method that contains the core logic for copying a pipeline and its tasks.
    @staticmethod
    def copy_db(pipeline):
        """Deep-copy a pipeline and all of its tasks into new database rows.

        The clone gets a random 4-hex-char suffix appended to its (truncated)
        name and display name, and the frontend layout data stored in the
        'expand' JSON field is rewritten so node and edge ids reference the
        newly created task rows instead of the originals.

        Args:
            pipeline: Source Pipeline ORM object to duplicate.

        Returns:
            The newly created (and committed) Pipeline object.
        """
        # Clone the pipeline object itself.
        new_pipeline = pipeline.clone()
        expand = json.loads(pipeline.expand) if pipeline.expand else {}
        rand_str = uuid.uuid4().hex[:4]
        # Create a new unique name and display name; truncate to 90 chars so the
        # '-xxxx' suffix fits within the column limit.
        new_pipeline.name = new_pipeline.name[:90] + '-' + rand_str
        new_pipeline.display_name = new_pipeline.display_name[:90] + '-' + rand_str
        # Reset timestamps.
        new_pipeline.created_on = datetime.datetime.now()
        new_pipeline.changed_on = datetime.datetime.now()
        # Commit now so new_pipeline.id is assigned before tasks reference it.
        db.session.add(new_pipeline)
        db.session.commit()

        # Helper function to update task IDs in the frontend layout data ('expand' field).
        def change_node(src_task_id, des_task_id):
            for node in expand:
                if 'source' not in node:
                    # Position node: swap in the new task's id.
                    if int(node['id']) == int(src_task_id):
                        node['id'] = str(des_task_id)
                else:
                    # Edge node: rewrite either endpoint that matches the old id.
                    if int(node['source']) == int(src_task_id):
                        node['source'] = str(des_task_id)
                    if int(node['target']) == int(src_task_id):
                        node['target'] = str(des_task_id)

        # Clone each associated task and link it to the new pipeline.
        for task in pipeline.get_tasks():
            new_task = task.clone()
            new_task.pipeline_id = new_pipeline.id
            new_task.create_datetime = datetime.datetime.now()
            new_task.change_datetime = datetime.datetime.now()
            # Commit per task so new_task.id is populated for change_node below.
            db.session.add(new_task)
            db.session.commit()
            # Update the task ID in the layout data.
            change_node(task.id, new_task.id)

        # Save the updated layout data.
        new_pipeline.expand = json.dumps(expand)
        db.session.commit()
        return new_pipeline

    # # @event_logger.log_this
    # Expose an endpoint to copy a pipeline via the API.
    # Expose an endpoint to copy a pipeline via the API.
    @expose('/copy_pipeline/<pipeline_id>', methods=['GET', 'POST'])
    @wrap_response
    def copy_pipeline(self, pipeline_id):
        """Clone the pipeline identified by *pipeline_id* and return its JSON.

        Raises:
            BizError: when the pipeline does not exist or copying fails.
        """
        logging.info(f'copy_pipeline, pipeline_id={pipeline_id}')
        try:
            pipeline = db.session.query(Pipeline).filter_by(id=pipeline_id).first()
            # Guard against an unknown id instead of letting copy_db(None) raise
            # AttributeError — consistent with copy_pipeline_internal.
            if pipeline is not None:
                # Call the core copy logic and return the new pipeline's data.
                new_pipeline = self.copy_db(pipeline)
                return new_pipeline.to_json()
        except InvalidRequestError:
            # The session is in an invalid state; roll back before reporting failure.
            db.session.rollback()
        except Exception:
            # logging.exception records the full traceback, unlike logging.error(e).
            logging.exception('copy_pipeline failed, pipeline_id=%s', pipeline_id)
        raise BizError(CommonErrorCode.INTERNAL_ERROR, '系统异常，请联系技术支持或提交反馈信息')

    # Define a custom action for the list view to allow copying multiple pipelines.
    # Define a custom action for the list view to allow copying multiple pipelines.
    @action(
        'copy',
        __('Copy Pipeline'),
        confirmation=__('Copy Pipeline'),
        icon='fa-copy',
        multiple=True,
        single=False,
    )
    def copy(self, pipelines):
        """List-view action: duplicate every selected pipeline, then go back."""
        # The framework may hand us a single item instead of a list; normalize.
        targets = pipelines if isinstance(pipelines, list) else [pipelines]
        try:
            for item in targets:
                self.copy_db(item)
        except InvalidRequestError:
            # Recover the session so subsequent requests are not poisoned.
            db.session.rollback()
        except Exception as err:
            logging.error(err)
            raise err

        # Redirect back to the list view.
        return redirect(request.referrer)


# This is the final class for the standard web UI view of Pipelines.
class Pipeline_ModelView(Pipeline_ModelView_Base, MyappModelView, DeleteMixin):
    """Standard web-UI CRUD view for Pipeline records."""

    # SQLAlchemy data interface backing this view.
    datamodel = SQLAInterface(Pipeline)
    # base_order = ("changed_on", "desc")
    # order_columns = ['changed_on']


# Register the web-UI view with AppBuilder without creating a menu entry;
# it is reached through explicit routes instead.
appbuilder.add_view_no_menu(Pipeline_ModelView)


# 添加api
# This is the final class for the v1 REST API of Pipelines.
class Pipeline_ModelView_Api(Pipeline_ModelView_Base, MyappModelRestApi):
    """v1 REST API for Pipeline records, rooted at /pipeline_modelview/api."""

    # SQLAlchemy data interface backing this API.
    datamodel = SQLAInterface(Pipeline)
    route_base = '/pipeline_modelview/api'
    # Columns returned by the list endpoint.
    list_columns = ['id', 'project', 'pipeline_url', 'creator', 'modified']
    # Columns accepted when creating a pipeline.
    add_columns = ['project', 'name', 'describe']
    # Columns editable through the update endpoint.
    edit_columns = [
        'project',
        'name',
        'describe',
        'schedule_type',
        'cron_time',
        'depends_on_past',
        'max_active_runs',
        'parallelism',
        'dag_json',
        'global_env',
        'alert_status',
        'alert_user',
        'expand',
        'parameter',
        'cronjob_start_time',
    ]

    # Hook that runs on GET requests for the add view/info endpoint.
    def pre_add_get(self):
        """Restrict the info endpoint's default listing to the current user's pipelines."""
        # Set a default filter for the info endpoint to show only the user's own pipelines.
        self.default_filter = {'created_by': g.user.id}


# Register the v1 Pipeline REST API with AppBuilder so its routes are served.
appbuilder.add_api(Pipeline_ModelView_Api)
