--- START OF FILE view_task.py ---

# Import standard library modules.
import datetime
import json
import logging
import os
import time
import uuid

# Import components from Flask.
from flask import flash, g, redirect
# Import various form field widgets from Flask-AppBuilder.
from flask_appbuilder.fieldwidgets import (
    BS3PasswordFieldWidget,
    BS3TextFieldWidget,
    DatePickerWidget,
    DateTimePickerWidget,
    Select2ManyWidget,
    Select2Widget,
)
# Import a utility for converting SQLAlchemy models to forms.
from flask_appbuilder.forms import GeneralModelConverter
# Import the SQLAlchemy data interface from Flask-AppBuilder.
from flask_appbuilder.models.sqla.interface import SQLAInterface
# Import the lazy translation function from Flask-Babel.
from flask_babel import lazy_gettext as _
# Import the FileField for file uploads from Flask-WTF.
from flask_wtf.file import FileField
# Import Jinja2 for template rendering.
from jinja2 import BaseLoader, DebugUndefined, Environment
# Import various field types from WTForms for creating forms.
from wtforms import (
    BooleanField,
    DateField,
    DateTimeField,
    FloatField,
    IntegerField,
    SelectField,
    SelectMultipleField,
    StringField,
)
# Import a specific field for querying SQLAlchemy relationships from WTForms.
from wtforms.ext.sqlalchemy.fields import QuerySelectField
# Import validators from WTForms.
from wtforms.validators import DataRequired, Length, Regexp

# Import base API functionalities
# 导入基础API功能
from myapp.apis.base import expose
# Import application instance, appbuilder, and database instance
# 导入应用实例、appbuilder和数据库实例
from myapp.app import app, appbuilder, db
# Import custom form widgets and fields
# 导入自定义表单组件和字段
from myapp.forms import (
    MyBS3TextAreaFieldWidget,
    MyBS3TextFieldWidget,
    MyJSONField,
    MyLineSeparatedListField,
)
# Import Job_Template, Pipeline, and Task models
# 导入Job_Template、Pipeline和Task模型
from myapp.models.model_job import Job_Template, Pipeline, Task
# Import Kubernetes client
# 导入Kubernetes客户端
from myapp.third.k8s.py_k8s import K8s
# Import core utilities
# 导入核心工具
from myapp.utils import core
# Import CompactCRUDMixin for CRUD operations
# 导入CompactCRUDMixin用于CRUD操作
from myapp.views.base import CompactCRUDMixin

# Import resource utility functions
# 导入资源工具函数
from ..utils.resource import get_resource
# Import base view classes for custom filter, model view, and user roles retrieval
# 导入基础视图类，用于自定义过滤器、模型视图和用户角色检索
from .base import (
    MyappModelView,
    get_user_roles,
)
# Import base API class
# 导入基础API类
from .baseApi import MyappModelRestApi


# Get application configuration
# 获取应用配置
conf = app.config


# Base class holding the shared Task ModelView configuration: the Task data
# interface, list/show/add/edit column sets, column widths, default ordering
# and the model-to-form converter. Concrete CRUD/REST views derive from it.
class Task_ModelView_Base:
    # Title shown for this view in the UI (intentionally localized).
    label_title = '任务'
    # SQLAlchemy data-access interface bound to the Task model.
    datamodel = SQLAInterface(Task)
    # Base URL to redirect to after CRUD operations; pre_delete() appends
    # the concrete pipeline id at runtime.
    check_redirect_list_url = '/pipeline_modelview/edit/'
    # Per-table help link resolved from the HELP_URL config mapping.
    help_url = conf.get('HELP_URL', {}).get(datamodel.obj.__tablename__, '') if datamodel else ''
    # Columns rendered in the list view.
    list_columns = [
        'name',
        'label',
        'pipeline',
        'job_template',
        'volume_mount',
        'resource_memory',
        'resource_cpu',
        'resource_gpu',
        'timeout',
        'retry',
        'created_on',
        'changed_on',
        'monitoring',
        'expand',
    ]
    # Alternative (currently unused) list column set, kept for reference.
    # list_columns = ['name','label','job_template_url','volume_mount','debug','run','clear','log']
    # Display width and ellipsis style per column in the list view.
    cols_width = {
        'name': {'type': 'ellip2', 'width': 300},
        'label': {'type': 'ellip2', 'width': 300},
        'pipeline': {'type': 'ellip2', 'width': 300},
        'job_template': {'type': 'ellip2', 'width': 300},
        'volume_mount': {'type': 'ellip2', 'width': 600},
        'args': {'type': 'ellip2', 'width': 400},
        'resource_memory': {'type': 'ellip2', 'width': 100},
        'resource_cpu': {'type': 'ellip2', 'width': 100},
        'resource_gpu': {'type': 'ellip2', 'width': 100},
        'timeout': {'type': 'ellip2', 'width': 100},
        'retry': {'type': 'ellip2', 'width': 100},
        'created_on': {'type': 'ellip2', 'width': 300},
        'changed_on': {'type': 'ellip2', 'width': 300},
        'monitoring': {'type': 'ellip2', 'width': 300},
        'expand': {'type': 'ellip2', 'width': 300},
    }
    # Columns rendered in the detail (show) view.
    show_columns = [
        'name',
        'label',
        'pipeline',
        'job_template',
        'volume_mount',
        'command',
        'overwrite_entrypoint',
        'working_dir',
        'args_html',
        'resource_memory',
        'resource_cpu',
        'resource_gpu',
        'timeout',
        'retry',
        'skip',
        'created_by',
        'changed_by',
        'created_on',
        'changed_on',
        'monitoring_html',
    ]
    # Columns rendered in the add form.
    add_columns = [
        'job_template',
        'name',
        'label',
        'pipeline',
        'volume_mount',
        'command',
        'working_dir',
        'skip',
    ]
    # The edit form exposes the same columns as the add form.
    edit_columns = add_columns
    # Default list ordering: newest (highest id) first.
    base_order = ('id', 'desc')
    # Columns that can be sorted on.
    order_columns = ['id']
    # Columns that can be searched on.
    search_columns = ['pipeline']

    # Converter that builds WTForms forms from the SQLAlchemy model.
    conv = GeneralModelConverter(datamodel)

    # Extra/overridden WTForms fields for the add form. Field descriptions are
    # user-facing strings and are intentionally left in Chinese.
    add_form_extra_fields = {
        'args': StringField(
            _(datamodel.obj.lab('args')),
            widget=MyBS3TextAreaFieldWidget(rows=10),
        ),
        # Pipeline selector is rendered read-only: a task's pipeline is fixed.
        'pipeline': QuerySelectField(
            datamodel.obj.lab('pipeline'),
            query_factory=lambda: db.session.query(Pipeline),
            allow_blank=True,
            widget=Select2Widget(extra_classes='readonly'),
        ),
        'job_template': QuerySelectField(
            datamodel.obj.lab('job_template'),
            query_factory=lambda: db.session.query(Job_Template),
            allow_blank=True,
            widget=Select2Widget(),
        ),
        # NOTE(review): the description says max 50 characters, but the
        # validator allows up to 54 (matching the [0:54] truncation in
        # pre_add/pre_update) — confirm which limit is intended.
        'name': StringField(
            label=_(datamodel.obj.lab('name')),
            description='英文名(小写字母、数字、- 组成)，最长50个字符',
            widget=BS3TextFieldWidget(),
            validators=[Regexp(r'^[a-z][a-z0-9\-]*[a-z0-9]$'), Length(1, 54), DataRequired()],
        ),
        'label': StringField(
            label=_(datamodel.obj.lab('label')),
            description='中文名',
            widget=BS3TextFieldWidget(),
            validators=[DataRequired()],
        ),
        # External mounts; format is "source(type):/container_path" pairs,
        # comma separated (pvc / hostpath / memory sources).
        'volume_mount': StringField(
            label=_(datamodel.obj.lab('volume_mount')),
            description=(
                '外部挂载，'
                '格式:$pvc_name1(pvc):/$container_path1,$hostpath1(hostpath):/$container_path2,4G(memory):/dev/shm,注意pvc会自动挂载对应目录下的个人rtx子目录'
            ),
            widget=BS3TextFieldWidget(),
            default='kubeflow-user-workspace(pvc):/mnt,kubeflow-archives(pvc):/archives',
        ),
        'working_dir': StringField(
            label=_(datamodel.obj.lab('working_dir')),
            description='工作目录，容器启动的初始所在目录，不填默认使用Dockerfile内定义的工作目录',
            widget=BS3TextFieldWidget(),
        ),
        'command': StringField(
            label=_(datamodel.obj.lab('command')),
            description='启动命令',
            widget=BS3TextFieldWidget(),
        ),
        'overwrite_entrypoint': BooleanField(
            label=_(datamodel.obj.lab('overwrite_entrypoint')),
            description='启动命令是否覆盖Dockerfile中ENTRYPOINT，不覆盖则叠加。',
        ),
        # Defaults below come from the Task model's column defaults.
        'node_selector': StringField(
            label=_(datamodel.obj.lab('node_selector')),
            description='运行当前task所在的机器',
            widget=BS3TextFieldWidget(),
            default=Task.node_selector.default.arg,
            validators=[DataRequired()],
        ),
        'resource_memory': StringField(
            label=_(datamodel.obj.lab('resource_memory')),
            default=Task.resource_memory.default.arg,
            description='内存的资源使用限制，示例1G，10G， 最大100G，如需更多联系管理员',
            widget=BS3TextFieldWidget(),
            validators=[DataRequired()],
        ),
        'resource_cpu': StringField(
            label=_(datamodel.obj.lab('resource_cpu')),
            default=Task.resource_cpu.default.arg,
            description='cpu的资源使用限制(单位核)，示例 0.4，10，最大50核，如需更多联系管理员',
            widget=BS3TextFieldWidget(),
            validators=[DataRequired()],
        ),
        'timeout': IntegerField(
            label=_(datamodel.obj.lab('timeout')),
            default=Task.timeout.default.arg,
            description='task运行时长限制，为0表示不限制(单位s)',
            widget=BS3TextFieldWidget(),
        ),
        'retry': IntegerField(
            label=_(datamodel.obj.lab('retry')),
            default=Task.retry.default.arg,
            description='task重试次数',
            widget=BS3TextFieldWidget(),
        ),
        'outputs': StringField(
            label=_(datamodel.obj.lab('outputs')),
            default=Task.outputs.default.arg,
            description='task输出文件，支持容器目录文件和minio存储路径',
            widget=MyBS3TextAreaFieldWidget(rows=3),
        ),
    }

    # GPU limit field, added separately to the extra-field mapping.
    add_form_extra_fields['resource_gpu'] = StringField(
        _(datamodel.obj.lab('resource_gpu')),
        default='0',
        description=(
            'gpu的资源使用限制(单位卡)，示例:1，2，训练任务每个容器独占整卡。申请具体的卡型号，'
            '可以类似 1(V100),目前支持T4/V100/A100/VGPU'
        ),
        widget=BS3TextFieldWidget(),
    )

    # The edit form reuses the exact same extra fields as the add form.
    edit_form_extra_fields = add_form_extra_fields

    # Strip the display-only 'job_describe' field before the form is processed.
    # @pysnooper.snoop(watch_explode=('form'))
    def process_form(self, form, is_created):
        # 'job_describe' is informational only; drop it (if present) so the
        # framework never tries to persist it.
        form._fields.pop('job_describe', None)

    # Only admins and the creator of the owning pipeline may edit/delete a task.
    def check_edit_permission(self, item):
        # Normalize the current user's role names for comparison.
        roles = {role.name.lower() for role in get_user_roles()}
        # Admins always pass.
        if 'admin' in roles:
            return True
        # The pipeline creator also passes.
        pipeline = item.pipeline
        if g.user and g.user.username and pipeline and hasattr(pipeline, 'created_by'):
            if g.user.username == pipeline.created_by.username:
                return True
        # Everyone else is rejected with a warning banner.
        flash('just creator can edit/delete ', 'warning')
        return False

    # Validate the task's name, its args JSON and the volume_mount format.
    # @pysnooper.snoop(watch_explode=('item'))
    def task_args_check(self, item):
        # Name must be a valid identifier and args must be well-formed JSON.
        core.validate_str(item.name, 'name')
        core.validate_json(item.args)

        # Validate the task args against the job template's declared args and
        # write the normalized result back.
        parsed_task_args = json.loads(item.args)
        parsed_job_args = json.loads(item.job_template.args)
        checked_args = core.validate_task_args(parsed_task_args, parsed_job_args)
        item.args = json.dumps(checked_args, indent=4, ensure_ascii=False)

        # A non-empty volume_mount must use the "source:target" form.
        if item.volume_mount and ':' not in item.volume_mount:
            raise Exception('volume_mount is not valid, must contain : or null')

    # Merge the flat "args.*" attributes set on `item` into the nested JSON
    # document stored in item.args.
    # @pysnooper.snoop(watch_explode=('item'))
    def merge_args(self, item, action):
        """Collect every ``args.*`` attribute of *item* into one nested dict
        and serialize it back into ``item.args``.

        action: 'add' for newly created tasks, 'update' for edits; controls
        how missing (None) form values are filled in.
        """
        logging.info(item)

        # Turn a flat {'a.b.c': v} mapping into nested {'a': {'b': {'c': v}}}.
        # @pysnooper.snoop()
        def nest_once(inp_dict):
            out = {}
            if isinstance(inp_dict, dict):
                for key, val in inp_dict.items():
                    if '.' in key:
                        keys = key.split('.')
                        sub_dict = out
                        for sub_key_index in range(len(keys)):
                            sub_key = keys[sub_key_index]
                            # Intermediate path segment: ensure a dict exists.
                            if sub_key_index != len(keys) - 1:
                                if sub_key not in sub_dict:
                                    sub_dict[sub_key] = {}
                            else:
                                # Final segment: store the actual value.
                                sub_dict[sub_key] = val
                            # Descend into the just-created/assigned entry.
                            sub_dict = sub_dict[sub_key]

                    else:
                        out[key] = val
            return out

        args_json_column = {}
        # Collect all attributes named 'args.<path>' from the item instance.
        for arg in item.__dict__:
            if arg[:5] == 'args.':
                task_attr_value = getattr(item, arg)
                # On update, a None value means "unchanged": fall back to the
                # corresponding value already stored in item.args.
                if task_attr_value is None and action == 'update':  # Must not stay None
                    # logging.info(item.args)
                    src_attr = arg[5:].split('.')  # Multi-level sub-attribute path
                    sub_src_attr = json.loads(item.args)
                    for sub_key in src_attr:
                        # Walk down; missing keys degrade to an empty string.
                        sub_src_attr = sub_src_attr[sub_key] if sub_key in sub_src_attr else ''
                    args_json_column[arg] = sub_src_attr
                # On add, a None value (nothing entered) becomes an empty string.
                elif task_attr_value is None and action == 'add':  # Must not stay None
                    args_json_column[arg] = ''
                else:
                    args_json_column[arg] = task_attr_value

        # If any 'args.*' attributes were collected, nest them and keep only
        # the subtree under the top-level 'args' key.
        if args_json_column:
            # Convert the flat dotted keys into a multi-level nested dict.
            des_merge_args = nest_once(args_json_column)
            item.args = json.dumps(des_merge_args.get('args', {}))
        # Otherwise make sure item.args is at least an empty JSON object.
        elif not item.args:
            item.args = '{}'

    # Pre-add hook: normalize, validate and enrich a Task before insertion.
    # @pysnooper.snoop(watch_explode=('item'))
    def pre_add(self, item):
        # Normalize the name: dashes instead of underscores, max 54 chars,
        # lower-case (k8s resource-name friendly).
        item.name = item.name.replace('_', '-')[0:54].lower()
        # A job template is mandatory.
        if item.job_template is None:
            raise Exception('Job Template 为必选')

        # Start from the owning project's configured mounts.
        item.volume_mount = item.pipeline.project.volume_mount  # Default to the project's configuration

        # Append the job template's mounts if not already included.
        if (
            item.job_template.volume_mount
            and item.job_template.volume_mount not in item.volume_mount
        ):
            if item.volume_mount:
                item.volume_mount += ',' + item.job_template.volume_mount
            else:
                item.volume_mount = item.job_template.volume_mount
        # Clamp/normalize the requested resource limits.
        item.resource_memory = core.check_resource_memory(item.resource_memory)
        item.resource_cpu = core.check_resource_cpu(item.resource_cpu)
        # Merge the flat form args into item.args, then validate them.
        self.merge_args(item, 'add')
        self.task_args_check(item)
        # Special case: '深度学习' (deep learning) templates derive their
        # resources and timeout from the selected flavor instead of the form.
        if item.job_template.project.name == '深度学习':
            task_args = json.loads(item.args)
            resource = get_resource(task_args.get('flavor_id'))
            auto_stop = task_args.get('auto_stop', 0)
            item.resource_cpu = resource.get('cpu')
            item.resource_memory = resource.get('mem')
            item.resource_gpu = resource.get('gpu')
            item.timeout = int(auto_stop) * 3600
        # Stamp creation/change times.
        item.create_datetime = datetime.datetime.now()
        item.change_datetime = datetime.datetime.now()

        # Steer scheduling: flip the cpu/gpu node-selector label depending on
        # whether the task actually requests GPUs.
        if core.get_gpu(item.resource_gpu)[0]:
            item.node_selector = item.node_selector.replace('cpu=true', 'gpu=true')
        else:
            item.node_selector = item.node_selector.replace('gpu=true', 'cpu=true')

    # Pre-update hook: normalize, validate and enrich a Task before saving edits.
    # @pysnooper.snoop(watch_explode=('item'))
    def pre_update(self, item):
        # Normalize the name: dashes instead of underscores, max 54 chars,
        # lower-case (k8s resource-name friendly).
        item.name = item.name.replace('_', '-')[0:54].lower()
        # Upper-case the GPU spec (e.g. card type suffixes) if present.
        if item.resource_gpu:
            item.resource_gpu = str(item.resource_gpu).upper()
        # A job template is mandatory.
        if item.job_template is None:
            raise Exception('Job Template 为必选')

        # Malformed volume_mount falls back to the value stored before the
        # edit (self.src_item_json holds the pre-edit snapshot).
        if item.volume_mount and ':' not in item.volume_mount:
            item.volume_mount = self.src_item_json.get('volume_mount', '')

        # Trim whitespace and drop empty entries from the mount list.
        if item.volume_mount:
            item.volume_mount = ','.join(
                [x.strip() for x in item.volume_mount.split(',') if x.strip()]
            )

        # Validate and pretty-print the outputs/expand JSON fields.
        if item.outputs:
            core.validate_json(item.outputs)
            item.outputs = json.dumps(json.loads(item.outputs), indent=4, ensure_ascii=False)
        if item.expand:
            core.validate_json(item.expand)
            item.expand = json.dumps(json.loads(item.expand), indent=4, ensure_ascii=False)

        # Clamp/normalize resource limits, falling back to the pre-edit
        # values when the form left them blank.
        item.resource_memory = core.check_resource_memory(
            item.resource_memory, self.src_item_json.get('resource_memory', None)
        )
        item.resource_cpu = core.check_resource_cpu(
            item.resource_cpu, self.src_item_json.get('resource_cpu', None)
        )

        # Merge the flat form args into item.args, then validate them.
        self.merge_args(item, 'update')
        self.task_args_check(item)
        # Special case: '深度学习' (deep learning) templates derive their
        # resources and timeout from the selected flavor instead of the form.
        if item.job_template.project.name == '深度学习':
            task_args = json.loads(item.args)
            resource = get_resource(task_args.get('flavor_id'))
            auto_stop = task_args.get('auto_stop', 0)
            item.resource_cpu = resource.get('cpu')
            item.resource_memory = resource.get('mem')
            item.resource_gpu = resource.get('gpu')
            item.timeout = int(auto_stop) * 3600
        # Stamp the change time.
        item.change_datetime = datetime.datetime.now()

        # Steer scheduling: flip the cpu/gpu node-selector label depending on
        # whether the task actually requests GPUs.
        if core.get_gpu(item.resource_gpu)[0]:
            item.node_selector = item.node_selector.replace('cpu=true', 'gpu=true')
        else:
            item.node_selector = item.node_selector.replace('gpu=true', 'cpu=true')

    # Commented out: post_add hook (would update pipeline info, but causes session errors)
    # 已注释：post_add钩子（会更新管道信息，但会导致会话错误）
    # 添加和删除task以后要更新pipeline的信息,会报错is not bound to a Session
    # # @pysnooper.snoop()
    # def post_add(self, item):
    #     item.pipeline.pipeline_file = dag_to_pipeline(item.pipeline, db.session)
    #     pipeline_argo_id, version_id = upload_pipeline(item.pipeline)
    #     if pipeline_argo_id:
    #         item.pipeline.pipeline_argo_id = pipeline_argo_id
    #     if version_id:
    #         item.pipeline.version_id = version_id
    #     db.session.commit()

    # Commented out: post_update hook (would update pipeline info, but causes session errors)
    # 已注释：post_update钩子（会更新管道信息，但会导致会话错误）
    # # @pysnooper.snoop(watch_explode=('item'))
    # def post_update(self, item):
    #     # if type(item)==UnmarshalResult:
    #     #     pass
    #     item.pipeline.pipeline_file = dag_to_pipeline(item.pipeline, db.session)
    #     pipeline_argo_id, version_id = upload_pipeline(item.pipeline)
    #     if pipeline_argo_id:
    #         item.pipeline.pipeline_argo_id = pipeline_argo_id
    #     if version_id:
    #         item.pipeline.version_id = version_id
    #     # db.session.update(item)
    #     db.session.commit()

    # Before deletion, remember the owning pipeline and the redirect target —
    # once the task is deleted its pipeline can no longer be reached from it.
    def pre_delete(self, item):
        pipeline = item.pipeline
        self.pipeline = pipeline
        self.check_redirect_list_url = '/pipeline_modelview/edit/%s' % pipeline.id

    # Maps an abstract field-type name to the form widget used to render it
    # (None means the field type has no dedicated widget here).
    widget_config = {
        'int': MyBS3TextFieldWidget,
        'float': MyBS3TextFieldWidget,
        'bool': None,
        'str': MyBS3TextFieldWidget,
        'text': MyBS3TextAreaFieldWidget,
        'json': MyBS3TextAreaFieldWidget,
        'date': DatePickerWidget,
        'datetime': DateTimePickerWidget,
        'password': BS3PasswordFieldWidget,
        'enum': Select2Widget,
        'multiple': Select2ManyWidget,
        'file': None,
        'dict': None,
        'list': None,
    }

    # Maps an abstract field-type name to the WTForms field class used for it
    # (None means the type has no field mapping).
    field_config = {
        'int': IntegerField,
        'float': FloatField,
        'bool': BooleanField,
        'str': StringField,
        'text': StringField,
        'json': MyJSONField,  # MyJSONField: a plain text field would deliver a re-encoded JSON string to the backend
        'date': DateField,
        'datetime': DateTimeField,
        'password': StringField,
        'enum': SelectField,
        'multiple': SelectMultipleField,
        'file': FileField,
        'dict': None,
        'list': MyLineSeparatedListField,
    }

    # Launch a Kubernetes pod for a task: build its environment, render Jinja
    # templates in env/args, resolve resource limits, and create the pod.
    def run_pod(
        self,
        task,
        k8s_client,
        run_id,
        namespace,
        pod_name,
        image,
        working_dir,
        command,
        args
    ):
        # Start from the job template's own env block (newline-separated
        # KEY=VALUE lines), if any.
        task_env = task.job_template.env + '\n' if task.job_template.env else ''

        # Append the platform's well-known KFJ_* variables describing the task.
        task_env += 'KFJ_TASK_ID=' + str(task.id) + '\n'
        task_env += 'KFJ_TASK_NAME=' + str(task.name) + '\n'
        task_env += 'KFJ_TASK_NODE_SELECTOR=' + str(task.get_node_selector()) + '\n'
        task_env += 'KFJ_TASK_VOLUME_MOUNT=' + str(task.volume_mount) + '\n'
        task_env += 'KFJ_TASK_IMAGES=' + str(task.job_template.images) + '\n'
        task_env += 'KFJ_TASK_RESOURCE_CPU=' + str(task.resource_cpu) + '\n'
        task_env += 'KFJ_TASK_RESOURCE_MEMORY=' + str(task.resource_memory) + '\n'
        task_env += 'KFJ_TASK_RESOURCE_GPU=' + str(task.resource_gpu.replace('+', '')) + '\n'
        task_env += 'KFJ_TASK_PROJECT_NAME=' + str(task.pipeline.project.name) + '\n'
        task_env += 'KFJ_PIPELINE_ID=' + str(task.pipeline_id) + '\n'
        task_env += 'KFJ_RUN_ID=' + run_id + '\n'
        task_env += 'KFJ_CREATOR=' + str(task.pipeline.created_by.username) + '\n'
        task_env += 'KFJ_RUNNER=' + str(g.user.username) + '\n'
        task_env += 'KFJ_PIPELINE_NAME=' + str(task.pipeline.name) + '\n'
        task_env += 'KFJ_NAMESPACE=pipeline' + '\n'
        task_env += 'GPU_TYPE=%s' % os.environ.get('GPU_TYPE', 'NVIDIA') + '\n'

        # Render a Jinja template string with task/pipeline context.
        # DebugUndefined leaves unknown placeholders in place instead of failing.
        def template_str(src_str):
            rtemplate = Environment(loader=BaseLoader, undefined=DebugUndefined).from_string(
                src_str
            )
            des_str = rtemplate.render(
                creator=task.pipeline.created_by.username,
                datetime=datetime,
                runner=(
                    g.user.username
                    if g and g.user and g.user.username
                    else task.pipeline.created_by.username
                ),
                uuid=uuid,
                pipeline_id=task.pipeline.id,
                pipeline_name=task.pipeline.name,
                cluster_name=task.pipeline.project.cluster['NAME'],
            )
            return des_str

        # Pipeline-level global env: render first so any date placeholders are
        # expanded once, consistently.
        pipeline_global_env = (
            template_str(task.pipeline.global_env.strip()) if task.pipeline.global_env else ''
        )  # Render first so e.g. date placeholders stay consistent
        pipeline_global_env = [
            env.strip() for env in pipeline_global_env.split('\n') if '=' in env.strip()
        ]
        for env in pipeline_global_env:
            key, value = env[: env.index('=')], env[env.index('=') + 1 :]
            # NOTE(review): substring test against the accumulated env string;
            # a key that occurs inside another key or value is skipped even
            # though it was never set — confirm this is intended.
            if key not in task_env:
                task_env += key + '=' + value + '\n'

        # Platform-level global env from config, also template-rendered.
        platform_global_envs = json.loads(
            template_str(json.dumps(conf.get('GLOBAL_ENV', {}), indent=4, ensure_ascii=False))
        )
        for global_env_key in platform_global_envs:
            # NOTE(review): same substring-containment caveat as above.
            if global_env_key not in task_env:
                task_env += global_env_key + '=' + platform_global_envs[global_env_key] + '\n'
        # Render Jinja placeholders inside each container arg as well.
        new_args = []
        if args:
            for arg in args:
                new_args.append(template_str(arg))

        volume_mount = task.volume_mount

        # Resource limits: a job-template env override wins over the task's own value.
        resource_cpu = (
            task.job_template.get_env('TASK_RESOURCE_CPU')
            if task.job_template.get_env('TASK_RESOURCE_CPU')
            else task.resource_cpu
        )
        resource_gpu = (
            task.job_template.get_env('TASK_RESOURCE_GPU')
            if task.job_template.get_env('TASK_RESOURCE_GPU')
            else task.resource_gpu
        )
        resource_memory = (
            task.job_template.get_env('TASK_RESOURCE_MEMORY')
            if task.job_template.get_env('TASK_RESOURCE_MEMORY')
            else task.resource_memory
        )
        # Host aliases: platform config plus any template-specific entries.
        # NOTE(review): assumes conf['HOSTALIASES'] is a string; if it is
        # unset (None) the += below would raise — confirm config guarantees it.
        hostAliases = conf.get('HOSTALIASES')
        if task.job_template.hostAliases:
            hostAliases += '\n' + task.job_template.hostAliases
        # Create the (debug-style) pod in the target namespace.
        k8s_client.create_debug_pod(
            namespace,
            name=pod_name,
            labels={
                'pipeline': task.pipeline.name,
                'task': task.name,
                'user': g.user.username,
                'run-id': run_id,
                'pod-type': 'task',
            },
            command=command,
            args=new_args,
            volume_mount=volume_mount,
            working_dir=working_dir,
            node_selector=task.get_node_selector(),
            resource_memory=resource_memory,
            resource_cpu=resource_cpu,
            resource_gpu=resource_gpu,
            image_pull_policy=conf.get('IMAGE_PULL_POLICY', 'Always'),
            image_pull_secrets=[task.job_template.images.repository.hubsecret],
            image=image,
            hostAliases=hostAliases,
            env=task_env,
            privileged=task.job_template.privileged,
            accounts=task.job_template.accounts,
            username=task.pipeline.created_by.username,
        )

    # Debug endpoint: ensure a long-lived debug pod exists for the task, wait
    # for it to run, then redirect to its web log/terminal view.
    # @event_logger.log_this
    @expose('/debug/<task_id>', methods=['GET', 'POST'])
    def debug(self, task_id):
        # Look up the task by primary key.
        # NOTE(review): no None check — an unknown task_id would raise on
        # task.job_template below; confirm upstream guarantees a valid id.
        task = db.session.query(Task).filter_by(id=task_id).first()
        # For non-customize templates, only admins or the template's creator
        # may start debug mode.
        if task.job_template.name != conf.get('CUSTOMIZE_JOB'):
            if not g.user.is_admin and task.job_template.created_by.username != g.user.username:
                message = '仅管理员或当前任务模板创建者，可启动debug模式'
                flash(message, 'warning')
                return self.response(400, **{'status': 1, 'result': {}, 'message': message})

                # Unreachable alternative kept for reference:
                # return redirect('/pipeline_modelview/web/%s' % str(task.pipeline.id))

        # Kubernetes client for the pipeline's cluster, plus target namespace.
        k8s_client = K8s(task.pipeline.project.cluster.get('KUBECONFIG', ''))
        namespace = conf.get('PIPELINE_NAMESPACE')
        # Deterministic pod name derived from pipeline and task names,
        # truncated/sanitized to a valid k8s name.
        pod_name = (
            'debug-' + task.pipeline.name.replace('_', '-') + '-' + task.name.replace('_', '-')
        )
        pod_name = pod_name.lower()[:60].strip('-')
        # Check whether a debug pod already exists.
        pod = k8s_client.get_pods(namespace=namespace, pod_name=pod_name)
        # print(pod)
        if pod:
            pod = pod[0]
        # A finished ('Succeeded') historical pod is deleted so a fresh one
        # can be created below.
        # if pod and (pod['status']!='Running' and pod['status']!='Pending'):
        if pod and pod['status'] == 'Succeeded':
            k8s_client.delete_pods(namespace=namespace, pod_name=pod_name)
            time.sleep(2)
            pod = None
        # No pod, or pod not running: create a new long-sleeping debug pod.
        if not pod or pod['status'] != 'Running':
            run_id = 'debug-' + str(uuid.uuid4().hex)
            command = [
                'sh',
                '-c',
                (
                    'sleep 7200 && hour=`date +%H` && while [ $hour -ge 06 ];do sleep'
                    ' 3600;hour=`date +%H`;done'
                ),
            ]
            self.run_pod(
                task=task,
                k8s_client=k8s_client,
                run_id=run_id,
                namespace=namespace,
                pod_name=pod_name,
                # Customize-job templates take the image from the task args;
                # otherwise the template's configured image is used.
                image=(
                    json.loads(task.args)['images']
                    if task.job_template.name == conf.get('CUSTOMIZE_JOB')
                    else task.job_template.images.name
                ),
                working_dir='/mnt',
                command=command,
                args=None,
            )

        # Poll up to 30 times (2s apart, ~1 minute) for the pod to reach
        # Running state.
        try_num = 30
        message = '启动时间过长，一分钟后刷新此页面'
        while try_num > 0:
            pod = k8s_client.get_pods(namespace=namespace, pod_name=pod_name)
            # print(pod)
            if pod:
                pod = pod[0]
            # Pod exists: either it is running (done) or we enrich the
            # timeout message with its current status/conditions.
            if pod:
                if pod['status'] == 'Running':
                    break
                else:
                    try:
                        message = (
                            '启动时间过长，一分钟后刷新此页面'
                            + ', status:'
                            + pod['status']
                            + ', message:'
                            + json.dumps(
                                pod['status_more']['conditions'], indent=4, ensure_ascii=False
                            )
                        )
                    except Exception as e:
                        # Status details are best-effort; keep the base message.
                        print(e)
            try_num = try_num - 1
            time.sleep(2)
        # Exhausted all retries without the pod running: warn and bail out.
        if try_num == 0:
            flash(message, 'warning')
            return self.response(400, **{'status': 1, 'result': {}, 'message': message})
            # return redirect('/pipeline_modelview/web/%s'%str(task.pipeline.id))

        # Success: redirect to the in-browser pod debug/log view.
        return redirect(
            '/myapp/web/debug/%s/%s/%s/%s'
            % (task.pipeline.project.cluster['NAME'], namespace, pod_name, pod_name)
        )

    # Run task API endpoint: executes a single task as a standalone pod.
    @expose('/run/<task_id>', methods=['GET', 'POST'])
    # @pysnooper.snoop(watch_explode=('ops_args',))
    def run_task(self, task_id):
        """Run one task in its own pod and redirect to the live log view.

        Any leftover run pod (and the workflow objects tied to its
        ``run-id`` label) is deleted first; a fresh pod is then created
        from the task's job template and JSON args. Returns a 400 JSON
        response when deletion or pod startup times out.
        """
        # Look up the task record by its primary key.
        task = db.session.query(Task).filter_by(id=task_id).first()

        # Kubernetes client bound to the cluster configured on the task's
        # project; pods run in the shared pipeline namespace.
        k8s_client = K8s(task.pipeline.project.cluster.get('KUBECONFIG', ''))
        namespace = conf.get('PIPELINE_NAMESPACE')
        # Pod name "run-<pipeline>-<task>": '_' replaced with '-' to form
        # a valid DNS label, lowercased, truncated to 60 chars.
        pod_name = (
            'run-' + task.pipeline.name.replace('_', '-') + '-' + task.name.replace('_', '-')
        )
        pod_name = pod_name.lower()[:60].strip('-')
        # Fetch any existing pod with this name.
        pod = k8s_client.get_pods(namespace=namespace, pod_name=pod_name)
        # print(pod)
        if pod:
            pod = pod[0]
        # A pod left over from a previous run: delete it (plus the workflow
        # identified by its run-id label) and wait until it is gone.
        if pod:
            run_id = pod['labels'].get('run-id', '')
            if run_id:
                k8s_client.delete_workflow(
                    all_crd_info=conf.get('CRD_INFO', {}), namespace=namespace, run_id=run_id
                )

            k8s_client.delete_pods(namespace=namespace, pod_name=pod_name)
            delete_time = datetime.datetime.now()
            # Poll every 2s for the pod to disappear; give up after 60s.
            while pod:
                time.sleep(2)
                pod = k8s_client.get_pods(namespace=namespace, pod_name=pod_name)
                check_date = datetime.datetime.now()
                if (check_date - delete_time).seconds > 60:
                    message = '超时，请稍后重试'
                    flash(message, category='warning')
                    return self.response(400, **{'status': 1, 'result': {}, 'message': message})
                    # return redirect('/pipeline_modelview/web/%s' % str(task.pipeline.id))

        # No (remaining) pod: create a fresh one.
        if not pod:
            # Command resolution: the task's own command overrides the
            # job template entrypoint; split into argv, dropping blanks.
            command = None
            if task.job_template.entrypoint:
                command = task.job_template.entrypoint
            if task.command:
                command = task.command
            if command:
                command = command.split(' ')
                command = [com for com in command if com]
            ops_args = []

            task_args = json.loads(task.args) if task.args else {}

            # Flatten the task's JSON args into a CLI argument list.
            for task_attr_name in task_args:
                # Booleans act as flags: emit the name only when truthy.
                if isinstance(task_args[task_attr_name], bool):
                    if task_args[task_attr_name]:
                        ops_args.append('%s' % str(task_attr_name))
                # Dicts/lists are passed as a JSON-encoded value.
                elif isinstance(task_args[task_attr_name], dict) or isinstance(
                    task_args[task_attr_name], list
                ):
                    ops_args.append('%s' % str(task_attr_name))
                    ops_args.append(
                        '%s' % json.dumps(task_args[task_attr_name], ensure_ascii=False)
                    )
                # Empty values are skipped entirely (neither name nor value).
                elif not task_args[task_attr_name]:  # empty value: emit neither name nor value
                    pass
                else:
                    ops_args.append('%s' % str(task_attr_name))
                    ops_args.append(
                        '%s' % str(task_args[task_attr_name])
                    )  # scalar values are stringified as "name value" pairs

            # print(ops_args)
            run_id = 'run-' + str(task.pipeline.id) + '-' + str(task.id)

            # For "customize" jobs the image/workdir/command come from the
            # task args JSON; otherwise they come from the job template.
            self.run_pod(
                task=task,
                k8s_client=k8s_client,
                run_id=run_id,
                namespace=namespace,
                pod_name=pod_name,
                image=(
                    json.loads(task.args)['images']
                    if task.job_template.name == conf.get('CUSTOMIZE_JOB')
                    else task.job_template.images.name
                ),
                working_dir=(
                    json.loads(task.args)['workdir']
                    if task.job_template.name == conf.get('CUSTOMIZE_JOB')
                    else task.job_template.workdir
                ),
                command=(
                    ['bash', '-c', json.loads(task.args)['command']]
                    if task.job_template.name == conf.get('CUSTOMIZE_JOB')
                    else command
                ),
                args=None if task.job_template.name == conf.get('CUSTOMIZE_JOB') else ops_args,
            )

        # Poll (up to 5 tries, 2s apart) for the new pod to appear.
        try_num = 5
        while try_num > 0:
            pod = k8s_client.get_pods(namespace=namespace, pod_name=pod_name)
            # print(pod)
            if pod:
                break
            try_num = try_num - 1
            time.sleep(2)
        # Pod never showed up within the retry budget: report the timeout.
        if try_num == 0:
            message = '启动时间过长，一分钟后重试'
            flash(message, 'warning')
            return self.response(400, **{'status': 1, 'result': {}, 'message': message})
            # return redirect('/pipeline_modelview/web/%s' % str(task.pipeline.id))

        # Redirect to the pod's log page on the task's cluster.
        return redirect(
            '/myapp/web/log/%s/%s/%s'
            % (task.pipeline.project.cluster['NAME'], namespace, pod_name)
        )

    # Helper: delete one of a task's pods (by name prefix) plus its workflow.
    def _clear_task_pod(self, k8s_client, namespace, task, prefix):
        """Delete the pod named ``<prefix>-<pipeline>-<task>`` if present.

        Also removes the workflow objects and sibling pods sharing the
        pod's ``run-id`` label, mirroring how the pod was created.
        """
        # Pod naming must match the creation side: '_' -> '-' (DNS label),
        # lowercased, truncated to 60 chars.
        pod_name = (
            prefix + '-' + task.pipeline.name.replace('_', '-') + '-' + task.name.replace('_', '-')
        )
        pod_name = pod_name.lower()[:60].strip('-')
        pod = k8s_client.get_pods(namespace=namespace, pod_name=pod_name)
        if pod:
            pod = pod[0]
            k8s_client.delete_pods(namespace=namespace, pod_name=pod['name'])
            run_id = pod['labels'].get('run-id', '')
            if run_id:
                k8s_client.delete_workflow(
                    all_crd_info=conf.get('CRD_INFO', {}), namespace=namespace, run_id=run_id
                )
                k8s_client.delete_pods(namespace=namespace, labels={'run-id': run_id})
                time.sleep(2)

    # Clear task API endpoint (deletes both the run pod and the debug pod).
    @expose('/clear/<task_id>', methods=['GET', 'POST'])
    def clear_task(self, task_id):
        """Delete a task's run and debug pods, then return to the pipeline view.

        The run/debug cleanup logic was previously duplicated inline; it is
        now shared via :meth:`_clear_task_pod`.
        """
        # Look up the task record by its primary key.
        task = db.session.query(Task).filter_by(id=task_id).first()

        # Kubernetes client bound to the task's project cluster; pods live
        # in the shared pipeline namespace.
        k8s_client = K8s(task.pipeline.project.cluster.get('KUBECONFIG', ''))
        namespace = conf.get('PIPELINE_NAMESPACE')

        # Delete the run-time container, then the debug container.
        self._clear_task_pod(k8s_client, namespace, task, 'run')
        self._clear_task_pod(k8s_client, namespace, task, 'debug')

        flash('删除完成', category='success')
        # self.update_redirect()
        return redirect('/pipeline_modelview/web/%s' % str(task.pipeline.id))

    # Log task API endpoint: jump to the running pod's log page.
    @expose('/log/<task_id>', methods=['GET', 'POST'])
    def log_task(self, task_id):
        """Redirect to the log view of a task's currently running pod.

        When no run pod exists, flashes a warning and falls back to the
        pipeline web view.
        """
        # Look up the task record by its primary key.
        task = db.session.query(Task).filter_by(id=task_id).first()

        # Kubernetes client bound to the task's project cluster.
        k8s = K8s(task.pipeline.project.cluster.get('KUBECONFIG', ''))
        namespace = conf.get('PIPELINE_NAMESPACE')
        # The run pod is named "run-<pipeline>-<task>" in DNS-safe form,
        # matching the name used when the pod was created.
        running_pod_name = (
            'run-' + task.pipeline.name.replace('_', '-') + '-' + task.name.replace('_', '-')
        )
        pod_name = running_pod_name.lower()[:60].strip('-')
        # Look for the running pod.
        pod = k8s.get_pods(namespace=namespace, pod_name=pod_name)
        if pod:
            pod = pod[0]
            # Redirect to the pod's log view on its cluster.
            return redirect(
                '/myapp/web/log/%s/%s/%s'
                % (task.pipeline.project.cluster['NAME'], namespace, pod_name)
            )

        # No running pod found: warn and return to the pipeline view.
        # (Fix: this was flashed with category 'success' by mistake.)
        flash('未检测到当前task正在运行的容器', category='warning')
        return redirect('/pipeline_modelview/web/%s' % str(task.pipeline.id))


# Concrete web CRUD view for Task: shared Task behaviour from the base
# class, rendered with Flask-AppBuilder's compact CRUD layout.
class Task_ModelView(Task_ModelView_Base, CompactCRUDMixin, MyappModelView):
    """Web CRUD view over the Task model."""

    # SQLAlchemy data-access layer backing this view.
    datamodel = SQLAInterface(Task)


# Register the view with appbuilder without creating a menu entry;
# it is reached via its routes rather than the navigation menu.
appbuilder.add_view_no_menu(Task_ModelView)


# REST API counterpart of the Task views.
class Task_ModelView_Api(Task_ModelView_Base, MyappModelRestApi):
    """REST API exposing CRUD operations on the Task model."""

    # SQLAlchemy data-access layer backing this API.
    datamodel = SQLAInterface(Task)
    # URL prefix under which every endpoint of this API is mounted.
    route_base = '/task_modelview/api'
    # list_columns = ['name','label','job_template_url','volume_mount','debug']
    # Columns returned by the list endpoint.
    list_columns = [
        'name', 'label', 'pipeline', 'job_template', 'volume_mount',
        'node_selector', 'command', 'overwrite_entrypoint', 'working_dir',
        'args', 'resource_memory', 'resource_cpu', 'resource_gpu',
        'timeout', 'retry', 'created_by', 'changed_by', 'created_on',
        'changed_on', 'monitoring', 'expand',
    ]
    # Columns accepted when creating a task.
    add_columns = [
        'name', 'label', 'job_template', 'pipeline', 'working_dir',
        'command', 'args', 'volume_mount', 'node_selector',
        'resource_memory', 'resource_cpu', 'resource_gpu', 'timeout',
        'retry', 'skip', 'expand',
    ]
    # Editing accepts exactly the same columns as creation.
    edit_columns = add_columns
    # Columns returned by the detail (show) endpoint.
    show_columns = [
        'name', 'label', 'pipeline', 'job_template', 'volume_mount',
        'node_selector', 'command', 'overwrite_entrypoint', 'working_dir',
        'args', 'resource_memory', 'resource_cpu', 'resource_gpu',
        'timeout', 'retry', 'skip', 'created_by', 'changed_by',
        'created_on', 'changed_on', 'monitoring', 'expand',
    ]


# Register the Task REST API with appbuilder.
# (Fix: a stray markdown code-fence ``` had been pasted onto the end of
# this line, which is a SyntaxError in Python.)
appbuilder.add_api(Task_ModelView_Api)
