# Import standard Python libraries.
import copy
import datetime
import json
import logging
import os
import random

# Import necessary components from Flask and Flask-AppBuilder.
from flask import Markup
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
# Import third-party libraries.
import numpy
import pydash
# Import various components from SQLAlchemy to define the database schema.
from sqlalchemy import (
    Boolean,
    Column,
    DateTime,
    Enum,
    ForeignKey,
    Integer,
    String,
    Text,
)
# Import the relationship function from SQLAlchemy's ORM to define relationships between models.
from sqlalchemy.orm import relationship

# Import application-specific configurations and helper modules.
from myapp.config import CRD_INFO, GRAFANA_TASK_PATH, MODEL_URLS
from myapp.models.base import MyappModelBase
from myapp.models.helpers import AuditMixinNullable
from myapp.third.k8s import py_k8s
from myapp.third.k8s.py_karmada import get_k8s_client_by_region
from myapp.third.modelarts.training_job import ModelArtsJob
from myapp.utils.env import is_private
from myapp.utils.exception import log_exception
from myapp.utils.storage import cube_bucket


# Get the metadata object from the base Model class. Metadata stores information about the
# database schema; every table defined in this module registers itself on it.
metadata = Model.metadata


# Docker image repository model (table: 'repository').
# Holds the address and credentials of an image registry plus the name of the
# Kubernetes secret used to pull images from it.
class Repository(Model, AuditMixinNullable, MyappModelBase):
    __tablename__ = 'repository'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Unique, human-readable repository name.
    name = Column(String(200), unique=True, nullable=False)
    # Registry server address (URL).
    server = Column(String(200), nullable=False)
    # Login user for the registry.
    user = Column(String(100), nullable=False)
    # Password or access token for the registry.
    password = Column(String(100), nullable=False)
    # Name of the Kubernetes image-pull secret backed by this repository.
    hubsecret = Column(String(100))
    # Deployment region.
    region = Column(
        String(100), nullable=False, default='default', server_default='default', comment='地区'
    )

    # Column labels shown in the UI, merged over the base-class defaults.
    label_columns_spec = {
        'server': '域名',
        'user': '用户名',
        'hubsecret': 'k8s hubsecret',
    }
    label_columns = {**MyappModelBase.label_columns, **label_columns_spec}

    def __repr__(self):
        """Display a repository by its name."""
        return self.name


# Docker image model (table: 'images').
# Describes one image: where it lives, how it was built, and its version tag.
class Images(Model, AuditMixinNullable, MyappModelBase):
    __tablename__ = 'images'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Project used to categorize the image.
    project_id = Column(Integer, ForeignKey('project.id'))
    project = relationship('Project', foreign_keys=[project_id])

    # Name of the Docker image.
    name = Column(String(500), nullable=False)
    # Free-text description of the image.
    describe = Column(String(1000), nullable=False)
    # Repository the image belongs to.
    repository_id = Column(Integer, ForeignKey('repository.id'))
    repository = relationship('Repository', foreign_keys=[repository_id])
    # Default entrypoint baked into the image.
    entrypoint = Column(String(200))
    # Dockerfile content used to build the image.
    dockerfile = Column(Text)
    # Git path where the image's source code lives.
    gitpath = Column(String(200))
    # Soft-delete flag.
    is_deleted = Column(Integer, nullable=False, default=0, comment='是否删除')
    # Image version / tag.
    image_version = Column(String(200), nullable=False, comment='镜像版本')
    # Whether this is a preset/system image.
    is_preset = Column(Integer, nullable=False, default=0, comment='是否预置镜像')
    # Image category.
    type = Column(
        String(100), nullable=False, default='default', server_default='default', comment='类别'
    )
    # Deployment region.
    region = Column(
        String(100), nullable=False, default='default', server_default='default', comment='地区'
    )

    # Column labels shown in the UI, merged over the base-class defaults.
    label_columns_spec = {
        'project': '功能分类',
    }
    label_columns = {**MyappModelBase.label_columns, **label_columns_spec}

    @property
    def images_url(self):
        """Image name, rendered as a link to its git path when one is set."""
        return Markup(f'<a href="{self.gitpath}">{self.name}</a>') if self.gitpath else self.name

    def __repr__(self):
        """Display an image by its name."""
        return self.name

    @property
    def repository_with_tag(self):
        """Full image reference including the tag, e.g. 'ubuntu:latest'.

        Falls back to 'latest' when no (non-blank) version is stored.
        """
        tag = (self.image_version or '').strip()
        return f"{self.name.strip()}:{tag or 'latest'}"

    def get_image_version(self):
        """Render the image version as a hyperlink; return the raw value when empty."""
        if not self.image_version:
            return self.image_version
        return Markup(f'<a href="{self.image_version}">{self.image_version}</a>')


# Job template model (table: 'job_template').
# A reusable definition (base image, command, default args/env, mounts) from
# which concrete pipeline tasks are created and run.
class Job_Template(Model, AuditMixinNullable, MyappModelBase):
    __tablename__ = 'job_template'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Project used to categorize the template.
    project_id = Column(Integer, ForeignKey('project.id'))
    project = relationship('Project', foreign_keys=[project_id])
    # Unique template name.
    name = Column(String(500), nullable=False, unique=True)
    # Release status of the template.
    version = Column(Enum('Release', 'Alpha'), nullable=False, default='Release')
    # Target environment: common = shared, private = private cloud, hw-cloud = public cloud.
    type = Column(
        Enum('common', 'private', 'hw-cloud'), nullable=False, default='common'
    )
    # Base image the job runs on.
    images_id = Column(Integer, ForeignKey('images.id'))
    images = relationship(Images, foreign_keys=[images_id])
    # Host aliases (/etc/hosts entries) for the job's pod.
    hostAliases = Column(Text)
    # Description of the template.
    describe = Column(String(500), nullable=False)
    # Working directory inside the container.
    workdir = Column(String(400))
    # Entrypoint or command to run.
    entrypoint = Column(String(1024))
    # Default arguments for the command.
    args = Column(Text)
    # Default environment variables, one KEY=VALUE entry per line.
    env = Column(Text)
    # Volume mounts that are always required by the template.
    volume_mount = Column(String(400), default='')
    # Whether to run the container in privileged mode.
    privileged = Column(Boolean, default=False)
    # Kubernetes service account to use.
    accounts = Column(String(100))
    # Usage demonstration / example for the template.
    demo = Column(Text)
    # Extra configuration, stored as a JSON string.
    expand = Column(Text(65536), default='{}')
    # Operation type: run a container or a serverless function.
    op_type = Column(Enum('container', 'function'), default='container')
    # Source code of the serverless function (used when op_type == 'function').
    function_str = Column(Text(65536), default='')

    # Column labels shown in the UI, merged over the base-class defaults.
    label_columns_spec = {
        'project': '功能分类',
    }
    label_columns = MyappModelBase.label_columns.copy()
    label_columns.update(label_columns_spec)

    def __repr__(self):
        """Display a template by its name."""
        return self.name  # +"(%s)"%self.version

    def as_dict(self):
        """Return all column values of this instance as a {name: str(value)} dict."""
        return {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}

    # Renders the 'args' field as a preformatted code block in the UI.
    @renders('args')
    def args_html(self):
        # Guard against None ('args' is a nullable Text column) to avoid a
        # TypeError on string concatenation, matching Pipeline's renderers.
        return Markup('<pre><code>' + (self.args or '') + '</code></pre>')

    # Renders the 'demo' field as a preformatted code block.
    @renders('demo')
    def demo_html(self):
        # Guard against None, same as args_html.
        return Markup('<pre><code>' + (self.demo or '') + '</code></pre>')

    # Renders the 'expand' field as a preformatted code block.
    @renders('expand')
    def expand_html(self):
        # 'expand' has a default of '{}' but legacy rows may still hold NULL.
        return Markup('<pre><code>' + (self.expand or '') + '</code></pre>')

    # Renders the 'name' field with a tooltip showing the description.
    @renders('name')
    def name_title(self):
        return Markup(
            '<a data-toggle="tooltip" rel="tooltip" title'
            f' data-original-title="{self.describe}">{self.name}</a>'
        )

    # Renders the associated image name as a link to the image's detail page.
    # NOTE(review): assumes self.images is always set — raises AttributeError otherwise; confirm.
    @property
    def images_url(self):
        return Markup(
            '<a target=_blank'
            f' href="/images_modelview/show/{self.images.id}">{self.images.name}</a>'
        )

    def get_env(self, name):
        """Return the value of environment variable `name` from `env`, or None.

        `env` holds one KEY=VALUE entry per line. Keys are compared exactly:
        the previous substring check wrongly let e.g. 'PATH' match a
        'PYTHONPATH=...' line, and `env.index('=')` raised ValueError for
        lines without '='.
        """
        if not self.env:
            return None
        for line in self.env.split('\n'):
            key, sep, value = line.partition('=')
            if sep and key.strip() == name:
                return value.strip()
        return None

    def clone(self):
        """Create a new, unsaved copy of this job template."""
        return Job_Template(
            name=self.name,
            version=self.version,
            project_id=self.project_id,
            images_id=self.images_id,
            describe=self.describe,
            args=self.args,
            demo=self.demo,
            expand=self.expand,
        )


# Define the Pipeline class, which maps to the 'pipeline' table.
# This model represents a workflow: a Directed Acyclic Graph (DAG) of tasks,
# together with its schedule, runtime settings, and frontend layout data.
class Pipeline(Model, AuditMixinNullable, MyappModelBase):
    # Specifies the name of the database table for this model.
    __tablename__ = 'pipeline'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Unique internal name of the pipeline.
    name = Column(String(100), nullable=False, unique=True)
    # User-facing display name.
    display_name = Column(String(100), nullable=False, default='')
    # Description of the pipeline.
    describe = Column(String(200), nullable=False)
    # Project used to categorize the pipeline.
    project_id = Column(Integer, ForeignKey('project.id'), nullable=False)  # foreign key
    # Many-to-one relationship with the Project model.
    project = relationship('Project', foreign_keys=[project_id])
    # DAG structure as a JSON string: {task_name: {'upstream': [task_name, ...]}}.
    dag_json = Column(Text, nullable=False, default='{}')
    # Kubernetes namespace the pipeline runs in.
    namespace = Column(String(100), default='pipeline')
    # Global environment variables for the pipeline.
    global_env = Column(String(500), default='')
    # Schedule type: run once or on a cron schedule.
    schedule_type = Column(Enum('once', 'crontab'), nullable=False, default='once')
    # Cron expression used when 'schedule_type' is 'crontab'.
    cron_time = Column(String(100))  # schedule period
    # Start time for backfilling cron jobs.
    cronjob_start_time = Column(String(300), default='')
    # Compiled pipeline definition file (e.g. Argo YAML).
    pipeline_file = Column(Text(655360), default='')
    # ID of the pipeline in the external system (Kubeflow Pipelines / Argo).
    pipeline_argo_id = Column(String(100))
    # Version ID of the uploaded pipeline.
    version_id = Column(String(100))
    # ID of the last or current run.
    run_id = Column(String(100))
    # Kubernetes node selectors controlling where tasks are scheduled.
    node_selector = Column(String(100), default='cpu=true,train=true')
    # Image pull policy for containers in the pipeline.
    image_pull_policy = Column(Enum('Always', 'IfNotPresent'), nullable=False, default='Always')
    # Maximum number of parallel tasks within a single pipeline run.
    parallelism = Column(Integer, nullable=False, default=1)  # max parallel tasks within one pipeline
    # Which run statuses should trigger an alert.
    alert_status = Column(
        String(100), default='Pending,Running,Succeeded,Failed,Terminated'
    )  # alerting statuses: Pending,Running,Succeeded,Failed,Unknown,Waiting,Terminated
    # Users to notify on alert.
    alert_user = Column(String(300), default='')
    # Extra UI-related data (frontend node/edge layout), stored as a JSON list.
    expand = Column(Text(65536), default='[]')
    # Airflow-like dependency on past successful runs.
    depends_on_past = Column(Boolean, default=False)
    # Maximum number of concurrent pipeline runs.
    max_active_runs = Column(Integer, nullable=False, default=3)  # max concurrently running pipeline instances
    # Number of completed runs to retain.
    expired_limit = Column(
        Integer, nullable=False, default=1
    )  # retention count; when effective it takes priority over max_active_runs
    # Pipeline-level parameters as a JSON string.
    parameter = Column(Text(65536), default='{}')
    # Link to a preset template.
    # NOTE(review): default='0' is a string default on an Integer column — presumably meant 0; confirm.
    template_id = Column(Integer, default='0')  # -1 preset template, 0 normal, >0 points to a preset template
    # Deployment region.
    region = Column(
        String(100), nullable=False, default='default', server_default='default', comment='地区'
    )

    def __repr__(self):
        """Display a pipeline by its name."""
        return self.name

    # Renders the pipeline's description as a link to its web UI.
    @property
    def pipeline_url(self):
        pipeline_url = '/pipeline_modelview/web/' + str(self.id)
        return Markup(f'<a target=_blank href="{pipeline_url}">{self.describe}</a>')

    # Renders a "run" link to trigger the pipeline manually.
    @property
    def run_pipeline(self):
        pipeline_run_url = '/pipeline_modelview/run_pipeline/' + str(self.id)
        return Markup(f'<a target=_blank href="{pipeline_run_url}">run</a>')

    # Renders a "log" link for the last run; plain (unlinked) text when no run exists.
    @property
    def log(self):
        if self.run_id:
            pipeline_url = '/pipeline_modelview/web/log/%s' % self.id
            return Markup(f'<a target=_blank href="{pipeline_url}">日志</a>')
        else:
            return Markup('日志')

    # Renders a link to view the pods associated with the pipeline.
    @property
    def pod(self):
        url = '/pipeline_modelview/web/pod/%s' % self.id
        return Markup(f'<a target=_blank href="{url}">pod</a>')

    # Renders the 'dag_json' field as a preformatted code block.
    @renders('dag_json')
    def dag_json_html(self):
        dag_json = self.dag_json or '{}'
        return Markup('<pre><code>' + dag_json + '</code></pre>')

    # Renders the 'expand' field as a preformatted code block.
    # NOTE(review): unlike dag_json_html, no None guard — crashes if 'expand' is NULL; confirm.
    @renders('expand')
    def expand_html(self):
        return Markup('<pre><code>' + self.expand + '</code></pre>')

    # Renders the 'parameter' field as a preformatted code block.
    @renders('parameter')
    def parameter_html(self):
        return Markup('<pre><code>' + self.parameter + '</code></pre>')

    # Renders the 'pipeline_file' field as a preformatted code block.
    @renders('pipeline_file')
    def pipeline_file_html(self):
        pipeline_file = self.pipeline_file or ''
        return Markup('<pre><code>' + pipeline_file + '</code></pre>')

    # Retrieves all Task objects that belong to this pipeline.
    def get_tasks(self, dbsession=None):
        # Fall back to the application's session when none is supplied
        # (imported lazily to avoid a circular import at module load).
        if not dbsession:
            from myapp.app import db

            dbsession = db.session
        return dbsession.query(Task).filter_by(pipeline_id=self.id).all()

    # Deletes Task rows that are no longer part of the pipeline's frontend
    # definition ('expand' holds the ids of the surviving nodes).
    # @pysnooper.snoop()
    def delete_old_task(self, dbsession=None):
        if not dbsession:
            from myapp.app import db

            dbsession = db.session
        try:
            expand_tasks = json.loads(self.expand) if self.expand else []
            tasks = dbsession.query(Task).filter_by(pipeline_id=self.id).all()
            # Ids of nodes still present in the frontend layout.
            # NOTE(review): if an 'id' is stored as an int, .isdecimal() raises
            # AttributeError and the whole cleanup is silently skipped by the
            # except below — confirm the stored id type.
            tasks_id = [
                int(expand_task['id'])
                for expand_task in expand_tasks
                if expand_task.get('id', '').isdecimal()
            ]
            for task in tasks:
                if task.id not in tasks_id:
                    dbsession.delete(task)
                    dbsession.commit()
        except Exception as e:
            print(e)

    # Queries the Kubernetes API for the workflow CRDs currently associated
    # with this pipeline (label 'pipeline-id' equal to this pipeline's id).
    def get_workflow(self):
        back_crds = []
        try:
            k8s_client = py_k8s.K8s(self.project.cluster.get('KUBECONFIG', ''))
            crd_info = CRD_INFO.get('workflow', {})
            logging.info('CRD_INFO')
            logging.info(crd_info)
            if crd_info:
                crds = k8s_client.get_crd(
                    group=crd_info['group'],
                    version=crd_info['version'],
                    plural=crd_info['plural'],
                    namespace=self.namespace,
                    label_selector='pipeline-id=%s' % str(self.id),
                )
                logging.info('CRDS in get_workflow()')
                logging.info(crds)
                for crd in crds:
                    # NOTE(review): the default '{}' is a truthy string, so this
                    # condition is always true — presumably meant to skip CRDs
                    # without labels; confirm.
                    if crd.get('labels', '{}'):
                        labels = json.loads(crd['labels'])
                        if labels.get('pipeline-id', '') == str(self.id):
                            back_crds.append(crd)
                            logging.info('Append crd has pipeline-id:' + str(self.id))
                            logging.info(crd)
            return back_crds
        except Exception as e:
            print(e)
        return back_crds

    # Renders a link (labelled with the schedule type) to the list of running
    # workflow instances filtered by this pipeline's id.
    @property
    def run_instance(self):
        url = r'/workflow_modelview/list/?_flt_2_labels="pipeline-id"%3A+"' + '%s"' % self.id
        # print(url)
        return Markup(f"<a href='{url}'>{self.schedule_type}</a>")  # k8s has a length limit

    # Sanitizes 'dag_json' so it is consistent with the Task rows that actually
    # exist for this pipeline; returns the corrected DAG as a JSON string.
    def fix_dag_json(self, dbsession=None):
        if not dbsession:
            from myapp.app import db

            dbsession = db.session
        if not self.dag_json:
            return '{}'
        dag = json.loads(self.dag_json)
        # If tasks were added but the pipeline was never saved, auto-create the DAG.
        if not dag:
            tasks = self.get_tasks(dbsession)
            if tasks:
                dag = {}
                for task in tasks:
                    dag[task.name] = {}
                dag_json = json.dumps(dag, indent=4, ensure_ascii=False)
                return dag_json
            else:
                return '{}'

        # Clean up tasks in the DAG that no longer exist.
        if dag:
            tasks = self.get_tasks(dbsession)
            all_task_names = [task.name for task in tasks]
            # First, add any new tasks to the DAG.
            for task in tasks:
                if task.name not in dag:
                    dag[task.name] = {}

            # Remove deleted tasks from the DAG (iterate a copy so deletion is safe).
            dag_back = copy.deepcopy(dag)
            for dag_task_name in dag_back:
                if dag_task_name not in all_task_names:
                    del dag[dag_task_name]

            # Remove deleted tasks from the upstream dependencies of other tasks.
            for dag_task_name in dag:
                upstream_tasks = (
                    dag[dag_task_name]['upstream'] if 'upstream' in dag[dag_task_name] else []
                )
                new_upstream_tasks = []
                for upstream_task in upstream_tasks:
                    if upstream_task in all_task_names:
                        new_upstream_tasks.append(upstream_task)

                dag[dag_task_name]['upstream'] = new_upstream_tasks

            dag_json = json.dumps(dag, indent=4, ensure_ascii=False)

            return dag_json

    # Shifts node positions so the graph is centered in the frontend view
    # (mean position moved to roughly x=400, y=300).
    # @pysnooper.snoop()
    def fix_position(self):
        expand_tasks = json.loads(self.expand) if self.expand else []
        if not expand_tasks:
            expand_tasks = []
        x = []
        y = []
        for item in expand_tasks:
            if 'position' in item:
                if item['position'].get('x', 0):
                    x.append(int(item['position'].get('x', 0)))
                    y.append(int(item['position'].get('y', 0)))
        x_dist = 400 - numpy.mean(x) if x else 0
        y_dist = 300 - numpy.mean(y) if y else 0
        for item in expand_tasks:
            if 'position' in item:
                if item['position'].get('x', 0):
                    item['position']['x'] = int(item['position']['x']) + x_dist
                    item['position']['y'] = int(item['position']['y']) + y_dist

        return expand_tasks

    # Generates the 'expand' structure the frontend needs to render the DAG:
    # node entries (with positions) plus edge entries derived from dag_json.
    def fix_expand(self, dbsession=None):
        if not dbsession:
            from myapp.app import db

            dbsession = db.session
        # Supplement 'expand' with basic node and edge information.
        tasks_src = self.get_tasks(dbsession)
        tasks = {}
        for task in tasks_src:
            tasks[str(task.id)] = task

        expand_tasks = json.loads(self.expand) if self.expand else []
        if not expand_tasks:
            expand_tasks = []
        expand_copy = copy.deepcopy(expand_tasks)

        # Delete entries whose tasks no longer exist (iterate a copy so removal is safe).
        for item in expand_copy:
            # Node-type entry ('data' key present).
            if 'data' in item:
                if item['id'] not in tasks:
                    expand_tasks.remove(item)

            # Edge-type entry.
            else:
                # if item['source'] not in tasks or item['target'] not in tasks:
                expand_tasks.remove(item)  # Delete all existing edges; they are fully recreated below.

        # Add (randomly placed) positions for new tasks.
        for task_id in tasks:
            exist = False
            for item in expand_tasks:
                if 'data' in item and item['id'] == str(task_id):
                    exist = True
                    break
            if not exist:
                # if task_id not in expand_tasks:
                expand_tasks.append(
                    {
                        'id': str(task_id),
                        'type': 'dataSet',
                        'position': {
                            'x': random.randint(100, 1000),
                            'y': random.randint(100, 1000),
                        },
                        'data': {
                            'name': tasks[task_id].name,
                            'label': tasks[task_id].label,
                        },
                    }
                )

        # Recreate all edges based on the current DAG.
        dag_json = json.loads(self.dag_json)
        for task_name in dag_json:
            upstreams = dag_json[task_name].get('upstream', [])
            if upstreams:
                for upstream_name in upstreams:
                    # NOTE(review): [0] raises IndexError when dag_json names a task
                    # that no longer exists — presumably fix_dag_json runs first; confirm.
                    upstream_task_id = [
                        task_id for task_id in tasks if tasks[task_id].name == upstream_name
                    ][0]
                    task_id = [task_id for task_id in tasks if tasks[task_id].name == task_name][0]
                    if upstream_task_id and task_id:
                        expand_tasks.append(
                            {
                                'source': str(upstream_task_id),
                                'arrowHeadType': 'arrow',
                                'target': str(task_id),
                                'id': self.name
                                + '__edge-%snull-%snull' % (upstream_task_id, task_id),
                            }
                        )
        return expand_tasks

    # Creates a new, unsaved copy of this pipeline. Underscores in names become
    # hyphens; the schedule is reset to 'once' and alerts/compiled file cleared.
    # @pysnooper.snoop()
    def clone(self):
        return Pipeline(
            name=self.name.replace('_', '-'),
            display_name=self.display_name.replace('_', '-'),
            project_id=self.project_id,
            dag_json=self.dag_json,
            describe=self.describe,
            namespace=self.namespace,
            global_env=self.global_env,
            schedule_type='once',
            cron_time=self.cron_time,
            pipeline_file='',
            pipeline_argo_id=self.pipeline_argo_id,
            node_selector=self.node_selector,
            image_pull_policy=self.image_pull_policy,
            parallelism=self.parallelism,
            alert_status='',
            expand=self.expand,
            parameter=self.parameter,
        )


# Define the Task class, which maps to the 'task' table.
# This model represents a single task or node within a Pipeline.
class Task(Model, AuditMixinNullable, MyappModelBase):
    """A single node within a Pipeline DAG.

    A task binds a Job_Template to task-specific overrides (command, args,
    volume mounts, resources) plus its own retry/timeout/skip policy, and
    exposes the HTML snippets used by the model views.
    """

    __tablename__ = 'task'
    id = Column(Integer, primary_key=True)
    # Task name; expected to be unique within its parent pipeline.
    name = Column(String(100), nullable=False)
    # User-friendly label (alias) shown in the UI.
    label = Column(String(100), nullable=False)
    # Foreign key to the job template this task instantiates.
    job_template_id = Column(Integer, ForeignKey('job_template.id'))
    job_template = relationship('Job_Template', foreign_keys=[job_template_id])
    # Foreign key to the parent pipeline.
    pipeline_id = Column(Integer, ForeignKey('pipeline.id'))
    pipeline = relationship('Pipeline', foreign_keys=[pipeline_id])
    # Overrides the template's working directory when non-empty.
    working_dir = Column(String(1000), default='')
    # Overrides the template's command when non-empty.
    command = Column(String(1000), default='')
    # Whether the command should replace the image's entrypoint.
    overwrite_entrypoint = Column(Boolean, default=False)
    # Task-specific arguments, stored as a JSON string. NOTE: no default, so
    # this column may be NULL; readers below must tolerate None.
    args = Column(Text)
    # Task-specific volume mounts.
    volume_mount = Column(
        String(2000), default='kubeflow-user-workspace(pvc):/mnt,kubeflow-archives(pvc):/archives'
    )
    # Node selector labels used to place the task's pod.
    node_selector = Column(String(100), default='cpu=true,train=true')
    # Memory resource requests/limits.
    resource_memory = Column(String(100), default='2G')
    # CPU resource requests/limits.
    resource_cpu = Column(String(100), default='2')
    # GPU resource requests/limits.
    resource_gpu = Column(String(100), default='0')
    # Task timeout in seconds (0 means no timeout).
    timeout = Column(Integer, nullable=False, default=0)
    # Number of retries on failure.
    retry = Column(Integer, nullable=False, default=0)
    # Output artifacts to persist (copied to object storage),
    # e.g. {'prediction': '/output.txt'}.
    outputs = Column(
        Text, default='{}'
    )
    # Monitoring-related information for this task (JSON string).
    monitoring = Column(Text, default='{}')
    # Extra configuration data (JSON string).
    expand = Column(Text(65536), default='')
    # Whether to skip this task during a pipeline run.
    skip = Column(Boolean, default=False)
    # Deployment region.
    region = Column(
        String(100), nullable=False, default='default', server_default='default', comment='地区'
    )
    # Used by F.A.B.'s import/export feature to link to the parent pipeline.
    export_parent = 'pipeline'

    def __repr__(self):
        return self.name

    def as_dict(self):
        """Serialize every column (stringified) plus the nested job template."""
        d = {c.name: str(getattr(self, c.name)) for c in self.__table__.columns}
        d['job_template'] = self.job_template.as_dict()
        return d

    @property
    def input_path(self):
        """Full input path built from the 'input_path' entry of the task args.

        Private deployments see the bucket mounted under /mnt; otherwise an
        OBS URL is returned.
        """
        # Fix: args may be NULL (no column default); treat that as an empty
        # argument dict instead of raising TypeError in json.loads.
        d = json.loads(self.args) if self.args else {}
        input_path = d.get('input_path', '')
        if is_private():
            return f'/mnt/{cube_bucket}/{input_path}'
        else:
            return f'obs://{cube_bucket}/{input_path}'

    # Renders a "debug" link for the task.
    @property
    def debug(self):
        return Markup(f'<a target=_blank href="/task_modelview/debug/{self.id}">debug</a>')

    # Renders a "run" link to execute the task independently.
    @property
    def run(self):
        return Markup(f'<a target=_blank href="/task_modelview/run/{self.id}">run</a>')

    # Renders a "clear" link, likely to clear task logs or state.
    @property
    def clear(self):
        return Markup(f'<a href="/task_modelview/clear/{self.id}">clear</a>')

    # Renders a "log" link to view task logs.
    @property
    def log(self):
        return Markup(f'<a target=_blank href="/task_modelview/log/{self.id}">log</a>')

    # A getter method for the node selector.
    def get_node_selector(self):
        return self.node_selector

    @renders('args')
    def args_html(self):
        # Fix: args may be NULL; fall back to '' so string concatenation does
        # not raise TypeError when rendering the list view.
        return Markup('<pre><code>' + (self.args or '') + '</code></pre>')

    @renders('expand')
    def expand_html(self):
        # Fix: tolerate NULL expand values (rows predating the '' default or
        # explicitly nulled) the same way as args_html.
        return Markup('<pre><code>' + (self.expand or '') + '</code></pre>')

    @renders('monitoring')
    def monitoring_html(self):
        """Render the monitoring JSON with an added Grafana link for the pod."""
        try:
            monitoring = json.loads(self.monitoring)
            monitoring['link'] = (
                self.pipeline.project.cluster.get('GRAFANA_HOST', '').strip('/')
                + GRAFANA_TASK_PATH
                + monitoring.get('pod_name', '')
            )
            return Markup(
                '<pre><code>'
                + json.dumps(monitoring, ensure_ascii=False, indent=4)
                + '</code></pre>'
            )
        except Exception:
            # Any parse/attribute failure degrades to a "none yet" placeholder.
            return Markup('<pre><code> 暂无 </code></pre>')

    @property
    def job_args_demo(self):
        # Fix: the template's demo text may be NULL; render it as empty
        # instead of raising TypeError.
        return Markup('<pre><code>' + (self.job_template.demo or '') + '</code></pre>')

    # Renders the job template name as a link to its detail page.
    @property
    def job_template_url(self):
        return Markup(
            '<a target=_blank'
            f' href="/job_template_modelview/show/{self.job_template.id}">'
            f'{self.job_template.name}</a>'
        )

    def clone(self):
        """Return a new, unsaved copy of this task (underscores in the name
        become dashes).

        NOTE(review): resource_gpu, overwrite_entrypoint, outputs, monitoring
        and skip are not copied and fall back to column defaults — confirm
        this is the intended clone semantics.
        """
        return Task(
            name=self.name.replace('_', '-'),
            label=self.label,
            job_template_id=self.job_template_id,
            pipeline_id=self.pipeline_id,
            working_dir=self.working_dir,
            command=self.command,
            args=self.args,
            volume_mount=self.volume_mount,
            node_selector=self.node_selector,
            resource_memory=self.resource_memory,
            resource_cpu=self.resource_cpu,
            timeout=self.timeout,
            retry=self.retry,
            expand=self.expand,
        )


# Define the RunHistory class, which maps to the 'run' table.
# This model logs each execution of a pipeline, especially for scheduled runs.
# 每次上传运行
class RunHistory(Model, MyappModelBase):
    """One recorded execution (upload/run) of a pipeline, notably for
    scheduled runs."""

    __tablename__ = 'run'
    id = Column(Integer, primary_key=True)
    # Parent pipeline.
    pipeline_id = Column(Integer, ForeignKey('pipeline.id'))
    pipeline = relationship('Pipeline', foreign_keys=[pipeline_id])
    # Compiled pipeline file used for this run.
    pipeline_file = Column(Text(65536), default='')
    # ID of the uploaded pipeline definition.
    pipeline_argo_id = Column(String(100))
    # ID of the uploaded pipeline version.
    version_id = Column(String(100))
    # Kubeflow Pipelines experiment identifier.
    experiment_id = Column(String(100))
    # Kubeflow Pipelines run identifier.
    run_id = Column(String(100))
    # Free-form messages related to this run.
    message = Column(Text, default='')
    # Timestamp when this history record was created.
    created_on = Column(DateTime, default=datetime.datetime.now, nullable=False)
    # Logical execution date of the run (for scheduled jobs).
    execution_date = Column(String(200), nullable=False)
    # Scheduling status: 'comed' means the scheduled time has arrived,
    # 'created' means scheduling has been initiated; de-duplicate before
    # acting on a record. NOTE(review): the original notes spelled this
    # 'commed', but the stored value and all comparisons use 'comed'.
    status = Column(
        String(100), default='comed'
    )

    @property
    def status_url(self):
        """The plain status while pending ('comed'); otherwise a link to the
        workflow details filtered by run id."""
        if self.status == 'comed':
            return self.status

        target = MODEL_URLS.get('workflow', '') + '/labels=' + self.run_id
        return Markup(f'<a target=_blank href="{target}">{self.status}</a>')

    @property
    def creator(self):
        """The run's creator is the creator of its pipeline."""
        return self.pipeline.creator

    @property
    def pipeline_url(self):
        """Link to the parent pipeline's web UI, labeled with its describe."""
        parent = self.pipeline
        return Markup(
            f'<a target=_blank href="/pipeline_modelview/web/{parent.id}">{parent.describe}</a>'
        )

    @property
    def history(self):
        """Link to the workflow history view filtered to this pipeline."""
        workflow_base = MODEL_URLS.get('workflow', '')
        path = f'{workflow_base}/labels="pipeline-id"%3A+"{self.pipeline_id}"'
        return Markup(f"<a href='{path}'>运行记录</a>")

    @property
    def log(self):
        """Link to this run's logs when a run id exists; plain text otherwise."""
        if not self.run_id:
            return Markup('日志')
        pipeline_url = (
            self.pipeline.project.cluster.get('PIPELINE_URL')
            + 'runs/details/'
            + str(self.run_id)
        )
        return Markup(f'<a target=_blank href="{pipeline_url}">日志</a>')


# This is a mixin class, not a direct SQLAlchemy model.
# It provides a common structure for representing Kubernetes Custom Resource Definitions (CRDs).
class Crd:
    """Mixin with the common column set and helpers for Kubernetes CRD models.

    Not mapped to a table itself; each subclass that also inherits Model gets
    these Column definitions applied to its own table.
    """

    # __tablename__ = "crd"
    id = Column(Integer, primary_key=True)
    name = Column(String(100), default='')
    display_name = Column(String(100), default='')
    namespace = Column(String(100), default='')
    create_time = Column(String(100), default='')
    change_time = Column(String(100), default='')

    status = Column(String(100), default='')
    display_status = Column(String(100), default='')
    annotations = Column(Text, default='')
    labels = Column(Text, default='')
    spec = Column(Text(65536), default='')
    status_more = Column(Text(65536), default='')
    username = Column(String(100), default='')
    info_json = Column(Text, default='{}')
    add_row_time = Column(DateTime, default=datetime.datetime.now)
    algorithm = Column(Text, default='')
    snapshot = Column(Text, default='')
    foreign_key = Column(String(100), default='')

    # Raw render helper for the annotations column.
    @renders('annotations')
    def annotations_html(self):
        return Markup(self.annotations)

    # Raw render helper for the labels column.
    @renders('labels')
    def labels_html(self):
        return Markup(self.labels)

    @property
    def final_status(self):
        """Final phase parsed from status_more; '未知' (unknown) when the
        field is empty, has no phase, or fails to parse."""
        try:
            if self.status_more:
                return json.loads(self.status_more).get('phase', '未知')
        except Exception as e:
            print(e)
        return '未知'

    @property
    def status_message(self):
        """For Pending resources, the message of the most recent child node
        found in status_more; '' otherwise."""
        if self.status != 'Pending' or not self.status_more:
            return ''
        parsed = json.loads(self.status_more)
        children = pydash.get(parsed, f'nodes.{self.name}.children')
        if not children:
            return ''
        return pydash.get(parsed, f'nodes.{children[-1]}.message', '')

    # Raw render helper for the spec column.
    @renders('spec')
    def spec_html(self):
        return Markup(self.spec)

    # Raw render helper for the status_more column.
    @renders('status_more')
    def status_more_html(self):
        return Markup(self.status_more)

    # Raw render helper for the info_json column.
    @renders('info_json')
    def info_json_html(self):
        return Markup(self.info_json)

    # Raw render helper for the algorithm column.
    @renders('algorithm')
    def algorithm_json(self):
        return Markup(self.algorithm)

    @property
    def stop(self):
        """'停止' (stop) link for this CRD instance."""
        return Markup(f'<a href="../stop/{self.id}">停止</a>')


# Define the Workflow class, which maps to the 'workflow' table.
# This model represents an Argo Workflow CRD, inheriting the common Crd structure.
class Workflow(Model, Crd, MyappModelBase):
    """An Argo Workflow CRD record, inheriting the common Crd columns."""

    __tablename__ = 'workflow'

    # Links the workflow back to the internal pipeline id.
    pipeline_id = Column(
        Integer, nullable=False, default=0, server_default='0', comment='关联pipeline_id'
    )
    # Duration of a historical training run (units not established here —
    # elapsed_time/duration_time below work in milliseconds).
    history_duration = Column(
        Integer, nullable=False, default=0, server_default='0', comment='历史训练时间'
    )
    # Deployment region.
    region = Column(
        String(100), nullable=False, default='default', server_default='default', comment='地区'
    )
    # Job priority.
    priority = Column(
        String(100), nullable=False, default='default', server_default='default', comment='优先级'
    )

    @property
    def run_history(self):
        """The RunHistory record whose pipeline_file contains this workflow's
        'run-id' label, or None when there is no such label/record."""
        from myapp.app import db

        label = json.loads(self.labels) if self.labels else {}
        runid = label.get('run-id', '')
        if runid:
            return (
                db.session.query(RunHistory)
                .filter(RunHistory.pipeline_file.contains(runid))
                .first()
            )
        else:
            return None

    def delete_k8s_workflow(self, dbsession=None, region='default'):
        """Delete the corresponding Workflow CRD from Kubernetes and mark this
        record Terminated. Failures are logged, never raised."""
        if not dbsession:
            from myapp.app import db

            dbsession = db.session

        try:
            k8s_client = get_k8s_client_by_region(region=region)

            crd_info = CRD_INFO.get('workflow', {})
            k8s_client.delete_crd(
                group=crd_info['group'],
                version=crd_info['version'],
                plural=crd_info['plural'],
                namespace=self.namespace,
                name=self.name,
            )
            self.status = 'Terminated'
            self.display_status = 'Terminated'
            self.change_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            dbsession.commit()
        except Exception as e:
            logging.exception(e)

    def stop_workflow(self, dbsession=None):
        """Stop the workflow: ModelArts jobs through the ModelArts API,
        anything else by deleting the k8s CRD."""
        if not dbsession:
            from myapp.app import db

            dbsession = db.session
        modelarts_id = self.get_modelarts_id()
        if modelarts_id:
            modelarts_job = ModelArtsJob()
            modelarts_job.stop_job(modelarts_id)
            self.status = 'Terminated'
            self.display_status = 'Terminated'
            self.change_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            dbsession.commit()
        else:
            # NOTE(review): self.region is not forwarded here, so the CRD is
            # always deleted in the 'default' region — confirm whether
            # delete_k8s_workflow(dbsession, region=self.region) is intended.
            self.delete_k8s_workflow(dbsession)

    def get_crd(self):
        """Fetch the full CRD object from the Kubernetes API, matched via the
        'run-id' label; returns None on failure or ambiguity."""
        try:
            if self.labels:
                run_id = json.loads(self.labels).get('run-id', '')
                k8s_client = py_k8s.K8s(self.pipeline.project.cluster.get('KUBECONFIG', ''))
                crd_info = CRD_INFO.get('workflow', {})
                crd = k8s_client.get_crd(
                    group=crd_info['group'],
                    version=crd_info['version'],
                    plural=crd_info['plural'],
                    namespace=self.namespace,
                    label_selector=f'run-id={run_id}',
                )
                # Only a unique match is trusted.
                if crd and len(crd) == 1:
                    return crd[0]
        except Exception as e:
            print(e)

    @property
    def schedule_type(self):
        """'crontab' when a RunHistory record exists (scheduled run), else
        'once'."""
        run_history = self.run_history
        if run_history:
            return 'crontab'
        else:
            return 'once'

    @property
    def execution_date(self):
        """Logical execution date from the RunHistory record, or 'once' for
        ad-hoc runs."""
        run_history = self.run_history
        if run_history:
            return run_history.execution_date
        else:
            return 'once'

    @property
    def task_status(self):
        """Per-task elapsed hours parsed from the workflow status, rendered
        as a preformatted block. Only 'Succeeded' pods are counted
        (retries and failures are ignored).

        NOTE(review): timestamps are parsed with '%Y-%m-%d %H:%M:%S'; this
        assumes startedAt/finishedAt were normalized upstream (raw Argo
        status uses ISO-8601 with 'T') — confirm.
        """
        status_mode = json.loads(self.status_more)
        task_status = {}
        nodes = status_mode.get('nodes', {})
        tasks = self.pipeline.get_tasks()
        for pod_name in nodes:
            pod = nodes[pod_name]
            if pod['type'] == 'Pod':
                if pod['phase'] == 'Succeeded':
                    templateName = pod['templateName']
                    for task in tasks:
                        if task.name == templateName:
                            finish_time = datetime.datetime.strptime(
                                pod['finishedAt'], '%Y-%m-%d %H:%M:%S'
                            )
                            start_time = datetime.datetime.strptime(
                                pod['startedAt'], '%Y-%m-%d %H:%M:%S'
                            )
                            # Elapsed wall-clock time in hours.
                            elapsed = (finish_time - start_time).days * 24 + (
                                finish_time - start_time
                            ).seconds / 60 / 60
                            task_status[task.label] = str(round(elapsed, 2)) + 'h'

        message = ''
        for key in task_status:
            message += key + ': ' + task_status[key] + '\n'
        return Markup('<pre><code>' + message + '</code></pre>')

    @property
    def elapsed_time(self):
        """Total elapsed time in milliseconds; ModelArts jobs report their
        own 'duration', everything else falls back to duration_time."""
        modelarts_id = self.get_modelarts_id()
        if modelarts_id and self.status_more:
            status_more = json.loads(self.status_more)
            return status_more.get('duration', 0)

        return self.duration_time

    @property
    def duration_time(self):
        """Duration in milliseconds computed from start/finish timestamps in
        status_more (finish falls back to change_time, start to foreign_key);
        0 when Pending, unparsable, or unknown."""
        if not self.status_more:
            return 0
        if self.status == 'Pending':
            return 0
        status_more = json.loads(self.status_more)
        finish_time = status_more.get('finishedAt', self.change_time)
        if not finish_time:
            finish_time = self.change_time

        # foreign_key doubles as a stored start-time override.
        start_time = self.foreign_key
        if start_time is None or len(start_time) == 0:
            start_time = status_more.get('startedAt', '')
        with log_exception:
            if finish_time and start_time:
                # Both ISO-8601 ('T'-separated) and space-separated
                # timestamps appear in practice; handle each.
                if 'T' in finish_time:
                    finish_time = datetime.datetime.strptime(finish_time, '%Y-%m-%dT%H:%M:%S')
                else:
                    finish_time = datetime.datetime.strptime(finish_time, '%Y-%m-%d %H:%M:%S')
                if 'T' in start_time:
                    start_time = datetime.datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')
                else:
                    start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
                delta = finish_time - start_time
                elapsed = delta.days * 24 * 60 * 60 + delta.seconds
                # Clamp negatives (clock skew / bad data) to 0.
                return max(elapsed * 1000, 0)

        return 0

    @property
    def pipeline(self):
        """The parent Pipeline resolved from the 'pipeline-id' label, or
        None when absent or unresolvable."""
        from myapp.app import db

        if self.labels:
            try:
                labels = json.loads(self.labels)
                pipeline_id = labels.get('pipeline-id', '')
                if pipeline_id:
                    pipeline = db.session.query(Pipeline).filter_by(id=int(pipeline_id)).first()
                    if pipeline:
                        return pipeline
            except Exception as e:
                print(e)
        return None

    @property
    def project(self):
        """Name of the owning project, or '未知' (unknown) when the pipeline
        cannot be resolved."""
        pipeline = self.pipeline
        if pipeline:
            return pipeline.project.name
        else:
            return '未知'

    @property
    def stop(self):
        """Overrides Crd.stop with the workflow-specific stop endpoint."""
        return Markup(f'<a href="/workflow_modelview/stop/{self.id}">停止</a>')

    @property
    def display_engine(self):
        """Engine version (preferred) or image URL from the algorithm JSON;
        '未知' (unknown) when neither is present."""
        if self.algorithm:
            algorithm = json.loads(self.algorithm)
            engine_version = pydash.get(algorithm, 'engine.engine_version')
            if engine_version:
                return engine_version
            image_url = pydash.get(algorithm, 'engine.image_url')
            if image_url:
                return image_url
        return '未知'

    @property
    def flavor_desc(self):
        """Resource flavor description from the algorithm JSON, or '未知'."""
        if self.algorithm:
            algorithm = json.loads(self.algorithm)
            flavor_desc = pydash.get(algorithm, 'resource.flavor_desc')
            if flavor_desc:
                return flavor_desc
        return '未知'

    def get_output_path(self):
        """Output path for the job derived from the algorithm's job_name;
        '' when unavailable. Private deployments use the /mnt mount."""
        if self.algorithm:
            job_name = json.loads(self.algorithm).get('job_name', None)
            if not job_name:
                return ''
            username = self.username
            if is_private():
                path = f'/mnt/publish-data/{username}/jobs/{job_name}/'
            else:
                path = f'/publish-data/{username}/jobs/{job_name}/outputs/'
            return path
        return ''

    def get_modelarts_id(self):
        """ModelArts job id from spec (preferred) or algorithm JSON; None
        when neither carries one."""
        modelarts_id = None
        if self.spec:
            modelarts_id = json.loads(self.spec).get('modelarts_id', None)
        if not modelarts_id and self.algorithm:
            modelarts_id = json.loads(self.algorithm).get('modelarts_id', None)
        return modelarts_id

    def get_sub_field(self, key, sub_key):
        """Read sub_key from the JSON stored in column `key`; None when the
        column is empty or the sub key is absent."""
        attr = getattr(self, key)
        if attr:
            return json.loads(getattr(self, key)).get(sub_key, None)
        return None

    @property
    def output_path(self):
        """Base output path for the job.

        Bug fix: the private-deployment branch previously computed
        os.path.join(...) and discarded the result, so the '/mnt/publish-data/'
        prefix was never applied; it is now returned, consistent with
        get_output_path().
        """
        job_name = json.loads(self.algorithm).get('job_name', None)
        base_path = f'{self.username}/jobs/{job_name}/'
        if is_private():
            base_path = os.path.join('/mnt/publish-data/', base_path)
        return base_path

    def reset_display_name(self, display_name=''):
        """Ensure the display name ends with the unique suffix taken from the
        internal name (only for names starting with 'pipeline'); returns the
        resulting display name."""
        suffix = '-' + self.name.split('-')[-1]
        if self.name.startswith('pipeline') and not self.display_name.endswith(suffix):
            if display_name:
                self.display_name = display_name + suffix
            else:
                self.display_name += suffix
        return self.display_name


# Defines the Tfjob class for representing TensorFlow Job CRDs.
class Tfjob(Model, Crd, MyappModelBase):
    """TensorFlow Job (TFJob) CRD record; columns come from the Crd mixin."""

    __tablename__ = 'tfjob'

    @property
    def run_instance(self):
        """Link to the parent workflow run, resolved from the 'run-id' label;
        '未知' (unknown) when absent or unparsable."""
        run_id = ''
        if self.labels:
            try:
                run_id = json.loads(self.labels).get('run-id', '')
            except Exception as e:
                print(e)
        if run_id:
            return Markup(
                f'<a href="/workflow_modelview/list/?_flt_2_labels={run_id}">运行实例</a>'
            )
        return Markup('未知')


# Defines the Xgbjob class for representing XGBoost Job CRDs.
class Xgbjob(Model, Crd, MyappModelBase):
    """XGBoost Job CRD record; all columns and helpers come from the Crd mixin."""

    # Specifies the name of the database table for this model.
    __tablename__ = 'xgbjob'


# Defines the Pytorchjob class for representing PyTorch Job CRDs.
class Pytorchjob(Model, Crd, MyappModelBase):
    """PyTorch Job CRD record; all columns and helpers come from the Crd mixin."""

    # Specifies the name of the database table for this model.
    __tablename__ = 'pytorchjob'
