# Standard library imports
import datetime
import importlib
import json
import logging
import os
import re
import time
import uuid

# Third-party library imports
from flask import g
from jinja2 import BaseLoader, DebugUndefined, Environment
import kfp
from kfp.components import create_component_from_func
from kubernetes import client
from kubernetes.client.models import V1EnvVar, V1SecurityContext
import yaml

# Local application imports
from myapp.app import app
from myapp.models.model_job import Task
from myapp.third.k8s import py_k8s
from myapp.third.k8s.py_karmada import (
    get_k8s_client_by_region,
    propagation_job,
)
from myapp.third.k8s.scheduler import get_scheduler_name
from myapp.third.zhentong.api import NODE_LABEL_KEY
from myapp.utils.device_resource import DeviceResource


# Module-level logger, named after this module (myapp convention).
log = logging.getLogger(__name__)
# Flask application configuration object, shared across pipeline helpers.
conf = app.config



def is_npu(task):
    """
    Check whether the task is configured to use NPU (Ascend 910) resources.

    NPU (ascend910) tasks are later launched through the Volcano scheduler.

    Args:
        task: The task object with a ``resource_gpu`` string attribute.

    Returns:
        bool: True if the task requests Ascend 910 NPU resources, False otherwise.
    """
    # Case-insensitive substring match on the GPU resource spec; no need for
    # an explicit if/return-True/return-False ladder.
    return 'ascend910' in task.resource_gpu.lower()



def find_container(ori_dict, name):
    """
    Look up a container template by name within a pipeline definition.

    Args:
        ori_dict (dict): Pipeline dictionary expected to hold templates under
            ``spec.templates``.
        name (str): Name of the template to look for.

    Returns:
        dict or None: The matching template dict, or None when the expected
        structure is missing or no template carries that name.
    """
    # Bail out early when the expected nesting is absent.
    if 'spec' not in ori_dict or 'templates' not in ori_dict['spec']:
        return None
    # First template whose name matches, else None.
    return next(
        (tpl for tpl in ori_dict['spec']['templates'] if tpl['name'] == name),
        None,
    )



def change_to_multi_node_yaml(pipeline_json, resource_cfg):
    """
    Transform a single-node pipeline JSON into a multi-node Volcano job workflow.

    Designed for GPU environments with multi-node, multi-card training: the
    single container template of the original Argo Workflow is wrapped into a
    Volcano Job manifest, and the returned workflow submits that manifest via
    an Argo ``resource`` template.

    Args:
        pipeline_json (dict): The original Argo Workflow definition.
        resource_cfg (dict): Resource configuration; ``node_num`` selects the
            number of worker replicas (defaults to 1).

    Returns:
        dict: The new Argo Workflow definition that creates the Volcano Job.
    """
    logging.warning(f'[change_to_multi_node_yaml] old: {pipeline_json}')
    # Locate the DAG template (kept only for logging) and the container template.
    dag_dict = {}
    task_dict = {}
    for temp in pipeline_json['spec']['templates']:
        if 'dag' in temp:
            dag_dict = temp
        elif 'container' in temp:
            task_dict = temp

    # Retrieve image pull secrets, falling back to the platform defaults.
    image_secrets = pipeline_json['spec'].get('imagePullSecrets', None)
    if image_secrets is None:
        image_secrets = [
            {'name': 'ee'},
            {'name': 'hubsecret'},
            {'name': 'twr'},
            {'name': 'acr'},
        ]

    # The container must carry a name; default to the task template's name.
    if 'name' not in task_dict['container']:
        task_dict['container']['name'] = task_dict['name']

    # Deduplicate environment variables by name (the last occurrence wins).
    # Tolerate container specs without an explicit env list.
    envs = task_dict['container'].get('env', [])
    task_dict['container']['env'] = list({item['name']: item for item in envs}.values())

    # Number of worker replicas requested for the job.
    node_num = resource_cfg.get('node_num', 1)
    logging.info(
        f"[change_to_multi_node_yaml] "
        f"[node_num] {node_num}; [templates_len] {len(pipeline_json['spec']['templates'])}; "
        f"[dag_dict] {dag_dict}; [task_dict] {task_dict}"
    )

    # Lifecycle policies shared by the Volcano job and its worker task
    # (previously duplicated literal).
    job_policies = [
        {'event': 'PodEvicted', 'action': 'TerminateJob'},
        {'event': 'PodFailed', 'action': 'TerminateJob'},
        {'event': 'PodCompleted', 'action': 'CompleteJob'},
        {'event': 'TaskEvicted', 'action': 'TerminateJob'},
        {'event': 'TaskFailed', 'action': 'TerminateJob'},
        {'event': 'TaskCompleted', 'action': 'CompleteJob'},
    ]

    # The Volcano Job definition; ownerReferences tie its lifetime to the
    # Argo workflow that creates it ({{workflow.*}} is rendered by Argo).
    volcano_dict = {
        'apiVersion': 'batch.volcano.sh/v1alpha1',
        'kind': 'Job',
        'metadata': {
            'name': pipeline_json.get('metadata', {}).get('name'),
            'ownerReferences': [
                {
                    'apiVersion': 'argoproj.io/v1alpha1',
                    'blockOwnerDeletion': True,
                    'kind': 'Workflow',
                    'name': '{{workflow.name}}',
                    'uid': '{{workflow.uid}}',
                }
            ],
            'namespace': pipeline_json.get('metadata', {}).get('namespace'),
            'labels': pipeline_json.get('metadata', {}).get('labels'),
            'annotations': pipeline_json.get('metadata', {}).get('annotations'),
        },
        'spec': {
            'minAvailable': node_num,
            'schedulerName': 'volcano',
            'plugins': {
                'env': [],
                'svc': ['--publish-not-ready-addresses=false', '--disable-network-policy=false'],
            },
            'policies': job_policies,
            'queue': 'default',
            'tasks': [
                {
                    'replicas': node_num,
                    'name': 'worker',
                    'policies': job_policies,
                    'template': {
                        'spec': {
                            'restartPolicy': 'Never',
                            'containers': [task_dict['container']],
                            'imagePullSecrets': image_secrets,
                            'nodeSelector': task_dict['nodeSelector'],
                            'volumes': task_dict['volumes'],
                        }
                    },
                }
            ],
        },
    }
    # The Volcano job is embedded in the workflow as a JSON manifest string.
    volcano_json = json.dumps(volcano_dict)
    logging.info(f'[[change_to_multi_node_yaml]] {volcano_dict}')

    # New workflow: a one-task DAG whose task applies the Volcano manifest
    # through an Argo `resource` template and waits on its completion phase.
    new_pipeline_json = {
        'apiVersion': 'argoproj.io/v1alpha1',
        'kind': 'Workflow',
        'metadata': {
            'name': pipeline_json.get('metadata', {}).get('name'),
            'namespace': pipeline_json.get('metadata', {}).get('namespace'),
            'labels': pipeline_json.get('metadata', {}).get('labels'),
            'annotations': pipeline_json.get('metadata', {}).get('annotations'),
        },
        'spec': {
            'entrypoint': pipeline_json['spec']['entrypoint'],
            'serviceAccountName': pipeline_json['spec']['serviceAccountName'],
            'templates': [
                {
                    'name': pipeline_json['spec']['entrypoint'],
                    'dag': {
                        'tasks': [
                            {
                                'name': task_dict['name'],
                                'template': task_dict['name'],
                                'arguments': {
                                    'parameters': [
                                        {'name': 'message', 'value': 'A'},
                                        {'name': 'task', 'value': 'aaa'},
                                    ]
                                },
                            }
                        ]
                    },
                },
                {
                    'name': task_dict['name'],
                    'inputs': {'parameters': [{'name': 'message'}, {'name': 'task'}]},
                    'resource': {
                        'action': 'create',
                        'successCondition': 'status.state.phase = Completed',
                        'failureCondition': 'status.state.phase = Failed',
                        'manifest': volcano_json,
                    },
                },
            ],
        },
    }
    logging.info(f'[[change_to_multi_node_yaml]] new: {new_pipeline_json}')
    return new_pipeline_json



def npu_manifest(task, ori_dict, pipeline_name, namespace):
    """
    Build a Volcano Job manifest for an NPU (Ascend 910) task.

    The container spec for ``task`` is extracted from the original Argo
    workflow dict, augmented with NPU-specific volume mounts and environment
    variables, and embedded into a Volcano Job skeleton.

    Args:
        task: The task object (uses ``name``, ``id``, ``expand``,
            ``resource_gpu``).
        ori_dict (dict): The original pipeline (Argo Workflow) dictionary.
        pipeline_name (str): Pipeline name, used to derive the job name.
        namespace (str): The Kubernetes namespace for the job.

    Returns:
        dict: The generated Volcano Job manifest dictionary.
    """
    task_name = task.name
    # Load the task's expand configuration, defaulting to an empty dict.
    expand = json.loads(task.expand if task.expand else '{}')
    logging.warning(f'[npu_manifest] expand: {expand}')
    # node_count = expand.get('node_count', 1)
    # NOTE(review): resource_gpu is parsed as JSON here, while is_npu() treats
    # it as a plain string -- confirm both representations are valid upstream.
    resource_cfg = json.loads(task.resource_gpu)
    # Number of worker nodes, defaulting to 1.
    node_count = resource_cfg.get('node_num', 1)

    # Find the container template matching this task.
    # NOTE(review): find_container may return None; a missing template would
    # raise a TypeError below -- confirm the task template always exists.
    task_spec = find_container(ori_dict, task_name)
    # Derive a unique, DNS-safe job name (<= 60 chars) from the pipeline name.
    job_name = (
        pipeline_name.replace('_', '-')[0:54].lower().strip('-') + '-' + uuid.uuid4().hex[:5]
    )
    # Base Volcano Job skeleton; the quadruple braces render to Argo's
    # `{{workflow.*}}` placeholders after f-string formatting.
    manifest = f"""
apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: {job_name}
  namespace: {namespace}
  labels:
    ring-controller.atlas: ascend-910
    fault-scheduling: "force"
    fault-retry-times: "3"
    workflows.argoproj.io/workflow: '{{{{workflow.name}}}}'
  ownerReferences:
    - apiVersion: argoproj.io/v1alpha1
      blockOwnerDeletion: true
      kind: Workflow
      name: "{{{{workflow.name}}}}"
      uid: "{{{{ workflow.uid }}}}"
spec:
  minAvailable: {node_count}
  schedulerName: volcano
  plugins:
    ssh: []
    env: []
    svc: []
  policies:
    - event: PodEvicted
      action: RestartJob
  maxRetry: 3
  queue: default
  tasks:
  - name: t-{task.id}
    replicas: {node_count}
    template:
      metadata:
        labels:
          app: main
          show-log: "true"
          task-name: {task_name}
          workflows.argoproj.io/workflow: '{{{{workflow.name}}}}'
          ring-controller.atlas: ascend-910
      spec:
        restartPolicy: Never
        containers:
        - name: main
        volumes:
        - name: ascend-910-config
          configMap:
            name: rings-config-{job_name}
"""
    # Parse the YAML skeleton into a dictionary.
    manifest_dict = yaml.safe_load(manifest)

    # Mount the rings ConfigMap required by the Ascend 910 ring controller
    # (the configMap volume is declared in the skeleton above).
    task_spec['container']['volumeMounts'] += yaml.safe_load(
        """
      - name: ascend-910-config
        mountPath: /user/serverid/devindex/config
    """
    )
    # task_spec['container']['volumeMounts'] += yaml.safe_load(
    #     """
    #   - name: reset-config
    #     mountPath: /user/restore/reset/config
    #     readOnly: true
    # """
    # )
    # Expose the host IP to the container via the XDL_IP environment variable.
    task_spec['container']['env'] += yaml.safe_load(
        """
      - name: XDL_IP
        valueFrom:
          fieldRef:
            fieldPath: status.hostIP
    """
    )
    # Pod spec of the single Volcano task template in the skeleton.
    spec = manifest_dict['spec']['tasks'][0]['template']['spec']
    # Merge the task's container onto the skeleton container; keys from
    # task_spec (including `name`) take precedence over the skeleton's.
    spec['containers'] = [{**spec['containers'][0], **task_spec['container']}]
    # Carry over volumes, node selection, service account, image pull secrets
    # and scheduler settings from the original workflow.
    spec['volumes'] += task_spec['volumes']
    spec['nodeSelector'] = task_spec['nodeSelector']
    spec['serviceAccountName'] = ori_dict['spec']['serviceAccountName']
    spec['imagePullSecrets'] = ori_dict['spec']['imagePullSecrets']
    spec['schedulerName'] = ori_dict['spec']['schedulerName']
    # When node_count is greater than 1, enable host networking
    # (multi-node training communicates across hosts).
    if node_count > 1:
        spec['hostNetwork'] = True

    # spec['schedulerName'] = 'volcano'
    # Return the completed manifest dictionary.
    return manifest_dict



# 将dag 转为argo pipeline yaml
# @pysnooper.snoop(watch_explode=())
def dag_to_pipeline(pipeline, dbsession, **kwargs):
    """
    Converts a DAG definition into an Argo pipeline YAML.

    This function processes a pipeline's DAG, creates Kubernetes-related
    configurations for each task, and then compiles them into an Argo Workflow YAML.
    It supports NPU-specific configurations and multi-node setups.

    Args:
        pipeline: The pipeline object containing DAG and task information.
        dbsession: The database session object.
        **kwargs: Additional keyword arguments for pipeline configuration.

    Returns:
        str or None: The generated pipeline YAML as a string, or None if the DAG is empty.

    Raises:
        Exception: If a task does not exist or upstream tasks are invalid.
    """
    logging.info(f'[dag_to_pipeline] kwargs: {kwargs}')
    # If pipeline ID is not set, return.
    if not pipeline.id:
        return

    # Determine the username for pipeline creation.
    username = (
        g.user.username if g and g.user and g.user.username else pipeline.created_by.username
    )
    # Get user secret.
    user_secret = g.user.secret if g and g.user else ''
    # Operational context for pipeline tasks.
    op_ctx = {
        'secret': user_secret,
        'pipeline_id': pipeline.id,
    }

    # Fix DAG JSON and commit to database.
    pipeline.dag_json = pipeline.fix_dag_json(dbsession)
    dbsession.commit()
    # Load the DAG from JSON.
    dag = json.loads(pipeline.dag_json)

    # 如果dag为空，就直接退出
    # If the DAG is empty, return None.
    if not dag:
        return None

    # Initialize dictionaries for all tasks and NPU usage flag.
    all_tasks = {}
    is_use_npu = False
    # Iterate through tasks in the DAG.
    for task_name in dag:
        # 使用临时连接，避免连接中断的问题
        # Query for the task using a temporary connection.
        t = dbsession.query(Task).filter_by(name=task_name, pipeline_id=pipeline.id).first()
        # Raise an exception if the task does not exist.
        if not t:
            raise Exception('task %s not exist ' % task_name)

        # 如果是NPU（ascend910）环境，is_use_npu = True，会用volcano启动
        # Check if NPU is used for the task.
        if is_npu(t):
            is_use_npu = True
        all_tasks[task_name] = t

    # Initialize dictionary for all operations and pipeline configuration.
    all_ops = {}
    pipeline_conf = kfp.dsl.PipelineConf()

    # Renders string template variables.
    # 渲染字符串模板变量
    def template_str(src_str):
        # Create a Jinja2 environment for rendering templates.
        rtemplate = Environment(
            loader=BaseLoader,
            undefined=DebugUndefined,
            variable_start_string='{{{',
            variable_end_string='}}}',
        ).from_string(src_str)
        # Render the template with provided context.
        des_str = rtemplate.render(
            creator=pipeline.created_by.username,
            datetime=datetime,
            runner=username,
            uuid=uuid,
            pipeline_id=pipeline.id,
            pipeline_name=pipeline.name,
            cluster_name=pipeline.project.cluster['NAME'],
            **kwargs,
        )
        return des_str

    # end of template_str()

    # Process pipeline global environment variables.
    pipeline_global_env = (
        template_str(pipeline.global_env.strip()) if pipeline.global_env else ''
    )  # 优先渲染，不然里面如果有date就可能存在不一致的问题
    pipeline_global_env = [
        env.strip() for env in pipeline_global_env.split('\n') if '=' in env.strip()
    ]
    # Load global environment variables from configuration.
    global_envs = json.loads(
        template_str(json.dumps(conf.get('GLOBAL_ENV', {}), indent=4, ensure_ascii=False))
    )
    # Add pipeline-specific global environment variables.
    for env in pipeline_global_env:
        key, value = env[: env.index('=')], env[env.index('=') + 1 :]
        global_envs[key] = value

    def get_upstream(task_name):
        """
        Retrieves the upstream tasks for a given task.

        Args:
            task_name (str): The name of the task.

        Returns:
            list: A list of upstream task names.

        Raises:
            Exception: If the upstream definition is not valid.
        """
        if 'upstream' in dag[task_name] and dag[task_name]['upstream']:
            upstream_tasks = dag[task_name]['upstream']
            # Convert dictionary of upstream tasks to a list of keys.
            if isinstance(upstream_tasks, dict):
                upstream_tasks = list(upstream_tasks.keys())
            # Split string of upstream tasks by comma or semicolon.
            if isinstance(upstream_tasks, str):
                upstream_tasks = re.split(',|;', upstream_tasks)
                # upstream_tasks = upstream_tasks.split(',;')
            # Raise an exception if upstream tasks are not a list.
            if not isinstance(upstream_tasks, list):
                raise Exception('%s upstream is not valid' % task_name)
            return upstream_tasks

        return []

    # Add upstream dependencies.
    # 添加上游依赖
    def add_upstream(ops, task_name):
        """
        Configures upstream dependencies for an operation.

        Args:
            ops: The operation object.
            task_name (str): The name of the current task.

        Raises:
            Exception: If an upstream task does not exist.
        """
        upstream_tasks = get_upstream(task_name)
        if upstream_tasks is None or len(upstream_tasks) == 0:
            return

        for upstream_task in upstream_tasks:  # 可能存在已删除的upstream_task
            # If the upstream task exists in all_ops, configure the task order.
            if upstream_task in all_ops:
                ops.after(all_ops[upstream_task])  # 配置任务顺序
            else:
                # Raise an exception if an upstream task does not exist.
                raise Exception('%s upstream %s is not exist' % (task_name, upstream_task))

    # Set environment variables.
    # 设置环境变量
    def set_environment(task):
        """
        Sets environment variables for a task's container.

        Args:
            task: The task object.

        Returns:
            list: A list of V1EnvVar objects.
        """
        container_envs = []
        # Add environment variables from task's job template.
        if task.job_template.env:
            envs = re.split('\r|\n', task.job_template.env)
            envs = [env.strip() for env in envs if env.strip()]
            for env in envs:
                env_key, env_value = env.split('=')[0], env.split('=')[1]
                container_envs.append(V1EnvVar(env_key, env_value))

        # Set global environment variables.
        # 设置全局环境变量
        for global_env_key in global_envs:
            container_envs.append(V1EnvVar(global_env_key, global_envs[global_env_key]))

        # Set default environment variables for the task.
        # 设置task的默认环境变量
        container_envs.append(V1EnvVar('KFJ_TASK_ID', str(task.id)))
        container_envs.append(V1EnvVar('KFJ_TASK_NAME', str(task.name)))
        container_envs.append(V1EnvVar('KFJ_TASK_NODE_SELECTOR', str(task.get_node_selector())))
        container_envs.append(V1EnvVar('KFJ_TASK_VOLUME_MOUNT', str(task.volume_mount)))
        container_envs.append(V1EnvVar('KFJ_TASK_IMAGES', str(task.job_template.images)))
        container_envs.append(V1EnvVar('KFJ_TASK_RESOURCE_CPU', str(task.resource_cpu)))
        container_envs.append(V1EnvVar('KFJ_TASK_RESOURCE_MEMORY', str(task.resource_memory)))
        container_envs.append(
            V1EnvVar('KFJ_TASK_RESOURCE_GPU', str(task.resource_gpu.replace('+', '')))
        )
        container_envs.append(V1EnvVar('KFJ_TASK_PROJECT_NAME', str(pipeline.project.name)))
        container_envs.append(V1EnvVar('GPU_TYPE', os.environ.get('GPU_TYPE', 'NVIDIA')))
        container_envs.append(V1EnvVar('USERNAME', pipeline.created_by.username))
        container_envs.append(V1EnvVar('BASE_PATH', '/mnt/publish-data'))
        container_envs.append(
            V1EnvVar('USER_WORK_PATH', f'/mnt/publish-data/{pipeline.created_by.username}')
        )

        # Add additional environment variables from kwargs.
        if 'envs' in kwargs and kwargs['envs']:
            for env_key, env_val in kwargs['envs'].items():
                container_envs.append(V1EnvVar(env_key, env_val))
        return container_envs

    # end of set_environment()

    # Set the startup command.
    # 设置启动命令
    def set_command(task):
        """
        Determines the command to be executed for a task.

        Args:
            task: The task object.

        Returns:
            list: A list representing the command and its arguments.

        Raises:
            Exception: If a deep learning task must have an upstream but doesn't.
        """
        task_command = ''
        # If task has a command, append it.
        if task.command:
            commands = re.split('\r|\n', task.command)
            commands = [command.strip() for command in commands if command.strip()]
            if task_command:
                task_command += ' && ' + ' && '.join(commands)
            else:
                task_command += ' && '.join(commands)
        # Get job template entrypoint.
        job_template_entrypoint = (
            task.job_template.entrypoint.strip() if task.job_template.entrypoint else ''
        )
        command = None
        # Prioritize job template entrypoint, then task command.
        if job_template_entrypoint:
            command = job_template_entrypoint
        if task_command:
            command = task_command
        # Special handling for deep learning projects.
        if task.job_template.project.name == '深度学习':
            data_path = ''
            expand = json.loads(task.job_template.expand) if task.job_template.expand else {}
            must_have_upstream = expand.get('must_have_upstream')
            upstreams = get_upstream(task.name)
            # If upstream tasks exist, get data path from the first upstream's output.
            if len(upstreams) > 0:
                pre_op = all_ops[upstreams[0]]
                data_path = pre_op.outputs.get('output', '')
            # If upstream is required but not found, raise an exception.
            elif must_have_upstream:
                raise Exception(f'[{task.name}] {task.job_template.name} must have one upstream')
            # Generate a unique job name.
            job_name = f'taichu-{username}-{round(time.time() * 1000)}'
            # Define output path.
            output_path = f'/mnt/publish-data/{username}/jobs/{job_name}/'
            op_ctx['model_path'] = output_path
            # Append data and output paths to the command.
            command += f' --data_path "{data_path}" --output_path {output_path}'
            return ['bash', '-c', command]
        # Split command string into a list of arguments.
        command = command.split(' ') if command else []
        command = [com for com in command if com]
        return command

    # end of set_command()

    # Set startup command parameters.
    # 设置启动命令参数
    def set_command_args(task):
        """
        Sets command arguments for a task based on its arguments definition.

        Args:
            task: The task object.

        Returns:
            list: A list of command arguments.
        """
        ops_args = []

        task_args = json.loads(task.args)
        for task_attr_name in task_args:
            # Add parameter name.
            # 添加参数名
            if isinstance(task_args[task_attr_name], bool):
                if task_args[task_attr_name]:
                    ops_args.append('%s' % str(task_attr_name))
            # Add parameter value for dict or list types.
            # 添加参数值
            elif isinstance(task_args[task_attr_name], dict) or isinstance(
                task_args[task_attr_name], list
            ):
                ops_args.append('%s' % str(task_attr_name))
                ops_args.append('%s' % json.dumps(task_args[task_attr_name], ensure_ascii=False))
            # If parameter value is empty, do not add.
            elif not task_args[task_attr_name]:  # 如果参数值为空，则都不添加
                pass
            else:
                ops_args.append('%s' % str(task_attr_name))
                ops_args.append(
                    '%s' % str(task_args[task_attr_name])
                )  # 这里应该对不同类型的参数名称做不同的参数处理，比如bool型，只有参数，没有值
        return ops_args

    # end of set_command_args()

    # Add user-defined mounts.
    # 添加用户自定义挂载
    def set_volume(ops, task):
        """
        Configures volume mounts for a task's container.

        Args:
            ops: The operation object.
            task: The task object.
        """
        # Set default volume mounts.
        task.volume_mount = (
            'kubeflow-user-workspace(pvc):/mnt,kubeflow-archives(pvc):/archives,'
            'kubeflow-publish-data-pvc(pvc):/mnt/publish-data,(memory)10G:/dev/shm'
        )
        task.volume_mount = task.volume_mount.strip() if task.volume_mount else ''

        # # 挂载时区
        # ops = ops.add_volume(client.V1Volume(
        #     name='tz-config',
        #     host_path=client.V1HostPathVolumeSource(path='/usr/share/zoneinfo/Asia/Shanghai')
        # ))
        # ops = ops.add_volume_mount(
        #             client.V1VolumeMount(mount_path='/etc/localtime', name='tz-config'))

        # Mount graphics card driver.
        # 挂显卡驱动
        dr = DeviceResource.from_str(task.resource_gpu)
        if dr:
            # Add volumes from device resources.
            for k, v in dr.volumes.items():
                ops = ops.add_volume(
                    client.V1Volume(
                        name=k, host_path=client.V1HostPathVolumeSource(path=v['hostPath']['path'])
                    )
                )
            # Add volume mounts from device resources.
            for k, v in dr.volume_mounts.items():
                mount_path = v['mountPath']
                sub_path = v.get('subPath', None)
                ops = ops.add_volume_mount(
                    client.V1VolumeMount(mount_path=mount_path, name=k, sub_path=sub_path)
                )

        # Process user-defined volume mounts.
        if task.volume_mount:
            try:
                k8s_volumes, k8s_volume_mounts = py_k8s.K8s.get_volume_mounts(
                    task.volume_mount, pipeline.created_by.username
                )
                # Add Kubernetes volumes.
                for volume in k8s_volumes:
                    claim_name = volume.get('persistentVolumeClaim', {}).get('claimName', None)
                    hostpath = volume.get('hostPath', {}).get('path', None)
                    configmap_name = volume.get('configMap', {}).get('name', None)
                    memory_size = (
                        volume.get('emptyDir', {}).get('sizeLimit', '')
                        if volume.get('emptyDir', {}).get('medium', '') == 'Memory'
                        else None
                    )
                    ops = ops.add_volume(
                        client.V1Volume(
                            name=volume['name'],
                            persistent_volume_claim=(
                                client.V1PersistentVolumeClaimVolumeSource(claim_name=claim_name)
                                if claim_name
                                else None
                            ),
                            host_path=(
                                client.V1HostPathVolumeSource(path=hostpath) if hostpath else None
                            ),
                            config_map=(
                                client.V1ConfigMapVolumeSource(name=configmap_name)
                                if configmap_name
                                else None
                            ),
                            empty_dir=(
                                client.V1EmptyDirVolumeSource(
                                    medium='Memory', size_limit=memory_size
                                )
                                if memory_size
                                else None
                            ),
                        )
                    )

                # 对于admin，将整个publish-data挂载到/mnt下；其他用户只挂载属于该用户的目录
                # For admin, mount the entire publish-data to /mnt; other users only mount their own directories.
                for mount in k8s_volume_mounts:
                    mountPath = mount.get('mountPath')
                    subPath = mount.get('subPath')
                    if username == 'admin' and 'publish-data' in mountPath:
                        continue
                    ops = ops.add_volume_mount(
                        client.V1VolumeMount(
                            mount_path=mountPath, name=mount.get('name', ''), sub_path=subPath
                        )
                    )

                # Special handling for 'kubeflow-publish-data-pvc'.
                if 'kubeflow-publish-data-pvc' in task.volume_mount:
                    if username == 'admin':
                        ops = ops.add_volume_mount(
                            client.V1VolumeMount(
                                mount_path='/mnt/publish-data',
                                name='kubeflow-publish-data-pvc',
                                sub_path=None,
                            )
                        )
                    else:
                        ops = ops.add_volume_mount(
                            client.V1VolumeMount(
                                mount_path='/mnt/publish-data/train_code',
                                name='kubeflow-publish-data-pvc',
                                sub_path='train_code',
                            )
                        )
                        ops = ops.add_volume_mount(
                            client.V1VolumeMount(
                                mount_path='/mnt/publish-data/train_data',
                                name='kubeflow-publish-data-pvc',
                                sub_path='train_data',
                            )
                        )
            except Exception as e:
                logging.exception(e)

    # end of set_volume()

    # Add pod labels.
    def set_labels(ops, task):
        """
        Attaches identifying pod labels (pipeline, task, run, and user
        metadata) to an operation.

        Args:
            ops: The operation object.
            task: The task object.
        """
        labels = {
            'pipeline-id': str(pipeline.id),
            'pipeline-name': str(pipeline.name),
            'app': str(pipeline.name),
            'task-name': str(task.name),
            'run-id': global_envs.get('KFJ_RUN_ID', ''),
            'task-id': str(task.id),
            'upload-rtx': username,
            'run-rtx': username,
            'pipeline-rtx': pipeline.created_by.username,
            'show-log': 'true',
        }
        # Optional labels supplied by the caller.
        for optional_key in ('task_type', 'kind'):
            if optional_key in kwargs:
                labels[optional_key] = kwargs[optional_key]
        for label_name, label_value in labels.items():
            ops.add_pod_label(label_name, label_value)

    # end of set_labels()

    # Add affinity control.
    def set_affinity(ops):
        """
        Adds a soft (preferred, weight 80) pod anti-affinity so pods of the
        same pipeline spread across nodes where possible.

        Args:
            ops: The operation object.
        """
        same_pipeline_selector = client.V1LabelSelector(
            match_labels={'pipeline-id': str(pipeline.id)}
        )
        affinity_term = client.V1PodAffinityTerm(
            label_selector=same_pipeline_selector,
            topology_key='kubernetes.io/hostname',
        )
        weighted_term = client.V1WeightedPodAffinityTerm(
            weight=80,
            pod_affinity_term=affinity_term,
        )
        anti_affinity = client.V1PodAntiAffinity(
            preferred_during_scheduling_ignored_during_execution=[weighted_term]
        )
        ops.add_affinity(client.V1Affinity(pod_anti_affinity=anti_affinity))

    # end of set_affinity()

    # Set resources.
    def set_resource(ops, task):
        """
        Configures CPU, memory, and GPU resources for a task's container.

        Values set via the job template's environment variables
        (TASK_RESOURCE_CPU / TASK_RESOURCE_GPU / TASK_RESOURCE_MEMORY) take
        precedence over the task's own settings. A CPU or memory value of the
        form 'REQUEST~LIMIT' sets different request and limit amounts.

        Args:
            ops: The operation object.
            task: The task object.
        """
        # Retrieve CPU, GPU, and memory resources, prioritizing job template environment variables.
        resource_cpu = (
            task.job_template.get_env('TASK_RESOURCE_CPU')
            if task.job_template.get_env('TASK_RESOURCE_CPU')
            else task.resource_cpu
        )
        resource_gpu = (
            task.job_template.get_env('TASK_RESOURCE_GPU')
            if task.job_template.get_env('TASK_RESOURCE_GPU')
            else task.resource_gpu
        )
        resource_memory = (
            task.job_template.get_env('TASK_RESOURCE_MEMORY')
            if task.job_template.get_env('TASK_RESOURCE_MEMORY')
            else task.resource_memory
        )

        # Set memory request/limit; 'REQUEST~LIMIT' splits into distinct values.
        if resource_memory:
            if '~' not in resource_memory:
                ops.set_memory_request(resource_memory)
                ops.set_memory_limit(resource_memory)
            else:
                ops.set_memory_request(resource_memory.split('~')[0])
                ops.set_memory_limit(resource_memory.split('~')[1])

        # Set CPU request/limit, same range convention as memory.
        if resource_cpu:
            if '~' not in resource_cpu:
                ops.set_cpu_request(resource_cpu)
                ops.set_cpu_limit(resource_cpu)
            else:
                ops.set_cpu_request(resource_cpu.split('~')[0])
                ops.set_cpu_limit(resource_cpu.split('~')[1])

        if resource_gpu:
            # BUG FIX: parse the resolved resource_gpu (which honors the
            # TASK_RESOURCE_GPU job-template override) instead of the raw
            # task.resource_gpu, matching how the CPU and memory overrides
            # are applied above.
            dr = DeviceResource.from_str(resource_gpu)
            if dr:
                for k, v in dr.resources_requests.items():
                    ops.add_resource_request(k, v)
                for k, v in dr.resources_limits.items():
                    ops.add_resource_limit(k, v)

    # end of set_resource()

    # Add node selector.
    def set_node_selector(ops, task):
        """
        Applies node-selector constraints to an operation.

        Constraints come from two places: the device resource parsed out of
        task.resource_gpu, and the task's node_selector string, whose
        'key=value' entries may be separated by commas, semicolons, newlines
        or tabs.

        Args:
            ops: The operation object.
            task: The task object.
        """
        device = DeviceResource.from_str(task.resource_gpu)
        if device:
            for label_key, label_value in device.node_selector.items():
                ops.add_node_selector_constraint(label_key, label_value)
        for entry in re.split(',|;|\n|\t', task.node_selector):
            entry = entry.replace(' ', '')
            if '=' not in entry:
                continue
            parts = entry.strip().split('=')
            ops.add_node_selector_constraint(parts[0].strip(), parts[1].strip())

    def set_tolerations(ops, task):
        """
        Adds a NoSchedule toleration when the task targets nodes labeled
        with NODE_LABEL_KEY.

        Args:
            ops: The operation object.
            task: The task object.
        """
        if NODE_LABEL_KEY not in task.node_selector:
            return
        toleration = client.V1Toleration(
            key=NODE_LABEL_KEY, operator='Exists', effect='NoSchedule'
        )
        ops.add_toleration(toleration)

    # Set workflow labels.
    def set_pipeline_label():
        """
        Stamps user, save-time, and run-identity labels onto the workflow
        configuration (pipeline_conf).
        """
        labels = pipeline_conf.labels
        labels['upload-rtx'] = username
        labels['run-rtx'] = username
        labels['pipeline-rtx'] = pipeline.created_by.username
        labels['save-time'] = datetime.datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
        labels['pipeline-id'] = str(pipeline.id)
        # Bind our own run-time id here; kfp's run-id only exists after upload.
        labels['run-id'] = global_envs.get('KFJ_RUN_ID', '')
        for optional_key in ('task_type', 'kind'):
            if optional_key in kwargs:
                labels[optional_key] = kwargs[optional_key]

    # end of set_pipeline_label()

    # @pysnooper.snoop()
    def get_ops(task_name, k8s_resource=None):
        """
        Generates an operation (Op) for a given task.

        This function creates either a ResourceOp (for NPU tasks, when
        k8s_resource is given) or a ContainerOp (for other tasks) and
        configures its environment, volumes, labels, affinity, and resources.
        ContainerOps are stored into all_ops[task_name]; ResourceOps return
        early and are not stored.

        Args:
            task_name (str): The name of the task.
            k8s_resource (dict, optional): Kubernetes resource definition for NPU tasks.
                                           Defaults to None.

        Raises:
            Exception: If a function-type task must have an upstream but doesn't.
        """
        task = all_tasks[task_name]

        # If k8s_resource is provided, create a ResourceOp.
        # For NPU, reuse the yaml produced by the previous compile pass as the resource body.
        if k8s_resource:
            ops = kfp.dsl.ResourceOp(
                k8s_resource=k8s_resource,
                name=task.name,
                success_condition='status.state.phase == Completed',
                failure_condition='status.state.phase == Terminated,status.state.phase == Failed',
            )
            # Add upstream dependencies.
            add_upstream(ops, task_name)
            return

        # Set creation parameters for the ops pod.
        container_kwargs = {}

        # Enable privileged mode if specified in the job template.
        if task.job_template.privileged:
            container_kwargs['security_context'] = V1SecurityContext(
                privileged=task.job_template.privileged
            )

        # Set environment variables.
        container_kwargs['env'] = set_environment(task)

        # Set file outputs.
        # NOTE(review): task.outputs is json-parsed twice here; the first parse
        # only tests truthiness of the parsed value.
        file_outputs = (
            json.loads(task.outputs) if task.outputs and json.loads(task.outputs) else None
        )
        # Handle different job template types.
        if task.job_template.name == conf.get('CUSTOMIZE_JOB'):
            # Set the working directory from the task args.
            container_kwargs['working_dir'] = json.loads(task.args).get('workdir')
            command = json.loads(task.args).get('command')
            # For NPU, to allow images on modelarts to run directly, add the following environment variables.
            # Migrate MS_COMPILER_CACHE_PATH: MindSpore errors when its cache directory sits under a mounted path.
            # Images on modelarts run as ma-user; add the ma-user package path to PYTHONPATH.
            # NOTE(review): unlike is_npu(), this check does not lowercase resource_gpu — confirm intended.
            if 'ascend910' in task.resource_gpu:
                command = (
                    'export MS_COMPILER_CACHE_PATH=/home/ma-user/ && export'
                    ' PYTHONPATH=$PYTHONPATH:/home/ma-user/.local/lib/python3.9/site-packages &&'
                    f' {command}'
                )

            ops = kfp.dsl.ContainerOp(
                name=task.name,
                image=json.loads(task.args).get('images'),
                command=['bash', '-c', command],
                container_kwargs=container_kwargs,
                file_outputs=file_outputs,
            )
        elif task.job_template.op_type == 'function':
            expand = json.loads(task.job_template.expand) if task.job_template.expand else {}
            packages_to_install = expand.get('packages_to_install')
            must_have_upstream = expand.get('must_have_upstream')
            previous_outputs = '{}'
            func_name = task_name.replace('-', '_')
            upstreams = get_upstream(task_name)
            if len(upstreams) > 0:
                # Feed the first upstream's 'output' value into this component.
                pre_op = all_ops[upstreams[0]]
                previous_outputs = pre_op.outputs.get('output', '{}')
            elif must_have_upstream:
                raise Exception(f'[{task_name}] {task.job_template.name} must have one upstream')
            # Rename the stored function so each task imports a unique module/function.
            func_str = re.sub(
                r'^def\s+(\w+)\s*\(', f'def {func_name}(', task.job_template.function_str
            )
            # Write the function to a temporary module, import it, wrap it as a
            # kfp component, then remove the file once the component is built.
            with open(f'{func_name}.py', 'w') as f:
                f.write(func_str)
            module = importlib.import_module(func_name)
            callable_func = getattr(module, func_name)
            func_op = create_component_from_func(
                callable_func,
                base_image=get_image(task.job_template.images),
                packages_to_install=packages_to_install,
            )
            task_args = json.loads(task.args)
            params = {'context': op_ctx, 'task_args': task_args, 'path': task.input_path}
            ops = func_op(previous_outputs, params)
            os.remove(f'{func_name}.py')
        else:
            # Set the working directory; the task-level value overrides the template's.
            if task.job_template.workdir and task.job_template.workdir.strip():
                container_kwargs['working_dir'] = task.job_template.workdir.strip()
            if task.working_dir and task.working_dir.strip():
                container_kwargs['working_dir'] = task.working_dir.strip()

            # Set the startup command and arguments.
            command = set_command(task)
            ops_args = set_command_args(task)

            ops = kfp.dsl.ContainerOp(
                name=task.name,
                image=get_image(task.job_template.images),
                arguments=ops_args,
                command=command if command else None,
                container_kwargs=container_kwargs,
                file_outputs=file_outputs,
            )

        # Add user-defined volume mounts.
        set_volume(ops, task)

        # Add upstream dependencies.
        add_upstream(ops, task_name)

        # Add node selector.
        set_node_selector(ops, task)
        # Add tolerations.
        set_tolerations(ops, task)

        # # Set the node selector according to the user's identity.
        # if g and g.user and g.user.org:
        #     ops.add_node_selector_constraint('org-'+g.user.org,'true')
        # elif pipeline.created_by.org:  # Scheduled tasks have no flask `g` storage.
        #     ops.add_node_selector_constraint('org-'+pipeline.created_by.org,'true')

        # Add pod labels.
        set_labels(ops, task)

        # Add affinity control.
        set_affinity(ops)

        # Set the retry count.
        if task.retry:
            ops.set_retry(int(task.retry))

        # Set the timeout.
        if task.timeout:
            ops.set_timeout(int(task.timeout))

        # Configure resources.
        set_resource(ops, task)

        # Skipped tasks become no-op echo containers with no resource demands.
        if task.skip:
            ops.command = ['echo', 'skip']
            ops.arguments = None
            ops._container.resources = None

        all_ops[task_name] = ops

    # end of get_ops()

    # Pipeline run-related configuration.
    hubsecret_list = conf.get('HUBSECRET', [])
    for name in hubsecret_list:
        pipeline_conf.image_pull_secrets.append(client.V1LocalObjectReference(name=name))
    for task_name in all_tasks:
        # Configure image pull secrets. Originally these lived on the container;
        # the workflow holds them at the top level.
        task_temp = all_tasks[task_name]
        if task_temp.job_template.images.repository.hubsecret:
            hubsecret = task_temp.job_template.images.repository.hubsecret
            if hubsecret not in hubsecret_list:
                hubsecret_list.append(hubsecret)
                pipeline_conf.image_pull_secrets.append(
                    client.V1LocalObjectReference(name=hubsecret)
                )

    # Configure the default image pull policy.
    pipeline_conf.image_pull_policy = conf.get('IMAGE_PULL_POLICY', 'Always')

    # Set the scheduler.
    pipeline_conf.scheduler_name = get_scheduler_name()

    # Set parallelism.
    if pipeline.parallelism:
        pipeline_conf.parallelism = int(pipeline.parallelism)

    # Set workflow labels.
    set_pipeline_label()

    # This is the real pipeline name; the name given at upload time is the version's name.
    @kfp.dsl.pipeline(name=pipeline.name, description=pipeline.describe)
    def my_pipeline():
        # Build an op for every task in the DAG.
        for one_task in dag:
            get_ops(one_task)

    if is_use_npu:
        # First compile the yaml once using the original flow.
        origin_yaml = kfp.compiler.Compiler().create_workflow(
            my_pipeline,
            pipeline_name='dummy',
            pipeline_description='dummy',
            pipeline_conf=pipeline_conf,
        )

        # Reset the op registry before the second compile pass.
        all_ops = {}

        # For the parts using resource_op, use the configuration above as k8s_resource.
        @kfp.dsl.pipeline(name=pipeline.name, description=pipeline.describe)
        def my_pipeline_w_resource_op():
            for one_task in dag:
                cur_task = all_tasks[one_task]
                k8s_resource = None
                if is_npu(cur_task):
                    # Carve this NPU task's manifest out of the first-pass yaml.
                    k8s_resource = npu_manifest(
                        cur_task, origin_yaml, pipeline.name, pipeline.namespace
                    )
                    upload_config_map(
                        pipeline.project.cluster,
                        k8s_resource.get('metadata').get('name'),
                        pipeline.namespace,
                    )

                get_ops(one_task, k8s_resource)

        pipeline_file = kfp.compiler.Compiler().create_workflow(
            my_pipeline_w_resource_op, f'{pipeline.name}.yaml', pipeline_conf=pipeline_conf
        )
    else:
        pipeline_file = kfp.compiler.Compiler().create_workflow(
            my_pipeline, f'{pipeline.name}.yaml', pipeline_conf=pipeline_conf
        )
    # Serialize the workflow and render template variables before returning.
    pipeline_file = yaml.dump(pipeline_file)
    pipeline_file = template_str(pipeline_file)
    return pipeline_file


def get_image(images):
    """
    Build the full image reference for an image record.

    If the image name already carries a tag (contains ':'), it is returned
    as-is; otherwise the record's image_version is appended as the tag.

    Args:
        images: Object with `name` and `image_version` attributes.

    Returns:
        str: The image reference, e.g. 'repo/app:v1'.
    """
    reference = images.name
    if ':' not in reference:
        reference = f'{reference}:{images.image_version}'
    return reference


def upload_config_map(cluster, name, namespace, region='default'):
    """
    Creates the hccl rings ConfigMap needed by the Ascend-910 ring controller.

    Args:
        cluster: Cluster info (currently unused; kept for interface stability).
        name: Base name; the ConfigMap is created as 'rings-config-<name>'.
        namespace: Target Kubernetes namespace.
        region (str): Region used to pick the k8s client. Defaults to 'default'.
    """
    # k8s_client = K8s(cluster.get('KUBECONFIG', ''))
    k8s_client = get_k8s_client_by_region(region=region)

    # Create the rings-config with an 'initializing' hccl payload.
    hccl_payload = {'hccl.json': '{"status":"initializing"}'}
    k8s_client.create_configmap(
        namespace=f'{namespace}',
        name=f'rings-config-{name}',
        data=hccl_payload,
        labels={'ring-controller.atlas': 'ascend-910'},
    )
    # # create reset-config
    # config_data = {'hccl.json': '{"status":"initializing"}', 'checkCode': ''}
    # k8s_client.create_configmap(
    #     namespace=f'{namespace}',
    #     name=f'reset-config-{name}',
    #     data=config_data,
    #     labels={'reset': 'true'},
    # )


# @pysnooper.snoop(watch_explode=())
def upload_pipeline(pipeline_file, pipeline_name, kfp_host, pipeline_argo_id):
    """
    Uploads a compiled pipeline (or a new version of an existing one) to kfp.

    Resolution order: an explicitly supplied pipeline_argo_id, then a lookup
    by pipeline_name, and finally a brand-new pipeline upload.

    Args:
        pipeline_file (str): Compiled workflow yaml content.
        pipeline_name (str): Pipeline name; also used for the local yaml filename.
        kfp_host (str): kfp API host.
        pipeline_argo_id (str): Known kfp pipeline id, if any.

    Returns:
        tuple: (pipeline_id, version_id), or (None, None) when there is
        nothing to upload.
    """
    if not pipeline_file:
        return None, None
    pipeline_file_name = f'{pipeline_name}.yaml'
    # Use a context manager so the file handle is closed even if the write fails.
    with open(pipeline_file_name, mode='wb') as file:
        file.write(bytes(pipeline_file, encoding='utf-8'))
    kfp_client = kfp.Client(kfp_host)

    pipeline_argo = None
    if pipeline_argo_id:
        try:
            pipeline_argo = kfp_client.get_pipeline(pipeline_argo_id)
        except Exception as e:
            logging.error(e)

    pipeline_version_name = (
        f"{pipeline_name}_version_at_{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}"
    )

    def _upload_version(existing_id):
        # Upload a new version under an existing pipeline id.
        pipeline_argo_version = kfp_client.upload_pipeline_version(
            pipeline_package_path=pipeline_file_name,
            pipeline_version_name=pipeline_version_name,
            pipeline_id=existing_id,
        )
        time.sleep(1)  # Creation is async; give k8s time to react.
        return existing_id, pipeline_argo_version.id

    if pipeline_argo:
        return _upload_version(pipeline_argo.id)

    exist_pipeline_argo_id = None
    try:
        exist_pipeline_argo_id = kfp_client.get_pipeline_id(pipeline_name)
    except Exception as e:
        logging.error(e)

    if exist_pipeline_argo_id:
        return _upload_version(exist_pipeline_argo_id)

    # No existing pipeline: upload it fresh and return the default version.
    pipeline_argo = kfp_client.upload_pipeline(pipeline_file_name, pipeline_name=pipeline_name)
    time.sleep(1)
    return pipeline_argo.id, pipeline_argo.default_version.id


def run_pipeline(cluster, workflow_json, namespace=None, region='default'):
    """
    (Re)creates an Argo Workflow CRD from a workflow manifest and, for
    non-default regions, propagates the job to the remote cluster.

    Args:
        cluster: Cluster info (currently unused; kept for interface stability).
        workflow_json (dict): The workflow manifest.
        namespace (str, optional): Target namespace; falls back to the
            manifest's namespace, then to conf['PIPELINE_NAMESPACE'].
        region (str): Region used to pick the k8s client.

    Returns:
        str: The workflow CRD name.
    """
    metadata = workflow_json.get('metadata', {})
    crd_name = metadata.get('name', '')

    k8s_client = get_k8s_client_by_region(region=region)

    if namespace is None:
        namespace = metadata.get('namespace', conf.get('PIPELINE_NAMESPACE'))

    crd_info = conf.get('CRD_INFO', {}).get('workflow', {})
    crd_kwargs = dict(
        group=crd_info['group'],
        version=crd_info['version'],
        plural=crd_info['plural'],
        namespace=namespace,
    )

    # Delete any pre-existing workflow of the same name before re-creating it.
    if k8s_client.get_one_crd(name=crd_name, **crd_kwargs):
        k8s_client.delete_crd(name=crd_name, **crd_kwargs)
        time.sleep(1)

    k8s_client.create_crd(body=workflow_json, **crd_kwargs)

    # Remote-region jobs must be distributed after creation.
    if region != 'default':
        # Poll briefly until the freshly created CRD is visible, then propagate.
        for attempt in range(10):
            found = k8s_client.get_one_crd(name=crd_name, **crd_kwargs)
            if found:
                log.info(f'[run_pipeline][{attempt}] find {crd_name} before propagation')
                time.sleep(0.3)
                break
            log.info(f'[run_pipeline][{attempt}] {crd_name} not found before propagation')
            time.sleep(0.5)

        propagation_job(
            region=region,
            k8s_client=k8s_client,
            name=crd_name,
            suffix=region,
            namespace=namespace,
            api_version='argoproj.io/v1alpha1',
            kind='Workflow',
        )

    return crd_name
