# Copyright 2018-2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# 导入datetime模块，用于处理日期和时间
# Import the datetime module for handling dates and times
import datetime
# 导入json模块，用于处理JSON数据
# Import the json module for handling JSON data
import json
# 从collections模块导入defaultdict和OrderedDict
# Import defaultdict and OrderedDict from the collections module
from collections import defaultdict, OrderedDict
# 从deprecated模块导入deprecated装饰器，用于标记不推荐使用的方法
# Import the deprecated decorator from the deprecated module to mark methods as deprecated
from deprecated import deprecated
# 导入inspect模块，用于获取对象信息
# Import the inspect module for getting object information
import inspect
# 导入re模块，用于正则表达式操作
# Import the re module for regular expression operations
import re
# 导入tarfile模块，用于处理tar归档文件
# Import the tarfile module for handling tar archive files
import tarfile
# 导入uuid模块，用于生成唯一标识符
# Import the uuid module for generating unique identifiers
import uuid
# 导入warnings模块，用于发出警告
# Import the warnings module for issuing warnings
import warnings
# 导入zipfile模块，用于处理zip归档文件
# Import the zipfile module for handling zip archive files
import zipfile
# 从typing模块导入类型提示
# Import type hints from the typing module
from typing import Callable, Set, List, Text, Dict, Tuple, Any, Union, Optional

# 导入kfp模块
# Import the kfp module
import kfp
# 从kfp.dsl导入_for_loop
# Import _for_loop from kfp.dsl
from kfp.dsl import _for_loop
# 从kfp.compiler导入_data_passing_rewriter和v2_compat
# Import _data_passing_rewriter and v2_compat from kfp.compiler
from kfp.compiler import _data_passing_rewriter, v2_compat

# 导入dsl模块
# Import the dsl module
from .. import dsl
# 从. _k8s_helper导入convert_k8s_obj_to_json和sanitize_k8s_name
# Import convert_k8s_obj_to_json and sanitize_k8s_name from ._k8s_helper
from ._k8s_helper import convert_k8s_obj_to_json, sanitize_k8s_name
# 从. _op_to_template导入_op_to_template和_process_obj
# Import _op_to_template and _process_obj from ._op_to_template
from ._op_to_template import _op_to_template, _process_obj
# 从. _default_transformers导入add_pod_env和add_pod_labels
# Import add_pod_env and add_pod_labels from ._default_transformers
from ._default_transformers import add_pod_env, add_pod_labels

# 从..components.structures导入InputSpec
# Import InputSpec from ..components.structures
from ..components.structures import InputSpec
# 从..components._yaml_utils导入dump_yaml
# Import dump_yaml from ..components._yaml_utils
from ..components._yaml_utils import dump_yaml
# 从..dsl._metadata导入_extract_pipeline_metadata
# Import _extract_pipeline_metadata from ..dsl._metadata
from ..dsl._metadata import _extract_pipeline_metadata
# 从..dsl._ops_group导入OpsGroup
# Import OpsGroup from ..dsl._ops_group
from ..dsl._ops_group import OpsGroup
# 从..dsl._pipeline_param导入extract_pipelineparams_from_any和PipelineParam
# Import extract_pipelineparams_from_any and PipelineParam from ..dsl._pipeline_param
from ..dsl._pipeline_param import extract_pipelineparams_from_any, PipelineParam

# Label key for the KFP SDK version.
_SDK_VERSION_LABEL = 'pipelines.kubeflow.org/kfp_sdk_version'
# Label key for the pipeline SDK type.
_SDK_ENV_LABEL = 'pipelines.kubeflow.org/pipeline-sdk-type'
# Default SDK environment value.
_SDK_ENV_DEFAULT = 'kfp'
class Compiler(object):
  """DSL编译器，将pipeline函数编译为工作流yaml。

  示例:
    如何使用编译器构造工作流yaml:: 

      @dsl.pipeline(
        name='name',
        description='description'
      )
      def my_pipeline(a: int = 1, b: str = "default value"):
        ...

      Compiler().compile(my_pipeline, 'path/to/workflow.yaml')
  """
  """DSL Compiler that compiles pipeline functions into workflow yaml.

  Example:
    How to use the compiler to construct workflow yaml:: 

      @dsl.pipeline(
        name='name',
        description='description'
      )
      def my_pipeline(a: int = 1, b: str = "default value"):
        ...

      Compiler().compile(my_pipeline, 'path/to/workflow.yaml')
  """

  def __init__(
      self,
      mode: dsl.PipelineExecutionMode = dsl.PipelineExecutionMode.V1_LEGACY,
      launcher_image: Optional[str] = None):
    """Creates a KFP compiler for compiling pipeline functions for execution.

    Args:
      mode: The pipeline execution mode to use.
      launcher_image: Configurable image for KFP launcher to use. Only applies
        when `mode == dsl.PipelineExecutionMode.V2_COMPATIBLE`. Should only be
        needed for tests or custom deployments right now.

    Raises:
      ValueError: If `mode` is `V2_ENGINE`, which is not supported yet.
    """
    # The two non-legacy modes are mutually exclusive, so an elif suffices.
    if mode == dsl.PipelineExecutionMode.V2_ENGINE:
      raise ValueError('V2_ENGINE execution mode is not supported yet.')
    elif mode == dsl.PipelineExecutionMode.V2_COMPATIBLE:
      warnings.warn('V2_COMPATIBLE execution mode is at Beta quality.'
                    ' Some pipeline features may not work as expected.')

    self._mode = mode
    self._launcher_image = launcher_image
    # Populated during compilation when running in V2-compatible mode.
    self._pipeline_name_param: Optional[dsl.PipelineParam] = None
    self._pipeline_root_param: Optional[dsl.PipelineParam] = None

  def _get_groups_for_ops(self, root_group):
    """Helper function to get belonging groups for each op.

    Each pipeline has a root group. Each group holds a list of operators
    (leaves) and a list of child groups. This walks the tree and records,
    for every operator, its full chain of ancestor groups.

    Returns:
      A dict keyed by operator name. Each value is the list of ancestor group
      names ordered with the farthest group first and the operator itself last.
    """
    ops_to_groups = {}

    def _walk(group, ancestry):
      path = ancestry + [group.name]
      for child in group.groups:
        # Record recursive opsgroups (so the i/o dependency can be propagated
        # to the ancestor opsgroups) but do not descend into them.
        if child.recursive_ref:
          ops_to_groups[child.name] = path + [child.name]
        else:
          _walk(child, path)
      for op in group.ops:
        ops_to_groups[op.name] = path + [op.name]

    _walk(root_group, [])
    return ops_to_groups

  #TODO: combine with the _get_groups_for_ops
  def _get_groups_for_opsgroups(self, root_group):
    """Helper function to get belonging groups for each opsgroup.

    Each pipeline has a root group. Each group holds a list of operators
    (leaves) and a list of child groups. This walks the tree and records,
    for every opsgroup, its full chain of ancestor groups.

    Returns:
      A dict keyed by opsgroup name. Each value is the list of ancestor group
      names ordered with the farthest group first and the opsgroup itself last.
    """
    opsgroups_to_groups = {}

    def _walk(group, ancestry):
      path = ancestry + [group.name]
      for child in group.groups:
        # Recursive opsgroups get no entry here and are not descended into.
        if child.recursive_ref:
          continue
        opsgroups_to_groups[child.name] = path + [child.name]
        _walk(child, path)

    _walk(root_group, [])
    return opsgroups_to_groups

  def _get_groups(self, root_group):
    """Helper function to get all groups (not including ops) in a pipeline."""

    def _collect(group):
      found = {group.name: group}
      for child in group.groups:
        # Skip recursive opsgroups: no templates need to be generated for them.
        if child.recursive_ref:
          continue
        found.update(_collect(child))
      return found

    return _collect(root_group)

  def _get_uncommon_ancestors(self, op_groups, opsgroup_groups, op1, op2):
    """Helper function to get unique ancestors between two ops.

    For example, if op1's ancestor groups are [root, G1, G2, G3, op1] and
    op2's ancestor groups are [root, G1, G4, op2], this returns the tuple
    ([G2, G3, op1], [G4, op2]).
    """
    def _ancestry_of(op):
      # The entity can be either an op or an opsgroup; try both tables.
      if op.name in op_groups:
        return op_groups[op.name]
      if op.name in opsgroup_groups:
        return opsgroup_groups[op.name]
      raise ValueError(op.name + ' does not exist.')

    op1_groups = _ancestry_of(op1)
    op2_groups = _ancestry_of(op2)

    # Number of positions at which both ancestry lists agree; since group
    # names are unique this is the length of the shared prefix.
    shared_len = sum(1 for a, b in zip(op1_groups, op2_groups) if a == b)
    return (op1_groups[shared_len:], op2_groups[shared_len:])

  def _get_condition_params_for_ops(self, root_group):
    """Get parameters referenced in conditions of ops.

    Returns:
      A defaultdict mapping op/opsgroup name to the set of PipelineParams
      appearing in the conditions of its enclosing condition groups.
    """
    conditions = defaultdict(set)

    def _visit(group, inherited_params):
      active_params = inherited_params
      if group.type == 'condition':
        # Extend a copy so sibling branches do not see this group's operands.
        active_params = list(inherited_params)
        for operand in (group.condition.operand1, group.condition.operand2):
          if isinstance(operand, dsl.PipelineParam):
            active_params.append(operand)
      for op in group.ops:
        conditions[op.name].update(active_params)
      for subgroup in group.groups:
        if subgroup.recursive_ref:
          # A recursive opsgroup inherits the condition pipelineparams
          # directly, similar to the ops.
          conditions[subgroup.name].update(active_params)
        else:
          _visit(subgroup, active_params)

    _visit(root_group, [])
    return conditions

  def _get_next_group_or_op(cls, to_visit: List, already_visited: Set):
    """Get next group or op to visit.

    Pops items from the front of `to_visit` until one is found that has not
    been visited yet; that item is marked as visited and returned.

    Args:
      to_visit: Queue of groups/ops still to process; consumed in place.
      already_visited: Set of items already returned; updated in place.

    Returns:
      The next unvisited group or op, or None when the queue is exhausted.
    """
    # Guard every pop: the queue may contain only already-visited duplicates
    # (callers re-extend it with subgroup lists), in which case the original
    # unguarded pop(0) would raise IndexError instead of signalling "done".
    while to_visit:
      candidate = to_visit.pop(0)
      if candidate not in already_visited:
        already_visited.add(candidate)
        return candidate
    return None

  def _get_for_loop_ops(self, new_root) -> Dict[Text, dsl.ParallelFor]:
    """Collect every ParallelFor opsgroup reachable from new_root, by name."""
    pending = self._get_all_subgroups_and_ops(new_root)
    visited = set()
    loops_by_name = {}

    while pending:
      current = self._get_next_group_or_op(pending, visited)
      if current is None:
        break
      # Breadth-first expansion: enqueue children of the current node.
      pending.extend(self._get_all_subgroups_and_ops(current))
      if isinstance(current, dsl.ParallelFor):
        loops_by_name[current.name] = current

    return loops_by_name

  def _get_all_subgroups_and_ops(self, op):
    """Get all ops and groups contained within this group."""
    # Ops first, then groups, matching the attribute order callers expect.
    children = []
    for attr in ('ops', 'groups'):
      children.extend(getattr(op, attr, []))
    return children

  def _get_inputs_outputs(
          self,
          pipeline,
          root_group,
          op_groups,
          opsgroup_groups,
          condition_params,
          op_name_to_for_loop_op: Dict[Text, dsl.ParallelFor],
  ):
    """Get inputs and outputs of each group and op.

    Args:
      pipeline: Pipeline context object holding all ops.
      root_group: The pipeline's root OpsGroup.
      op_groups: Dict of op name -> ancestor group names (op itself last).
      opsgroup_groups: Dict of opsgroup name -> ancestor group names.
      condition_params: Dict of op/group name -> set of PipelineParams used in
        enclosing condition groups.
      op_name_to_for_loop_op: Dict of opsgroup name -> ParallelFor group.

    Returns:
      A tuple (inputs, outputs).
      inputs and outputs are dicts with key being the group/op names and values being list of
      tuples (param_name, producing_op_name). producing_op_name is the name of the op that
      produces the param. If the param is a pipeline param (no producer op), then
      producing_op_name is None.
    """
    inputs = defaultdict(set)
    outputs = defaultdict(set)

    for op in pipeline.ops.values():
      # op's inputs and all params used in conditions for that op are both considered.
      for param in op.inputs + list(condition_params[op.name]):
        # if the value is already provided (immediate value), then no need to expose
        # it as input for its parent groups.
        if param.value:
          continue
        if param.op_name:
          upstream_op = pipeline.ops[param.op_name]
          upstream_groups, downstream_groups = \
            self._get_uncommon_ancestors(op_groups, opsgroup_groups, upstream_op, op)
          for i, group_name in enumerate(downstream_groups):
            if i == 0:
              # If it is the first uncommon downstream group, then the input comes from
              # the first uncommon upstream group.
              inputs[group_name].add((param.full_name, upstream_groups[0]))
            else:
              # If not the first downstream group, then the input is passed down from
              # its ancestor groups so the upstream group is None.
              inputs[group_name].add((param.full_name, None))
          for i, group_name in enumerate(upstream_groups):
            if i == len(upstream_groups) - 1:
              # If last upstream group, it is an operator and output comes from container.
              outputs[group_name].add((param.full_name, None))
            else:
              # If not last upstream group, output value comes from one of its child.
              outputs[group_name].add((param.full_name, upstream_groups[i+1]))
        else:
          if not op.is_exit_handler:
            for group_name in op_groups[op.name][::-1]:
              # if group is for loop group and param is that loop's param, then the param
              # is created by that for loop ops_group and it shouldn't be an input to
              # any of its parent groups.
              inputs[group_name].add((param.full_name, None))
              if group_name in op_name_to_for_loop_op:
                # for example:
                #   loop_group.loop_args.name = 'loop-item-param-99ca152e'
                #   param.name =                'loop-item-param-99ca152e--a'
                loop_group = op_name_to_for_loop_op[group_name]
                if loop_group.loop_args.name in param.name:
                  break


    # Generate the input/output for recursive opsgroups
    # It propagates the recursive opsgroups IO to their ancester opsgroups
    def _get_inputs_outputs_recursive_opsgroup(group):
      #TODO: refactor the following codes with the above
      if group.recursive_ref:
        params = [(param, False) for param in group.inputs]
        params.extend([(param, True) for param in list(condition_params[group.name])])
        for param, is_condition_param in params:
          if param.value:
            continue
          full_name = param.full_name
          if param.op_name:
            upstream_op = pipeline.ops[param.op_name]
            upstream_groups, downstream_groups = \
              self._get_uncommon_ancestors(op_groups, opsgroup_groups, upstream_op, group)
            for i, g in enumerate(downstream_groups):
              if i == 0:
                inputs[g].add((full_name, upstream_groups[0]))
              # There is no need to pass the condition param as argument to the downstream ops.
              #TODO: this might also apply to ops. add a TODO here and think about it.
              elif i == len(downstream_groups) - 1 and is_condition_param:
                continue
              else:
                inputs[g].add((full_name, None))
            for i, g in enumerate(upstream_groups):
              if i == len(upstream_groups) - 1:
                outputs[g].add((full_name, None))
              else:
                outputs[g].add((full_name, upstream_groups[i+1]))
          elif not is_condition_param:
            for g in op_groups[group.name]:
              inputs[g].add((full_name, None))
      for subgroup in group.groups:
        _get_inputs_outputs_recursive_opsgroup(subgroup)

    _get_inputs_outputs_recursive_opsgroup(root_group)

    # Generate the input for SubGraph along with parallelfor
    for sub_graph in opsgroup_groups:
      if sub_graph in op_name_to_for_loop_op:
        # The opsgroup list is sorted with the farthest group as the first and
        # the opsgroup itself as the last. To get the latest opsgroup which is
        # not the opsgroup itself -2 is used.
        parent = opsgroup_groups[sub_graph][-2]
        if parent and parent.startswith('subgraph'):
          # propagate only op's pipeline param from subgraph to parallelfor
          loop_op = op_name_to_for_loop_op[sub_graph]
          pipeline_param = loop_op.loop_args.items_or_pipeline_param
          if loop_op.items_is_pipeline_param and pipeline_param.op_name:
            param_name = '%s-%s' % (
              sanitize_k8s_name(pipeline_param.op_name), pipeline_param.name)
            inputs[parent].add((param_name, pipeline_param.op_name))

    return inputs, outputs

  def _get_dependencies(self, pipeline, root_group, op_groups, opsgroups_groups, opsgroups, condition_params):
    """Get dependent groups and ops for all ops and groups.

    Returns:
      A dict mapping group/op name to a set of dependent groups/ops.
      If op2 depends on op1, and their ancestries are [root, G1, G2, op1]
      and [root, G1, G3, G4, op2], then G3 depends on G2: each dependency is
      attached to the first uncommon ancestors in the two ancestry chains,
      so only sibling groups/ops can have dependencies.
    """
    dependencies = defaultdict(set)

    def _lookup(name):
      # An upstream dependency can be either a BaseOp or an opsgroup.
      if name in pipeline.ops:
        return pipeline.ops[name]
      if name in opsgroups:
        return opsgroups[name]
      raise ValueError('compiler cannot find the ' + name)

    def _record(upstream_name, downstream):
      upstream = _lookup(upstream_name)
      upstream_groups, downstream_groups = self._get_uncommon_ancestors(
          op_groups, opsgroups_groups, upstream, downstream)
      dependencies[downstream_groups[0]].add(upstream_groups[0])

    for op in pipeline.ops.values():
      upstream_names = set(op.dependent_names)
      # Inputs and condition params both induce dependencies.
      for param in op.inputs + list(condition_params[op.name]):
        if param.op_name:
          upstream_names.add(param.op_name)
      for upstream_name in upstream_names:
        _record(upstream_name, op)

    # Generate dependencies based on the recursive opsgroups
    #TODO: refactor the following codes with the above
    def _visit_opsgroup(group):
      upstream_names = set(dep.name for dep in group.dependencies)
      if group.recursive_ref:
        for param in group.inputs + list(condition_params[group.name]):
          if param.op_name:
            upstream_names.add(param.op_name)
      for upstream_name in upstream_names:
        _record(upstream_name, group)
      for subgroup in group.groups:
        _visit_opsgroup(subgroup)

    _visit_opsgroup(root_group)

    return dependencies

  def _resolve_value_or_reference(self, value_or_reference, potential_references):
    """Resolve a value or PipelineParam to an Argo template reference string.

    Args:
      value_or_reference: value or reference to be resolved. It could be basic
        python types or PipelineParam.
      potential_references(dict{str->str}): a dictionary of parameter names to
        task names.
    """
    if not isinstance(value_or_reference, dsl.PipelineParam):
      return str(value_or_reference)

    parameter_name = value_or_reference.full_name
    matching_tasks = [
        task for name, task in potential_references if name == parameter_name
    ]
    # A None task name means the parameter comes straight down from an
    # ancestor group rather than from a sibling task, so it resolves to an
    # input parameter of the current group.
    if matching_tasks and matching_tasks[0] is not None:
      return '{{tasks.%s.outputs.parameters.%s}}' % (matching_tasks[0], parameter_name)
    return '{{inputs.parameters.%s}}' % parameter_name

  @staticmethod
  def _resolve_task_pipeline_param(pipeline_param: PipelineParam, group_type) -> str:
    """Render a PipelineParam as the Argo reference string for a task."""
    op_name = pipeline_param.op_name
    if op_name is None:
      # No producing op: the value is a global workflow parameter.
      return '{{workflow.parameters.%s}}' % pipeline_param.name
    qualified_name = '%s-%s' % (sanitize_k8s_name(op_name), pipeline_param.name)
    if group_type == 'subgraph':
      return '{{inputs.parameters.%s}}' % qualified_name
    return '{{tasks.%s.outputs.parameters.%s}}' % (sanitize_k8s_name(op_name), qualified_name)

  def _group_to_dag_template(self, group, inputs, outputs, dependencies):
    """Generate an Argo DAG template given an OpsGroup.

    inputs, outputs, dependencies are all helper dicts keyed by group/op name.
    """
    template = {'name': group.name}
    if group.parallelism != None:
      template["parallelism"] = group.parallelism

    # Generate inputs section.
    if inputs.get(group.name, None):
      template_inputs = [{'name': x[0]} for x in inputs[group.name]]
      # Sorted for deterministic yaml output.
      template_inputs.sort(key=lambda x: x['name'])
      template['inputs'] = {
        'parameters': template_inputs
      }

    # Generate outputs section.
    if outputs.get(group.name, None):
      template_outputs = []
      for param_name, dependent_name in outputs[group.name]:
        template_outputs.append({
          'name': param_name,
          'valueFrom': {
            'parameter': '{{tasks.%s.outputs.parameters.%s}}' % (dependent_name, param_name)
          }
        })
      template_outputs.sort(key=lambda x: x['name'])
      template['outputs'] = {'parameters': template_outputs}

    # Generate tasks section.
    tasks = []
    sub_groups = group.groups + group.ops
    for sub_group in sub_groups:
      is_recursive_subgroup = (isinstance(sub_group, OpsGroup) and sub_group.recursive_ref)
      # Special handling for recursive subgroup: use the existing opsgroup name
      # so the task points back at the already-generated template.
      if is_recursive_subgroup:
        task = {
            'name': sub_group.recursive_ref.name,
            'template': sub_group.recursive_ref.name,
        }
      else:
        task = {
          'name': sub_group.name,
          'template': sub_group.name,
        }
      if isinstance(sub_group, dsl.OpsGroup) and sub_group.type == 'condition':
        subgroup_inputs = inputs.get(sub_group.name, [])
        condition = sub_group.condition
        operand1_value = self._resolve_value_or_reference(condition.operand1, subgroup_inputs)
        operand2_value = self._resolve_value_or_reference(condition.operand2, subgroup_inputs)
        if condition.operator in ['==', '!=']:
          # Quote both operands so equality is compared as strings in Argo.
          operand1_value = '"' + operand1_value + '"'
          operand2_value = '"' + operand2_value + '"'
        task['when'] = '{} {} {}'.format(operand1_value, condition.operator, operand2_value)

      # Generate dependencies section for this task.
      if dependencies.get(sub_group.name, None):
        group_dependencies = list(dependencies[sub_group.name])
        group_dependencies.sort()
        task['dependencies'] = group_dependencies

      # Generate arguments section for this task.
      if inputs.get(sub_group.name, None):
        task['arguments'] = {'parameters': self.get_arguments_for_sub_group(sub_group, is_recursive_subgroup, inputs)}

      # additional task modifications for withItems and withParam
      if isinstance(sub_group, dsl.ParallelFor):
        if sub_group.items_is_pipeline_param:
          # these loop args are a 'withParam' rather than 'withItems'.
          # i.e., rather than a static list, they are either the output of another task or were input
          # as global pipeline parameters

          pipeline_param = sub_group.loop_args.items_or_pipeline_param
          withparam_value = self._resolve_task_pipeline_param(pipeline_param, group.type)
          if pipeline_param.op_name:
            # these loop args are the output of another task
            if 'dependencies' not in task or task['dependencies'] is None:
              task['dependencies'] = []
            if sanitize_k8s_name(
                pipeline_param.op_name) not in task['dependencies'] and group.type != 'subgraph':
              task['dependencies'].append(
                  sanitize_k8s_name(pipeline_param.op_name))

          task['withParam'] = withparam_value
        else:
          # Need to sanitize the dict keys for consistency.
          loop_tasks = sub_group.loop_args.to_list_for_task_yaml()
          nested_pipeline_params = extract_pipelineparams_from_any(loop_tasks)

          # Set dependencies in case of nested pipeline_params
          map_to_tmpl_var = {str(p): self._resolve_task_pipeline_param(p, group.type) for p in nested_pipeline_params}
          for pipeline_param in nested_pipeline_params:
            if pipeline_param.op_name:
              # these pipeline_param are the output of another task
              if 'dependencies' not in task or task['dependencies'] is None:
                task['dependencies'] = []
              if sanitize_k8s_name(
                  pipeline_param.op_name) not in task['dependencies']:
                task['dependencies'].append(
                    sanitize_k8s_name(pipeline_param.op_name))

          sanitized_tasks = []
          if isinstance(loop_tasks[0], dict):
            for argument_set in loop_tasks:
              c_dict = {}
              for k, v in argument_set.items():
                c_dict[sanitize_k8s_name(k, True)] = v
              sanitized_tasks.append(c_dict)
          else:
            sanitized_tasks = loop_tasks
          # Replace pipeline param if map_to_tmpl_var not empty
          task['withItems'] = _process_obj(sanitized_tasks, map_to_tmpl_var) if map_to_tmpl_var else sanitized_tasks

        # We will sort dependencies to have deterministic yaml and thus stable tests
        if task.get('dependencies'):
          task['dependencies'].sort()

      tasks.append(task)
    tasks.sort(key=lambda x: x['name'])
    template['dag'] = {'tasks': tasks}
    return template

  def get_arguments_for_sub_group(
          self,
          sub_group: Union[OpsGroup, dsl._container_op.BaseOp],
          is_recursive_subgroup: Optional[bool],
          inputs: Dict[Text, Tuple[Text, Text]],
  ):
    """Build the Argo 'arguments.parameters' list for one subgroup task."""
    is_parallel_for = isinstance(sub_group, dsl.ParallelFor)
    # Sentinel that can never match a sanitized name when there is no loop.
    loop_arg_name = (sanitize_k8s_name(sub_group.loop_args.full_name)
                     if is_parallel_for else '---')

    arguments = []
    for param_name, dependent_name in inputs[sub_group.name]:
      if is_recursive_subgroup:
        # Map the parameter back to the argument name declared on the
        # recursion target's opsgroup.
        for arg_name, arg_param in sub_group.arguments.items():
          if param_name == arg_param.full_name:
            break
        argument_name = sub_group.recursive_ref.arguments[arg_name].full_name
      else:
        argument_name = param_name

      # The argument value can be a pipeline input reference, a task output
      # reference, or a loop item (or a loop item attribute).
      sanitized_param = sanitize_k8s_name(param_name)
      # Only references to the current loop's item matter, not outer loops.
      if is_parallel_for and sanitized_param.startswith(loop_arg_name):
        if sanitized_param == loop_arg_name:
          argument_value = '{{item}}'
        elif _for_loop.LoopArgumentVariable.name_is_loop_arguments_variable(param_name):
          subvar_name = _for_loop.LoopArgumentVariable.get_subvar_name(param_name)
          argument_value = '{{item.%s}}' % subvar_name
        else:
          raise ValueError("Argument seems to reference the loop item, but not the item itself and not some attribute of the item. param_name: {}, ".format(param_name))
      elif dependent_name:
        argument_value = '{{tasks.%s.outputs.parameters.%s}}' % (dependent_name, param_name)
      else:
        argument_value = '{{inputs.parameters.%s}}' % param_name

      arguments.append({
        'name': argument_name,
        'value': argument_value,
      })

    arguments.sort(key=lambda x: x['name'])
    return arguments

  def _create_dag_templates(self, pipeline, op_transformers=None, op_to_templates_handler=None):
    """Create all groups and ops templates in the pipeline.

    Args:
      pipeline: Pipeline context object to get all the pipeline data from.
      op_transformers: A list of functions that are applied to all ContainerOp instances that are being processed.
      op_to_templates_handler: Handler which converts a base op into a list of argo templates.

    Returns:
      A list of Argo template dicts (one DAG template per ops group plus the
      templates produced by op_to_templates_handler for each op).
    """
    op_to_templates_handler = op_to_templates_handler or (lambda op : [_op_to_template(op)])
    root_group = pipeline.groups[0]

    # Call the transformation functions before determining the inputs/outputs, otherwise
    # the user would not be able to use pipeline parameters in the container definition
    # (for example as pod labels) - the generated template is invalid.
    for op in pipeline.ops.values():
      for transformer in op_transformers or []:
        transformer(op)

    # Generate core data structures to prepare for argo yaml generation
    #   op_name_to_parent_groups: op name -> list of ancestor groups including the current op
    #   opsgroups: a dictionary of ospgroup.name -> opsgroup
    #   inputs, outputs: group/op names -> list of tuples (full_param_name, producing_op_name)
    #   condition_params: recursive_group/op names -> list of pipelineparam
    #   dependencies: group/op name -> list of dependent groups/ops.
    # Special Handling for the recursive opsgroup
    #   op_name_to_parent_groups also contains the recursive opsgroups
    #   condition_params from _get_condition_params_for_ops also contains the recursive opsgroups
    #   groups does not include the recursive opsgroups
    opsgroups = self._get_groups(root_group)
    op_name_to_parent_groups = self._get_groups_for_ops(root_group)
    opgroup_name_to_parent_groups = self._get_groups_for_opsgroups(root_group)
    condition_params = self._get_condition_params_for_ops(root_group)
    op_name_to_for_loop_op = self._get_for_loop_ops(root_group)
    inputs, outputs = self._get_inputs_outputs(
      pipeline,
      root_group,
      op_name_to_parent_groups,
      opgroup_name_to_parent_groups,
      condition_params,
      op_name_to_for_loop_op,
    )
    dependencies = self._get_dependencies(
      pipeline,
      root_group,
      op_name_to_parent_groups,
      opgroup_name_to_parent_groups,
      opsgroups,
      condition_params,
    )

    # One DAG template per ops group.
    templates = []
    for opsgroup in opsgroups.keys():
      template = self._group_to_dag_template(opsgroups[opsgroup], inputs, outputs, dependencies)
      templates.append(template)

    for op in pipeline.ops.values():
      # In v2-compatible mode, rewrite the op to run via the v2 launcher.
      if self._mode == dsl.PipelineExecutionMode.V2_COMPATIBLE:
        v2_compat.update_op(op,
                            pipeline_name=self._pipeline_name_param,
                            pipeline_root=self._pipeline_root_param,
                            launcher_image=self._launcher_image)
      templates.extend(op_to_templates_handler(op))

      # v2 features that the KFP (Argo) backend cannot honor yet.
      if hasattr(op, 'custom_job_spec'):
        warnings.warn('CustomJob spec is not supported yet when running on KFP.'
                      ' The component will execute within the KFP cluster.')
      if hasattr(op, 'importer_spec'):
        raise NotImplementedError(
            'dsl.importer is not supported yet when running on KFP.')

    return templates

  def _create_pipeline_workflow(self,
                                parameter_defaults,
                                pipeline,
                                op_transformers=None,
                                pipeline_conf=None):
    """Create workflow for the pipeline.

    Args:
      parameter_defaults: Mapping of pipeline parameter name to default value;
        parameters whose default is None are emitted without a 'value' key.
      pipeline: Pipeline context object to get all the pipeline data from.
      op_transformers: A list of functions applied to every ContainerOp.
      pipeline_conf: PipelineConf instance carrying pipeline-level settings.

    Returns:
      The Argo Workflow spec as a dict.
    """
    # Input Parameters
    input_params = []
    for name, value in parameter_defaults.items():
      param = {'name': name}
      if value is not None:
        param['value'] = value
      input_params.append(param)

    # Making the pipeline group name unique to prevent name clashes with templates
    pipeline_group = pipeline.groups[0]
    temp_pipeline_group_name = uuid.uuid4().hex
    pipeline_group.name = temp_pipeline_group_name

    # Templates
    templates = self._create_dag_templates(pipeline, op_transformers)

    # Exit Handler
    exit_handler = None
    if pipeline.groups[0].groups:
      first_group = pipeline.groups[0].groups[0]
      if first_group.type == 'exit_handler':
        exit_handler = first_group.exit_op

    # The whole pipeline workflow
    # It must be valid as a subdomain
    pipeline_name = pipeline.name or 'pipeline'

    # Workaround for pipeline name clashing with container template names
    # TODO: Make sure template names cannot clash at all (container, DAG, workflow)
    template_map = {template['name'].lower(): template for template in templates}
    from ..components._naming import _make_name_unique_by_adding_index
    pipeline_template_name = _make_name_unique_by_adding_index(pipeline_name, template_map, '-')

    # Restoring the name of the pipeline template
    pipeline_template = template_map[temp_pipeline_group_name]
    pipeline_template['name'] = pipeline_template_name

    templates.sort(key=lambda x: x['name'])
    workflow = {
      'apiVersion': 'argoproj.io/v1alpha1',
      'kind': 'Workflow',
      'metadata': {
        'generateName': pipeline_template_name + '-',
        'annotations': pipeline_conf.annotations,
        'labels': pipeline_conf.labels,
      },
      'spec': {
        'entrypoint': pipeline_template_name,
        'templates': templates,
        'arguments': {'parameters': input_params},
        'serviceAccountName': 'pipeline-runner',
      }
    }
    # set parallelism limits at pipeline level
    if pipeline_conf.parallelism:
      workflow['spec']['parallelism'] = pipeline_conf.parallelism

    # set ttl after workflow finishes
    if pipeline_conf.ttl_seconds_after_finished >= 0:
      workflow['spec']['ttlSecondsAfterFinished'] = pipeline_conf.ttl_seconds_after_finished

    if pipeline_conf.scheduler_name:
      workflow['spec']['schedulerName'] = pipeline_conf.scheduler_name

    if pipeline_conf._pod_disruption_budget_min_available:
      workflow['spec']['podDisruptionBudget'] = {
          "minAvailable": pipeline_conf._pod_disruption_budget_min_available}

    if pipeline_conf.image_pull_secrets:
      workflow['spec']['imagePullSecrets'] = [
          convert_k8s_obj_to_json(image_pull_secret)
          for image_pull_secret in pipeline_conf.image_pull_secrets
      ]

    if pipeline_conf.host_aliases:
      # host_aliases maps ip -> list of hostnames; presumably a dict — the
      # original code indexes it by its iteration keys.
      workflow['spec']['hostAliases'] = [
          {"hostnames": pipeline_conf.host_aliases[ip], "ip": ip}
          for ip in pipeline_conf.host_aliases
      ]

    if pipeline_conf.timeout:
      workflow['spec']['activeDeadlineSeconds'] = pipeline_conf.timeout

    if exit_handler:
      workflow['spec']['onExit'] = exit_handler.name

    # This can be overwritten by the task specific
    # nodeselection, specified in the template.
    if pipeline_conf.default_pod_node_selector:
      workflow['spec']['nodeSelector'] = pipeline_conf.default_pod_node_selector

    if pipeline_conf.dns_config:
      workflow['spec']['dnsConfig'] = convert_k8s_obj_to_json(pipeline_conf.dns_config)

    # PEP 8: identity test for None instead of `!= None`.
    if pipeline_conf.image_pull_policy is not None:
      # Validate first, then apply the policy to every container template that
      # does not already set one.
      if pipeline_conf.image_pull_policy not in ("Always", "Never", "IfNotPresent"):
        raise ValueError(
                  'Invalid imagePullPolicy. Must be one of `Always`, `Never`, `IfNotPresent`.'
              )
      for template in workflow["spec"]["templates"]:
        container = template.get('container', None)
        if container and "imagePullPolicy" not in container:
          container["imagePullPolicy"] = pipeline_conf.image_pull_policy
    return workflow

  def _validate_exit_handler(self, pipeline):
    """确保只有一个全局退出处理器。

    注意，这是一个临时变通方法，直到argo支持本地退出处理器。
    """
    """Makes sure there is only one global exit handler.

    Note this is a temporary workaround until argo supports local exit handler.
    """

    def _validate_exit_handler_helper(group, exiting_op_names, handler_exists):
      if group.type == 'exit_handler':
        if handler_exists or len(exiting_op_names) > 1:
          raise ValueError('Only one global exit_handler is allowed and all ops need to be included.')
        handler_exists = True

      if group.ops:
        exiting_op_names.extend([x.name for x in group.ops])

      for g in group.groups:
        _validate_exit_handler_helper(g, exiting_op_names, handler_exists)

    return _validate_exit_handler_helper(pipeline.groups[0], [], False)

  def _sanitize_and_inject_artifact(self, pipeline: dsl.Pipeline, pipeline_conf=None):
    """Sanitize operator/param names and inject pipeline artifact location.

    Mutates the ops of ``pipeline`` in place: every op name, output name and
    dependency name is run through sanitize_k8s_name, and ``pipeline.ops`` is
    rebuilt keyed by the sanitized names.
    """
    sanitized_ops = {}

    for op in pipeline.ops.values():
      op.name = sanitize_k8s_name(op.name)
      for param in op.outputs.values():
        param.name = sanitize_k8s_name(param.name, True)
        if param.op_name:
          param.op_name = sanitize_k8s_name(param.op_name)
      # Skip the single-output shortcut when the op has multiple outputs.
      if op.output is not None and not isinstance(op.output, dsl._container_op._MultipleOutputsError):
        op.output.name = sanitize_k8s_name(op.output.name, True)
        op.output.op_name = sanitize_k8s_name(op.output.op_name)
      if op.dependent_names:
        op.dependent_names = [sanitize_k8s_name(name) for name in op.dependent_names]
      if isinstance(op, dsl.ContainerOp) and op.file_outputs is not None:
        op.file_outputs = {
            sanitize_k8s_name(key, True): value
            for key, value in op.file_outputs.items()
        }
      elif isinstance(op, dsl.ResourceOp) and op.attribute_outputs is not None:
        op.attribute_outputs = {
            sanitize_k8s_name(key, True): value
            for key, value in op.attribute_outputs.items()
        }
      if isinstance(op, dsl.ContainerOp):
        if op.input_artifact_paths:
          op.input_artifact_paths = {
              sanitize_k8s_name(key, True): value
              for key, value in op.input_artifact_paths.items()
          }
        if op.artifact_arguments:
          op.artifact_arguments = {
              sanitize_k8s_name(key, True): value
              for key, value in op.artifact_arguments.items()
          }
      sanitized_ops[op.name] = op
    pipeline.ops = sanitized_ops

  def _create_workflow(
      self,
      pipeline_func: Callable,
      pipeline_name: Optional[Text] = None,
      pipeline_description: Optional[Text] = None,
      params_list: Optional[List[dsl.PipelineParam]] = None,
      pipeline_conf: Optional[dsl.PipelineConf] = None,
  ) -> Dict[Text, Any]:
    """Internal implementation of create_workflow.

    Invokes ``pipeline_func`` with placeholder PipelineParams inside a
    dsl.Pipeline context, then compiles the recorded ops into an Argo
    workflow dict and annotates it with SDK metadata.

    Args:
      pipeline_func: Pipeline function where ContainerOps are invoked.
      pipeline_name: Optional override for the pipeline name.
      pipeline_description: Optional override for the pipeline description.
      params_list: Extra pipeline params to append to the pipeline.
      pipeline_conf: PipelineConf instance; overrides the pipeline's own conf.

    Returns:
      The workflow spec as a dict.
    """
    params_list = params_list or []

    # Create the arg list with no default values and call pipeline function.
    # Assign type information to the PipelineParam
    pipeline_meta = _extract_pipeline_metadata(pipeline_func)
    pipeline_meta.name = pipeline_name or pipeline_meta.name
    pipeline_meta.description = pipeline_description or pipeline_meta.description
    pipeline_name = sanitize_k8s_name(pipeline_meta.name)

    # Need to first clear the default value of dsl.PipelineParams. Otherwise, it
    # will be resolved immediately in place when being passed to each component.
    default_param_values = OrderedDict()

    # In v2-compatible mode these extra params were created in compile().
    if self._pipeline_root_param:
      params_list.append(self._pipeline_root_param)
    if self._pipeline_name_param:
      params_list.append(self._pipeline_name_param)

    for param in params_list:
      default_param_values[param.name] = param.value
      param.value = None

    # Build positional/keyword placeholder params matching the function's
    # signature, carrying over any declared input types.
    args_list = []
    kwargs_dict = dict()
    signature = inspect.signature(pipeline_func)
    for arg_name, arg in signature.parameters.items():
      arg_type = None
      for input in pipeline_meta.inputs or []:
        if arg_name == input.name:
          arg_type = input.type
          break
      param = dsl.PipelineParam(sanitize_k8s_name(arg_name, True), param_type=arg_type)
      if arg.kind == inspect.Parameter.KEYWORD_ONLY:
        kwargs_dict[arg_name] = param
      else:
        args_list.append(param)

    # Running the function records ops/groups into dsl_pipeline.
    with dsl.Pipeline(pipeline_name) as dsl_pipeline:
      pipeline_func(*args_list, **kwargs_dict)

    # Configuration passed to the compiler is overriding. Unfortunately, it's
    # not trivial to detect whether the dsl_pipeline.conf was ever modified.
    pipeline_conf = pipeline_conf or dsl_pipeline.conf

    self._validate_exit_handler(dsl_pipeline)
    self._sanitize_and_inject_artifact(dsl_pipeline, pipeline_conf)

    # Fill in the default values by merging two param lists.
    args_list_with_defaults = OrderedDict()
    if pipeline_meta.inputs:
      args_list_with_defaults = OrderedDict([
        (sanitize_k8s_name(input_spec.name, True), input_spec.default)
        for input_spec in pipeline_meta.inputs
      ])

    if params_list:
      # Or, if args are provided by params_list, fill in pipeline_meta.
      for k, v in default_param_values.items():
        args_list_with_defaults[k] = v

      pipeline_meta.inputs = pipeline_meta.inputs or []
      for param in params_list:
        pipeline_meta.inputs.append(
            InputSpec(
                name=param.name,
                type=param.param_type,
                default=default_param_values[param.name]))

    # Standard transformers: pod env injection plus SDK version/env labels,
    # followed by any user-supplied transformers from the conf.
    op_transformers = [add_pod_env]
    pod_labels = {_SDK_VERSION_LABEL: kfp.__version__, _SDK_ENV_LABEL:_SDK_ENV_DEFAULT}
    op_transformers.append(add_pod_labels(pod_labels))
    op_transformers.extend(pipeline_conf.op_transformers)

    if self._mode == dsl.PipelineExecutionMode.V2_COMPATIBLE:
      for op in dsl_pipeline.ops.values():
        op.inputs.append(self._pipeline_name_param)
        op.inputs.append(self._pipeline_root_param)

    workflow = self._create_pipeline_workflow(
        args_list_with_defaults,
        dsl_pipeline,
        op_transformers,
        pipeline_conf,
    )

    # Post-processing passes over the generated workflow.
    from ._data_passing_rewriter import fix_big_data_passing
    workflow = fix_big_data_passing(workflow)

    workflow = _data_passing_rewriter.add_pod_name_passing(
        workflow, str(self._pipeline_root_param or None))

    if pipeline_conf and pipeline_conf.data_passing_method != None:
      workflow = pipeline_conf.data_passing_method(workflow)

    metadata = workflow.setdefault('metadata', {})
    annotations = metadata.setdefault('annotations', {})
    labels = metadata.setdefault('labels', {})

    annotations[_SDK_VERSION_LABEL] = kfp.__version__
    annotations['pipelines.kubeflow.org/pipeline_compilation_time'] = datetime.datetime.now().isoformat()
    annotations['pipelines.kubeflow.org/pipeline_spec'] = json.dumps(pipeline_meta.to_dict(), sort_keys=True)

    if self._mode == dsl.PipelineExecutionMode.V2_COMPATIBLE:
      annotations['pipelines.kubeflow.org/v2_pipeline'] = "true"
      labels['pipelines.kubeflow.org/v2_pipeline'] = "true"


    # Labels might be logged better than annotations so adding some information here as well
    labels[_SDK_VERSION_LABEL] = kfp.__version__

    return workflow

  # For now (0.1.31) this function is only used by TFX's KubeflowDagRunner.
  # See https://github.com/tensorflow/tfx/blob/811e4c1cc0f7903d73d151b9d4f21f79f6013d4a/tfx/orchestration/kubeflow/kubeflow_dag_runner.py#L238
  @deprecated(
      version='0.1.32',
      reason='Workflow spec is not intended to be handled by user, please '
             'switch to _create_workflow')
  def create_workflow(self,
                      pipeline_func: Callable,
                      pipeline_name: Text=None,
                      pipeline_description: Text=None,
                      params_list: List[dsl.PipelineParam]=None,
                      pipeline_conf: dsl.PipelineConf = None) -> Dict[Text, Any]:
    """Create workflow spec from pipeline function and specified pipeline
    params/metadata. Currently, the pipeline params are either specified in
    the signature of the pipeline function or by passing a list of
    dsl.PipelineParam. Conflict will cause ValueError.

    Args:
      pipeline_func: Pipeline function where ContainerOps are invoked.
      pipeline_name: The name of the pipeline to compile.
      pipeline_description: The description of the pipeline.
      params_list: List of pipeline params to append to the pipeline.
      pipeline_conf: PipelineConf instance. Can specify op transforms, image
        pull secrets and other pipeline-level configuration options. Overrides
        any configuration that may be set by the pipeline.

    Returns:
      The created workflow dictionary.
    """
    # Thin deprecated shim: forward everything to the internal implementation.
    return self._create_workflow(
        pipeline_func=pipeline_func,
        pipeline_name=pipeline_name,
        pipeline_description=pipeline_description,
        params_list=params_list,
        pipeline_conf=pipeline_conf)

  @deprecated(
      version='0.1.32',
      reason='Switch to _create_workflow.')
  def _compile(self, pipeline_func, pipeline_conf: dsl.PipelineConf = None):
    """Compile the given pipeline function into workflow."""
    # Deprecated alias for _create_workflow.
    workflow = self._create_workflow(
        pipeline_func=pipeline_func, pipeline_conf=pipeline_conf)
    return workflow

  def compile(
              self,
              pipeline_func,
              package_path,
              type_check: bool = True,
              pipeline_conf: Optional[dsl.PipelineConf] = None):
    """Compile the given pipeline function into workflow yaml.

    Args:
      pipeline_func: Pipeline functions with @dsl.pipeline decorator.
      package_path: The output workflow tar.gz file path. for example,
        "~/a.tar.gz"
      type_check: Whether to enable the type check or not, default: True.
      pipeline_conf: PipelineConf instance. Can specify op transforms, image
        pull secrets and other pipeline-level configuration options. Overrides
        any configuration that may be set by the pipeline.
    """
    is_v2_compatible = self._mode == dsl.PipelineExecutionMode.V2_COMPATIBLE

    # A pipeline root (explicit or implied by v2 mode) becomes a hidden
    # pipeline parameter.
    pipeline_root_dir = getattr(pipeline_func, 'pipeline_root', None)
    if pipeline_root_dir is not None or is_v2_compatible:
      self._pipeline_root_param = dsl.PipelineParam(
          name=dsl.ROOT_PARAMETER_NAME, value=pipeline_root_dir or '')

    if is_v2_compatible:
      human_name = getattr(pipeline_func, '_component_human_name', '')
      self._pipeline_name_param = dsl.PipelineParam(name='pipeline-name',
                                                    value=human_name)

    # Temporarily force the global type-check flag to the requested value,
    # restoring the caller's setting afterwards.
    import kfp
    saved_type_check = kfp.TYPE_CHECK
    kfp.TYPE_CHECK = type_check
    try:
      self._create_and_write_workflow(
          pipeline_func=pipeline_func,
          pipeline_conf=pipeline_conf,
          package_path=package_path)
    finally:
      kfp.TYPE_CHECK = saved_type_check

  @staticmethod
  def _write_workflow(workflow: Dict[Text, Any], package_path: Text = None):
    """Dump pipeline workflow into yaml spec and write out in the format specified by the user.

    Args:
      workflow: Workflow spec of the pipeline, dict.
      package_path: file path to be written. If not specified, a yaml_text string will be returned.

    Raises:
      ValueError: if package_path has an unsupported extension.
    """
    yaml_text = dump_yaml(workflow)

    if package_path is None:
      return yaml_text

    # Dispatch on the requested archive/file format.
    if package_path.endswith(('.tar.gz', '.tgz')):
      from contextlib import closing
      from io import BytesIO
      with tarfile.open(package_path, "w:gz") as tar:
        with closing(BytesIO(yaml_text.encode())) as yaml_file:
          tarinfo = tarfile.TarInfo('pipeline.yaml')
          tarinfo.size = len(yaml_file.getvalue())
          tar.addfile(tarinfo, fileobj=yaml_file)
    elif package_path.endswith('.zip'):
      # Renamed from `zip` to avoid shadowing the builtin.
      with zipfile.ZipFile(package_path, "w") as zip_file:
        zipinfo = zipfile.ZipInfo('pipeline.yaml')
        zipinfo.compress_type = zipfile.ZIP_DEFLATED
        zip_file.writestr(zipinfo, yaml_text)
    elif package_path.endswith(('.yaml', '.yml')):
      with open(package_path, 'w') as yaml_file:
        yaml_file.write(yaml_text)
    else:
      # Message grammar fixed: "should end with" (was "should ends with").
      raise ValueError(
          'The output path ' + package_path +
          ' should end with one of the following formats: '
          '[.tar.gz, .tgz, .zip, .yaml, .yml]')

  def _create_and_write_workflow(
      self,
      pipeline_func: Callable,
      pipeline_name: Text=None,
      pipeline_description: Text=None,
      params_list: List[dsl.PipelineParam]=None,
      pipeline_conf: dsl.PipelineConf=None,
      package_path: Text=None
  ) -> None:
    """Compile the given pipeline function and dump it to specified file format."""
    workflow = self._create_workflow(
        pipeline_func=pipeline_func,
        pipeline_name=pipeline_name,
        pipeline_description=pipeline_description,
        params_list=params_list,
        pipeline_conf=pipeline_conf)
    self._write_workflow(workflow, package_path)
    # Validation intentionally runs after the package is written (matching the
    # original order), so a lint failure still leaves the file on disk.
    _validate_workflow(workflow)


def _validate_workflow(workflow: dict):
  """Validate the compiled workflow and, when available, lint it with argo.

  Args:
    workflow: The compiled Argo workflow spec, dict.

  Raises:
    RuntimeError: if an unresolved PipelineParam remains in the spec, or if a
      working `argo lint` rejects the workflow.
  """
  import copy
  # Deep-copy before patching: the original shallow .copy() still shared the
  # nested parameter dicts, so the Argo-lint workaround below mutated the
  # caller's workflow.
  workflow = copy.deepcopy(workflow)
  # Working around Argo lint issue
  for argument in workflow['spec'].get('arguments', {}).get('parameters', []):
    if 'value' not in argument:
      argument['value'] = ''

  yaml_text = dump_yaml(workflow)
  if '{{pipelineparam' in yaml_text:
    raise RuntimeError(
        '''Internal compiler error: Found unresolved PipelineParam.
Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.'''
    )

  # Running Argo lint if available
  import shutil
  argo_path = shutil.which('argo')
  if argo_path:
    # Probe with a known-good workflow first so a broken argo binary does not
    # make a valid pipeline look invalid.
    has_working_argo_lint = False
    try:
      has_working_argo_lint = _run_argo_lint("""
        apiVersion: argoproj.io/v1alpha1
        kind: Workflow
        metadata:
          generateName: hello-world-
        spec:
          entrypoint: whalesay
          templates:
          - name: whalesay
            container:
              image: docker/whalesay:latest""")
    except Exception:
      # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
      # propagate; the probe remains best-effort.
      warnings.warn("Cannot validate the compiled workflow. Found the argo program in PATH, but it's not usable. argo v2.4.3 should work.")

    if has_working_argo_lint:
      _run_argo_lint(yaml_text)


def _run_argo_lint(yaml_text: str):
  """Lint the given workflow YAML with the `argo` CLI when it is on PATH.

  Returns:
    True when linting ran and passed; False when the `argo` binary was not
    found (linting skipped).

  Raises:
    RuntimeError: when `argo lint` rejects the workflow.
  """
  import shutil
  import subprocess
  argo_path = shutil.which('argo')
  if not argo_path:
    return False

  result = subprocess.run(
      [argo_path, 'lint', '/dev/stdin'],
      input=yaml_text.encode('utf-8'),
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE)
  if result.returncode:
    stderr_text = result.stderr.decode('utf-8')
    # A specific lint failure pattern maps to a friendlier diagnosis.
    if re.match(
        pattern=r'.+failed to resolve {{tasks\.+.+\.outputs\.artifacts\..+}}.+',
        string=stderr_text):
      raise RuntimeError(
          'Compiler has produced Argo-incompatible workflow due to '
          'unresolvable input artifact(s). Please check whether inputPath has'
          ' been connected to outputUri placeholder, which is not supported '
          'yet. Otherwise, please create a new issue at '
          'https://github.com/kubeflow/pipelines/issues attaching the '
          'pipeline code and the pipeline package. Error: {}'.format(
              stderr_text))
    raise RuntimeError(
        '''Internal compiler error: Compiler has produced Argo-incompatible workflow.
Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.
Error: {}'''.format(stderr_text))

  return True