# Copyright 2018-2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# enum is used to define the PipelineExecutionMode enumeration below.
import enum
# Typing helpers used in the signatures below.
from typing import Callable, Optional, Union

# Kubernetes model for per-pod DNS configuration.
from kubernetes.client.models import V1PodDNSConfig
# Internal kfp.dsl modules for defining and operating pipelines.
from kfp.dsl import _container_op
from kfp.dsl import _resource_op
from kfp.dsl import _ops_group
from kfp.dsl import _component_bridge
# Internal kfp.components modules for handling components.
from kfp.components import _components
from kfp.components import _naming
# sys: interpreter interaction. NOTE(review): sys appears unused in this
# module — verify against the full file before removing.
import sys

# This handler is called whenever the @pipeline decorator is applied.
# It can be used by the command-line DSL compiler to inject code that runs for
# every pipeline definition.
_pipeline_decorator_handler = None

# Enumeration of pipeline execution/compilation modes.
class PipelineExecutionMode(enum.Enum):
  """Supported modes for compiling and executing a pipeline."""

  # Compile to Argo YAML without support for metadata-enabled components.
  V1_LEGACY = 1

  # Compile to Argo YAML with support for metadata-enabled components.
  # Pipelines compiled using this mode aim to be compatible with v2 semantics.
  V2_COMPATIBLE = 2

  # Compile to KFP v2 IR for execution using the v2 engine.
  # This option is unsupported right now.
  V2_ENGINE = 3


# Decorator factory used to mark a function as a pipeline definition.
def pipeline(
    name: Optional[str] = None,
    description: Optional[str] = None,
    pipeline_root: Optional[str] = None):
  """Decorator of pipeline functions.

  Example
    ::

      @pipeline(
        name='my-pipeline',
        description='My ML Pipeline.'
        pipeline_root='gs://my-bucket/my-output-path'
      )
      def my_pipeline(a: PipelineParam, b: PipelineParam):
        ...

  Args:
    name: The pipeline name. Default to a sanitized version of the function
      name.
    description: Optionally, a human-readable description of the pipeline.
    pipeline_root: The root directory to generate input/output URI under this
      pipeline. This is required if input/output URI placeholder is used in
      this pipeline.
  """

  def _pipeline(func: Callable):
    # Attach the supplied metadata directly onto the pipeline function so the
    # compiler can pick it up later; empty/None values are simply skipped.
    for attribute, value in (('_component_human_name', name),
                             ('_component_description', description),
                             ('pipeline_root', pipeline_root)):
      if value:
        setattr(func, attribute, value)

    # Give the registered decorator handler (if any) a chance to wrap or
    # replace the pipeline function; fall back to the original function.
    handler = _pipeline_decorator_handler
    if handler:
      return handler(func) or func
    return func

  return _pipeline


# PipelineConf holds pipeline-level settings.
class PipelineConf():
  """PipelineConf contains pipeline level settings.

  All ``set_*`` methods return ``self`` so calls can be chained fluently.
  """

  def __init__(self):
    # Kubernetes V1LocalObjectReference objects used to pull private images.
    self.image_pull_secrets = []
    # Workflow-level timeout in seconds; 0 means no timeout.
    self.timeout = 0
    # Seconds to keep the finished workflow before it is garbage collected;
    # -1 means do not garbage collect.
    self.ttl_seconds_after_finished = -1
    # Minimum available pods for the PodDisruptionBudget (number or percent).
    self._pod_disruption_budget_min_available = None
    # Transformer functions applied to every op in the pipeline.
    self.op_transformers = []
    # Default nodeSelector constraints applied to every pod.
    self.default_pod_node_selector = {}
    # Default image pull policy: Always, Never or IfNotPresent.
    self.image_pull_policy = None
    # Max number of pods that may run in parallel; None means unlimited.
    self.parallelism = None
    # Method used for intermediate data passing (see data_passing_method).
    self._data_passing_method = None
    # Kubernetes V1PodDNSConfig applied to every pod.
    self.dns_config = None
    # Mapping of IP address -> list of hostnames for pod host aliases.
    self.host_aliases = {}
    # Labels applied at the pipeline level.
    self.labels = {}
    # Annotations applied at the pipeline level.
    self.annotations = {}
    # Name of the scheduler to run the pods.
    self.scheduler_name = None

  def set_image_pull_secrets(self, image_pull_secrets):
    """Configures the pipeline level imagepullsecret.

    Args:
      image_pull_secrets: a list of Kubernetes V1LocalObjectReference For
        detailed description, check Kubernetes V1LocalObjectReference definition
        https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1LocalObjectReference.md

    Returns:
      self, to allow chained configuration calls.
    """
    self.image_pull_secrets = image_pull_secrets
    return self

  def set_host_aliases(self, ip, hostnames):
    """Adds a host alias entry mapping ``ip`` to ``hostnames``.

    Repeated calls with the same ip extend the existing hostname list.

    Args:
      ip: the IP address for the alias entry.
      hostnames: a list of hostnames that should resolve to ``ip``.

    Returns:
      self, to allow chained configuration calls.
    """
    if ip in self.host_aliases:
      self.host_aliases[ip] = self.host_aliases[ip] + hostnames
    else:
      self.host_aliases[ip] = hostnames
    return self

  def set_timeout(self, seconds: int):
    """Configures the pipeline level timeout.

    Args:
      seconds: number of seconds for timeout

    Returns:
      self, to allow chained configuration calls.
    """
    self.timeout = seconds
    return self

  def set_parallelism(self, max_num_pods: int):
    """Configures the max number of total parallel pods that can execute at the same time in a workflow.

    Args:
      max_num_pods: max number of total parallel pods.

    Returns:
      self, to allow chained configuration calls.

    Raises:
      ValueError: if max_num_pods is less than 1.
    """
    if max_num_pods < 1:
      raise ValueError(
          'Pipeline max_num_pods set to < 1, allowed values are > 0')

    self.parallelism = max_num_pods
    return self

  def set_ttl_seconds_after_finished(self, seconds: int):
    """Configures the ttl after the pipeline has finished.

    Args:
      seconds: number of seconds for the workflow to be garbage collected after
        it is finished.

    Returns:
      self, to allow chained configuration calls.
    """
    self.ttl_seconds_after_finished = seconds
    return self

  def set_pod_disruption_budget(self, min_available: Union[int, str]):
    """PodDisruptionBudget holds the number of concurrent disruptions that you allow for pipeline Pods.

    Args:
      min_available (Union[int, str]):  An eviction is allowed if at least
        "minAvailable" pods selected by "selector" will still be available after
        the eviction, i.e. even in the absence of the evicted pod.  So for
        example you can prevent all voluntary evictions by specifying "100%".
        "minAvailable" can be either an absolute number or a percentage.

    Returns:
      self, to allow chained configuration calls.
    """
    self._pod_disruption_budget_min_available = min_available
    return self

  def set_default_pod_node_selector(self, label_name: str, value: str):
    """Add a constraint for nodeSelector for a pipeline.

    Each constraint is a key-value pair label.

    For the container to be eligible to run on a node, the node must have each
    of the constraints appeared as labels.

    Args:
      label_name: The name of the constraint label.
      value: The value of the constraint label.

    Returns:
      self, to allow chained configuration calls.
    """
    self.default_pod_node_selector[label_name] = value
    return self

  def set_image_pull_policy(self, policy: str):
    """Configures the default image pull policy.

    Args:
      policy: the pull policy, has to be one of: Always, Never, IfNotPresent.
        For more info:
        https://github.com/kubernetes-client/python/blob/10a7f95435c0b94a6d949ba98375f8cc85a70e5a/kubernetes/docs/V1Container.md

    Returns:
      self, to allow chained configuration calls.
    """
    self.image_pull_policy = policy
    return self

  def add_op_transformer(self, transformer):
    """Configures the op_transformers which will be applied to all ops in the pipeline.

    The ops can be ResourceOp, VolumeOp, or ContainerOp.

    Args:
      transformer: A function that takes a kfp Op as input and returns a kfp Op

    Returns:
      self, to allow chained configuration calls (added for consistency with
      the other fluent setters on this class).
    """
    self.op_transformers.append(transformer)
    return self

  # NOTE: the annotation is a PEP 484 string forward reference so the class
  # can be defined without evaluating the kubernetes model at class-creation
  # time; typing.get_type_hints still resolves it in this module.
  def set_dns_config(self, dns_config: 'V1PodDNSConfig'):
    """Set the dnsConfig to be given to each pod.

    Args:
      dns_config: Kubernetes V1PodDNSConfig For detailed description, check
        Kubernetes V1PodDNSConfig definition
        https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1PodDNSConfig.md

    Returns:
      self, to allow chained configuration calls (added for consistency with
      the other fluent setters on this class).

    Example:
      ::

        import kfp
        from kubernetes.client.models import V1PodDNSConfig, V1PodDNSConfigOption
        pipeline_conf = kfp.dsl.PipelineConf()
        pipeline_conf.set_dns_config(dns_config=V1PodDNSConfig(
            nameservers=["1.2.3.4"],
            options=[V1PodDNSConfigOption(name="ndots", value="2")],
        ))
    """
    self.dns_config = dns_config
    return self

  @property
  def data_passing_method(self):
    # The object representing the method used for intermediate data passing.
    return self._data_passing_method

  @data_passing_method.setter
  def data_passing_method(self, value):
    """Sets the object representing the method used for intermediate data passing.

    Example:
      ::

        from kfp.dsl import PipelineConf, data_passing_methods
        from kubernetes.client.models import V1Volume, V1PersistentVolumeClaimVolumeSource
        pipeline_conf = PipelineConf()
        pipeline_conf.data_passing_method =
        data_passing_methods.KubernetesVolume(
            volume=V1Volume(
                name='data',
                persistent_volume_claim=V1PersistentVolumeClaimVolumeSource('data-volume'),
            ),
            path_prefix='artifact_data/',
        )
    """
    self._data_passing_method = value


def get_pipeline_conf():
  """Returns the PipelineConf of the pipeline currently being defined.

  Note: call the function inside the user defined pipeline function.
  """
  current_pipeline = Pipeline.get_default_pipeline()
  return current_pipeline.conf


# TODO: Pipeline is in fact an opsgroup, refactor the code.
class Pipeline():
  """一个pipeline包含一个操作符列表。

  这个类不应该被pipeline作者使用，因为pipeline作者可以使用pipeline函数（用@pipeline装饰）来引用他们的pipeline。
  这个类对于实现编译器很有用。例如，编译器可以使用以下代码来获取pipeline对象及其操作：

  示例:
    ::

      with Pipeline() as p:
        pipeline_func(*args_list)

      traverse(p.ops)
  """
  """A pipeline contains a list of operators.

  This class is not supposed to be used by pipeline authors since pipeline
  authors can use pipeline functions (decorated with @pipeline) to reference
  their pipelines.
  This class is useful for implementing a compiler. For example, the compiler
  can use the following to get the pipeline object and its ops:

  Example:
    ::

      with Pipeline() as p:
        pipeline_func(*args_list)

      traverse(p.ops)
  """

  # _default_pipeline在（通常是编译器）运行"with Pipeline()"时设置
  # _default_pipeline is set when it (usually a compiler) runs "with Pipeline()"
  _default_pipeline = None

  @staticmethod
  def get_default_pipeline():
    """获取默认pipeline。"""
    """Get default pipeline. """
    return Pipeline._default_pipeline

  @staticmethod
  def add_pipeline(name, description, func):
    """添加一个具有指定名称和描述的pipeline函数。"""
    """Add a pipeline function with the specified name and description."""
    # 将@pipeline装饰器应用于pipeline函数
    # Applying the @pipeline decorator to the pipeline function
    func = pipeline(name=name, description=description)(func)

  def __init__(self, name: str):
    """创建一个新的Pipeline实例。

    参数:
      name: pipeline的名称。部署后，该名称将显示在Pipeline System UI中。
    """
    """Create a new instance of Pipeline.

    Args:
      name: the name of the pipeline. Once deployed, the name will show up in
        Pipeline System UI.
    """
    self.name = name
    self.ops = {}
    # 添加根组。
    # Add the root group.
    self.groups = [_ops_group.OpsGroup('pipeline', name=name)]
    self.group_id = 0
    self.conf = PipelineConf()
    self._metadata = None

  def __enter__(self):
    # 进入上下文管理器
    # Enter the context manager
    if Pipeline._default_pipeline:
      raise Exception('Nested pipelines are not allowed.')

    Pipeline._default_pipeline = self
    self._old_container_task_constructor = (
        _components._container_task_constructor)
    _components._container_task_constructor = (
        _component_bridge._create_container_op_from_component_and_arguments)

    def register_op_and_generate_id(op):
      return self.add_op(op, op.is_exit_handler)

    self._old__register_op_handler = _container_op._register_op_handler
    _container_op._register_op_handler = register_op_and_generate_id
    return self

  def __exit__(self, *args):
    # 退出上下文管理器
    # Exit the context manager
    Pipeline._default_pipeline = None
    _container_op._register_op_handler = self._old__register_op_handler
    _components._container_task_constructor = (
        self._old_container_task_constructor)

  def add_op(self, op: _container_op.BaseOp, define_only: bool):
    """添加一个新操作符。

    参数:
      op: ContainerOp, ResourceOp或其继承类型的操作符。
        返回
      op_name: 一个唯一的op名称。
    """
    """Add a new operator.

    Args:
      op: An operator of ContainerOp, ResourceOp or their inherited types.
        Returns
      op_name: a unique op name.
    """
    # 清理op名称。
    # 技术上这可以推迟到编译阶段，但PipelineParams的字符串序列化使得未清理的名称存在问题。
    # Sanitizing the op name.
    # Technically this could be delayed to the compilation stage, but string
    # serialization of PipelineParams make unsanitized names problematic.
    op_name = _naming._sanitize_python_function_name(op.human_name).replace(
        '_', '-')
    #如果存在同名op，则生成新名称。
    #If there is an existing op with this name then generate a new name.
    op_name = _naming._make_name_unique_by_adding_index(op_name,
                                                        list(self.ops.keys()),
                                                        ' ')
    if op_name == '':
      op_name = _naming._make_name_unique_by_adding_index(
          'task', list(self.ops.keys()), ' ')

    self.ops[op_name] = op
    if not define_only:
      self.groups[-1].ops.append(op)

    return op_name

  def push_ops_group(self, group: _ops_group.OpsGroup):
    """将一个OpsGroup推入堆栈。

    参数:
      group: 一个OpsGroup。通常是ExitHandler, Branch, 和 Loop之一。
    """
    """Push an OpsGroup into the stack.

    Args:
      group: An OpsGroup. Typically it is one of ExitHandler, Branch, and Loop.
    """
    self.groups[-1].groups.append(group)
    self.groups.append(group)

  def pop_ops_group(self):
    """从堆栈中移除当前的OpsGroup。"""
    """Remove the current OpsGroup from the stack."""
    del self.groups[-1]

  def remove_op_from_groups(self, op):
    # 从组中递归删除操作
    # Recursively remove an op from groups
    for group in self.groups:
      group.remove_op_recursive(op)

  def get_next_group_id(self):
    """获取新组的下一个id。"""
    """Get next id for a new group. """

    self.group_id += 1
    return self.group_id

  def _set_metadata(self, metadata):
    """_set_metadata将元数据信息传递给containerop

    参数:
      metadata (ComponentMeta): 组件元数据
    """
    """_set_metadata passes the containerop the metadata information

    Args:
      metadata (ComponentMeta): component metadata
    """
    self._metadata = metadata