# -*- coding: utf-8 -*-
from pprint import pprint
from collections import defaultdict
from itertools import groupby
from django.conf import settings

import env
from blueapps.account.decorators import login_exempt
from gcloud import err_code
from gcloud.apigw.decorators import mark_request_whether_is_trust, return_json_response
from gcloud.apigw.decorators import project_inject
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.taskflow3.celery.tasks import prepare_and_start_task
from gcloud.taskflow3.domains.queues import PrepareAndStartTaskQueueResolver
from gcloud.iam_auth.intercept import iam_intercept
from gcloud.iam_auth.view_interceptors.apigw import TaskOperateInterceptor
from gcloud.template_base.utils import inject_template_node_id
from pipeline.core.constants import PE
from gcloud.utils.throttle import check_task_operation_throttle
from gcloud.contrib.operate_record.decorators import record_operation
from gcloud.contrib.operate_record.constants import RecordType, OperateType, OperateSource
from apigw_manager.apigw.decorators import apigw_require
from gcloud.utils.strings import standardize_name
from gcloud.constants import (
    ONETIME,
    TASK_CATEGORY,
    TASK_NAME_MAX_LENGTH,
    TaskCreateMethod,
)
from gcloud.apigw.views.utils import logger
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.tasktmpl3.models import TaskTemplate
from pipeline.models import PipelineInstance
from pipeline.exceptions import PipelineException
from gcloud import err_code
from gcloud.core.models import Project

from pipeline.utils.uniqid import node_uniqid
from pipeline_web.core.models import NodeInInstance

from bamboo_engine.builder import (
    build_tree,
    builder,
    EmptyStartEvent,
    ServiceActivity,
    EmptyEndEvent,
    ParallelGateway,
    ExclusiveGateway,
    ConvergeGateway,
    SubProcess,
    Var,
    Data,
    NodeOutput
)


TEST_PROJECT_ID = "2"  # do not change this to non number
# Pre-generated unique node ids shared by every test pipeline tree below.
TEST_ID_LIST = [node_uniqid() for i in range(15)]
# Minimal test pipeline: start event -> one bk_display activity -> end event.
TEST_PIPELINE_TREE = {
    "id": TEST_ID_LIST[0],
    "name": "name",
    "start_event": {
        "id": TEST_ID_LIST[1],
        "name": "start",
        "type": "EmptyStartEvent",
        "incoming": None,
        "outgoing": TEST_ID_LIST[5],
    },
    "end_event": {
        "id": TEST_ID_LIST[2],
        "name": "end",
        "type": "EmptyEndEvent",
        "incoming": TEST_ID_LIST[6],
        "outgoing": None,
    },
    "activities": {
        TEST_ID_LIST[3]: {
            "id": TEST_ID_LIST[3],
            "type": "ServiceActivity",
            "name": "first_task",
            "incoming": TEST_ID_LIST[5],
            "outgoing": TEST_ID_LIST[6],
            "optional": True,
            "component": {
                "code": "bk_display",
                "data": {
                    "bk_display_message": {
                        "hook": False,
                        "value": ""
                    }
                },
                "version": "1.0"
            },
            'retryable': True,
            'skippable': True,
            'error_ignorable': False,
            'stage_name': 'test_pipline_tree'
        }
    },
    "flows": {  # every flow (edge) of this pipeline
        TEST_ID_LIST[5]: {"id": TEST_ID_LIST[5], "source": TEST_ID_LIST[1], "target": TEST_ID_LIST[3]},
        TEST_ID_LIST[6]: {"id": TEST_ID_LIST[6], "source": TEST_ID_LIST[3], "target": TEST_ID_LIST[2]}
    },
    "gateways": {},  # detailed gateway info (none in this tree)
    "constants": {}
}

# Unique node ids for the sub-pipeline trees below.
id_list3 = [node_uniqid() for i in range(15)]
# Sub-pipeline tree (start -> two chained 'test' activities -> end) used as
# the body of the SubProcess node in TEST_PIPELINE_TREE_WITH_SUB. The first
# activity exports 'key1' as ${global_key1}, which the second one consumes.
sub_pipeline = {
    'id': id_list3[0],
    'name': 'name',
    'start_event': {
        'id': id_list3[1],
        'name': 'start',
        'type': 'EmptyStartEvent',
        'incoming': None,
        'outgoing': id_list3[5]
    },
    'end_event': {
        'id': id_list3[2],
        'name': 'end',
        'type': 'EmptyEndEvent',
        'incoming': id_list3[7],
        'outgoing': None
    },
    'activities': {
        id_list3[3]: {
            'id': id_list3[3],
            'type': 'ServiceActivity',
            'name': 'first-s_task',
            'incoming': id_list3[5],
            'outgoing': id_list3[6],
            'component': {
                'code': 'test',
                'inputs': {
                    'input_test': {
                        'type': 'plain',
                        'value': 'before_after',
                    },
                    'radio_test': {
                        'type': 'plain',
                        'value': '1',
                    },
                },
                'global_outputs': {
                    'key1': '${global_key1}',
                }
            },
            'retryable': True,
            'skippable': True,
            'error_ignorable': False,
            'stage_name': 'test_pipline_tree_sub_pipe'
        },
        id_list3[4]: {
            'id': id_list3[4],
            'type': 'ServiceActivity',
            'name': 'first_task',
            'incoming': id_list3[6],
            'outgoing': id_list3[7],
            'component': {
                'code': 'test',
                'inputs': {
                    'input_test': {
                        'type': 'plain',
                        'value': 'value1'
                    },
                    'radio_test': {
                        'type': 'splice',
                        'value': 'before_${global_key1}'
                    },
                },
                'global_outputs': {

                }
            },
            'retryable': True,
            'skippable': True,
            'error_ignorable': False,
            'stage_name': 'test_pipline_tree_sub_pipe'
        },
    },
    'flows': {  # every flow (edge) of this pipeline
        id_list3[5]: {
            'id': id_list3[5],
            'source': id_list3[1],
            'target': id_list3[3]
        },
        id_list3[6]: {
            'id': id_list3[6],
            'source': id_list3[3],
            'target': id_list3[4]
        },
        id_list3[7]: {
            'id': id_list3[7],
            'source': id_list3[4],
            'target': id_list3[2]
        },
    },
    'gateways': {  # detailed gateway info (none in this tree)
    },
    'data': {
        'inputs': {
            '${demo_input_test}': {
                'type': 'plain',
                'value': 'value2'
            },
            '${global_key1}': {
                'type': 'splice',
                'source_act': id_list3[3],
                'source_key': 'key1',
                'value': '',
            },
            '${custom_key2}': {
                'type': 'splice',
                'value': 'aaa_${global_key1}',
            },
        }
    }
}
# Test tree with a normal activity followed by a SubProcess whose body is
# ``sub_pipeline``; two custom constants are declared, ${custom_key1} being
# referenced by the first activity's input.
TEST_PIPELINE_TREE_WITH_SUB = {
    "id": TEST_ID_LIST[0],
    "name": "name",
    "start_event": {
        "id": TEST_ID_LIST[1],
        "name": "start",
        "type": "EmptyStartEvent",
        "incoming": None,
        "outgoing": TEST_ID_LIST[2],
    },
    "end_event": {
        "id": TEST_ID_LIST[7],
        "name": "end",
        "type": "EmptyEndEvent",
        "incoming": TEST_ID_LIST[6],
        "outgoing": None,
    },
    "activities": {
        TEST_ID_LIST[3]: {
            "id": TEST_ID_LIST[3],
            "type": "ServiceActivity",
            "name": "first_task",
            "incoming": TEST_ID_LIST[2],
            "outgoing": TEST_ID_LIST[4],
            "optional": True,
            "component": {
                "code": "test",
                "data": {
                    "input_test": {"hook": False, "value": "${custom_key1}"},
                    "radio_test": {"hook": False, "value": "1"},
                },
            },
            'retryable': True,
            'skippable': True,
            'error_ignorable': False,
            'stage_name': 'test_pipline_tree'
        },
        TEST_ID_LIST[5]: {
            "id": TEST_ID_LIST[5],
            "type": "SubProcess",
            "name": "second_task",
            "template_id": 99,
            "incoming": TEST_ID_LIST[4],
            "outgoing": TEST_ID_LIST[6],
            'pipeline': sub_pipeline,
            'retryable': True,
            'skippable': True,
            'exposed_constants': [],
            'hooked_constants': [],
            'params': {}
        }
    },
    "flows": {  # every flow (edge) of this pipeline
        TEST_ID_LIST[2]: {"id": TEST_ID_LIST[2], "source": TEST_ID_LIST[1], "target": TEST_ID_LIST[3]},
        TEST_ID_LIST[4]: {"id": TEST_ID_LIST[4], "source": TEST_ID_LIST[3], "target": TEST_ID_LIST[5]},
        TEST_ID_LIST[6]: {"id": TEST_ID_LIST[6], "source": TEST_ID_LIST[5], "target": TEST_ID_LIST[7]}
    },
    "gateways": {},  # detailed gateway info (none in this tree)
    "constants": {
        "${custom_key1}": {
            "index": 0,
            "name": "input1",
            "key": "${custom_key1}",
            "desc": "",
            "validation": "^.*$",
            "show_type": "show",
            "value": "value1",
            "source_type": "custom",
            "source_tag": "",
            "source_info": {},
            "custom_type": "input",
        },
        "${custom_key2}": {
            "index": 1,
            "name": "input2",
            "key": "${custom_key2}",
            "desc": "",
            "validation": "^.*$",
            "show_type": "show",
            "value": "value1",
            "source_type": "custom",
            "source_tag": "",
            "source_info": {},
            "custom_type": "input",
        },
    },
    "outputs": [],
}

# Instance variant of the sub-pipeline above: same shape, bk_display
# components, and the first activity additionally carries a
# 'template_node_id' field.
sub_pipeline_inst_tree = {
    'id': id_list3[0],
    'name': 'name',
    'start_event': {
        'id': id_list3[1],
        'name': 'start',
        'type': 'EmptyStartEvent',
        'incoming': None,
        'outgoing': id_list3[5]
    },
    'end_event': {
        'id': id_list3[2],
        'name': 'end',
        'type': 'EmptyEndEvent',
        'incoming': id_list3[7],
        'outgoing': None
    },
    'activities': {
        id_list3[3]: {
            'id': id_list3[3],
            'type': 'ServiceActivity',
            'name': 'first-s_task',
            'incoming': id_list3[5],
            'outgoing': id_list3[6],
            "component": {
                "code": "bk_display",
                "data": {
                    "bk_display_message": {
                        "hook": False,
                        "value": ""
                    }
                },
                "version": "1.0"
            },
            'retryable': True,
            'skippable': True,
            'error_ignorable': False,
            'stage_name': 'test_pipline_tree_sub_pipe_inst',
            'template_node_id': id_list3[8]
        },
        id_list3[4]: {
            'id': id_list3[4],
            'type': 'ServiceActivity',
            'name': 'first_task',
            'incoming': id_list3[6],
            'outgoing': id_list3[7],
            "component": {
                "code": "bk_display",
                "data": {
                    "bk_display_message": {
                        "hook": False,
                        "value": ""
                    }
                },
                "version": "1.0"
            },
            'retryable': True,
            'skippable': True,
            'error_ignorable': False,
            'stage_name': 'test_pipline_tree_sub_pipe_inst'
        },
    },
    'flows': {  # every flow (edge) of this pipeline
        id_list3[5]: {
            'id': id_list3[5],
            'source': id_list3[1],
            'target': id_list3[3]
        },
        id_list3[6]: {
            'id': id_list3[6],
            'source': id_list3[3],
            'target': id_list3[4]
        },
        id_list3[7]: {
            'id': id_list3[7],
            'source': id_list3[4],
            'target': id_list3[2]
        },
    },
    'gateways': {  # detailed gateway info (none in this tree)
    },
    "constants": {},
    "outputs": []
}
# Instance variant of TEST_PIPELINE_TREE_WITH_SUB: the first activity carries
# a 'template_node_id' and the SubProcess embeds sub_pipeline_inst_tree.
TEST_PIPELINE_TREE_WITH_SUB_INST_TREE = {
    "id": TEST_ID_LIST[0],
    "name": "name99",
    "start_event": {
        "id": TEST_ID_LIST[1],
        "name": "start",
        "type": "EmptyStartEvent",
        "incoming": None,
        "outgoing": TEST_ID_LIST[2],
    },
    "end_event": {
        "id": TEST_ID_LIST[7],
        "name": "end",
        "type": "EmptyEndEvent",
        "incoming": TEST_ID_LIST[6],
        "outgoing": None,
    },
    "activities": {
        TEST_ID_LIST[3]: {
            "id": TEST_ID_LIST[3],
            "type": "ServiceActivity",
            "name": "first_task",
            "incoming": TEST_ID_LIST[2],
            "outgoing": TEST_ID_LIST[4],
            "optional": True,
            "component": {
                "code": "bk_display",
                "data": {
                    "bk_display_message": {
                        "hook": False,
                        "value": ""
                    }
                },
                "version": "1.0"
            },
            'retryable': True,
            'skippable': True,
            'error_ignorable': False,
            'stage_name': 'test_pipline_tree_inst',
            'template_node_id': TEST_ID_LIST[9]
        },
        TEST_ID_LIST[5]: {
            "id": TEST_ID_LIST[5],
            "type": "SubProcess",
            "name": "second_task",
            "template_id": TEST_ID_LIST[12],
            "incoming": TEST_ID_LIST[4],
            "outgoing": TEST_ID_LIST[6],
            'retryable': True,
            'skippable': True,
            'pipeline': sub_pipeline_inst_tree,
            'exposed_constants': [],
            'hooked_constants': [],
            'params': {}
        }
    },
    "flows": {  # every flow (edge) of this pipeline
        TEST_ID_LIST[2]: {"id": TEST_ID_LIST[2], "source": TEST_ID_LIST[1], "target": TEST_ID_LIST[3]},
        TEST_ID_LIST[4]: {"id": TEST_ID_LIST[4], "source": TEST_ID_LIST[3], "target": TEST_ID_LIST[5]},
        TEST_ID_LIST[6]: {"id": TEST_ID_LIST[6], "source": TEST_ID_LIST[5], "target": TEST_ID_LIST[7]}
    },
    "gateways": {},  # detailed gateway info (none in this tree)
    "constants": {},
    "outputs": [],
}







class JobTask:
    """Lightweight container pairing a task id with its extra metadata.

    ``extra_data`` carries the nested topo/priority structure consumed by
    the pipeline-building helpers below.
    """

    def __init__(self, id, extra_data):
        # Store both values verbatim; no validation or copying.
        self.id, self.extra_data = id, extra_data

def parallel_unit(income_node, act_list):
    """Append ``act_list`` to ``income_node`` as a parallel fragment.

    For two or more activities the fragment is fenced by gateways:

                income_node
                     |
             ParallelGateway
                     |
            -------------------
            |        |        |
          prio=n   prio=n   prio=n
           act1    act1    act1
            |        |        |
            -------------------
                     |
               ConvergeGateway

    For exactly one activity no gateways are needed:
        income_node -> prio=n act1 ->

    :param income_node: node object the fragment is appended to
    :param act_list: list of activity objects to run in parallel
    :return: the fragment's tail node, or None when act_list is empty
    """
    if not act_list:
        return None
    if len(act_list) == 1:
        # Single activity: chain it directly, no fan-out required.
        return income_node.extend(act_list[0])
    # Fan out through a ParallelGateway, then fan back in at a ConvergeGateway.
    fan_out = ParallelGateway()
    fan_in = ConvergeGateway()
    return income_node.extend(fan_out).connect(*act_list).to(fan_out).converge(fan_in)


def serial_unit(income_node, ordered_act_list):
    """Chain ``ordered_act_list`` serially onto ``income_node``.

    :param income_node: node object the serial chain is appended to
    :param ordered_act_list: activity objects, already in execution order
    :return: the last chained activity, or None when the list is empty
    """
    if not ordered_act_list:
        return None
    # extend() returns the appended node, so folding over the list keeps the
    # "current tail" advancing one activity at a time. This also covers the
    # single-element case, which previously had its own redundant branch
    # (and a trailing unreachable `pass`).
    tail = income_node
    for act in ordered_act_list:
        tail = tail.extend(act)
    return tail



def add_pipe():
    """
                     |--[a,p3]------|     |----[a,p5]---|
     [Start]-[a,p1]-[x]            [x]--[x]----[a,p5]--[x]--[a,p6]--[End]
                     |--[a,p3]-----|     |----[a,p5]---|
    Demo pipeline: each JobTask stands for one application; the node names
    are irrelevant and the application priority acts as the batch priority.

    :return: {"pipeline": built pipeline tree}
    """
    # NOTE: the redundant function-local `from itertools import groupby` was
    # removed — groupby is already imported at module level.

    # Sample JobTask objects.
    job_task1 = JobTask(1,
                        {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 1}}}})
    job_task2 = JobTask(2,
                        {"topo_level_info": {"node_key_field": "node2", "process_info": {"process": {"priority": 2}}}})
    job_task3 = JobTask(3,
                        {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 2}}}})
    job_task4 = JobTask(4,
                        {"topo_level_info": {"node_key_field": "node3", "process_info": {"process": {"priority": 2}}}})
    job_task5 = JobTask(5,
                        {"topo_level_info": {"node_key_field": "node2", "process_info": {"process": {"priority": 3}}}})
    # Deliberately duplicates job_task2's node/priority to exercise the
    # duplicate-priority case (see original note: p2 acts are generated once
    # per task regardless of duplication).
    job_task6 = JobTask(6,
                        {"topo_level_info": {"node_key_field": "node2", "process_info": {"process": {"priority": 2}}}})
    job_tasks = [job_task1, job_task2, job_task3, job_task4, job_task5, job_task6]

    # Sort by priority; weights = -1 would reverse the order.
    weights = 1
    ordered_job_tasks = sorted(
        job_tasks,
        key=lambda x: weights * x.extra_data["topo_level_info"]["process_info"]["process"]["priority"],
    )
    # groupby requires input sorted by the same key; yields priority -> tasks.
    grouped_job_tasks = groupby(
        ordered_job_tasks, lambda x: x.extra_data["topo_level_info"]["process_info"]["process"]["priority"]
    )

    # Grow the chain from the start event, one priority batch at a time.
    start_event = EmptyStartEvent()
    head = start_event
    for priority, priority_job_tasks in grouped_job_tasks:
        logger.info(f"priority[{priority}]")
        act_list = []
        for job_task in priority_job_tasks:
            logger.info(f"    job_task[{job_task.id}]")
            act = ServiceActivity(
                component_code='pipe_example_component', name='task' + str(job_task.id) + '_act_p' + str(priority)
            )
            act_list.append(act)
        # Each batch is attached to the tail of the previous batch (the start
        # event for the first batch), replacing the old index == 0 special case.
        head = parallel_unit(head, act_list)

    end_event = EmptyEndEvent()
    head.extend(end_event)
    pipeline = builder.build_tree(start_event)
    pprint(pipeline)
    return {"pipeline": pipeline}

def build_parallel_groups_tree(groups):
    """
    Build a tree in which the groups run in parallel with one another while
    the hosts inside each group run serially or in parallel according to the
    group's ``my_hosts_pipeline_type``.

    :param groups: list of dicts with group_name, my_hosts_pipeline_type
        ("serial"/"parallel") and hosts (each with host_name and, for serial
        groups, host_serial_priority)
    :return: {"pipeline": built tree, "start_event_obj": start event node}
    """
    start_event = EmptyStartEvent()
    end_event = EmptyEndEvent()

    # 1. Walk each group, recording its entry and exit nodes so the groups
    #    can later be fanned out behind a single parallel gateway.
    main_converge_gw = ConvergeGateway()
    processed_groups = []
    for group in groups:
        group_name = group["group_name"]
        my_hosts_pipeline_type = group["my_hosts_pipeline_type"]
        my_hosts = group["hosts"]
        print(f"处理组: { group['group_name'] }, 并行")
        if my_hosts_pipeline_type == "serial":
            # Serial hosts: order by host_serial_priority, then chain them.
            sorted_hosts = sorted(my_hosts, key=lambda x: x["host_serial_priority"])
            sorted_seri_acts = []
            first_act = None
            prev_act = None
            for host in sorted_hosts:
                print(f"  串行处理主机: {host['host_name']} (优先级: {host['host_serial_priority']})")
                act = ServiceActivity(component_code='pipe_example_component',
                                      name=str(group['group_name'])
                                           + '_'+ str(host['host_name']) +'_p' + str(host['host_serial_priority']))
                sorted_seri_acts.append(act)
                if prev_act:
                    prev_act.extend(act)
                else:
                    first_act = act  # entry node of the serial chain
                prev_act = act

            # Record the group's entry and exit nodes.
            processed_groups.append({
                "entry": first_act,
                "exit": sorted_seri_acts[-1]
            })
        elif my_hosts_pipeline_type == "parallel":
            if len(my_hosts) == 1:
                # BUGFIX: ``host`` used to be printed BEFORE being assigned
                # (NameError on the first such group, or a stale host from a
                # previous iteration), and the message wrongly said 串行
                # (serial). Bind it first and label the message as parallel.
                host = my_hosts[0]
                print(f"  并行处理主机: {host['host_name']} ")
                # Parallel group with a single host: one activity, no gateways.
                act = ServiceActivity(
                    component_code="pipe_example_component",
                    name=f"{group_name}_{host['host_name']}_parall"
                )
                # Entry and exit are both the activity itself.
                processed_groups.append({"entry": act, "exit": act})
            else:
                for host in my_hosts:
                    print(f"  并行处理主机: {host['host_name']} ")
                # Parallel group with several hosts: fan out through a child
                # parallel gateway and converge afterwards.
                parallel_gw = ParallelGateway(name=f"{group_name}_")
                converge_gw = ConvergeGateway()
                activities = [
                    ServiceActivity(
                        component_code="pipe_example_component",
                        name=f"{group_name}_{host['host_name']}"
                    ) for host in my_hosts
                ]
                # Gateway -> every activity -> converge gateway.
                parallel_gw.connect(*activities)
                for act in activities:
                    act.extend(converge_gw)
                processed_groups.append({"entry": parallel_gw, "exit": converge_gw})

    # Fan the group entries out behind the main parallel gateway; a single
    # group is connected to the start event directly.
    if len(processed_groups) > 1:
        main_parallel_gw = ParallelGateway()
        start_event.extend(main_parallel_gw)
        main_parallel_gw.connect(*[group["entry"] for group in processed_groups])\
            .to(main_parallel_gw).converge(main_converge_gw).extend(end_event)
    else:
        start_event.extend(processed_groups[0]["entry"]).tail().extend(end_event)

    # Build and return the pipeline tree.
    pipeline = builder.build_tree(start_event)
    return {"pipeline": pipeline, "start_event_obj": start_event}

def build_serial_groups_tree(groups):
    """
    Build a tree where the groups run one after another (ordered by
    group_serial_priority) and the hosts inside each group run serially or
    in parallel according to my_hosts_pipeline_type.

    :param groups: List<group_name, group_serial_priority,
        my_hosts_pipeline_type, hosts:List<>>
    :return: {"pipeline": built tree, "start_event_obj": start event node}
    """
    # Order the groups by their serial priority.
    sorted_groups = sorted(groups, key=lambda x: x["group_serial_priority"])
    start_event = EmptyStartEvent()
    end_event = EmptyEndEvent()
    chain_tail = None  # tail of the chain grown from start_event so far
    for group_idx, group in enumerate(sorted_groups):
        print(f"处理组: { group['group_name'] },"
              f"prior: {str(group['group_serial_priority'])}, ")
        my_hosts = group.get("hosts")
        if group["my_hosts_pipeline_type"] == "serial":
            # Serial hosts: order by host_serial_priority and chain in order.
            acts = []
            for host in sorted(my_hosts, key=lambda x: x["host_serial_priority"]):
                print(f"  串行处理主机: {host['host_name']} (优先级: {host['host_serial_priority']})")
                acts.append(ServiceActivity(component_code='pipe_example_component',
                                            name=str(group['group_name'])
                                                 + '_'+ str(host['host_name']) +'_p' + str(host['host_serial_priority'])))
            link = serial_unit
        else:
            # Parallel hosts: no ordering needed.
            acts = []
            for host in my_hosts:
                print(f"  并行处理主机: {host['host_name']} (优先级: {host['host_serial_priority']})")
                acts.append(ServiceActivity(component_code='pipe_example_component',
                                            name=str(group['group_name'])
                                                 + '_'+ str(host['host_name']) +'_prall'))
            link = parallel_unit
        # The first group hangs off the start event; every later group is
        # appended to the tail left by the previous iteration.
        head = start_event if group_idx == 0 else chain_tail
        chain_tail = link(head, acts)

    # Close the chain with the end event.
    chain_tail.extend(end_event)

    # Build and return the pipeline tree.
    pipeline = builder.build_tree(start_event)
    return {"pipeline": pipeline, "start_event_obj": start_event}


def build_apps_pipeline_tree(apps):
    """
    Sort and group apps by priority, then build a pipeline tree in which
    each priority batch runs in parallel (one SubProcess per app) and the
    batches run one after another.

    :param apps: list of app dicts (app_name, app_priority,
        group_pipeline_type, groups)
    :return: {"pipeline": built pipeline tree}
    :raises ValueError: when an app has an unknown group_pipeline_type
    """
    # NOTE: the redundant function-local `from itertools import groupby` was
    # removed — groupby is already imported at module level.

    # Sort by app_priority; weights = -1 would reverse the order.
    weights = 1
    ordered_apps = sorted(
        apps,
        key=lambda x: weights * x["app_priority"]
    )

    # groupby requires input sorted by the same key; yields priority -> apps.
    grouped_apps = groupby(
        ordered_apps, lambda x: x["app_priority"]
    )

    # Grow the chain from the start event, one priority batch at a time.
    start_event = EmptyStartEvent()
    head = start_event

    for priority, priority_apps in grouped_apps:
        logger.info(f"Processing apps with priority[{priority}]")
        subproc_list = []

        # One SubProcess per app in the current priority batch.
        for app in priority_apps:
            logger.info(f"    App[{app['app_name']}]")
            # Pick the group-tree builder from the app's pipeline type.
            if app["group_pipeline_type"] == "serial":
                subproc_dict = build_serial_groups_tree(app["groups"])
            elif app["group_pipeline_type"] == "parallel":
                subproc_dict = build_parallel_groups_tree(app["groups"])
            else:
                raise ValueError(f"Unknown group_pipeline_type: {app['group_pipeline_type']}")

            # The data object was built identically in both branches; build it
            # once per app here instead.
            subproc_data = Data(inputs={'${sub_input}': Var(type=Var.PLAIN, value=1)})
            subproc_obj = SubProcess(name=app['app_name'],
                                     start=subproc_dict["start_event_obj"], data=subproc_data)
            subproc_list.append(subproc_obj)

        # Each batch is attached to the tail of the previous batch (the start
        # event for the first batch), replacing the old index == 0 special case.
        head = parallel_unit(head, subproc_list)

    # Close the chain and build the tree.
    end_event = EmptyEndEvent()
    head.extend(end_event)
    pipeline = builder.build_tree(start_event)
    return {"pipeline": pipeline}

def my_apps3():
    """
    Return a fixture of five apps, each with nested groups and hosts; every
    host also carries a ``sops_task`` payload (id + vars). Several apps and
    groups share the same priority value, so grouping logic is exercised.
    """
    apps = [
        {
            "app_name": "app1", "app_priority": 1,
            "groups": [
                {
                    "group_name": "group1", "group_priority": 5,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 1, "sops_task":{"id":1,"vars":{"name":"app1group1host1","port":"9090"} } },
                        {"host_name": "host2", "host_priority": 7, "sops_task":{"id":1,"vars":{"name":"app1group1host2","port":"9090"} } },
                        {"host_name": "host3", "host_priority": 1, "sops_task":{"id":1,"vars":{"name":"app1group1host3","port":"9090"} } }
                    ]
                },
                {
                    "group_name": "group2", "group_priority": 5,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 4, "sops_task":{"id":1,"vars":{"name":"app1group2host1","port":"9090"} } },
                        {"host_name": "host2", "host_priority": 4, "sops_task":{"id":1,"vars":{"name":"app1group2host2","port":"9090"} } }
                    ]
                },
                {
                    "group_name": "group3", "group_priority": 5,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 2, "sops_task": {"id": 1, "vars": {"name": "app1group3host1", "port": "9090"}}},
                        {"host_name": "host2", "host_priority": 2, "sops_task": {"id": 1, "vars": {"name": "app1group3host2", "port": "9090"}}}
                    ]
                }
            ]
        },
        {
            "app_name": "app2", "app_priority": 2,
            "groups": [
                {
                    "group_name": "group1", "group_priority": 8,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 2, "sops_task": {"id": 1, "vars": {"name": "app2group1host1", "port": "9090"}}},
                        {"host_name": "host2", "host_priority": 5, "sops_task": {"id": 1, "vars": {"name": "app2group1host2", "port": "9090"}}},
                        {"host_name": "host3", "host_priority": 6, "sops_task": {"id": 1, "vars": {"name": "app2group1host3", "port": "9090"}}}
                    ]
                }
            ]
        },
        {
            "app_name": "app3", "app_priority": 2,
            "groups": [
                {
                    "group_name": "group1", "group_priority": 3,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 1, "sops_task": {"id": 1, "vars": {"name": "app3group1host1", "port": "9090"}}},
                        {"host_name": "host2", "host_priority": 8, "sops_task": {"id": 1, "vars": {"name": "app3group1host2", "port": "9090"}}},
                        {"host_name": "host3", "host_priority": 4, "sops_task": {"id": 1, "vars": {"name": "app3group1host3", "port": "9090"}}},
                        {"host_name": "host4", "host_priority": 1, "sops_task": {"id": 1, "vars": {"name": "app3group1host4", "port": "9090"}}}
                    ]
                },
                {
                    "group_name": "group2", "group_priority": 3,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 9, "sops_task": {"id": 1, "vars": {"name": "app3group2host1", "port": "9090"}}},
                        {"host_name": "host2", "host_priority": 2, "sops_task": {"id": 1, "vars": {"name": "app3group2host2", "port": "9090"}}}
                    ]
                },
                {
                    "group_name": "group3", "group_priority": 1,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 3, "sops_task": {"id": 1, "vars": {"name": "app3group3host1", "port": "9090"}}}
                    ]
                }
            ]
        },
        {
            "app_name": "app4", "app_priority": 6,
            "groups": [
                {
                    "group_name": "group1", "group_priority": 7,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 5, "sops_task": {"id": 1, "vars": {"name": "app4group1host1", "port": "9090"}}},
                        {"host_name": "host2", "host_priority": 6, "sops_task": {"id": 1, "vars": {"name": "app4group1host2", "port": "9090"}}}
                    ]
                },
                {
                    "group_name": "group2", "group_priority": 2,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 2, "sops_task": {"id": 1, "vars": {"name": "app4group2host1", "port": "9090"}}},
                        {"host_name": "host2", "host_priority": 3, "sops_task": {"id": 1, "vars": {"name": "app4group2host2", "port": "9090"}}}
                    ]
                },
                {
                    "group_name": "group3", "group_priority": 9,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 1, "sops_task": {"id": 1, "vars": {"name": "app4group3host1", "port": "9090"}}}
                    ]
                },
                {
                    "group_name": "group4", "group_priority": 2,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 4, "sops_task": {"id": 1, "vars": {"name": "app4group4host1", "port": "9090"}}},
                        {"host_name": "host2", "host_priority": 7, "sops_task": {"id": 1, "vars": {"name": "app4group4host2", "port": "9090"}}}
                    ]
                }
            ]
        },
        {
            "app_name": "app5", "app_priority": 6,
            "groups": [
                {
                    "group_name": "group1", "group_priority": 8,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 3, "sops_task": {"id": 1, "vars": {"name": "app5group1host1", "port": "9090"}}}
                    ]
                },
                {
                    "group_name": "group2", "group_priority": 8,
                    "hosts": [
                        {"host_name": "host1", "host_priority": 5, "sops_task": {"id": 1, "vars": {"name": "app5group2host1", "port": "9090"}}},
                        {"host_name": "host2", "host_priority": 5, "sops_task": {"id": 1, "vars": {"name": "app5group2host2", "port": "9090"}}}
                    ]
                }
            ]
        }
    ]
    return apps

def my_apps2():
    """Return a sample app fixture for pipeline-building tests.

    The ``*_priority`` fields are batch numbers: items sharing a priority
    value form one batch, and batches run in ascending-priority order.
    For example, within one batch of apps, a group's hosts are themselves
    split into host-priority batches.

    :return: list of app dicts (app_name, app_priority, groups).
    """
    def _host(name, priority):
        return {"host_name": name, "host_priority": priority}

    def _group(name, priority, hosts):
        # group_priority is the batch number of this group within its app
        return {"group_name": name, "group_priority": priority, "hosts": hosts}

    def _app(name, priority, groups):
        return {"app_name": name, "app_priority": priority, "groups": groups}

    return [
        _app("app1", 2, [
            _group("group1", 5, [_host("host1", 1), _host("host2", 7), _host("host3", 1)]),
            _group("group2", 5, [_host("host1", 4), _host("host2", 9)]),
        ]),
        _app("app2", 2, [
            _group("group1", 8, [_host("host1", 2), _host("host2", 5), _host("host3", 6)]),
        ]),
        _app("app3", 4, [
            _group("group1", 3, [_host("host1", 1), _host("host2", 8), _host("host3", 4), _host("host4", 1)]),
            _group("group2", 3, [_host("host1", 9), _host("host2", 2)]),
            _group("group3", 1, [_host("host1", 3)]),
        ]),
        _app("app4", 5, [
            _group("group1", 7, [_host("host1", 5), _host("host2", 6)]),
            _group("group2", 2, [_host("host1", 2), _host("host2", 3)]),
            _group("group3", 9, [_host("host1", 1)]),
            _group("group4", 2, [_host("host1", 4), _host("host2", 7)]),
        ]),
        _app("app5", 5, [
            _group("group1", 8, [_host("host1", 3)]),
            _group("group2", 8, [_host("host1", 5), _host("host2", 5)]),
        ]),
    ]

def my_apps():
    """Return a sample app fixture with explicit pipeline types.

    Each app declares ``group_pipeline_type`` ("serial" | "parallel") for
    how its groups run relative to each other, and each group declares
    ``my_hosts_pipeline_type`` for how its hosts run; the
    ``*_serial_priority`` fields give the serial ordering.

    :return: list of app dicts (app_name, app_priority,
        group_pipeline_type, groups).
    """
    # Fix: the original had an unreachable `pass` after this return.
    return [
        {
            "app_name": "app1", "app_priority": 2, "group_pipeline_type": "serial",  # groups run serially
            "groups": [
                {
                    "group_name": "group1", "group_serial_priority": 5, "my_hosts_pipeline_type": "parallel",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 3},
                        {"host_name": "host2", "host_serial_priority": 7},
                        {"host_name": "host3", "host_serial_priority": 1}
                    ]
                },
                {
                    "group_name": "group2", "group_serial_priority": 2, "my_hosts_pipeline_type": "serial",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 4},
                        {"host_name": "host2", "host_serial_priority": 9}
                    ]
                }
            ]
        },
        {
            "app_name": "app2", "app_priority": 2, "group_pipeline_type": "parallel",
            "groups": [
                {
                    "group_name": "group1", "group_serial_priority": 8, "my_hosts_pipeline_type": "serial",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 2},
                        {"host_name": "host2", "host_serial_priority": 5},
                        {"host_name": "host3", "host_serial_priority": 6}
                    ]
                }
            ]
        },
        {
            "app_name": "app3", "app_priority": 4, "group_pipeline_type": "serial",
            "groups": [
                {
                    "group_name": "group1", "group_serial_priority": 3, "my_hosts_pipeline_type": "parallel",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 1},
                        {"host_name": "host2", "host_serial_priority": 8},
                        {"host_name": "host3", "host_serial_priority": 4},
                        {"host_name": "host4", "host_serial_priority": 7}
                    ]
                },
                {
                    "group_name": "group2", "group_serial_priority": 6, "my_hosts_pipeline_type": "serial",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 9},
                        {"host_name": "host2", "host_serial_priority": 2}
                    ]
                },
                {
                    "group_name": "group3", "group_serial_priority": 1, "my_hosts_pipeline_type": "parallel",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 3}
                    ]
                }
            ]
        },
        {
            "app_name": "app4", "app_priority": 5, "group_pipeline_type": "parallel",
            "groups": [
                {
                    "group_name": "group1", "group_serial_priority": 7, "my_hosts_pipeline_type": "serial",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 5},
                        {"host_name": "host2", "host_serial_priority": 6}
                    ]
                },
                {
                    "group_name": "group2", "group_serial_priority": 4, "my_hosts_pipeline_type": "parallel",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 2},
                        {"host_name": "host2", "host_serial_priority": 3}
                    ]
                },
                {
                    "group_name": "group3", "group_serial_priority": 9, "my_hosts_pipeline_type": "serial",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 1}
                    ]
                },
                {
                    "group_name": "group4", "group_serial_priority": 2, "my_hosts_pipeline_type": "parallel",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 4},
                        {"host_name": "host2", "host_serial_priority": 7}
                    ]
                }
            ]
        },
        {
            "app_name": "app5", "app_priority": 5, "group_pipeline_type": "serial",
            "groups": [
                {
                    "group_name": "group1", "group_serial_priority": 1, "my_hosts_pipeline_type": "parallel",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 3}
                    ]
                },
                {
                    "group_name": "group2", "group_serial_priority": 8, "my_hosts_pipeline_type": "serial",
                    "hosts": [
                        {"host_name": "host1", "host_serial_priority": 2},
                        {"host_name": "host2", "host_serial_priority": 5}
                    ]
                }
            ]
        }
    ]

def parallel_or_one_unit(income_node, act_list):
    """Chain *act_list* after *income_node* and return the new tail node.

    A single activity is appended directly. Several activities are fanned
    out behind a ParallelGateway and joined by a ConvergeGateway (the
    gateway pair forms the parallel pipeline fragment). An empty list
    leaves the flow untouched and returns *income_node* itself.
    """
    if not act_list:
        return income_node

    if len(act_list) == 1:
        single = act_list[0]
        income_node.extend(single)
        return single

    split_gw = ParallelGateway()
    join_gw = ConvergeGateway()

    # extend() returns the appended node, so the fan-out/fan-in can be
    # wired on the gateway directly instead of one long call chain.
    income_node.extend(split_gw)
    split_gw.connect(*act_list)
    split_gw.converge(join_gw)

    return join_gw

# def sorted_and_grouped(items, priority_key, ascending=True):
#     """按指定优先级字段排序并分组"""
#     sorted_items = sorted(items, key=lambda x: x[priority_key], reverse=not ascending)
#     grouped = []
#     for key, group in groupby(sorted_items, key=lambda x: x[priority_key]):
#         grouped.append((key, list(group)))
#     return grouped
def sorted_and_grouped(items, priority_key, ascending=True):
    """Sort *items* by *priority_key* and group adjacent equal priorities.

    Returns a list of ``(priority_value, [item, ...])`` tuples ordered by
    priority (ascending unless *ascending* is False). The sort is stable,
    so items sharing a priority keep their original relative order.
    """
    def key_of(item):
        return item[priority_key]

    ordered = sorted(items, key=key_of, reverse=not ascending)
    batches = []
    for priority, batch in groupby(ordered, key=key_of):
        batches.append((priority, list(batch)))
    return batches

# def build_group_subprocess(groups):
#     """可以运行，但group并行没实现"""
#     start = EmptyStartEvent()
#     current = start
#
#     group_priority_groups = sorted_and_grouped(groups, 'group_priority', ascending=True)
#
#     for _, group_batch in group_priority_groups:
#         for group in group_batch:
#             host_groups = sorted_and_grouped(group['hosts'], 'host_priority', ascending=True)
#
#             for _, hosts in host_groups:
#                 activities = [
#                     ServiceActivity(
#                         component_code='host_component',
#                         name=f"{group['group_name']}_{h['host_name']}"
#                     ) for h in hosts
#                 ]
#                 if activities:
#                     current = parallel_or_one_unit(current, activities)
#
#     end = EmptyEndEvent()
#     current.extend(end)
#     return {
#         'start': start,
#         'pipeline': builder.build_tree(start)
#     }

# def build_group_subprocess(groups):
#     start = EmptyStartEvent()
#     current = start
#
#     # 按group_priority升序分组
#     group_priority_groups = sorted_and_grouped(groups, 'group_priority', ascending=True)
#
#     for _, group_batch in group_priority_groups:
#         group_act_list = []
#         for group in group_batch:
#             # 构建当前group的处理流程
#             group_start = None
#             group_current = None
#
#             # 按host_priority升序分组
#             host_priority_groups = sorted_and_grouped(group['hosts'], 'host_priority', ascending=True)
#             for host_priority, hosts in host_priority_groups:
#                 # 创建ServiceActivity列表
#                 activities = [
#                     ServiceActivity(
#                         component_code='host_component',
#                         name=f"{group['group_name']}_{h['host_name']}"
#                     ) for h in hosts
#                 ]
#                 if not activities:
#                     continue
#
#                 # 处理当前host批次
#                 if group_start is None:
#                     # 第一个host批次，初始化group流程
#                     if len(activities) == 1:
#                         group_start = activities[0]
#                         group_current = activities[0]
#                     else:
#                         pg = ParallelGateway()
#                         cg = ConvergeGateway()
#                         pg.connect(*activities).to(cg)
#                         group_start = pg
#                         group_current = cg
#                 else:
#                     # 后续host批次，连接到当前流程
#                     if len(activities) == 1:
#                         group_current.extend(activities[0])
#                         group_current = activities[0]
#                     else:
#                         pg = ParallelGateway()
#                         cg = ConvergeGateway()
#                         group_current.extend(pg)
#                         pg.connect(*activities).to(cg)
#                         group_current = cg
#
#             if group_start is not None:
#                 group_act_list.append(group_start)
#
#         # 将当前批次的groups合并到主流程
#         if group_act_list:
#             current = parallel_or_one_unit(current, group_act_list)
#
#     # 连接结束事件
#     end = EmptyEndEvent()
#     current.extend(end)
#     return {
#         'start': start,
#         'pipeline': builder.build_tree(start)
#     }
def build_group_subprocess(groups):
    """Build a pipeline fragment for *groups* batched by priority.

    Groups are batched by ascending ``group_priority``; within each group,
    hosts are batched by ascending ``host_priority`` and turned into
    ServiceActivity nodes.

    NOTE(review): this variant looks unfinished. It passes *lists* of
    activities where ``parallel_or_one_unit`` expects node objects (see
    ``batch_act_list.append(group_activities)`` and the ``[activities]``
    call), and indexes the helper's return value with ``[-1]`` even though
    the helper normally returns a single node — verify against the working
    ``gen_group_pipeline`` before using.

    :param groups: list of dicts with group_name, group_priority, hosts.
    :return: {"start": start event node, "pipeline": built tree dict}
    """
    start = EmptyStartEvent()
    current = start

    # Batch the groups by ascending group_priority.
    group_priority_groups = sorted_and_grouped(groups, 'group_priority', ascending=True)

    for _, group_batch in group_priority_groups:
        # Collect the entry flow of every group in the current batch.
        batch_act_list = []
        for group in group_batch:
            group_activities = []
            group_current = None
            # Batch this group's hosts by ascending host_priority.
            host_priority_groups = sorted_and_grouped(group['hosts'], 'host_priority', ascending=True)
            for _, hosts in host_priority_groups:
                # One ServiceActivity per host in the current batch.
                activities = [
                    ServiceActivity(
                        component_code='host_component',
                        name=f"{group['group_name']}_{h['host_name']}"
                    ) for h in hosts
                ]
                if not activities:
                    continue
                # Wire the current host batch into this group's flow.
                if not group_activities:
                    # First host batch: seed the group's flow.
                    group_activities.extend(activities)
                    group_current = activities[-1]
                else:
                    # Later batches: chain after the current flow.
                    # NOTE(review): [-1] on the helper's return only works if
                    # it returns a sequence, which the single-branch path does
                    # by accident — confirm intent.
                    group_current = parallel_or_one_unit(group_current, [activities])[-1]
            if group_activities:
                # Record this group's entry node(s).
                batch_act_list.append(group_activities)

        # Merge the whole batch of groups into the main flow.
        if batch_act_list:
            current = parallel_or_one_unit(current, batch_act_list)

    end = EmptyEndEvent()
    current.extend(end)
    return {
        'start': start,
        'pipeline': builder.build_tree(start)
    }

def gen_group_pipeline(groups):
    """Build a group-level pipeline fragment.

    Groups are batched by ascending ``group_priority``: batches run one
    after another, and groups within one batch run in parallel behind a
    ParallelGateway that is later converged. Hosts inside a group are
    batched by ascending ``host_priority`` and chained with
    ``parallel_or_one_unit``.

    NOTE(review): each host dict must carry ``sops_task["vars"]["name"]``
    (see the activity inputs below) — fixtures such as ``my_apps2`` lack
    that key; confirm the expected input shape before wiring them in.

    :param groups: list of dicts with group_name, group_priority, hosts.
    :return: {"start": start event node, "pipeline": built tree dict}
    """
    start = EmptyStartEvent()
    # Batch the groups by ascending group_priority.
    batch_groups = sorted_and_grouped(groups, 'group_priority', ascending=True)

    # Tail node of the previous batch (None before the first batch).
    before_bat_group_tail = None
    bat_group_head = None
    # Walk the batches in order; each batch is chained after the previous one.
    for batch_prior, one_batch_groups in batch_groups:
        # 1. Decide this batch's head node.
        # Only one group in this priority batch: no gateway needed.
        if len(one_batch_groups) == 1:
            # First batch starts from the start event.
            if before_bat_group_tail is None:
                bat_group_head = start
            else:
                bat_group_head = before_bat_group_tail
        else:
            # Several groups in this batch: they fan out behind a gateway.
            pg = ParallelGateway(name="bat_group_"+str(batch_prior))
            # First batch hangs the gateway off the start event.
            if before_bat_group_tail is None:
                bat_group_head = start.extend(pg)
            else:
                bat_group_head = before_bat_group_tail.extend(pg)
            pass

        # 2. Build each group's body; record every group's tail node.
        group_tail_node_list = []
        # Within the batch, walk each group's host batches in order.
        for group in  one_batch_groups:
            hosts_in_batch = sorted_and_grouped(group['hosts'], 'host_priority', ascending=True)
            # A group holds a single chain of host batches; walk them in order.
            one_group_host_tail = bat_group_head
            for _, hosts in hosts_in_batch:
                # Activities for the current host batch.
                activities = list()
                for _h in hosts:
                    act = ServiceActivity(component_code='app_publish_sops_exec',name=f"{group['group_name']}({batch_prior})_{_h['host_name']}({_})")
                    act.component.inputs.sops_task_vars_name = Var(type=Var.PLAIN, value=_h["sops_task"]["vars"]["name"])
                    act.component.inputs.sops_task = Var(type=Var.PLAIN, value=_h["sops_task"])
                    activities.append(act)
                    pass
                # activities = [
                #     ServiceActivity(
                #         component_code='pipe_example_component',
                #         name=f"{group['group_name']}({batch_prior})_{h['host_name']}({_})"
                #     ) for h in hosts
                # ]
                if activities:
                    # Chain this host batch after the previous one.
                    one_group_host_tail = parallel_or_one_unit(one_group_host_tail, activities)


            # Record this group's tail so the batch can be converged below.
            group_tail_node_list.append(one_group_host_tail)
            pass

        # 3. Decide this batch's tail node.
        if len(one_batch_groups) == 1:
            # Single group: its tail is the batch tail.
            bat_group_tail = group_tail_node_list[0]
        else:
            # Several groups: converge their parallel branches.
            cg = ConvergeGateway(name="bat_group_"+str(batch_prior))
            bat_group_head.converge(cg)  # bat_group_head must be the ParallelGateway here
            bat_group_tail = cg
            pass

        # 4. This batch's tail becomes the next batch's head.
        before_bat_group_tail = bat_group_tail


        pass

    # 5. All batches wired; attach the end event to the final tail.
    end = EmptyEndEvent()
    before_bat_group_tail.extend(end)

    return {
        'start': start,
        'pipeline': builder.build_tree(start)
    }

def generate_pipeline(apps):
    """Build the top-level pipeline tree for *apps*.

    Apps are batched by ascending ``app_priority``; each batch runs its
    apps in parallel (via ``parallel_or_one_unit``), and each app becomes
    a SubProcess wrapping the group pipeline from ``gen_group_pipeline``.
    """
    start_event = EmptyStartEvent()
    cursor = start_event

    for priority, batch in sorted_and_grouped(apps, 'app_priority', ascending=True):
        # One SubProcess per app in the current priority batch.
        subprocesses = [
            SubProcess(
                name=f"{app['app_name']}({priority})",
                start=gen_group_pipeline(app['groups'])['start'],
                data=Data(inputs={'${sub_input}': Var(type=Var.PLAIN, value=1)}),
            )
            for app in batch
        ]
        cursor = parallel_or_one_unit(cursor, subprocesses)

    end_event = EmptyEndEvent()
    cursor.extend(end_event)
    return builder.build_tree(start_event)
# def generate_pipeline(apps):
#     start_event = EmptyStartEvent()
#     current_node = start_event
#
#     # 按应用优先级分组排序
#     app_groups = sorted_and_grouped(apps, 'app_priority', ascending=True)
#
#     for app_priority, app_group in app_groups:
#         app_acts = []
#         for app in app_group:
#             logger.info(f"    App[{app['app_name']}],Priority[{app['app_priority']}]")
#             # 创建应用子流程
#             app_sub_start = EmptyStartEvent()
#             app_sub_current = app_sub_start
#
#             # 按组优先级排序
#             sorted_groups = sorted(app['groups'], key=lambda g: g['group_priority'])
#
#             # 按组顺序处理host
#             for group in sorted_groups:
#                 # 按host优先级分组排序
#                 host_groups = sorted_and_grouped(group['hosts'], 'host_priority', ascending=True)
#
#                 # 生成host节点并连接
#                 for host_priority, hosts in host_groups:
#                     activities = [
#                         ServiceActivity(
#                             component_code='host_component',
#                             name=f"{app['app_name']}_g{group['group_name']}_h{h['host_name']}"
#                         ) for h in hosts
#                     ]
#                     if activities:
#                         app_sub_current = parallel_pipe_unit(app_sub_current, activities)
#
#             # 完成应用子流程
#             app_sub_end = EmptyEndEvent()
#             app_sub_current.extend(app_sub_end)
#             app_subprocess = SubProcess(builder.build_tree(app_sub_start))
#             app_acts.append(app_subprocess)
#
#         # 连接应用子流程到主流程
#         current_node = parallel_pipe_unit(current_node, app_acts)
#
#     # 连接结束事件
#     end_event = EmptyEndEvent()
#     current_node.extend(end_event)
#     return builder.build_tree(start_event)

def build_parallel_groups_tree1(groups):
    """
    Build a pipeline where groups run in parallel with each other and,
    inside each group, hosts run either serially (ordered by
    ``host_serial_priority``) or in parallel.

    NOTE(review): the ``groups`` parameter is immediately shadowed by the
    hard-coded fixture below — this looks like test scaffolding; confirm
    before passing real data through.

    :param groups: list of dicts with group_name, group_serial_priority,
        my_hosts_pipeline_type ("serial" | "parallel") and hosts.
    :return: {"pipeline": built tree dict, "start_event_obj": start event}
    """
    groups = [
        {
            "group_name": "group1", "group_serial_priority": 5, "my_hosts_pipeline_type": "serial",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 3},
                {"host_name": "host2", "host_serial_priority": 7},
                {"host_name": "host3", "host_serial_priority": 1}
            ]
        },
        {
            "group_name": "group2", "group_serial_priority": 2, "my_hosts_pipeline_type": "parallel",
            "hosts": [
                {"host_name": "host2", "host_serial_priority": 9}
            ]
        },
        {
            "group_name": "group3", "group_serial_priority": 8, "my_hosts_pipeline_type": "serial",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 2},
                {"host_name": "host2", "host_serial_priority": 5},
                {"host_name": "host3", "host_serial_priority": 6}
            ]
        },
        {
            "group_name": "group4", "group_serial_priority": 3, "my_hosts_pipeline_type": "parallel",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 1},
                {"host_name": "host2", "host_serial_priority": 8},
                {"host_name": "host3", "host_serial_priority": 4},
                {"host_name": "host4", "host_serial_priority": 7}
            ]
        },
        {
            "group_name": "group5", "group_serial_priority": 6, "my_hosts_pipeline_type": "serial",
            "hosts": [
                {"host_name": "host2", "host_serial_priority": 2}
            ]
        }
    ]
    start_event = EmptyStartEvent()
    end_event = EmptyEndEvent()

    # Converge gateway that joins all group branches back into the main flow
    # (only used when there is more than one group).
    main_converge_gw = ConvergeGateway()
    # Per-group entry/exit nodes: [{"entry": node, "exit": node}, ...]
    processed_groups = []
    for group in groups:
        group_name = group["group_name"]
        my_hosts_pipeline_type = group["my_hosts_pipeline_type"]
        my_hosts = group["hosts"]
        print(f"处理组: { group['group_name'] }, 并行")
        if my_hosts_pipeline_type == "serial":
            # Serial hosts: order by host_serial_priority and chain the acts.
            sorted_hosts = sorted(my_hosts, key=lambda x: x["host_serial_priority"])
            sorted_seri_acts = []
            prev_act = None
            for host in sorted_hosts:
                print(f"  串行处理主机: {host['host_name']} (优先级: {host['host_serial_priority']})")
                act = ServiceActivity(component_code='pipe_example_component',
                                      name=str(group['group_name']) + '_act_p' + str(host['host_serial_priority']))
                sorted_seri_acts.append(act)
                if prev_act:
                    prev_act.extend(act)
                prev_act = act

            # The chain's entry/exit are the first/last sorted activities.
            processed_groups.append({
                "entry": sorted_seri_acts[0],
                "exit": sorted_seri_acts[-1]
            })
        elif my_hosts_pipeline_type == "parallel":
            if len(my_hosts) == 1:
                # Parallel group with a single host: one activity, no gateways.
                # BUGFIX: `host` was printed before being assigned, so the log
                # showed a stale host left over from the previous group (and
                # the label wrongly said serial).
                host = my_hosts[0]
                print(f"  并行处理主机: {host['host_name']} ")
                act = ServiceActivity(
                    component_code="pipe_example_component",
                    name=f"{group_name}_{host['host_name']}"
                )
                # entry and exit are both the single activity.
                processed_groups.append({"entry": act, "exit": act})
            else:
                for host in my_hosts:
                    print(f"  并行处理主机: {host['host_name']} ")
                # Parallel group with several hosts: fan out through a
                # ParallelGateway and join with a ConvergeGateway.
                parallel_gw = ParallelGateway(name=f"{group_name}_")
                converge_gw = ConvergeGateway()
                activities = [
                    ServiceActivity(
                        component_code="pipe_example_component",
                        name=f"{group_name}_{host['host_name']}"
                    ) for host in my_hosts
                ]
                parallel_gw.connect(*activities)
                for act in activities:
                    act.extend(converge_gw)
                processed_groups.append({"entry": parallel_gw, "exit": converge_gw})

    # Fan the group entries out behind a main ParallelGateway and converge
    # them before the end event; a single group is wired straight through.
    if len(processed_groups) > 1:
        main_parallel_gw = ParallelGateway()
        start_event.extend(main_parallel_gw)
        main_parallel_gw.connect(*[group["entry"] for group in processed_groups])\
            .to(main_parallel_gw).converge(main_converge_gw).extend(end_event)
    else:
        # Only one group: connect it directly between start and end.
        start_event.extend(processed_groups[0]["entry"]).tail().extend(end_event)

    print()
    # Build and dump the pipeline tree.
    pipeline = builder.build_tree(start_event)
    pprint(pipeline)
    return {"pipeline": pipeline, "start_event_obj":start_event}

def build_serial_groups_tree1(groups):
    """
    Build a pipeline where groups run serially (ordered by
    ``group_serial_priority``) and hosts inside each group run serially
    or in parallel depending on ``my_hosts_pipeline_type``.

    NOTE(review): the ``groups`` parameter is immediately shadowed by a
    hard-coded fixture below, and ``groups1`` is an unused alternative
    fixture — looks like test scaffolding; confirm before real use.
    ``serial_unit`` / ``parallel_unit`` are defined elsewhere in this
    file; presumably each appends the given activities after the head
    node and returns the new tail — verify against their definitions.

    :param groups: List<group_name, group_serial_priority,
        my_hosts_pipeline_type, hosts: List<>>
    :return: {"pipeline": built tree dict, "start_event_obj": start event}
    """
    groups = [
        {
            "group_name": "group2", "group_serial_priority": 2, "my_hosts_pipeline_type": "parallel",
            "hosts": [
                {"host_name": "host2", "host_serial_priority": 9}
            ]
        },
        {
            "group_name": "group5", "group_serial_priority": 6, "my_hosts_pipeline_type": "serial",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 9},
            ]
        }
    ]
    groups1 =[
        {
            "group_name": "group1","group_serial_priority": 5,"my_hosts_pipeline_type": "serial",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 3},
                {"host_name": "host2", "host_serial_priority": 7},
                {"host_name": "host3", "host_serial_priority": 1}
            ]
        },
        {
            "group_name": "group2","group_serial_priority": 2,"my_hosts_pipeline_type": "parallel",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 4},
                {"host_name": "host2", "host_serial_priority": 9}
            ]
        },
        {
            "group_name": "group3","group_serial_priority": 8,"my_hosts_pipeline_type": "serial",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 2},
                {"host_name": "host2", "host_serial_priority": 5},
                {"host_name": "host3", "host_serial_priority": 6}
            ]
        },
        {
            "group_name": "group4","group_serial_priority": 3,"my_hosts_pipeline_type": "parallel",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 1},
                {"host_name": "host2", "host_serial_priority": 8},
                {"host_name": "host3", "host_serial_priority": 4},
                {"host_name": "host4", "host_serial_priority": 7}
            ]
        },
        {
            "group_name": "group5","group_serial_priority": 6,"my_hosts_pipeline_type": "serial",
            "hosts": [
                {"host_name": "host1", "host_serial_priority": 9},
                {"host_name": "host2", "host_serial_priority": 2}
            ]
        }
    ]
    # 2. Sort the groups by group_serial_priority (serial execution order).
    sorted_groups = sorted(groups, key=lambda x: x["group_serial_priority"])
    start_event = EmptyStartEvent()
    end_event = EmptyEndEvent()
    # 3. Walk the sorted groups, growing the chain from the start event.
    start_in_act = None  # tail node of the chain grown from the start event
    for group_idx, group in enumerate(sorted_groups):
        print(f"处理组: { group['group_name'] },"
              f"prior: {str(group['group_serial_priority'])}, ")
        my_hosts = group.get("hosts")
        if group["my_hosts_pipeline_type"] == "serial":
            # Serial hosts: order them by host_serial_priority.
            sorted_hosts = sorted(my_hosts, key=lambda x: x["host_serial_priority"])
            sorted_seri_acts = []
            for host in sorted_hosts:
                print(f"  串行处理主机: {host['host_name']} (优先级: {host['host_serial_priority']})")
                act = ServiceActivity(component_code='pipe_example_component',
                                      name=str(group['group_name']) + '_act_p' + str(host['host_serial_priority']))
                sorted_seri_acts.append(act)
            # Position of this group in the sorted order decides the head.
            if group_idx == 0:
                # First group: chain directly off the start event.
                start_in_act = serial_unit(start_event,sorted_seri_acts)
            else:
                # Later groups: chain the serial acts after the tail kept
                # from the previous iteration.
                start_in_act = serial_unit(start_in_act,sorted_seri_acts)
            pass
        else:
            # Parallel hosts: create all acts and fan them out together.
            parallel_acts = []
            for host in my_hosts:
                print(f"  并行处理主机: {host['host_name']} (优先级: {host['host_serial_priority']})")
                act = ServiceActivity(component_code='pipe_example_component',
                                      name=str(group['group_name']) + '_act_p' + str(host['host_serial_priority']))
                parallel_acts.append(act)
            # Position of this group in the sorted order decides the head.
            if group_idx == 0:
                # First group: chain directly off the start event.
                start_in_act = parallel_unit(start_event, parallel_acts)
            else:
                # Later groups: chain the parallel unit after the tail kept
                # from the previous iteration.
                start_in_act = parallel_unit(start_in_act, parallel_acts)
            pass
        pass
    # After all groups, attach the end event to the final tail.
    start_in_act.extend(end_event)

    # Build and dump the pipeline tree.
    pipeline = builder.build_tree(start_event)
    pprint(pipeline)
    return {"pipeline": pipeline, "start_event_obj":start_event}

def group_host_pipe():
    """Demo: groups run serially against one another; inside each group the
    hosts run either serially (ordered by host_serial_priority) or in
    parallel, depending on the group's host_pipeline_type.

    Builds the bamboo pipeline via serial_unit/parallel_unit helpers,
    prints the resulting tree and returns it.

    :return: dict with key "pipeline" holding the built tree.
    """
    # group1: inter-group serial, intra-group serial hosts
    host1 = {
        "group_name": "group1", "group_pipeline_type": "serial", "group_serial_priority": 1,
        "host_name": "host1", "host_pipeline_type": "serial", "host_serial_priority": 1}
    host2 = {
        "group_name": "group1", "group_pipeline_type": "serial", "group_serial_priority": 1,
        "host_name": "host2", "host_pipeline_type": "serial", "host_serial_priority": 2}
    host3 = {
        "group_name": "group1", "group_pipeline_type": "serial", "group_serial_priority": 1,
        "host_name": "host3", "host_pipeline_type": "serial", "host_serial_priority": 4}
    # group2: inter-group serial, intra-group parallel hosts
    host4 = {
        "group_name": "group2", "group_pipeline_type": "serial", "group_serial_priority": 3,
        "host_name": "host4", "host_pipeline_type": "parallel", "host_serial_priority": 4}
    host5 = {
        "group_name": "group2", "group_pipeline_type": "serial", "group_serial_priority": 3,
        "host_name": "host5", "host_pipeline_type": "parallel", "host_serial_priority": -3}

    # NOTE: host4 is intentionally left out of this sample input
    host_list = [host1, host2, host3, host5]

    # 1. bucket hosts by group name
    buckets = defaultdict(list)
    for entry in host_list:
        buckets[entry["group_name"]].append(entry)

    # 2. order the groups by the priority carried on each group's first member
    ordered_groups = sorted(buckets.items(), key=lambda pair: pair[1][0]["group_serial_priority"])

    start_event = EmptyStartEvent()
    end_event = EmptyEndEvent()

    # 3. walk the ordered groups, chaining each group's unit onto the tail
    chain_tail = None  # builder node produced by the previous group's unit
    for idx, (gname, members) in enumerate(ordered_groups):

        print(f"处理组: {gname},"
              f"prior: {str(members[0]['group_serial_priority'])}, {members[0]['group_pipeline_type']}")

        # the first group hangs off the start event; later groups hang off
        # whatever the previous iteration produced
        head = start_event if idx == 0 else chain_tail

        if members[0]["host_pipeline_type"] == "serial":
            # serial hosts: order by host_serial_priority, then chain
            acts = []
            for member in sorted(members, key=lambda m: m["host_serial_priority"]):
                print(f"  串行处理主机: {member['host_name']} (优先级: {member['host_serial_priority']})")
                acts.append(ServiceActivity(
                    component_code='pipe_example_component',
                    name=str(gname) + '_act_p' + str(member['host_serial_priority'])))
            chain_tail = serial_unit(head, acts)
        else:
            # parallel hosts: no ordering needed
            acts = []
            for member in members:
                print(f"  并行处理主机: {member['host_name']} (优先级: {member['host_serial_priority']})")
                acts.append(ServiceActivity(
                    component_code='pipe_example_component',
                    name=str(gname) + '_act_p' + str(member['host_serial_priority'])))
            chain_tail = parallel_unit(head, acts)

    # finally attach the accumulated body to the end event
    chain_tail.extend(end_event)

    # build and dump the tree
    pipeline = builder.build_tree(start_event)
    pprint(pipeline)
    return {"pipeline": pipeline}


def group_in_serial_by_priority():
    """Multiple node groups run in parallel; within a group, activities are
    chained serially by process priority.

    One activity is created per *distinct* priority, so duplicate
    priorities collapse into a single act (tasks 2/3/4 below all map to
    the p2 act).

    :return: dict with key "pipeline" holding the built tree.
    """
    from collections import defaultdict
    from itertools import groupby

    # sample JobTask objects: node1 carries priorities 1 / 2 / 2 / 2 / 3
    tasks = [
        JobTask(1, {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 1}}}}),
        JobTask(2, {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 2}}}}),
        # ids 3 and 4 repeat priority 2 — only one p2 act is generated
        JobTask(3, {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 2}}}}),
        JobTask(4, {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 2}}}}),
        JobTask(5, {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 3}}}}),
    ]

    # group tasks by node_key_field: {node1: [task, ...], node2: [...], ...}
    by_node = defaultdict(list)
    for task in tasks:
        by_node[task.extra_data["topo_level_info"].get("node_key_field")].append(task)

    direction = 1  # ascending; set to -1 for descending priority order

    def _priority(task):
        # dig the process priority out of the nested extra_data dict
        return task.extra_data["topo_level_info"]["process_info"]["process"]["priority"]

    branches = []
    for node_key, node_tasks in by_node.items():
        # sort the node's members by priority, then bucket them per priority
        # (groupby requires the pre-sort to produce one run per priority)
        ranked = sorted(node_tasks, key=lambda t: direction * _priority(t))
        chain = []
        for priority, same_priority in groupby(ranked, _priority):
            logger.info(f"[{node_key}] creating pipeline with priority[{priority}]")
            for task in same_priority:
                logger.info(f"  [{task.extra_data}]")

            # one act per distinct priority
            chain.append(ServiceActivity(component_code='pipe_example_component',
                                         name=str(node_key) + '_act1_p' + str(priority)))

        # wire the per-priority acts head-to-tail
        for left, right in zip(chain, chain[1:]):
            left.extend(right)
        branches.append(chain[0])

    start_event = EmptyStartEvent()
    end_event = EmptyEndEvent()

    # multiple node branches need a parallel/converge gateway pair;
    # a single branch plugs straight into start/end
    if len(branches) > 1:
        parallel_gw = ParallelGateway()
        converge_gw = ConvergeGateway()
        start_event.extend(parallel_gw).connect(*branches).to(parallel_gw).converge(converge_gw).extend(end_event)

    if len(branches) == 1:
        start_event.extend(branches[0]).tail().extend(end_event)

    pipeline = builder.build_tree(start_event)
    pprint(pipeline)
    return {"pipeline": pipeline}

def add_pipe_demo():
    """Build a demo pipeline: one parallel branch per node key, and inside
    each branch an act1+act2 pair per distinct process priority, chained
    serially in priority order.

                StartEvent
                     |
               ParallelGateway
                     |
            -------------------
            |        | ..n... |
          prio=1   prio=3   prio=2n
           act1    act1    act1
            |        |        |
          prio=1   prio=3   prio=2n
           act2    act2     ct2
            |        |        |
          prio=2     |      prio=3n
           act1      |      act1
            |        |        |
          prio=2     |      prio=3n
           act2      |      act2
            |        |        |
            -------------------
                     |
               ConvergeGateway
                     |
                  EndEvent
    :return: dict with key "pipeline" holding the built tree.
    """
    from collections import defaultdict
    from typing import Dict, List, Optional, Union
    from itertools import groupby
    from functools import reduce

    # Create some sample JobTask objects
    job_task1 = JobTask(1,
                        {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 1}}}})
    job_task2 = JobTask(2,
                        {"topo_level_info": {"node_key_field": "node2", "process_info": {"process": {"priority": 2}}}})
    job_task3 = JobTask(3,
                        {"topo_level_info": {"node_key_field": "node1", "process_info": {"process": {"priority": 2}}}})
    job_task4 = JobTask(4,
                        {"topo_level_info": {"node_key_field": "node3", "process_info": {"process": {"priority": 2}}}})
    job_task5 = JobTask(5,
                        {"topo_level_info": {"node_key_field": "node2", "process_info": {"process": {"priority": 3}}}})
    # Duplicates id=2 (node2, priority 2). Acts are generated once per
    # distinct priority, so however many p2 JobTasks exist, p2 yields only
    # a single act1/act2 pair.
    job_task6 = JobTask(6,
                        {"topo_level_info": {"node_key_field": "node2", "process_info": {"process": {"priority": 2}}}})
    job_tasks = [job_task1, job_task2, job_task3, job_task4, job_task5, job_task6]

    # job_tasks_gby_node_key maps node key -> list of JobTasks;
    # defaultdict(list) lets us append without checking key existence.
    job_tasks_gby_node_key: Dict[Optional[Union[str, int]], List[JobTask]] = defaultdict(list)
    # Group by node_key_field: {node1: [job, ...], node2: [...], node3: [...]}
    for job_task in job_tasks:
        # the key may be a str or an int (py3 typing, Java-like declaration)
        one_group_name: Optional[Union[str, int]] =job_task.extra_data["topo_level_info"].get("node_key_field")
        job_tasks_gby_node_key[one_group_name].append(job_task)
        pass

    # If DB objects get involved later, wrap this in `with transaction.atomic():`.
    # Iterate over each node-key group; job_tasks_under_node_key holds the
    # JobTask members belonging to one node.
    sub_processes = []
    weights = 1  # ascending order; weights = -1 reverses to descending
    # for job_tasks_under_node_key in job_tasks_gby_node_key.values():
    for node_key,job_tasks_under_node_key in job_tasks_gby_node_key.items():
        # Sort the node's members by process priority
        ordered_job_tasks = sorted(
            job_tasks_under_node_key,
            key=lambda x: weights * x.extra_data["topo_level_info"]["process_info"]["process"]["priority"],
        )
        # Bucket the sorted members per priority: {1: task_list, 2: task_list}
        # (groupby relies on the pre-sort producing one run per priority)
        grouped_job_tasks = groupby(
            ordered_job_tasks, lambda x: x.extra_data["topo_level_info"]["process_info"]["process"]["priority"]
        )
        # Walk the grouped structure {1: task_list, 2: task_list, 3: ...}
        ordered_activities: List[ServiceActivity] = []
        for priority, priority_job_tasks in grouped_job_tasks:
            # log the members that share this priority
            logger.info(f"[{node_key}] creating pipeline with priority[{priority}]")
            for job_task in priority_job_tasks:
                logger.info(f"  [{job_task.extra_data}]")
                pass

            #activities: List[ServiceActivity] = []
            act1 = ServiceActivity(component_code='pipe_example_component', name=str(node_key)+'_act1_p' + str(priority))
            act2 = ServiceActivity(component_code='pipe_example_component', name=str(node_key)+'_act2_p' + str(priority))
            activities = [act1,act2]
            # Filter out Nones, then append this priority's acts onto the
            # node's accumulated ordered_activities list
            ordered_activities.extend(list(filter(None, activities)))
            pass

        # Chain the acts pairwise: act1.extend(act2), then the result (act2)
        # extends act3, and so on — builder `extend` returns the new tail.
        reduce(lambda l, r: l.extend(r), ordered_activities)
        sub_processes.append(ordered_activities[0])
        pass
    start_event = EmptyStartEvent()
    parallel_gw = ParallelGateway()
    converge_gw = ConvergeGateway()
    end_event = EmptyEndEvent()
    start_event.extend(parallel_gw).connect(*sub_processes).to(parallel_gw).converge(converge_gw).extend(end_event)

    pipeline = builder.build_tree(start_event)
    pprint(pipeline)
    return {"pipeline": pipeline}

def create_many_subproc(data, act_list=None):
    """Build a SubProcess whose body is a serial chain of activities.

    :param data: builder ``Data`` object attached to the subprocess.
    :param act_list: optional list of activities to chain in order;
        defaults to the original two demo activities, so existing callers
        are unaffected.
    :return: ``SubProcess`` wrapping start -> act1 -> ... -> actN -> end.
    """
    if act_list is None:
        # default demo body, kept for backward compatibility
        act_list = [
            ServiceActivity(component_code='pipe_example_component', name='build_sub_name1'),
            ServiceActivity(component_code='pipe_example_component', name='build_sub_name2'),
        ]

    subproc_start = EmptyStartEvent()
    subproc_end = EmptyEndEvent()

    # Chain serially. `extend` returns the newly attached node, so `tail`
    # always points at the end of the chain. Starting `tail` at the start
    # event removes the old index==0 special case and fixes the crash the
    # original had on an empty act_list (`subproc` stayed None and
    # `None.extend(...)` raised AttributeError); an empty list now yields
    # a valid start -> end subprocess.
    tail = subproc_start
    for act in act_list:
        tail = tail.extend(act)
    tail.extend(subproc_end)

    return SubProcess(start=subproc_start, data=data)

def sub_process(data):
    """Return a SubProcess with a fixed two-activity serial body.

    :param data: builder ``Data`` object attached to the subprocess.
    """
    head = EmptyStartEvent()
    acts = [
        ServiceActivity(component_code='pipe_example_component', name='build_sub_name'),
        ServiceActivity(component_code='pipe_example_component', name='build_sub_name2'),
    ]

    # chain start -> act1 -> act2 -> end; extend returns the new tail
    tail = head
    for act in acts:
        tail = tail.extend(act)
    tail.extend(EmptyEndEvent())

    # subproc_act.component.inputs.sub_input = Var(type=Var.SPLICE, value='${sub_input}')

    return SubProcess(start=head, data=data)


def build_pipeline_tree():
    """Assemble start -> act_1 -> subprocess -> end, print and return the tree."""
    entry = EmptyStartEvent()
    finish = EmptyEndEvent()
    first_act = ServiceActivity(component_code='pipe_example_component', name='act_1')

    # subprocess carries its own input data (${sub_input} = 1)
    sub_data = Data(inputs={'${sub_input}': Var(type=Var.PLAIN, value=1)})
    sub_node = create_many_subproc(sub_data)

    entry.extend(first_act).extend(sub_node).extend(finish)

    tree = builder.build_tree(entry)
    pprint(tree)
    return tree


def inject_constants(pipeline_tree: dict):
    """
    Ensure every (sub)pipeline tree carries a 'constants' mapping.

    Web-built trees already have it, but trees produced directly via the
    builder do not; PipelineInstance creation requires it, otherwise the
    web pipeline converter fails when the task is dispatched.

    :param pipeline_tree: pipeline tree dict, mutated in place to contain
        'constants': {} (one per tree — the global flow variables that
        component parameters may reference).
    """
    pipeline_tree.setdefault("constants", {})
    # Recurse into subprocess trees unconditionally. The original only
    # recursed when the TOP-level tree lacked 'constants', so nested trees
    # were never injected whenever the parent already had the key.
    for act in pipeline_tree[PE.activities].values():
        if act[PE.type] == PE.SubProcess:
            # the child tree may be stored under either key
            for child_key in ("pipeline_tree", "pipeline"):
                if child_key in act:
                    inject_constants(act[child_key])


def inject_outputs(pipeline_tree: dict):
    """
    Ensure every (sub)pipeline tree carries an 'outputs' list.

    Web-built trees already have 'outputs': []; builder-produced trees do
    not, so inject it before instance creation.

    :param pipeline_tree: pipeline tree dict, mutated in place.
    """
    pipeline_tree.setdefault("outputs", [])
    # Recurse into subprocess trees unconditionally. The original only
    # recursed when the TOP-level tree lacked 'outputs', leaving nested
    # trees un-injected whenever the parent already had the key.
    for act in pipeline_tree[PE.activities].values():
        if act[PE.type] == PE.SubProcess:
            # the child tree may be stored under either key
            for child_key in ("pipeline_tree", "pipeline"):
                if child_key in act:
                    inject_outputs(act[child_key])


def inject_component_data(pipeline_tree: dict):
    """
    For builder-produced trees, derive each activity's component 'data'
    from its 'inputs' values:
    'component': {'code': 'pipe_example_component','inputs': {}} ==>
    'component': {'code': 'pipe_example_component','data':{},'inputs': {}},
    Recurses into subprocess trees. Mutates the tree in place.
    """
    for act in pipeline_tree[PE.activities].values():
        node_type = act[PE.type]

        if node_type == PE.ServiceActivity:
            component = act["component"]
            # only fill in 'data' when the tree doesn't already have it
            if "data" not in component:
                component["data"] = {
                    key: {"hook": False, "need_render": False, "value": component["inputs"][key].get("value")}
                    for key in component["inputs"]
                }

        if node_type == PE.SubProcess:
            # the child tree may be stored under either key
            for child_key in ("pipeline_tree", "pipeline"):
                if child_key in act:
                    inject_component_data(act[child_key])



def create_inst_task(test3dict):
    """Create a one-time TaskFlowInstance from a builder-produced pipeline tree.

    Injects the keys a builder tree lacks (constants/outputs/component data/
    template_node_id), creates the PipelineInstance plus its node records,
    then wraps them in a TaskFlowInstance.

    :param test3dict: pipeline tree dict (e.g. from build_pipeline_tree());
        mutated in place by the inject_* helpers.
    :return: dict with result flag, task id/url/tree data and an error code.
    """

    # A builder-created tree differs from a web-created one: it lacks
    # constants (mandatory — missing it errors at start) and template_node_id.
    # test3dict=build_pipeline_tree()

    # Inject the required keys into the pipeline dict
    inject_constants(test3dict)
    inject_outputs(test3dict)
    inject_component_data(test3dict)
    # Inject template_node_id into every act; the dict gains the key in place
    inject_template_node_id(test3dict)
    pprint(test3dict)
    params = dict(
        name="myname2",
        # pipeline_tree=TEST_PIPELINE_TREE_WITH_SUB_INST_TREE,
        pipeline_tree=test3dict,
        description=""
    )
    # NOTE(review): project id 1 is hard-coded for this demo
    project = Project.objects.get(id=1)
    logger.info(
        "[API] fast_create_task info, project_id: {project_id}, params: {params}".format(
            project_id=project.id, params=params
        )
    )
    try:
        pipeline_tree = params["pipeline_tree"]
        pipeline_template_data = {
            "name": standardize_name(params["name"], TASK_NAME_MAX_LENGTH),
            "creator": "api-yzg",
            "description": params.get("description", ""),
        }
    except (KeyError, ValueError) as e:
        return {"result": False, "message": "invalid params: %s" % str(e), "code": err_code.REQUEST_PARAM_INVALID.code}

    # id_maps (old->new node id mapping) is unused here
    pipeline_instance, id_maps = PipelineInstance.objects.create_instance(
        None, pipeline_tree, spread=True, **pipeline_template_data
    )
    # create node in instance
    NodeInInstance.objects.create_nodes_in_instance(pipeline_instance, pipeline_tree)

    taskflow_kwargs = {
        "project": project,
        "pipeline_instance": pipeline_instance,
        "template_source": ONETIME,
        "create_method": TaskCreateMethod.API.value,
        "category":"OpsTools",
        "flow_type":"common",
        "current_flow": "execute_task"
    }

    task = TaskFlowInstance.objects.create(**taskflow_kwargs)
    return {
        "result": True,
        "data": {"task_id": task.id, "task_url": task.url, "pipeline_tree": task.pipeline_tree},
        "code": err_code.SUCCESS.code,
    }


def start_inst_task(task_id):
    """Dispatch an async prepare-and-start job for an existing taskflow.

    :param task_id: id of the TaskFlowInstance to start.
    :return: dict with the task url, a success flag and an error code.
    """
    operator = 'admin'
    # NOTE: project id 1 is hard-coded for this demo
    project = Project.objects.get(id=1)
    # task_id = 18

    # resolve the celery queue/routing key for the API task queue
    resolver = PrepareAndStartTaskQueueResolver(settings.API_TASK_QUEUE_NAME_V2)
    queue, routing_key = resolver.resolve_task_queue_and_routing_key()

    # hand off to celery; the worker performs the actual task start
    prepare_and_start_task.apply_async(
        kwargs={"task_id": task_id, "project_id": project.id, "username": operator},
        queue=queue,
        routing_key=routing_key,
    )

    url = TaskFlowInstance.task_url(project_id=project.id, task_id=task_id)

    return {
        "task_url": url,
        "data": {"task_url": url},
        "result": True,
        "code": err_code.SUCCESS.code,
    }


def fast_create_task():
    """Create a one-time TaskFlowInstance from the module-level TEST_PIPELINE_TREE.

    Uses TaskFlowInstance.objects.create_pipeline_instance with an unsaved
    TaskTemplate shell (no pipeline_template), then wraps the resulting
    PipelineInstance in a TaskFlowInstance.

    :return: dict with result flag, task id/url/tree data and an error code.
    """

    params = dict(
        name="myname",
        pipeline_tree=TEST_PIPELINE_TREE,
        description=""
    )
    # NOTE(review): project id 1 is hard-coded for this demo
    project = Project.objects.get(id=1)
    logger.info(
        "[API] fast_create_task info, project_id: {project_id}, params: {params}".format(
            project_id=project.id, params=params
        )
    )

    try:
        pipeline_tree = params["pipeline_tree"]
        pipeline_instance_kwargs = {
            "name": standardize_name(params["name"], TASK_NAME_MAX_LENGTH),
            "creator": "api-yzg",
            "pipeline_tree": pipeline_tree,
            "description": params.get("description", ""),
        }
    except (KeyError, ValueError) as e:
        return {"result": False, "message": "invalid params: %s" % str(e), "code": err_code.REQUEST_PARAM_INVALID.code}

    #has_common_subprocess = params.get("has_common_subprocess", False)
    try:
        # unsaved template shell — only used to satisfy create_pipeline_instance
        template = (
            TaskTemplate(pipeline_template=None)
        )
        pipeline_instance = TaskFlowInstance.objects.create_pipeline_instance(
            template=template, **pipeline_instance_kwargs
        )

    except PipelineException as e:
        message = "[API] fast_create_task create pipeline error: %s" % str(e)
        logger.exception(message)
        return {"result": False, "message": message, "code": err_code.UNKNOWN_ERROR.code}

    taskflow_kwargs = {
        "project": project,
        "pipeline_instance": pipeline_instance,
        "template_source": ONETIME,
        "create_method": TaskCreateMethod.API.value,
        "category":"OpsTools",
        "flow_type":"common",
        "current_flow": "execute_task"
    }

    task = TaskFlowInstance.objects.create(**taskflow_kwargs)
    return {
        "result": True,
        "data": {"task_id": task.id, "task_url": task.url, "pipeline_tree": task.pipeline_tree},
        "code": err_code.SUCCESS.code,
    }
