# -*- coding: utf-8 -*-
# from datetime import timedelta
# from jms.dm.dm_agg_collect_aging_dt import jms_dm__dm_agg_collect_aging_dt
# from utils.operators.spark_sql_operator import SparkSqlOperator
#
# jms_dm__dm_network_collect_aging_dt = SparkSqlOperator(
#     task_id='jms_dm__dm_network_collect_aging_dt',
#     email='lukunming@jtexpress.com',
#     pool_slots=2,
#     master='yarn',
#     name='jms_dm__dm_network_collect_aging_dt_{{ execution_date | cst_ds_nodash }}',
#     sql='jms/dm/dm_network_collect_aging_dt/execute.hql',
#     executor_cores=2,
#     executor_memory='3G',
#     num_executors=5,  # spark.dynamicAllocation.enabled 为 True 时，num_executors 表示最少 Executor 数
#     conf={'spark.dynamicAllocation.enabled'                  : 'true',  # 动态资源开启
#           'spark.shuffle.service.enabled'                    : 'true',  # 动态资源 Shuffle 服务开启
#           'spark.dynamicAllocation.maxExecutors'             : 11,  # 动态资源最大扩容 Executor 数
#           'spark.dynamicAllocation.cachedExecutorIdleTimeout': 60,  # 动态资源自动释放闲置 Executor 的超时时间(s)
#           'spark.executor.memoryOverhead'                    : '1G',  # 堆外内存
#           },
#     yarn_queue='pro',
#     execution_timeout=timedelta(minutes=60),
# )
#
# jms_dm__dm_network_collect_aging_dt << jms_dm__dm_agg_collect_aging_dt
