# # -*- coding: utf-8 -*-
# from datetime import timedelta
# from jms.ods.tab.tab_barscan_notallow import jms_ods__tab_barscan_notallow
# from utils.operators.spark_sql_operator import SparkSqlOperator
#
# yl_ods__tab_barscan_notallow = SparkSqlOperator(
#     task_id='yl_ods__tab_barscan_notallow',
#     email='chenhongping@yl-scm.com',
#     depends_on_past=True,
#     pool_slots=1,
#     master='yarn',
#     execution_timeout=timedelta(minutes=60),
#     name='yl_ods__tab_barscan_notallow_{{ execution_date | cst_ds_nodash }}',
#     sql='jms/ods2/tab/tab_barscan_notallow/bushu.hql',
#     executor_cores=2,
#     executor_memory='3G',
#     num_executors=3,  # spark.dynamicAllocation.enabled 为 True 时，num_executors 表示最少 Executor 数
#     conf={'spark.dynamicAllocation.enabled': 'true',  # 动态资源开启
#           'spark.shuffle.service.enabled': 'true',  # 动态资源 Shuffle 服务开启
#           'spark.dynamicAllocation.maxExecutors': 8,  # 动态资源最大扩容 Executor 数
#           'spark.dynamicAllocation.cachedExecutorIdleTimeout': 60,  # 动态资源自动释放闲置 Executor 的超时时间(s)
#           'spark.sql.sources.partitionOverwriteMode': 'dynamic',  # 允许删改已存在的分区
#           'spark.executor.memoryOverhead': '1G',  # 堆外内存
#           },
#     hiveconf={'hive.exec.dynamic.partition': 'true',  # 动态分区
#               'hive.exec.dynamic.partition.mode': 'nonstrict',
#               'hive.exec.max.dynamic.partitions': 3,  # max dynamic partitions per job (NOTE(review): value is 3; earlier comment said "20 partitions per day" — mismatch, confirm intended limit)
#               'hive.exec.max.dynamic.partitions.pernode': 3,  # max dynamic partitions per node (value is 3, not 20 — same mismatch as above)
#               },
# )
#
# yl_ods__tab_barscan_notallow << jms_ods__tab_barscan_notallow
