# # -*- coding: utf-8 -*-
# NOTE(review): this entire task definition is commented out (disabled). It looks
# like an Airflow-style SparkSQL task wired to depend on jms_ods__tab_barscan_other —
# confirm with the pipeline owner whether it should be deleted or re-enabled rather
# than left as dead commented-out code.
# from datetime import timedelta
# from jms.ods.tab.tab_barscan_other import jms_ods__tab_barscan_other
# from utils.operators.spark_sql_operator import SparkSqlOperator
# 
# jms_dwd__dwd_barscan_other_dt = SparkSqlOperator(
#     task_id='jms_dwd__dwd_barscan_other_dt',
#     email='chenhongping@yl-scm.com',
#     master='yarn',
#     name='jms_dwd__dwd_barscan_other_dt_{{ execution_date | date_add(1) | cst_ds }}',
#     sql='jms/dwd/tab/dwd_barscan_other_dt/prior_actual_sign.hql',
#     pool_slots=3,
#     executor_cores=2,
#     executor_memory='3G',
#     num_executors=2,
#     conf={'spark.dynamicAllocation.enabled'                  : 'true',
#           'spark.shuffle.service.enabled'                    : 'true',
#           'spark.dynamicAllocation.maxExecutors'             : 20,
#           'spark.dynamicAllocation.cachedExecutorIdleTimeout': 30,
#           'spark.sql.sources.partitionOverwriteMode'         : 'dynamic',
#           'spark.executor.memoryOverhead'                    : '1G',
#           },
#     hiveconf={'hive.exec.dynamic.partition'             : 'true',  # enable dynamic partitioning
#               'hive.exec.dynamic.partition.mode'        : 'nonstrict',
#               'hive.exec.max.dynamic.partitions'        : 300,  # max dynamic partitions overall
#               'hive.exec.max.dynamic.partitions.pernode': 300,  # max dynamic partitions per node
#               },
#     yarn_queue='pro',
#     execution_timeout=timedelta(minutes=80),
# )
# 
# jms_dwd__dwd_barscan_other_dt << jms_ods__tab_barscan_other
