# -*- coding: utf-8 -*-
# __init__.py created by kumiler
# on 2022/11/7 15:44
# desc


from datetime import timedelta
from jms.ods.mysql.claim_work_order import jms_ods__claim_work_order
from utils.operators.cluster_for_spark_sql_operator import SparkSqlOperator

# Spark SQL task that rebuilds the claim-work-order DWD base table on the
# `pro` YARN queue.  Runs downstream of the ODS claim-work-order load.
jms_dwd__dwd_claim_work_order_new_base_dt = SparkSqlOperator(
    task_id='jms_dwd__dwd_claim_work_order_new_base_dt',
    email=['lukunming@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    master='yarn',
    # NOTE(review): `date_add(1) | cst_ds` look like custom Jinja filters —
    # presumably the CST business date one day after execution_date; confirm.
    name='jms_dwd__dwd_claim_work_order_new_base_dt_{{ execution_date | date_add(1) | cst_ds }}',
    sql='jms/dwd/sqs/work_order_duplicate/dwd_claim_work_order_new_base_dt/execute.sql',
    pool_slots=2,
    executor_cores=2,
    executor_memory='2G',
    num_executors=2,
    conf={
        'spark.dynamicAllocation.enabled': 'true',
        'spark.shuffle.service.enabled': 'true',
        'spark.dynamicAllocation.maxExecutors': 2,
        'spark.dynamicAllocation.cachedExecutorIdleTimeout': 30,
        'spark.sql.sources.partitionOverwriteMode': 'dynamic',
        'spark.executor.memoryOverhead': '2G',
    },
    yarn_queue='pro',
    execution_timeout=timedelta(minutes=30),
)

# Upstream wiring: the ODS extract must complete before this DWD rebuild.
# (`a >> b` is equivalent to `b << a` in Airflow's bitshift composition.)
jms_ods__claim_work_order >> jms_dwd__dwd_claim_work_order_new_base_dt

