from datetime import timedelta
from utils.operators.cluster_for_spark_sql_operator import SparkSqlOperator
from jms.dwd.dwd_netwrok_metric_dictionary_dt import jms_dwd__dwd_netwrok_metric_dictionary_dt


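# Daily DM-layer job: runs jms/dm/dm_sqs_summary_net_dt/execute.sql through
# Spark on the 'pro' YARN queue. The templated application name uses what
# appear to be project-specific Jinja filters (date_add, cst_ds) to stamp the
# run with the CST date one day after execution_date.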
jms_dm__dm_sqs_summary_net_dt = SparkSqlOperator(
    task_id='jms_dm__dm_sqs_summary_net_dt',
    email=['jarl.huang@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    name='jms_dm__dm_sqs_summary_net_dt_{{ execution_date | date_add(1) | cst_ds }}',
    sql='jms/dm/dm_sqs_summary_net_dt/execute.sql',
    pool_slots=1,
    executor_cores=2,
    executor_memory='1G',
    driver_memory='3G',
    num_executors=3,
    conf={
        # Spark config keys are case-sensitive: the all-lowercase
        # 'spark.dynamicallocation.enabled' would be ignored, leaving dynamic
        # allocation off, so the camelCase spelling is required.
        'spark.dynamicAllocation.enabled': 'true',
        'spark.dynamicAllocation.maxExecutors': 4,
        'spark.dynamicAllocation.cachedExecutorIdleTimeout': '120s',
        'spark.shuffle.service.enabled': 'true',
        'spark.sql.sources.partitionOverwriteMode': 'dynamic',
        'spark.sql.shuffle.partitions': 600,
    },
    hiveconf={
        'hive.exec.dynamic.partition': 'true',
        'hive.exec.dynamic.partition.mode': 'nonstrict',
        'hive.exec.max.dynamic.partitions.pernode': 200,
        'hive.exec.max.dynamic.partitions': 200,
    },
    yarn_queue='pro',
    execution_timeout=timedelta(minutes=60),
)

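# Upstream dependency: the net summary reads from the network metric
# dictionary, so it waits for the DWD task to land first. ('netwrok' is a
# misspelling baked into the upstream module/task name, so it must stay
# as-is here to keep the import and dependency resolving.)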
jms_dm__dm_sqs_summary_net_dt << [
    jms_dwd__dwd_netwrok_metric_dictionary_dt,
]
