# -*- coding: utf-8 -*-
from datetime import timedelta
from utils.operators.cluster_for_spark_sql_operator import SparkSqlOperator

from jms.dim import jms_dim__dim_lmdm_sys_network
from jms.dwd.sqs.dwd_sqs_leave_message_base_dt import jms_dwd__dwd_sqs_leave_message_base_dt
from jms.dim.dim_sys_manage_region import jms_dim__dim_sys_manage_region

# Daily DM-layer task: builds the CN leave-message registration detail table
# (partitioned by dt) from the DWD leave-message base plus region/network dims.
jms_dm__dm_cn_leave_message_regist_detail_dt = SparkSqlOperator(
    task_id='jms_dm__dm_cn_leave_message_regist_detail_dt',
    email=['jarl.huang@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    # Spark app name carries the CST business date (execution_date + 1 day).
    name='jms_dm__dm_cn_leave_message_regist_detail_dt_{{ execution_date | date_add(1) | cst_ds }}',
    email_on_retry=True,
    retries=2,
    sql='jms/dm/dm_cn_leave_message_regist_detail_dt/execute.sql',
    yarn_queue='pro',
    driver_memory='8G',
    driver_cores=4,
    executor_memory='12G',
    executor_cores=4,
    pool_slots=2,
    pool='unlimited_pool',
    num_executors=20,  # With spark.dynamicAllocation.enabled, this is the minimum executor count.
    conf={'spark.executor.memoryOverhead': '5G',
          'spark.dynamicAllocation.enabled': 'true',
          'spark.shuffle.service.enabled': 'true',  # required for dynamic allocation
          'spark.dynamicAllocation.maxExecutors': 50,
          # Overwrite only the partitions written by this run, not the whole table.
          'spark.sql.sources.partitionOverwriteMode': 'dynamic',
          'spark.dynamicAllocation.cachedExecutorIdleTimeout': 120,
          'spark.sql.shuffle.partitions': 200,
          # Fixed typo: was 'spark.default.paralleism', which Spark silently ignored.
          # Note: spark.default.parallelism only affects RDD operations, not Spark SQL shuffles.
          'spark.default.parallelism': '200',
          },
    execution_timeout=timedelta(minutes=30),
)

# Wire upstream dependencies: this DM task runs only after the DWD
# leave-message base and both dimension tables (manage region, network)
# have completed for the same schedule interval.
jms_dm__dm_cn_leave_message_regist_detail_dt << [
    jms_dwd__dwd_sqs_leave_message_base_dt,
    jms_dim__dim_sys_manage_region,
    jms_dim__dim_lmdm_sys_network,
]
