# -*- coding: utf-8 -*-
from datetime import timedelta
from utils.operators.spark_submit import SparkSubmitOperator

# Partition date for the merge window: execution_date shifted by -60
# (presumably days, via the project's custom `date_add` Jinja filter) and
# rendered as a CST date string by `cst_ds` -- TODO confirm filter semantics.
t_sub_2_dt = '{{ execution_date | date_add(-60) | cst_ds }}'

# Tables the merger must skip, serialized as a JSON-array *string* so it can
# be spliced verbatim into the job's JSON payload template below.
_excluded_tables = (
    "spmi_dwd.dwd_spmi_opt_refund_bill_base_dt",
    "spmi_dwd.dwd_spmi_apack_bill_new_dt",
    "spmi_dwd.dwd_spmi_tof_refund_bill_base_dt",
    "spmi_dwd.dwd_spmi_piece_bill_new_dt",
    "spmi_dwd.dwd_spmi_refund_bill_base_dt",
    "jms_dwd.dwd_merger_small_files_info_dt",
)
ex_tbl_list = '[' + ','.join('"%s"' % tbl for tbl in _excluded_tables) + ']'


# Payload template for the small-file merger Spark job. The pseudo-keys
# "start_date", "end_date" and "ex_tbl_list" are plain placeholders that get
# patched in with str.replace below (the template is not valid JSON until
# then, since ex_tbl_list is substituted without surrounding quotes).
_payload_template = """{
    "mode":"database",
    "db_or_tbWithDB":"spmi_dwd",
    "numThreads":20,
    "env":"pro",
    "excludeTbList":ex_tbl_list,
    "realTimeHdfsLocations":[],
    "saveMergerInfo":1,
    "continueDays":2,
    "topN":20,
    "mergerInfoInsertTbName":"jms_dwd.dwd_merger_small_files_info_dt",
    "partitionStart":"start_date",
    "partitionEnd":"end_date"
}"""

# NOTE(review): this name shadows the stdlib `json` module; kept as-is because
# the operator below passes it through `application_args`.
json = (
    _payload_template
    .replace("start_date", t_sub_2_dt)
    .replace("end_date", t_sub_2_dt)
    .replace("ex_tbl_list", ex_tbl_list)
)

# Spark task that merges small HDFS files across the spmi_dwd database for a
# single partition (execution_date - 60, per the module-level payload string
# passed via application_args).
spmi_dwd_merger = SparkSubmitOperator(
    task_id='spmi_dwd_merger',
    email=['lukunming@jtexpress.com','yl_bigdata@yl-scm.com'],
    # Templated YARN application name; note it uses date_add(1) while the
    # payload window uses date_add(-60) -- presumably intentional labeling,
    # TODO confirm against the custom Jinja filters.
    name='spmi_dwd_merger_{{ execution_date | date_add(1) | cst_ds }}',
    pool_slots=4,
    retries=1,
    execution_timeout=timedelta(hours=2),
    driver_cores=4,
    driver_memory='32G',
    executor_cores=4,
    executor_memory='8G',
    # Baseline executor count; dynamic allocation below may scale up to 100.
    num_executors=50,
    conf={'spark.dynamicAllocation.enabled': 'true',
          'spark.shuffle.service.enabled': 'true',
          'spark.dynamicAllocation.maxExecutors': 100,
          'spark.dynamicAllocation.cachedExecutorIdleTimeout': 60,
          # Dynamic overwrite so only the touched partitions are replaced.
          'spark.sql.sources.partitionOverwriteMode': 'dynamic',
          'spark.executor.memoryOverhead': '2G',
          'spark.sql.shuffle.partitions': 600,
          # Single attempt: a retry at the Airflow level is preferred over
          # YARN-level re-attempts for this job.
          'spark.yarn.maxAppAttempts': 1,
          },
    jars='hdfs:///scheduler/jms/spark/wangbiao/small_files_merger/common-1.0-SNAPSHOT.jar',  # dependency jar
    # NOTE(review): main class name ends in "Test4" -- confirm this is the
    # intended production entry point.
    java_class='com.yunlu.bigdata.jobs.export.mergerSmallFile.SmallFileMergerTest4',  # Spark main class
    application='hdfs:///scheduler/jms/spark/wangbiao/small_files_merger/original-jobs-1.0-SNAPSHOT.jar',  # Spark application jar
    yarn_queue='pro',
    # Single argument: the JSON payload string built above (module-level
    # variable named `json`, which shadows the stdlib module of that name).
    application_args=[json,],
)
