# -*- coding: utf-8 -*-
from datetime import timedelta

from utils.operators.spark_submit import SparkSubmitOperator

from utils.operators.latest_only_bash_operator import LatestOnlyBashOperator

# Spark batch task that computes the transfer detail statistics table.
# Note the Jinja templating: the Spark app name embeds execution_date + 1 day
# (CST), while the job argument passes the execution date itself (CST).
_spark_conf = {
    'spark.executor.memoryOverhead': 2048,
    'spark.core.connection.ack.wait.timeout': 300,
    'spark.default.parallelism': 600,
    'spark.locality.wait': 60,
}

_hive_conf = {
    'hive.exec.dynamic.partition': 'true',
    'hive.exec.dynamic.partition.mode': 'nonstrict',
    'hive.exec.max.dynamic.partitions': 300,
    'hive.exec.max.dynamic.partitions.pernode': 300,
}

jms_dwd__dwd_statistics_transfer_detail_dt = SparkSubmitOperator(
    task_id='jms_dwd__dwd_statistics_transfer_detail_dt',
    email=['wangmenglei@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    name='jms_dwd__dwd_statistics_transfer_detail_dt_{{ execution_date | date_add(1) | cst_ds }}',
    pool_slots=5,
    # Driver / executor sizing.
    driver_memory='8G',
    driver_cores=2,
    executor_memory='4G',
    executor_cores=2,
    num_executors=100,
    conf=_spark_conf,
    hiveconf=_hive_conf,
    # Dependency jar shipped alongside the application jar.
    jars='hdfs:///scheduler/jms/spark/chk/report/statistics_transfer_data/common-1.0-SNAPSHOT.jar',
    # Spark main class.
    java_class='com.yunlu.bigdata.jobs.export.SqlTestData',
    # Spark application jar.
    application='hdfs:///scheduler/jms/spark/chk/report/statistics_transfer_data/original-jobs-1.0-SNAPSHOT.jar',
    # Argument dt: the date the data run is for.
    application_args=['{{ execution_date | cst_ds }}'],
    execution_timeout=timedelta(hours=6),
)

# Runs the shell script that uploads the transfer detail table.
# NOTE(review): LatestOnlyBashOperator presumably skips non-latest
# (backfill) runs — confirm against the operator's implementation.
upload__dwd_statistics_transfer_detail_dt = LatestOnlyBashOperator(
    task_id="upload__dwd_statistics_transfer_detail_dt",
    bash_command='jms/oss/dwd_statistics_transfer_detail_dt/upload_dwd_statistics_transfer_detail_dt.sh',
    pool='unlimited_pool',
    email=['wangmenglei@jtexpress.com', 'yl_bigdata@yl-scm.com'],
)

# Task dependencies: the upload runs only after the Spark job has finished.
jms_dwd__dwd_statistics_transfer_detail_dt >> upload__dwd_statistics_transfer_detail_dt
