# -*- coding: utf-8 -*-
from datetime import timedelta
from utils.operators.spark_submit import SparkSubmitOperator
from ..dm_traffic_flow_details import  jms_dm__dm_traffic_flow_details_dt

# Spark batch task: builds the daily traffic-flow network mart table.
# NOTE(review): the task `name` templates execution_date shifted by one day
# (`date_add(1)`), while `application_args` passes the un-shifted date —
# presumably intentional (name reflects the data date), but worth confirming.
jms_dm__dm_traffic_flow_network_dt = SparkSubmitOperator(
    task_id='jms_dm__dm_traffic_flow_network_dt',
    email=['hejian@yl-scm.com', 'yl_bigdata@yl-scm.com'],
    name='jms_dm__dm_traffic_flow_network_dt_{{ execution_date | date_add(1) | cst_ds }}',
    driver_memory='2G',
    executor_memory='16G',
    executor_cores=4,
    num_executors=40,
    pool_slots=5,
    # Parallelism set to ~2x executor cores (40 executors * 4 cores = 160 cores).
    conf={
        'spark.default.parallelism': 340,
        'spark.sql.shuffle.partitions': 340,
    },
    jars='hdfs:///scheduler/jms/spark/hj/dm_traffic_flow_network/common-1.0-SNAPSHOT.jar',  # dependency jar
    java_class='com.yunlu.bigdata.jobs.export.TrafficFlowNetworkNew',  # Spark main class
    application='hdfs:///scheduler/jms/spark/hj/dm_traffic_flow_network/original-jobs-1.0-SNAPSHOT.jar',  # Spark application jar
    # Single argument: the CST-formatted execution date (partition to process).
    application_args=['{{ execution_date | cst_ds }}'],
    execution_timeout=timedelta(hours=3),
)

# Upstream dependency: the traffic-flow details table must finish first.
jms_dm__dm_traffic_flow_network_dt << jms_dm__dm_traffic_flow_details_dt