# -*- coding: utf-8 -*-
from utils.operators.spark_submit import SparkSubmitOperator

from jms.ods import jms_ods__tab_barscan_centersend, \
    jms_ods__yl_oms_oms_waybill, \
    jms_ods__tab_statistics, \
    jms_ods__yl_tms_branch_time_effective, \
    jms_ods__yl_oms_interceptorrecord, \
    jms_ods__tab_barscan_sitearrival, \
    jms_ods__tab_barscan_sitesend, \
    jms_ods__tab_barscan_sign, \
    jms_ods__tab_barscan_collect
from jms.dim.dim_network_whole_massage import jms_dim__dim_network_whole_massage

# Jinja template for the partition date (dt), rendered by Airflow at run
# time through the project-defined `cst_ds` filter (CST date string).
_CST_DS = '{{ execution_date | cst_ds }}'

# Daily Spark batch building the collect-time analysis DM table.
# NOTE(review): the HDFS paths spell "collect_analysisi" / "CollectAnalysisi"
# — looks like a typo baked into the deployed artifact paths; confirm against
# HDFS before renaming anything.
jms_dm__collect_analysis = SparkSubmitOperator(
    task_id='jms_dm__collect_analysis',
    email=['guoruiling@jtexpress.com','yl_bigdata@yl-scm.com'],
    pool_slots=3,
    # The job does not support concurrent runs, so cap concurrency at 1.
    task_concurrency=1,
    # Application name shown in the YARN resource-manager UI.
    name='jms_dm__collect_analysis_' + _CST_DS,
    driver_memory='10G',
    executor_memory='4G',
    executor_cores=4,
    num_executors=12,
    jars='hdfs:///scheduler/jms/spark/zyt/collect_analysisi/common-1.0-SNAPSHOT.jar',
    application='hdfs:///scheduler/jms/spark/zyt/collect_analysisi/CollectAnalysisi.jar',
    # Spark driver main class.
    java_class='com.yunlu.bigdata.jobs.route.export.CollectTimeAnalysis',
    # Single positional argument passed to the job: the partition date (dt).
    application_args=[_CST_DS],
)

# Wire upstream dependencies: the DM job may only run after the DIM table
# and every ODS source extract for the day have completed.
# (`op.set_upstream([...])` is the method form of `op << [...]`.)
jms_dm__collect_analysis.set_upstream([
    jms_dim__dim_network_whole_massage,
    jms_ods__tab_barscan_centersend,
    jms_ods__yl_oms_oms_waybill,
    jms_ods__tab_statistics,
    jms_ods__yl_tms_branch_time_effective,
    jms_ods__yl_oms_interceptorrecord,
    jms_ods__tab_barscan_sitearrival,
    jms_ods__tab_barscan_sitesend,
    jms_ods__tab_barscan_sign,
    jms_ods__tab_barscan_collect,
])
