# -*- coding: utf-8 -*-
from datetime import timedelta
from jms.time_sensor.time_after_05_00 import time_after_05_00
from jms.dim.dim_lmdm_sys_network import jms_dim__dim_lmdm_sys_network
from jms.ods.tab.tab_barscan_taking import jms_ods__tab_barscan_taking
from jms.ods.tab.tab_barscan_warehousing import jms_ods__tab_barscan_warehousing
from jms.ods.tab.tab_barscan_bagging import jms_ods__tab_barscan_bagging
from jms.ods.tab.tab_barscan_unloading import jms_ods__tab_barscan_unloading
from jms.ods.tab.tab_barscan_arrival import jms_ods__tab_barscan_arrival
from jms.ods.tab.tab_barscan_loading import jms_ods__tab_barscan_loading
from jms.ods.tab.tab_barscan_send import jms_ods__tab_barscan_send
from jms.ods.tab.tab_barscan_deliver import jms_ods__tab_barscan_deliver
from jms.ods.tab.tab_barscan_sign import jms_ods__tab_barscan_sign
from jms.ods.tab.tab_barscan_collect import jms_ods__tab_barscan_collect
from jms.ods.tms.yl_tmsnew_branch_shipment_stop import jms_ods__yl_tmsnew_branch_shipment_stop
from jms.ods.tms.yl_tmsnew_tms_shipment_stop import jms_ods__yl_tmsnew_tms_shipment_stop
from utils.operators.spark_sql_operator import SparkSqlOperator

# Daily DWD-layer build: whole-network operations basic mid table, run on YARN
# via Spark SQL. Partitioned output is overwritten dynamically per run.
dwd_s01_whole_operations_basic_mid_dt = SparkSqlOperator(
    task_id='dwd_s01_whole_operations_basic_mid_dt',
    task_concurrency=1,
    pool_slots=18,
    master='yarn',
    execution_timeout=timedelta(hours=3),
    email=['chenhongping@yl-scm.com','yl_bigdata@yl-scm.com'],
    name='dwd_s01_whole_operations_basic_mid_dt_{{ execution_date | date_add(1) | cst_ds }}',
    sql='jms/dwd/dwd_s01_whole_operations_basic_mid_dt/execute.hql',
    driver_memory='32G',
    driver_cores=4,
    executor_cores=2,
    executor_memory='4G',
    num_executors=10,  # with spark.dynamicAllocation.enabled, num_executors is the MINIMUM executor count
    conf={'spark.dynamicAllocation.enabled': 'true',  # enable dynamic resource allocation
          'spark.shuffle.service.enabled': 'true',  # external shuffle service (required by dynamic allocation)
          'spark.dynamicAllocation.maxExecutors': 210,  # upper bound on executors under dynamic allocation
          'spark.dynamicAllocation.cachedExecutorIdleTimeout': 210,  # seconds before an idle executor holding cached data is released
          'spark.sql.sources.partitionOverwriteMode': 'dynamic',  # allow overwriting only the partitions being written
          'spark.executor.memoryOverhead': '2G',  # off-heap memory per executor
          'spark.sql.shuffle.partitions': 3600,
          # FIX: key was misspelled as 'spark.default.paralleism', so Spark
          # silently ignored it and the intended parallelism never applied.
          'spark.default.parallelism': 3600,
          # FIX: this property takes 'strict'/'nonstrict', not a boolean;
          # 'true' is invalid. 'nonstrict' matches the hiveconf below.
          'spark.hadoop.hive.exec.dynamic.partition.mode': 'nonstrict',
          'spark.network.timeout': 300,
          'spark.sql.autoBroadcastJoinThreshold': 104857600,  # 100 MB broadcast-join threshold
          },
    hiveconf={'hive.exec.dynamic.partition'             : 'true',  # enable dynamic partitioning
              'hive.exec.dynamic.partition.mode'        : 'nonstrict',
              # Cap of 35 dynamic partitions; original note said ~20 are
              # generated per day — TODO confirm the expected daily count.
              'hive.exec.max.dynamic.partitions'        : 35,
              'hive.exec.max.dynamic.partitions.pernode': 35,
              },
    yarn_queue='pro',
)

# Upstream gates: every ODS barscan feed, both TMS shipment-stop tables,
# the network dimension table, and the 05:00 time sensor must complete
# before this DWD task runs.
_upstream_tasks = [
    jms_ods__tab_barscan_taking,
    jms_ods__tab_barscan_warehousing,
    jms_ods__tab_barscan_bagging,
    jms_ods__tab_barscan_unloading,
    jms_ods__tab_barscan_arrival,
    jms_ods__tab_barscan_loading,
    jms_ods__tab_barscan_send,
    jms_ods__tab_barscan_deliver,
    jms_ods__tab_barscan_sign,
    jms_ods__tab_barscan_collect,
    jms_ods__yl_tmsnew_branch_shipment_stop,
    jms_ods__yl_tmsnew_tms_shipment_stop,
    jms_dim__dim_lmdm_sys_network,
    time_after_05_00,
]
dwd_s01_whole_operations_basic_mid_dt << _upstream_tasks
