# -*- coding: utf-8 -*-
from datetime import timedelta

from utils.operators.spark_submit import SparkSubmitOperator

from jms.dm.route.dm_route_branch_in import dm__dm_route_branch_in
from jms.dm.route.dm_route_branch_out import dm__dm_route_branch_out
from jms.dm.route.dm_route_main_two import dm__dm_route_main_two

dm__dm_route_whole_huanan_base = SparkSubmitOperator(
    conn_id='spark_route',
    task_id='dm__dm_route_whole_huanan_base',
    email=['zhangqinglin@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    pool_slots=6,
    name='dm__dm_route_whole_huanan_base',
    driver_memory='5G',
    executor_memory='10G',
    executor_cores=5,
    num_executors=110,
    conf={'spark.dynamicAllocation.enabled': 'true',  # enable dynamic resource allocation
          'spark.shuffle.service.enabled': 'true',  # external shuffle service, required by dynamic allocation
          'spark.dynamicAllocation.maxExecutors': 120,  # upper bound when scaling up (num_executors above acts as the initial target)
          'spark.dynamicAllocation.cachedExecutorIdleTimeout': 60,  # seconds before an idle executor holding cached data is released
          'spark.sql.sources.partitionOverwriteMode': 'dynamic',  # overwrite only the partitions being written, not the whole table
          'spark.executor.memoryOverhead': '4G',  # off-heap memory per executor
          'spark.sql.shuffle.partitions': 1700,
          'spark.reducer.maxSizeInFlight': '96m',
          'spark.shuffle.consolidateFiles': 'true',  # legacy hash-shuffle setting; a no-op on Spark 2.x, which only has sort-based shuffle
          'spark.shuffle.memoryFraction': '0.8',  # legacy setting, ignored unless spark.memory.useLegacyMode=true
          'spark.shuffle.file.buffer': '64k',
          'spark.executor.extraJavaOptions': '-XX:+UseG1GC -XX:ParallelGCThreads=5'
          },
    jars='hdfs:///route/package/graphframes-0.7.0-spark2.3-s_2.11.jar,hdfs:///route/package/common-1.0-SNAPSHOT.jar',
    java_class='com.yunlu.bigdata.jobs.route.export.WholeRouteExportReorgnized',
    application='hdfs:///route/package/original-jobs-1.0-SNAPSHOT.jar',
    application_args=['{{ execution_date | cst_ds }}', 'huanan', '1500'],
    execution_timeout=timedelta(hours=3),
)
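
# A minimal sketch (an assumption, not part of this repo) of what the custom
# `cst_ds` Jinja filter used in application_args above might do: presumably it
# renders the UTC execution_date as a YYYY-MM-DD string in China Standard Time
# (UTC+8), registered via Airflow's user_defined_filters, e.g.
# DAG(..., user_defined_filters={'cst_ds': cst_ds}). Kept commented out here so
# it does not shadow the real filter defined elsewhere:
#
# def cst_ds(execution_date):
#     """Render a UTC datetime as a YYYY-MM-DD string shifted to CST (UTC+8)."""
#     return (execution_date + timedelta(hours=8)).strftime('%Y-%m-%d')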

dm__dm_route_whole_huanan_base << [dm__dm_route_branch_in,
                                   dm__dm_route_branch_out,
                                   dm__dm_route_main_two]
