# -*- coding: utf-8 -*-
from datetime import timedelta

from utils.operators.cluster_for_spark_sql_operator import SparkSqlOperator
from jms.aigroup.ai_dw.yl_check_threesegcode_sample import jms_ai_dw__yl_check_threesegcode_sample
from jms.time_sensor.time_after_05_45 import time_after_05_45


# Daily sample-check task for the three-segment-code (三段码) dataset.
# Runs a Spark SQL script against the 'pyspark' YARN queue once the upstream
# sample table is ready and the 05:45 time gate has passed (wired below).
ai_dw__yl_check_threesegcode_sample_day = SparkSqlOperator(
    task_id='ai_dw__yl_check_threesegcode_sample_day',
    conn_id='spark_default',
    pool_slots=2,
    task_concurrency=1,  # the task does not support concurrent runs, so cap at 1
    name='ai_dw__yl_check_threesegcode_sample_day_{{ execution_date | date_add(1) | cst_ds }}',
    email=['yushuo@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    email_on_retry=True,
    sql='jms/aigroup/ai_dw/yl_check_threesegcode_sample_day/execute.sql',
    yarn_queue='pyspark',
    driver_memory='4G',
    driver_cores=2,
    executor_memory='12G',
    executor_cores=4,
    num_executors=10,
    depends_on_past=False,
    conf={
        'spark.core.connection.ack.wait.timeout': 300,
        'spark.locality.wait': 60,
        'spark.dynamicAllocation.maxExecutors': 12,
        'spark.dynamicAllocation.cachedExecutorIdleTimeout': 40,
        'spark.dynamicAllocation.enabled': 'true',
        # Long-running auxiliary service in the NodeManager that improves
        # shuffle performance; required for dynamic allocation on YARN.
        'spark.shuffle.service.enabled': 'true',
        'spark.executor.memoryOverhead': '2G',
        # BUGFIX: key was misspelled as 'spark.default.paralleism' and was
        # silently ignored by Spark. Only effective for RDD operations.
        'spark.default.parallelism': '1000',
        # spark.sql.shuffle.partitions only applies to Spark SQL.
        'spark.sql.shuffle.partitions': 1000,
        'spark.sql.autoBroadcastJoinThreshold': 524288000,
    },
    execution_timeout=timedelta(hours=1),
    # on_success_callback=yl_threeSegCodeOnSuccess(kwargs(), dingding_conn_id="dingding_ThreeSeg_etl_info"),
    # on_failure_callback=yl_threeSegCodeOnFailure(kwargs(), dingding_conn_id="dingding_ThreeSeg_etl_alert"),
)

# The daily check must wait for both the upstream sample build and the
# 05:45 time sensor before it is allowed to run.
ai_dw__yl_check_threesegcode_sample_day.set_upstream([
    jms_ai_dw__yl_check_threesegcode_sample,
    time_after_05_45,
])
