# -*- coding: utf-8 -*-
from datetime import timedelta
from jms.aigroup.ai_dw import ai_dw__yl_ml_clean_address_new_day
from utils.operators.cluster_for_spark_sql_operator import SparkSqlOperator
from jms.aigroup import jms_aigroup__geo_info_day
from jms.time_sensor.time_after_04_30 import time_after_04_30
# def kwargs():
#     kwargs = {
#         "db": "ai_group",
#         "table": "train_sample",
#         "desc": "模型训练经纬度地址数据",
#         "taskid": "10060",
#         "ifprivacy": 0,
#         "warnignore": 0,
#     }
#     return kwargs

# Spark runtime configuration for the training-sample job.
_train_sample_spark_conf = {
    'spark.core.connection.ack.wait.timeout': 300,
    'spark.locality.wait': 60,
    'spark.dynamicAllocation.maxExecutors': 40,
    'spark.dynamicAllocation.cachedExecutorIdleTimeout': 40,
    'spark.dynamicAllocation.enabled': 'true',
    # Long-running auxiliary service on the NodeManager that improves shuffle performance.
    'spark.shuffle.service.enabled': 'true',
    'spark.executor.memoryOverhead': '4G',
    # spark.sql.shuffle.partitions only takes effect for Spark SQL queries.
    'spark.sql.shuffle.partitions': 1300,
    'spark.shuffle.memoryFraction': '0.8',
    'spark.executor.extraJavaOptions': '-XX:+UseG1GC -XX:ParallelGCThreads=6',
}

# Hive session settings enabling dynamic partitioning for the target table.
_train_sample_hive_conf = {
    'hive.exec.dynamic.partition': 'true',  # enable dynamic partitions
    'hive.exec.dynamic.partition.mode': 'nonstrict',
    'hive.exec.max.dynamic.partitions': 100000,
    'hive.exec.max.dynamic.partitions.pernode': 1000,
}

# Spark SQL task producing the training sample (presumably the lat/lng address
# training data referenced by the commented-out kwargs above — TODO confirm).
ai_dw__train_sample = SparkSqlOperator(
    task_id='ai_dw__train_sample',
    conn_id='spark_default',
    pool_slots=15,
    task_concurrency=1,  # the task does not support concurrent runs, so cap at 1
    name='ai_dw__train_sample_{{ execution_date | date_add(1) | cst_ds }}',
    email=['yushuo@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    email_on_retry=True,
    sql='jms/aigroup/ai_dw/train_sample/execute.sql',
    yarn_queue='pyspark',
    driver_memory='10G',
    driver_cores=2,
    executor_memory='10G',
    executor_cores=4,
    num_executors=20,
    depends_on_past=False,
    conf=_train_sample_spark_conf,
    hiveconf=_train_sample_hive_conf,
    execution_timeout=timedelta(hours=3),
    # NOTE(review): DingTalk notification callbacks were previously wired here and
    # are currently disabled:
    # on_success_callback=yl_threeSegCodeOnSuccess(kwargs(), dingding_conn_id="dingding_ThreeSeg_etl_info"),
    # on_failure_callback=yl_threeSegCodeOnFailure(kwargs(), dingding_conn_id="dingding_ThreeSeg_etl_alert"),
)

# Upstream wiring: the training-sample job runs only after the cleaned-address
# table, the 04:30 time sensor, and the geo-info table tasks have completed.
[
    ai_dw__yl_ml_clean_address_new_day,
    time_after_04_30,
    jms_aigroup__geo_info_day,
] >> ai_dw__train_sample
