# -*- coding: utf-8 -*-
from datetime import timedelta
from airflow.providers.apache.hdfs.sensors.web_hdfs import WebHdfsSensor
from utils.alerts.yl_threeSegCodeOnFailue import yl_threeSegCodeOnFailure
from utils.alerts.yl_threeSegCodeOnSuccess import yl_threeSegCodeOnSuccess

from jms.aigroup.ai_group.geo_info_day import jms_aigroup__geo_info_day
from utils.operators.cluster_for_spark_sql_operator import SparkSqlOperator

# def kwargs():
#     kwargs = {
#         "db": "ai_group",
#         "table": "geo_info_day",
#         "desc": "爬虫经纬度geo数据",
#         "taskid": "10160",
#         "ifprivacy": 0,
#         "warnignore": 0,
#     }
#     return kwargs

# ai_dw__geo_info_day = WebHdfsSensor(
#     pool='unlimited_pool',
#     task_id="ai_dw__geo_info_day",
#     filepath='/user/oozie-apps/hive/daily/done_flag/warehouse/ai_group/three_code/geo_info_day/{{ execution_date | cst_ds_nodash }}',
#     execution_timeout=timedelta(hours=7),
#     email=['liyuxian@yl-scm.com', 'songjun@yl-scm.com'],
#     retries=2,
#     # on_success_callback=yl_threeSegCodeOnSuccess(kwargs(), dingding_conn_id="dingding_ThreeSeg_etl_info"),
#     # on_failure_callback=yl_threeSegCodeOnFailure(kwargs(), dingding_conn_id="dingding_ThreeSeg_etl_alert"),
# )

# Daily Spark SQL job that loads ai_dw geo_info_day (crawler latitude/longitude
# geo data) by running the project-local execute.sql through SparkSqlOperator.
ai_dw__geo_info_day = SparkSqlOperator(
    task_id='ai_dw__geo_info_day',
    email=['yushuo@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    conn_id='spark_default',
    pool_slots=4,
    task_concurrency=1,  # set task_concurrency to 1 when the task does not support concurrent runs
    name='ai_dw__geo_info_day_{{ execution_date | date_add(1) | cst_ds }}',
    email_on_retry=True,
    sql='jms/aigroup/ai_dw/geo_info_day/execute.sql',
    yarn_queue='pyspark',
    driver_memory='4G',
    executor_memory='8G',
    executor_cores=2,
    num_executors=30,  # when spark.dynamicAllocation.enabled is true, num_executors is the MINIMUM executor count
    conf={
        'spark.executor.memoryOverhead': '2G',
        'spark.dynamicAllocation.enabled': 'true',
        # External shuffle service is needed so executors can be released
        # safely under dynamic allocation.
        'spark.shuffle.service.enabled': 'true',
        # 'spark.dynamicAllocation.maxExecutors': 100,
        'spark.dynamicAllocation.maxExecutors': 50,
        'spark.sql.sources.partitionOverwriteMode': 'dynamic',
        # NOTE(review): removed the duplicate all-lowercase key
        # 'spark.dynamicallocation.enabled' that used to sit here — Spark
        # configuration keys are case-sensitive, so that variant was ignored;
        # the correctly-cased key above already enables dynamic allocation.
        'spark.dynamicAllocation.cachedExecutorIdleTimeout': 120,
        'spark.sql.shuffle.partitions': 600,
    },
    hiveconf={
        'hive.exec.dynamic.partition': 'true',
        'hive.exec.dynamic.partition.mode': 'nonstrict',
        'hive.exec.max.dynamic.partitions.pernode': 5000,
        'hive.exec.max.dynamic.partitions': 30000,
    },
    execution_timeout=timedelta(hours=1),
    # on_success_callback=yl_threeSegCodeOnSuccess(kwargs(), dingding_conn_id="dingding_ThreeSeg_etl_info"),
    # on_failure_callback=yl_threeSegCodeOnFailure(kwargs(), dingding_conn_id="dingding_ThreeSeg_etl_alert"),
)

ai_dw__geo_info_day << jms_aigroup__geo_info_day