# -*- coding: utf-8 -*-
import pendulum
from airflow.exceptions import AirflowSkipException
from datetime import timedelta
from airflow.providers.apache.hdfs.sensors.web_hdfs import WebHdfsSensor
from utils.operators.cluster_for_spark_sql_operator  import SparkSqlOperator
from ai_ageing_cusc_hi.dm.dm_complaints_warning_indicator_hi import jms_ai_group__dm_complaints_warning_indicator_hi
from ai_ageing_cusc_hi.dm.dm_complaints_warning_hi.dm_complaints_warning_monitor_hi import send_message

cst = pendulum.timezone('Asia/Shanghai')


class WarningWebHdfsSensor(WebHdfsSensor):
    """WebHdfsSensor that skips itself during configured CST hours.

    The task's execution date is converted to CST and shifted forward by
    6 hours; if the resulting hour matches one of the skip hours
    (currently only '00'), the parent pre_execute hook still runs and
    then the task is marked skipped via AirflowSkipException.
    """

    def pre_execute(self, context):
        # Shift to the local (CST) schedule clock before deciding.
        shifted = cst.convert(context['ti'].execution_date) + timedelta(hours=6)
        skip_hours = ['00']
        hour = shifted.strftime('%H')

        if hour in skip_hours:
            # Guard clause: run the parent hook, then skip this run.
            print(f'{hour} in {skip_hours}, should skip')
            super().pre_execute(context)
            raise AirflowSkipException()

        print(f'{hour} not in {skip_hours}, run now')
        super().pre_execute(context)

# Waits for the daily dwd_work_order_base_dt partition; skipped at CST hour 00
# by WarningWebHdfsSensor's pre_execute hook.
jms_dwd__dwd_work_order_base_dt = WarningWebHdfsSensor(
    task_id='jms_dwd__dwd_work_order_base_dt',
    filepath=(
        '/dw/hive/jms_dwd.db/external/dwd_work_order_base_dt'
        '/dt={{ execution_date | hour_add(6) | date_add(-1) | cst_ds }}'
    ),
    pool='unlimited_pool',
    retries=2,
    execution_timeout=timedelta(minutes=15),
    email=['jarl.huang@jtexpress.com', 'yl_bigdata@yl-scm.com'],
)

# Waits for the daily dwd_project_work_order_base_dt partition; skipped at
# CST hour 00 by WarningWebHdfsSensor's pre_execute hook.
jms_dwd__dwd_project_work_order_base_dt = WarningWebHdfsSensor(
    task_id='jms_dwd__dwd_project_work_order_base_dt',
    filepath=(
        '/dw/hive/jms_dwd.db/external/dwd_project_work_order_base_dt'
        '/dt={{ execution_date | hour_add(6) | date_add(-1) | cst_ds }}'
    ),
    pool='unlimited_pool',
    retries=2,
    execution_timeout=timedelta(minutes=15),
    email=['jarl.huang@jtexpress.com', 'yl_bigdata@yl-scm.com'],
)

# jms_dwd__dwd_claim_work_order_base_hi = WarningWebHdfsSensor(
#     pool='unlimited_pool',
#     task_id="jms_dwd__dwd_claim_work_order_base_hi",
#     filepath='/dw/hive/jms_dwd.db/external/dwd_claim_work_order_base_hi/dt={{ execution_date | hour_add(6) | cst_ds }}',
#     execution_timeout=timedelta(minutes=15),
#     email=['jarl.huang@jtexpress.com','yl_bigdata@yl-scm.com'],
#     retries=2,
# )

# jms_dwd__dwd_appeal_info_base_dt = WarningWebHdfsSensor(
#     pool='unlimited_pool',
#     task_id="jms_dwd__dwd_appeal_info_base_dt",
#     filepath='/dw/hive/jms_dwd.db/external/dwd_appeal_info_base_dt/dt={{ execution_date | hour_add(6) | date_add(-1) | cst_ds }}',
#     execution_timeout=timedelta(minutes=15),
#     email=['jarl.huang@jtexpress.com','yl_bigdata@yl-scm.com'],
#     retries=2,
# )

class WarningSparkSqlOperator(SparkSqlOperator):
    """SparkSqlOperator that skips itself during configured CST hours.

    Mirrors WarningWebHdfsSensor: the execution date is converted to CST
    and shifted forward by 6 hours; when the resulting hour is in the
    skip list (currently only '00'), the parent pre_execute hook runs
    and the task is then skipped via AirflowSkipException.
    """

    def pre_execute(self, context):
        # Decide on the local (CST) schedule clock.
        shifted = cst.convert(context['ti'].execution_date) + timedelta(hours=6)
        skip_hours = ['00']
        hour = shifted.strftime('%H')

        if hour in skip_hours:
            # Run the parent hook first, then mark this run as skipped.
            print(f'{hour} in {skip_hours}, should skip')
            super().pre_execute(context)
            raise AirflowSkipException()

        print(f'{hour} not in {skip_hours}, run now')
        super().pre_execute(context)

# Endpoint of the internal monitoring service used to deliver Feishu alerts.
url = "https://bgdmapi.jtexpress.com.cn/monitor/feishu/send"
# Feishu bot webhook that receives the success/failure messages.
webhook = "https://open.feishu.cn/open-apis/bot/v2/hook/e320e6a1-e9d9-42f3-9855-75e3ed7e64c0"

# Hourly complaints-warning Spark SQL job; skipped at CST hour 00 by
# WarningSparkSqlOperator's pre_execute hook.
jms_ai_group__dm_complaints_warning_hi = WarningSparkSqlOperator(
    task_id='jms_ai_group__dm_complaints_warning_hi',
    email=['jarl.huang@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    name='jms_ai_group__dm_complaints_warning_hi_{{ execution_date | date_add(1) | cst_hour }}',
    email_on_retry=True,
    retries=0,
    sql='ai_ageing_cusc_hi/dm/dm_complaints_warning_hi/execute.sql',
    yarn_queue='pro',
    driver_memory='4G',
    driver_cores=4,
    executor_memory='16G',
    executor_cores=4,
    pool_slots=2,
    pool='unlimited_pool',
    num_executors=50,  # with spark.dynamicAllocation.enabled=true, num_executors is the minimum executor count
    conf={'spark.executor.memoryOverhead': '5G',
          'spark.dynamicAllocation.enabled': 'true',
          # FIX: removed the misspelled duplicate key 'spark.dynamicallocation.enabled'
          # — Spark conf keys are case-sensitive, so the lowercase variant was dead config.
          'spark.shuffle.service.enabled': 'true',
          'spark.dynamicAllocation.maxExecutors': 60,
          'spark.sql.sources.partitionOverwriteMode': 'dynamic',
          'spark.dynamicAllocation.cachedExecutorIdleTimeout': 120,
          'spark.sql.shuffle.partitions': 600,
          'spark.port.maxRetries': 100,
          'spark.locality.wait.node': '100ms'
          },
    hiveconf={'hive.exec.dynamic.partition': 'true',
              'hive.exec.dynamic.partition.mode': 'nonstrict',
              'hive.exec.max.dynamic.partitions.pernode': 5000,
              'hive.exec.max.dynamic.partitions': 30000
              },
    execution_timeout=timedelta(hours=2),
    priority_weight=20,
    # NOTE(review): send_message(...) is CALLED here at DAG-parse time; this only
    # works if send_message is a factory that returns the actual callback callable
    # (Airflow expects on_*_callback to be a callable) — confirm against its definition.
    on_success_callback=send_message(url, webhook, "任务执行成功", "任务执行成功！"),
    on_failure_callback=send_message(url, webhook, "任务执行失败", "任务执行失败，请及时处理！", "red")
)  # type: SparkSqlOperator

# Upstream wiring: the warning job runs only after the indicator job and the
# two partition sensors have succeeded (the commented entries are disabled
# upstreams kept for reference).
jms_ai_group__dm_complaints_warning_hi << [
    jms_ai_group__dm_complaints_warning_indicator_hi,
    jms_dwd__dwd_work_order_base_dt,
    jms_dwd__dwd_project_work_order_base_dt,
    # jms_dwd__dwd_claim_work_order_base_hi,
    # jms_dwd__dwd_appeal_info_base_dt
]
