# -*- coding: utf-8 -*-
import pendulum
from datetime import timedelta
from utils.operators.cluster_for_spark_sql_operator import SparkSqlOperator
from airflow.exceptions import AirflowSkipException
from jms_hi.dm.dm_center_transfer_summary_new_dt import jms_dm__dm_center_transfer_summary_new_dt
from airflow.providers.apache.hdfs.sensors.web_hdfs import WebHdfsSensor

# China Standard Time (UTC+8) — used to convert Airflow's UTC execution_date.
cst = pendulum.timezone('Asia/Shanghai')

class BranchRichSqlSensor(SparkSqlOperator):
    """SparkSqlOperator that only runs on a configured schedule slot.

    Before execution it checks the (CST-converted) data time; if the time
    does not fall on the allowed slot it raises AirflowSkipException so the
    task instance is marked as skipped instead of run.
    """

    def pre_execute(self, context):
        # Data time for this hourly task: execution_date converted to CST,
        # shifted forward by one hour.
        day = cst.convert(context['ti'].execution_date) + timedelta(hours=1)

        # Allowed slot(s). NOTE(review): '%d' below formats day-of-month, so
        # '03' means the 3rd of the month — but the original comment said
        # "starts at 3 o'clock"; confirm whether '%H' (hour) was intended.
        schedule_date = ['03']

        if day.strftime('%d') in schedule_date:
            print(f'{day.strftime("%d")} in {schedule_date}, run now')
            super().pre_execute(context)
        else:
            print(f'{day.strftime("%d")} not in {schedule_date}, should skip')
            super().pre_execute(context)
            raise AirflowSkipException()


# Hive table name
hive_table = "dm_center_transfer_summary_dt"
# Airflow task id, derived from the Hive table name
task_id = f"jms_dm__{hive_table}"

# Spark SQL task loading dm_center_transfer_summary_dt.
# BUGFIX: `driver_cores=null` used the bare name `null`, which does not exist
# in Python and raised NameError at DAG-parse time; `None` is the intended
# "unset" value.
jms_dm__dm_center_transfer_summary_dt = BranchRichSqlSensor(
    task_id=task_id,  # 'jms_dm__dm_center_transfer_summary_dt'
    task_concurrency=1,
    pool_slots=2,
    master='yarn',
    name=f'{task_id}_{{{{ execution_date | date_add(1) | cst_ds }}}}',
    sql='jms_hi/dm/dm_center_transfer_summary_dt/execute.sql',
    retries=0,
    pool='spmi_piece',
    email=['wangmenglei@jtexpress.com', 'yl_bigdata@yl-scm.com'],
    execution_timeout=timedelta(hours=1),
    yarn_queue='pro',
    # NOTE(review): literal string 'null' — presumably the operator treats it
    # as "not set"; confirm against SparkSqlOperator before changing.
    driver_memory='null',
    driver_cores=None,
    executor_cores=2,
    executor_memory='2G',
    num_executors=2,
    conf={
        'spark.dynamicAllocation.enabled': 'true',   # enable dynamic allocation
        'spark.shuffle.service.enabled': 'true',     # shuffle service required by dynamic allocation
        'spark.dynamicAllocation.maxExecutors': 2,
        'spark.dynamicAllocation.cachedExecutorIdleTimeout': 120,  # release idle executors after this many seconds
        'spark.sql.sources.partitionOverwriteMode': 'dynamic',     # allow overwriting existing partitions
        'spark.executor.memoryOverhead': '2G',
        'spark.sql.shuffle.partitions': 200,
    },
)

# Upstream dependency: the *_new_dt summary task must finish first.
jms_dm__dm_center_transfer_summary_new_dt >> jms_dm__dm_center_transfer_summary_dt
