# -*- coding: utf-8 -*-
from datetime import timedelta
from utils.operators.cluster_for_spark_sql_for_oracle_operator import Hive2OracleOperator



# Pushes one day's partition of Hive table jms_dm.dm_weight_flow_daily_summary_dt
# into Oracle via a Spark job, then calls an Oracle stored procedure to merge the
# staged rows into the target table.
oracle__dm_weight_flow_daily_summary_dt = Hive2OracleOperator(
    task_id='oracle__dm_weight_flow_daily_summary_dt',
    email=['jarl.huang@jtexpress.com','yl_bigdata@yl-scm.com'],
    master='yarn',
    # Spark application name stamped with execution_date + 1 day (CST).
    # NOTE(review): the partition filter below uses the plain execution date
    # (no date_add) -- confirm the +1 here is intentional naming convention.
    name='oracle__dm_weight_flow_daily_summary_dt_{{ execution_date | date_add(1) | cst_ds }}',
    pool_slots=1,
    driver_cores=1,
    driver_memory='2g',
    executor_cores=1,
    executor_memory='2G',  # NOTE(review): mixed unit case ('2g' vs '2G') -- harmless but inconsistent
    num_executors=4,
    conf={'spark.dynamicAllocation.enabled': 'true',
          'spark.shuffle.service.enabled': 'true',
          'spark.dynamicAllocation.maxExecutors': 5,
          'spark.dynamicAllocation.cachedExecutorIdleTimeout': 600,
          'spark.network.timeout': 600,
          'spark.sql.sources.partitionOverwriteMode': 'dynamic',
          'spark.executor.memoryOverhead': '1G',
          'spark.sql.shuffle.partitions': 200,
          # NOTE(review): 'uat_oracle' connection alongside yarn_queue='pro' --
          # confirm this is intentional and not a leftover from UAT testing.
          'spark.oracle.connection_key':'uat_oracle',#Oracle connection info: key in the Airflow Variables/parameter list
          'spark.hive.table': 'jms_dm.dm_weight_flow_daily_summary_dt', #Hive source table to push
          'spark.oracle.table': 'dm_weight_flow_daily_summary_dt', #Oracle target table
          # NOTE(review): the trailing "LIMIT 1000" caps the push at 1000 rows --
          # this looks like a test leftover; confirm before a full production run.
          'spark.hive.where_info': "where dt='{{ execution_date | cst_ds }}' AND STATISTIC_DATE='{{ execution_date | cst_ds }}' LIMIT 1000", #Hive partition filter for the pushed data
          'spark.call.sql': "begin pro_merge_temp_to_result ('dm_weight_flow_daily_summary_dt_tmp','dm_weight_flow_daily_summary_dt','{{ execution_date | date_add(-60) | cst_ds }} 00:00:00','{{ execution_date | cst_ds }} 23:59:59'); end;",#Stored-procedure call statement; may be empty -- empty means full overwrite of the target table (e.g. branch master-data tables). [Format: begin proc_name(arg1,arg2,...); end;]
          'spark.oracle.target.columns': ""#Custom list of Oracle columns to load; may be empty -- empty means columns are resolved from the Oracle table schema. [Format (comma-separated): column1,column2,column3,...]
          },
    hiveconf={'hive.exec.dynamic.partition': 'true',
              'hive.exec.dynamic.partition.mode': 'nonstrict',
              'hive.exec.max.dynamic.partitions': 2000,
              'hive.exec.max.dynamic.partitions.pernode': 2000,
              },
    yarn_queue='pro',
    retries=0,
    # NOTE(review): hard kill after 5 minutes while sla=2h below -- the SLA can
    # never fire before the task is killed; confirm which value is intended.
    execution_timeout=timedelta(minutes=5),#hard timeout for the task run
    sla=timedelta(hours=2)
)