# -*- coding: utf-8 -*-
# __init__.py created by kumiler
# on 2023/4/23 14:34
# desc: commented-out reference snippet — appears to be an Airflow SparkSqlOperator
#       task (oracle_sys_agency_area_manager) plus an Oracle partition-drop step;
#       kept for reference only, nothing in this file executes.
# from datetime import timedelta
# from utils.operators.spark_sql_operator import SparkSqlOperator
# from utils.operators.conn_oracle import OracleConnector
#
#
# NOTE(review): plaintext Oracle credentials below (and repeated in the
# OracleConnector call) are committed to the repo even though the code is
# commented out — rotate this password and move the credentials into an
# Airflow Connection / secrets backend before this snippet is ever revived.
# user = 'uat_bigdata'
# password = 'px8BDl2c71Mz'
#
# oracleConnector = OracleConnector(ip='10.24.13.11',
#                                   port='1521',
#                                   user='uat_bigdata',
#                                   pwd='px8BDl2c71Mz')
# conn=oracleConnector.get_conn(user,password)
#
# oracleConnector.drop(conn,"jms_dwd","sys_agency_area_manager","dt","p20230419,p20230418")
# conn.close()
#
# oracle_sys_agency_area_manager = SparkSqlOperator(
#     task_id='oracle_sys_agency_area_manager',
#     task_concurrency=1,
#     pool_slots=10,
#     master='yarn',
#     name='jms_dim__dim_lq_service_site_new_base_{{ execution_date | date_add(1) | cst_ds }}',
#     sql='jms/oracle/execute.sql',
#     executor_cores=2,
#     executor_memory='3G',
#     email={'rabie.zhuang@jtexpress.com','yl_bigdata@yl-scm.com'},
#     num_executors=4,  # spark.dynamicAllocation.enabled 为 True 时，num_executors 表示最少 Executor 数
#     conf={'spark.dynamicAllocation.enabled'                  : 'true',  # 动态资源开启
#           'spark.shuffle.service.enabled'                    : 'true',  # 动态资源 Shuffle 服务开启
#           'spark.dynamicAllocation.maxExecutors'             : 10,  # 动态资源最大扩容 Executor 数
#           'spark.dynamicAllocation.cachedExecutorIdleTimeout': 60,  # 动态资源自动释放闲置 Executor 的超时时间(s)
#           'spark.sql.sources.partitionOverwriteMode'         : 'dynamic',  # 允许删改已存在的分区
#           'spark.executor.memoryOverhead'                    : '1G',  # 堆外内存
#           'spark.sql.shuffle.partitions'                     : 20,
#           'spark.debug.maxToStringFields'                    : 100,
#           'spark.sql.parquet.writeLegacyFormat'              : 'true',
#           },
#     hiveconf={'hive.exec.dynamic.partition'             : 'true',  # 动态分区
#               'hive.exec.dynamic.partition.mode'        : 'nonstrict',
#               'hive.exec.max.dynamic.partitions'        : 20,  # 每天生成 20 个分区
#               'hive.exec.max.dynamic.partitions.pernode': 20,  # 每天生成 20 个分区
#               },
#     queue='pro',
#     #execution_timeout=timedelta(hours=5)
#     #excel平均时长:0
#     execution_timeout = timedelta(minutes=30),
# )




