# -*- coding: utf-8 -*-
from datetime import timedelta
from utils.operators.cluster_for_spark_sql_operator import SparkSqlOperator
from jms_point_hi.ods.dwm_yl_rt_ssmx_bill_detail_hi import jms_ods__dwm_yl_rt_ssmx_bill_detail_hi
from jms_point_hi.dim.dim_network_whole_massage import jms_dim__dim_network_whole_massage
from airflow.operators.dummy_operator import DummyOperator

#
# jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf = SparkSqlOperator(
#     task_id='jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf',
#     email=['lukunming@jtexpress.com','yl_etl@yl-scm.com'],
#     master='yarn',
#     name='jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_{{ execution_date | cst_hour }}',
#     sql='jms_point_hi/dwd/dwd_wide_yl_rt_ssmx_bill_detail_hf/execute.sql',
#     pool_slots=2,
#     driver_cores=4,
#     driver_memory='6g',
#     executor_cores=6,
#     executor_memory='12G',
#     num_executors=100,
#     conf={'spark.dynamicAllocation.enabled'                  : 'true',
#           'spark.shuffle.service.enabled'                    : 'true',
#           'spark.dynamicAllocation.maxExecutors'             : 130,
#           'spark.dynamicAllocation.cachedExecutorIdleTimeout': 600,
#           'spark.network.timeout' : 600,
#           'spark.sql.sources.partitionOverwriteMode'         : 'dynamic',
#           'spark.executor.memoryOverhead'                    : '5G',
#           'spark.sql.shuffle.partitions'                     : 1500,
#           'spark.executor.extraJavaOptions'                  : '-XX:+UseG1GC -XX:ParallelGCThreads=6',
#           'spark.shuffle.memoryFraction'                     : 0.8,
#           # 'spark.storage.memoryFraction'                     : 0.3
#           # 'spark.reducer.maxSizeInFlight': '96M',
#           'spark.shuffle.file.buffer': '64k'
#           },
#     hiveconf={'hive.exec.dynamic.partition'             : 'true',  # 动态分区
#               'hive.exec.dynamic.partition.mode'        : 'nonstrict',
#               'hive.exec.max.dynamic.partitions'        : 200,  # 最大分区
#               'hive.exec.max.dynamic.partitions.pernode': 200,  # 最大分区
#               },
#     yarn_queue='warehouse',
#     execution_timeout=timedelta(minutes=40),
#     retries=0,
#     sla=timedelta(hours=2))
#
#
# jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf << [jms_ods__dwm_yl_rt_ssmx_bill_detail_hi,jms_dim__dim_network_whole_massage]


# Spark runtime settings for branch A: dynamic allocation capped at 90
# executors, zstd shuffle compression, G1 GC, 2400 shuffle partitions.
_SPARK_CONF_A = {
    'spark.dynamicAllocation.enabled': 'true',
    'spark.shuffle.service.enabled': 'true',
    'spark.dynamicAllocation.maxExecutors': 90,
    'spark.dynamicAllocation.cachedExecutorIdleTimeout': 120,
    'spark.network.timeout': 600,
    'spark.sql.sources.partitionOverwriteMode': 'dynamic',
    'spark.executor.memoryOverhead': '4G',
    'spark.sql.shuffle.partitions': 2400,
    'spark.io.compression.codec': 'org.apache.spark.io.ZStdCompressionCodec',
    'spark.executor.extraJavaOptions': '-XX:+UseG1GC -XX:ParallelGCThreads=6',
    'spark.shuffle.memoryFraction': 0.8,
    'spark.reducer.maxSizeInFlight': '96M',
    'spark.shuffle.file.buffer': '64k',
}

# Hive settings for branch A: allow dynamic partition inserts and cap
# the number of dynamic partitions created.
_HIVE_CONF_A = {
    'hive.exec.dynamic.partition': 'true',             # enable dynamic partitioning
    'hive.exec.dynamic.partition.mode': 'nonstrict',
    'hive.exec.max.dynamic.partitions': 200,           # max dynamic partitions overall
    'hive.exec.max.dynamic.partitions.pernode': 200,   # max dynamic partitions per node
}

# Branch A of the hourly wide bill-detail build (runs execute_a.sql).
jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_a = SparkSqlOperator(
    task_id='jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_a',
    email=['lukunming@jtexpress.com', 'yl_etl@yl-scm.com'],
    master='yarn',
    name='jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_a_{{ execution_date | cst_hour }}',
    sql='jms_point_hi/dwd/dwd_wide_yl_rt_ssmx_bill_detail_hf/execute_a.sql',
    pool_slots=2,
    driver_cores=4,
    driver_memory='6g',
    executor_cores=6,
    executor_memory='12G',
    num_executors=80,
    conf=_SPARK_CONF_A,
    hiveconf=_HIVE_CONF_A,
    yarn_queue='warehouse',
    execution_timeout=timedelta(minutes=35),
    retries=0,
    sla=timedelta(hours=2),
)

# Branch A waits on the ODS bill-detail feed and the network dimension table.
jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_a << [
    jms_ods__dwm_yl_rt_ssmx_bill_detail_hi,
    jms_dim__dim_network_whole_massage,
]



# Spark runtime settings for branch B — mirrors branch A's tuning:
# dynamic allocation capped at 90 executors, zstd shuffle compression,
# G1 GC, 2400 shuffle partitions.
_SPARK_CONF_B = {
    'spark.dynamicAllocation.enabled': 'true',
    'spark.shuffle.service.enabled': 'true',
    'spark.dynamicAllocation.maxExecutors': 90,
    'spark.dynamicAllocation.cachedExecutorIdleTimeout': 120,
    'spark.network.timeout': 600,
    'spark.sql.sources.partitionOverwriteMode': 'dynamic',
    'spark.executor.memoryOverhead': '4G',
    'spark.sql.shuffle.partitions': 2400,
    'spark.io.compression.codec': 'org.apache.spark.io.ZStdCompressionCodec',
    'spark.executor.extraJavaOptions': '-XX:+UseG1GC -XX:ParallelGCThreads=6',
    'spark.shuffle.memoryFraction': 0.8,
    'spark.reducer.maxSizeInFlight': '96M',
    'spark.shuffle.file.buffer': '64k',
}

# Hive settings for branch B: allow dynamic partition inserts and cap
# the number of dynamic partitions created.
_HIVE_CONF_B = {
    'hive.exec.dynamic.partition': 'true',             # enable dynamic partitioning
    'hive.exec.dynamic.partition.mode': 'nonstrict',
    'hive.exec.max.dynamic.partitions': 200,           # max dynamic partitions overall
    'hive.exec.max.dynamic.partitions.pernode': 200,   # max dynamic partitions per node
}

# Branch B of the hourly wide bill-detail build (runs execute_b.sql).
jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_b = SparkSqlOperator(
    task_id='jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_b',
    email=['lukunming@jtexpress.com', 'yl_etl@yl-scm.com'],
    master='yarn',
    name='jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_b_{{ execution_date | cst_hour }}',
    sql='jms_point_hi/dwd/dwd_wide_yl_rt_ssmx_bill_detail_hf/execute_b.sql',
    pool_slots=2,
    driver_cores=4,
    driver_memory='6g',
    executor_cores=6,
    executor_memory='12G',
    num_executors=80,
    conf=_SPARK_CONF_B,
    hiveconf=_HIVE_CONF_B,
    yarn_queue='warehouse',
    execution_timeout=timedelta(minutes=35),
    retries=0,
    sla=timedelta(hours=2),
)

# Branch B waits on the same upstreams as branch A.
jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_b << [
    jms_ods__dwm_yl_rt_ssmx_bill_detail_hi,
    jms_dim__dim_network_whole_massage,
]

# Lightweight Spark settings for the final fan-in task: at most 5
# executors and 30 shuffle partitions (execute_dep.sql is small).
_SPARK_CONF_DEP = {
    'spark.dynamicAllocation.enabled': 'true',
    'spark.shuffle.service.enabled': 'true',
    'spark.dynamicAllocation.maxExecutors': 5,
    'spark.dynamicAllocation.cachedExecutorIdleTimeout': 600,
    'spark.network.timeout': 600,
    'spark.sql.sources.partitionOverwriteMode': 'dynamic',
    'spark.executor.memoryOverhead': '4G',
    'spark.sql.shuffle.partitions': 30,
}

# Hive settings for the fan-in task: allow dynamic partition inserts
# and cap the number of dynamic partitions created.
_HIVE_CONF_DEP = {
    'hive.exec.dynamic.partition': 'true',             # enable dynamic partitioning
    'hive.exec.dynamic.partition.mode': 'nonstrict',
    'hive.exec.max.dynamic.partitions': 200,           # max dynamic partitions overall
    'hive.exec.max.dynamic.partitions.pernode': 200,   # max dynamic partitions per node
}

# Fan-in task that runs after both branches complete (execute_dep.sql).
jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf = SparkSqlOperator(
    task_id='jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf',
    email=['lukunming@jtexpress.com', 'yl_etl@yl-scm.com'],
    master='yarn',
    name='jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_{{ execution_date | cst_hour }}',
    sql='jms_point_hi/dwd/dwd_wide_yl_rt_ssmx_bill_detail_hf/execute_dep.sql',
    pool_slots=2,
    driver_cores=2,
    driver_memory='2g',
    executor_cores=2,
    executor_memory='4G',
    num_executors=2,
    conf=_SPARK_CONF_DEP,
    hiveconf=_HIVE_CONF_DEP,
    yarn_queue='warehouse',
    execution_timeout=timedelta(minutes=5),
    retries=0,
    sla=timedelta(hours=2),
)

# The fan-in task depends on both build branches.
jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf << [
    jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_a,
    jms_dwd__dwd_wide_yl_rt_ssmx_bill_detail_hf_b,
]
