# -*- coding: utf-8 -*-
from datetime import timedelta
from string import Template

from utils.operators.spark_submit import SparkSubmitOperator
from jms.time_sensor import time_after_05_00

# Connection / date parameters. The '{{ ... }}' values are Airflow Jinja
# templates rendered at task runtime (application_args is a templated field);
# the substitution below runs at DAG-parse time and only splices these
# template strings into the JSON config.
jdbcUrl = '{{ var.json.oracle_tab.url }}'
username = '{{ var.json.oracle_tab.username }}'
password = '{{ var.json.oracle_tab.password }}'
nowdt = '{{ execution_date | cst_ds }}'  # execution date (CST)
nextdt = '{{ execution_date | date_add(1) | cst_ds }}'  # execution date + 1 day (CST)
table = "tab_report_errorseparatedetail"
env = '{{ var.value.env_sync }}'

# Sync-job JSON config consumed by DataSynchDriver: read one day of the Oracle
# table (scan_time in [nowdt, nextdt)) and overwrite the matching dt partition
# of jms_ods.<table> in Hive.
#
# string.Template fills every ${...} marker in a single pass. This removes the
# bug class of the previous chained str.replace() approach, where bare tokens
# such as "nowdt" were replaced sequentially: a token occurring inside an
# earlier-substituted value (or inside another token's replacement) would be
# corrupted, and correctness depended on the exact ordering of the calls.
#
# NOTE(review): the "settting" key is kept misspelled on purpose — the
# consuming jar presumably expects this exact key; confirm before correcting.
jsonpara = Template("""{
"reader":{
"connect":{
"url":"${jdbcUrl}",
"username":"${username}",
"password":"${password}",
"driver":"oracle.jdbc.driver.OracleDriver"
},
"dbtype":"oracle",
"tableName":"${table}",
"where":"scan_time>=to_date('${nowdt} 00:00:00','yyyy-mm-dd hh24:mi:ss') and scan_time<to_date('${nextdt} 00:00:00','yyyy-mm-dd hh24:mi:ss')",
"query":"",
"splitColumn":"billcode",
"equalitySectioning":1,
"containsnull":0,
"fetchsize":"1000",
"threadNumber":1
},
"channel":{
"filterAbnormalCharacter":1
},
"writer":{
"dbtype":"hive",
"tableName":"${table}",
"database":"jms_ods",
"writeMode": "overwrite",
"partitionColumn":"dt",
"partitionValue":"${nowdt}"},
"settting":{
"env":"${env}"}
}""").substitute(
    jdbcUrl=jdbcUrl,
    username=username,
    password=password,
    nowdt=nowdt,
    nextdt=nextdt,
    table=table,
    env=env,
)

# Spark configuration for the sync job: dynamic allocation capped at two
# executors, cached executors released after 30 s idle, dynamic partition
# overwrite so only the written dt partition is replaced.
_spark_conf = {
    'spark.dynamicAllocation.enabled': 'true',
    'spark.shuffle.service.enabled': 'true',
    'spark.dynamicAllocation.maxExecutors': 2,
    'spark.dynamicAllocation.cachedExecutorIdleTimeout': 30,
    'spark.sql.sources.partitionOverwriteMode': 'dynamic',
    'spark.executor.memoryOverhead': '1G',
}

# Daily Oracle -> Hive sync task for tab_report_errorseparatedetail.
jms_ods__tab_report_errorseparatedetail__t_1 = SparkSubmitOperator(
    task_id='jms_ods__tab_report_errorseparatedetail__t_1',
    name='jms_ods__tab_report_errorseparatedetail__t_1_{{ execution_date | date_add(1) | cst_ds }}',
    email='chenhongping@yl-scm.com',
    # Scheduling limits: shared oracle_tab pool, 2-hour hard timeout,
    # 7-hour SLA.
    pool='oracle_tab',
    pool_slots=2,
    execution_timeout=timedelta(hours=2),
    sla=timedelta(hours=7),
    # Spark resources.
    driver_memory='2G',
    executor_memory='3G',
    executor_cores=2,
    num_executors=1,
    conf=_spark_conf,
    java_class='com.yunlu.bigdata.jobs.synchrotool.DataSynchDriver',  # Spark main class
    application='hdfs:///scheduler/jms/spark/sync/spark_sync.jar',  # Spark job jar
    application_args=[jsonpara],
)

# Gate the sync behind time_after_05_00 (imported from jms.time_sensor) —
# presumably a sensor that succeeds only after 05:00, delaying the Oracle
# read until the source data is complete; confirm against jms.time_sensor.
time_after_05_00 >> jms_ods__tab_report_errorseparatedetail__t_1
