# -*- coding: utf-8 -*-
from datetime import timedelta
from utils.operators.spark_submit import SparkSubmitOperator

# Connection parameters are resolved from Airflow Variables at Jinja render
# time, so no credentials appear in plain text in this file.
# (Removed a commented-out block that leaked a real MySQL username/password —
#  rotate that credential if it was ever valid.)
jdbcUrl = '{{ var.json.mysql_personas.url }}'
username = '{{ var.json.mysql_personas.username }}'
password = '{{ var.json.mysql_personas.password }}'

# Sync window boundaries (CST dates): rows with update_time in
# [nowdt 00:00:00, nextdt 06:00:00] are pulled, see the "where" clause below.
nowdt = '{{ execution_date | cst_ds }}'
nextdt = '{{ execution_date | date_add(1) | cst_ds }}'
table = "registration_problem_piece"
env = '{{ var.value.env_sync }}'

# JSON job spec consumed by the spark_sync.jar driver.  Placeholder tokens
# (e.g. "tablepara") are substituted below; str.format is deliberately not
# used because every JSON brace would then need escaping.
_JSON_TEMPLATE = """{
"reader":{
"connect":{
"url":"jdbcUrlpara",
"username":"usernamepara",
"password":"passwordpara",
"driver":"com.mysql.cj.jdbc.Driver"
},
"dbtype":"mysql",
"tableName":"tablepara",
"subTableList":"[0,31]",
"where":"update_time between str_to_date('nowdt 00:00:00', '%Y-%m-%d %H:%i:%s') and str_to_date('nextdt 06:00:00', '%Y-%m-%d %H:%i:%s')",
"query":"",
"splitColumn":"update_time",
"equalitySectioning":0,
"containsnull":0,
"fetchsize":"1024",
"threadNumber":0
},
"channel":{
"filterAbnormalCharacter":0
},
"writer":{
"dbtype":"hive",
"tableName":"tablepara",
"database":"jms_ods",
"writeMode": "overwrite",
"partitionColumn":"dt",
"partitionValue":"nowdt"},
"settting":{
"env":"envpara"}
}"""

# (placeholder, replacement) pairs applied sequentially — same semantics as
# the original chained str.replace calls, but in one readable table.
_SUBSTITUTIONS = (
    ("jdbcUrlpara", jdbcUrl),
    ("usernamepara", username),
    ("passwordpara", password),
    ("nowdt", nowdt),
    ("nextdt", nextdt),
    ("tablepara", table),
    ("envpara", env),
)

jsonpara = _JSON_TEMPLATE
for _placeholder, _value in _SUBSTITUTIONS:
    jsonpara = jsonpara.replace(_placeholder, _value)

# Daily overwrite-sync of MySQL table registration_problem_piece into the
# jms_ods Hive database via the generic spark_sync.jar driver; the full job
# specification is passed as a single JSON argument (jsonpara).
jms_ods__registration_problem_piece = SparkSubmitOperator(
    task_id='jms_ods__registration_problem_piece',
    # The YARN application name carries the next-day CST date for traceability.
    name='jms_ods__registration_problem_piece_{{ execution_date | date_add(1) | cst_ds }}',
    email=['chenhongping@yl-scm.com', 'yl_bigdata@yl-scm.com'],
    # Throttle concurrent extracts against the shared MySQL source.
    pool='mysql_personas',
    pool_slots=2,
    execution_timeout=timedelta(hours=1),
    # Spark sizing: 3 executors x 2 cores with 3G each, 2G driver.
    num_executors=3,
    executor_cores=2,
    executor_memory='3G',
    driver_memory='2G',
    java_class='com.yunlu.bigdata.jobs.synchrotool.DataSynchDriver',  # Spark main class
    application='hdfs:///scheduler/jms/spark/sync/mysql/spark_sync.jar',  # job jar on HDFS
    application_args=[jsonpara],
)
