#!/bin/sh

# Sync a Hive summary table to Oracle (Hive -> Oracle export via spark-sql).
# Resolve the script's own directory; abort the fallback-to-cwd trap by
# requiring the cd to succeed before capturing pwd.
BASE_DIR="$(cd "$(dirname "$0")" && pwd)"
# printf instead of `echo -e`: under /bin/sh (dash) echo -e prints a literal "-e".
printf 'INFO - Base Directory: %s \n\n' "${BASE_DIR}"

# +-------------------------
# |   Oracle configuration
# +-------------------------
# Oracle database (schema) name
oracle_db="jms_bigdata_report"
# Oracle target table name
oracle_tb="dm_weight_flow_daily_summary_dimension_mt"

# Host
oracle_host="pro-bigdatareport-2023-readwrite-ora.yl.com"
# Port
oracle_port="1521"
# Username
oracle_user="jms_bigdata_report"
# Password
# NOTE(review): hard-coded credential; prefer injecting it via environment
# variable or a secrets file so it does not leak through VCS, ps, or logs.
oracle_pass="H15DCte03YPW3B2pEA"

# Source Hive database and table names.
hiveDataBase="jms_dm"
# The camelCase aliases below previously repeated the same literals as the
# oracle_* variables and could drift apart; they are now derived from the
# canonical settings above while keeping every name defined for any
# external consumer of this script.
HiveTableName="${oracle_tb}"
oracleTable="${oracle_tb}"

oracleUrl="jdbc:oracle:thin:@//${oracle_host}:${oracle_port}/orcl"
oracleUser="${oracle_user}"
oraclePassword="${oracle_pass}"

# +-------------------------
# |   Data import
# +-------------------------
# Spark SQL payload: register the Oracle target table as a JDBC temp view,
# then overwrite it with the last 3 months of the Hive summary table.
# `{{ execution_date | cst_ds }}` is an Airflow template rendered before run.
sqlText="
  CREATE OR REPLACE TEMPORARY VIEW jdbcTable
    USING org.apache.spark.sql.jdbc
    OPTIONS (
      url 'jdbc:oracle:thin:@//${oracle_host}:${oracle_port}/orcl',
      dbtable '${oracle_tb}',
      driver 'oracle.jdbc.driver.OracleDriver',
      user '${oracle_user}',
      password '${oracle_pass}',
      isolationLevel 'NONE',
      batchsize 1000,
      truncate true
    );
  INSERT OVERWRITE TABLE jdbcTable SELECT statistic_date, send_agent_id, send_agent_code, send_agent_name, send_network_code, send_network_name, receiver_province_id, receiver_province_name, waybill_num, customer_fee, cost_weight, sales_price, piece_fee_cost, transfer_cost, operation_cost, center_packge_cost, distribu_depart_cost, distribu_opera_cost, distribu_packge_cost, duration_whole, signed_num, update_time FROM jms_dm.dm_weight_flow_daily_summary_dimension_mt WHERE mt BETWEEN DATE_FORMAT(ADD_MONTHS('{{ execution_date | cst_ds }}', -3), 'yyyy-MM-01') AND DATE_FORMAT('{{ execution_date | cst_ds }}', 'yyyy-MM-01');
"

# The original `[ $? -ne 0 ]` guard could never fire: a plain variable
# assignment always exits 0. Validate the payload itself instead, and use
# a standard exit code (exit -1 is not valid POSIX).
if [ -z "${sqlText}" ]; then
  printf 'ERROR - 数据导入SQL加载失败 \n\n' >&2
  exit 1
else
  printf 'INFO - Execution Sql: %s \n\n' "${sqlText}"
fi

# Submit the export job.
# Fixes vs. original:
#   - spark.dynamicallocation.enabled -> spark.dynamicAllocation.enabled:
#     Spark conf keys are case-sensitive, so the lowercase key was silently
#     ignored and dynamic allocation (with the maxExecutors cap below) was
#     never actually enabled.
#   - --name value quoted so a rendered value containing whitespace cannot
#     word-split into extra arguments.
#   - exit 1 instead of the non-standard exit -1.
spark-sql --driver-memory 10G \
          --executor-cores 4  \
          --num-executors 10  \
          --executor-memory 4g   \
          --conf spark.dynamicAllocation.enabled=true  \
          --conf spark.dynamicAllocation.maxExecutors=20  \
          --conf spark.dynamicAllocation.cachedExecutorIdleTimeout=120 \
          --conf spark.sql.sources.partitionOverwriteMode=dynamic \
          --conf spark.shuffle.service.enabled=true  \
          --conf spark.driver.maxResultSize=12G  \
          --conf spark.sql.broadcastTimeout=3600 \
          --conf spark.executor.memoryOverhead=1G \
          --conf spark.sql.shuffle.partitions=200 \
          --conf spark.sql.autoBroadcastJoinThreshold=104857600 \
          --hiveconf hive.exec.dynamic.partition=true  \
          --hiveconf hive.exec.dynamic.partition.mode=nonstrict  \
          --hiveconf hive.exec.max.dynamic.partitions.pernode=2000 \
          --queue route \
          --jars hdfs:///scheduler/jms/spark/oracle/ojdbc8-19.3.0.0.jar \
          --name "${oracle_tb}_{{ execution_date | cst_ds }}" \
          -e "${sqlText}" || exit 1