#!/bin/sh

set -x

# Sync Hive data to Oracle: export a date-ranged slice of a Hive table
# into an Oracle table through a Spark JDBC temporary view.

hiveDataBase="jms_dwm"
HiveTableName="dwm_overall_target_network_sum_day_dt"
oracleTable="dwm_overall_target_network_sum_day_dt"
# Airflow-templated execution date (CST); rendered before this script runs.
execData="{{ execution_date | cst_ds }}"
# Look-back window in days: rows with dt in [startDate, execData] are exported.
dueDayNum="31"
# File holding the comma-separated column list to SELECT from the Hive table.
columnList="/usr/local/airflow/dags/jms_dm_duty/dm/dwm_overall_target_network_sum_day_dt/dwm_overall_target_network_sum_day_dt.txt"
# NOTE(review): dataColfield is never referenced below — confirm before removing.
dataColfield="stat_date"

# Start of the export window: execution date minus the look-back (GNU date).
startDate=$(date -d "${execData} -${dueDayNum} days" "+%Y-%m-%d")
echo "${startDate}"

hiveAllTableName=${hiveDataBase}.${HiveTableName}
orcDatabase="uat_bigdata"
viewName=${HiveTableName}_view
# NOTE(review): the next three variables are never referenced below — confirm before removing.
tmp_allOrcDatabase=${orcDatabase}.${oracleTable}_tmp
dir_allOrcDatabase=${orcDatabase}.${oracleTable}
Database="uat_bigdata"
oracleUrl="jdbc:oracle:thin:@//10.24.13.11:1521/pdb"
oracleUser="uat_bigdata"
# WARNING(review): hard-coded credential, and 'set -x' traces it into logs.
# Prefer injecting it via an Airflow connection or environment variable.
oraclePassword="px8BDl2c71Mz"

# DDL: register the Oracle target table as a Spark JDBC temporary view, so
# an INSERT OVERWRITE on the view writes through to Oracle ('truncate true'
# empties the table instead of dropping/recreating it on overwrite).
sqlText1="CREATE TEMPORARY VIEW ${viewName}
USING org.apache.spark.sql.jdbc
OPTIONS (
  url '${oracleUrl}',
  dbtable '${oracleTable}',
  driver 'oracle.jdbc.driver.OracleDriver',
  user '${oracleUser}',
  password '${oraclePassword}',
  isolationLevel 'NONE',
  batchsize 10000,
  truncate true
);
"

echo "$sqlText1"

# Column list to export, read verbatim from the config file.
# Quoted: with the unquoted form, an empty/unset ${columnList} made 'cat'
# read stdin (hanging the job) instead of failing fast on a missing file.
columnList1=$(cat "${columnList}")
echo "${columnList1}"

# DML: overwrite the Oracle table (via the view) with the last-N-days slice
# of the Hive table, filtered on the dt partition column.
sqlText2="INSERT overwrite TABLE ${viewName} SELECT ${columnList1} from ${hiveAllTableName} where dt >='${startDate}' and dt<= '${execData}'"

echo "$sqlText2"

# Best-effort: log this worker's IPv4 address for troubleshooting.
# Fixed: the awk program was in double quotes, so the *shell* expanded $2
# (normally empty) and awk printed the whole line instead of column 2.
ip=$(ifconfig -a | grep inet | grep -v 127.0.0.1 | grep -v inet6 | awk '{print $2}' | tr -d "addr:")

echo "-------ip是多少------------${ip}------------------------"

# Submit the job: create the JDBC view (sqlText1), then overwrite the Oracle
# table with the Hive slice (sqlText2), in one spark-sql session.
# Fixed: Spark conf keys are case-sensitive — 'spark.dynamicallocation.enabled'
# was silently ignored; it must be 'spark.dynamicAllocation.enabled'.
spark-sql --driver-memory 10G \
  --executor-cores 4 \
  --num-executors 50 \
  --executor-memory 4g \
  --conf spark.dynamicAllocation.maxExecutors=80 \
  --conf spark.sql.sources.partitionOverwriteMode=dynamic \
  --conf spark.dynamicAllocation.enabled=true \
  --conf spark.dynamicAllocation.cachedExecutorIdleTimeout=120 \
  --conf spark.shuffle.service.enabled=true \
  --conf spark.driver.maxResultSize=12G \
  --hiveconf hive.exec.dynamic.partition=true \
  --hiveconf hive.exec.dynamic.partition.mode=nonstrict \
  --hiveconf hive.exec.max.dynamic.partitions.pernode=2000 \
  --conf spark.sql.broadcastTimeout=3600 \
  --conf spark.executor.memoryOverhead=1G \
  --conf spark.sql.shuffle.partitions=500 \
  --conf spark.sql.autoBroadcastJoinThreshold=104857600 \
  --queue route \
  --jars hdfs:///scheduler/jms/spark/oracle/ojdbc8-19.3.0.0.jar \
  --name "${oracleTable}" \
  -e "${sqlText1}${sqlText2}"
##rm /tmp/yl-stream-batch-1.0-SNAPSHOT.jar
##/usr/bin/hadoop fs -get /scheduler/jms/spark/oracle/yl-stream-batch-1.0-SNAPSHOT.jar /tmp

