#!/bin/sh

# Example invocation: sh hive2oracle_load.sh jms_dm dm_network_customer_analysis_detail_dt dm_network_customer_analysis_detail_dt 2023-04-25 30 11.txt
# (NOTE: the script currently ignores positional arguments; all parameters are hardcoded below.)
#set -x

# Syncs a Hive table's data into Oracle (Hive -> Oracle load).

# ---- Job parameters -------------------------------------------------------
# Source Hive database/table and the matching Oracle target table name.
hiveDataBase="jms_dm"
HiveTableName="dm_network_customer_analysis_detail_dt"
oracleTable="dm_network_customer_analysis_detail_dt"

# Airflow-rendered execution date (CST) and how many days back to reload.
execData="{{ execution_date | cst_ds }}"
dueDayNum="0"

# File holding the comma-separated column list used to build the SELECT.
columnList="/usr/local/airflow/dags/jms_oclock_12/oracle/dm_network_customer_analysis_detail_dt/${HiveTableName}.txt"
dataColfield="date_time"

# First day of the reload window: execData minus dueDayNum days.
startDate=$(date -d "${execData} -${dueDayNum} days" "+%Y-%m-%d")
echo "${startDate}"

# ---- Derived names --------------------------------------------------------
hiveAllTableName="${hiveDataBase}.${HiveTableName}"
orcDatabase="jms_bigdata_report"
viewName="${HiveTableName}_view"
tmp_allOrcDatabase="${orcDatabase}.${oracleTable}_tmp"   # Oracle staging table
dir_allOrcDatabase="${orcDatabase}.${oracleTable}"       # target table; also used as HDFS dir name
Database="jms_bigdata_report"

# ---- Oracle connection ----------------------------------------------------
# NOTE(review): credentials are hardcoded in this script; consider loading
# them from an Airflow connection or environment variable instead.
oracleUrl="jdbc:oracle:thin:@//pro-bigdatareport-2023-readwrite-ora.yl.com:1521/orcl"
oracleUser="jms_bigdata_report"
oraclePassword="H15DCte03YPW3B2pEA"

# Spark SQL statement 1: register the Oracle staging table (<table>_tmp) as a
# temporary view over JDBC so it can be written with INSERT OVERWRITE.
# truncate/batchsize/isolationLevel are Spark JDBC writer options —
# presumably truncate=true keeps the Oracle table definition and only clears
# rows on overwrite (verify against the Spark JDBC docs for the deployed
# Spark version).
sqlText1="CREATE TEMPORARY VIEW ${viewName}
USING org.apache.spark.sql.jdbc
OPTIONS (
  url '${oracleUrl}',
  dbtable '${tmp_allOrcDatabase}',
  driver 'oracle.jdbc.driver.OracleDriver',
  user '${oracleUser}',
  password '${oraclePassword}',
  isolationLevel 'NONE',
  batchsize 10000,
  truncate true
);
"

# Deliberately unquoted: word-splitting collapses the multi-line statement
# onto a single line. The same unquoted form is used when the statement is
# written to /tmp/sqlText.sql below, so quoting it here would be misleading.
echo $sqlText1

# Column list for the SELECT, read from the per-table .txt file.
# NOTE(review): if the file is missing, cat fails and this silently yields an
# empty column list, producing a malformed SELECT below.
columnList1=`cat ${columnList}`
echo ${columnList1}

# Spark SQL statement 2: copy the Hive partitions for the window
# [execData - dueDayNum days, execData] into the Oracle staging view.
sqlText2="INSERT OVERWRITE TABLE ${viewName} SELECT ${columnList1} from ${hiveAllTableName} where dt >=date_sub('${execData}',${dueDayNum}) and dt<='${execData}'"

echo $sqlText2

# Best-effort lookup of this host's IPv4 address (logging only; $ip is not
# used elsewhere in the script).
# The awk program MUST be single-quoted: with double quotes the shell expands
# $2 (an empty positional parameter) before awk runs, so the original
# `awk "{print $2}"` printed the whole line instead of the address field.
# tr -d 'addr:' strips the leftover characters of the old "inet addr:x.x.x.x"
# ifconfig format; a plain IPv4 address (digits and dots) is unaffected.
ip=$(ifconfig -a | grep inet | grep -v 127.0.0.1 | grep -v inet6 | awk '{print $2}' | tr -d 'addr:')

echo "-------ip是多少------------"$ip"------------------------"


# Stage both SQL statements into a local file, push it to HDFS, then run the
# generic "execute SQL file" PySpark job against it on YARN.
# The unquoted echos collapse each statement onto a single line in the file —
# presumably the downstream SQL-file parser expects one statement per line;
# TODO confirm before changing the quoting.
echo '' > /tmp/sqlText.sql
echo $sqlText1 >>/tmp/sqlText.sql
echo $sqlText2 >>/tmp/sqlText.sql
# ${dir_allOrcDatabase} is "db.table", so the SQL file lands in e.g.
# /user/hive/jms_bigdata_report.dm_network_customer_analysis_detail_dt
hdfs dfs -mkdir -p /user/hive/${dir_allOrcDatabase}
hdfs dfs -put -f /tmp/sqlText.sql /user/hive/${dir_allOrcDatabase}
# NOTE(review): ${name} in the trailing arguments is never defined in this
# script and expands to empty — confirm whether ${oracleTable} was intended.
# NOTE(review): --name is passed twice (test_cluster_new_... then
# ${oracleTable}); the last one wins.
# NOTE(review): "spark.pyspark.python=python3env/usr/local/..." looks like a
# concatenation typo — verify against a known-good copy of this command.
spark-submit --conf spark.dynamicAllocation.enabled=true --conf spark.shuffle.service.enabled=true --conf spark.dynamicAllocation.maxExecutors=80 --conf spark.dynamicAllocation.cachedExecutorIdleTimeout=600 --conf spark.network.timeout=600 --conf spark.sql.sources.partitionOverwriteMode=dynamic --conf spark.executor.memoryOverhead=1G --conf spark.sql.shuffle.partitions=600 --conf spark.yarn.appMasterEnv.PYSPARK_PYTHON=/usr/local/python3.7.6/bin/python3 --conf spark.yarn.dist.jars=hdfs://yl-bg-hdp:8020/user/hive/sparklib/lineage-spark.jar,hdfs:///scheduler/jms/spark/oracle/ojdbc8-19.3.0.0.jar --conf spark.pyspark.driver.python=/usr/local/python3.7.6/bin/python3 --conf spark.pyspark.python=python3env/usr/local/python3.7.6/bin/python3 --py-files hdfs://yl-bg-hdp:8020/user/hive/jinja2.zip,hdfs://yl-bg-hdp:8020/user/hive/markupsafe.zip --conf spark.sql.queryExecutionListeners=com.lineage.SparkLineage2 --conf spark.hadoop.hive.exec.dynamic.partition=true --conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict --conf spark.hadoop.hive.exec.max.dynamic.partitions=2000 --conf spark.hadoop.hive.exec.max.dynamic.partitions.pernode=2000 --executor-cores 4 --executor-memory 2G  --driver-memory 4g --num-executors 10 --master yarn --name test_cluster_new_2023-09-05-09 --queue pro_hi --deploy-mode cluster --name ${oracleTable} hdfs://yl-bg-hdp:8020/user/hive/sparklib/SparkSubmitYarnClusterForSqlFileBlood.py ${dir_allOrcDatabase}/sqlText.sql ${name} ${execData}

#spark-sql --driver-memory 10G   --executor-cores 4  --num-executors 10  --executor-memory 4g   --conf spark.dynamicAllocation.maxExecutors=80  --conf spark.sql.sources.partitionOverwriteMode=dynamic --conf spark.dynamicallocation.enabled=true   --conf spark.dynamicAllocation.cachedExecutorIdleTimeout=120 --conf spark.shuffle.service.enabled=true   --conf spark.driver.maxResultSize=12G  --hiveconf hive.exec.dynamic.partition=true   --hiveconf hive.exec.dynamic.partition.mode=nonstrict   --hiveconf hive.exec.max.dynamic.partitions.pernode=2000 --conf spark.sql.broadcastTimeout=3600 --conf spark.executor.memoryOverhead=1G --conf spark.sql.shuffle.partitions=200 --conf spark.sql.autoBroadcastJoinThreshold=104857600  --queue route --jars hdfs:///scheduler/jms/spark/oracle/ojdbc8-19.3.0.0.jar --name ${oracleTable} -e "${sqlText1}${sqlText2}"
# Capture the exit status of the preceding spark-submit BEFORE any other
# command overwrites $?. The original script checked $? only after two
# intervening echos (and the jar-check block), so the check was always
# against an echo's status — i.e. always 0 — and the merge step ran even
# when the Spark load had failed.
sparkStatus=$?

# Ensure the merge-helper jar is available locally, fetching it from HDFS
# into /tmp when missing.
jarFile="/tmp/yl-stream-batch-1.0-SNAPSHOT.jar"
if [ -f "${jarFile}" ]; then
  echo "${jarFile} exist"
else
  echo "/usr/bin/hadoop fs -get /scheduler/jms/spark/oracle/yl-stream-batch-1.0-SNAPSHOT.jar /tmp"
  /usr/bin/hadoop fs -get /scheduler/jms/spark/oracle/yl-stream-batch-1.0-SNAPSHOT.jar /tmp
  if [ -f "${jarFile}" ]; then
    echo "${jarFile} get successful"
  else
    echo "${jarFile} get failed"
  fi
fi

echo "$(date "+%Y-%m-%d %H:%M:%S")"
echo "${sparkStatus}"

# Only merge the staging table into the target table when the Spark load
# succeeded.
if [ "${sparkStatus}" -eq 0 ]; then
  # Run the Java merge step (MERGE tmp table -> target table in Oracle).
  /usr/local/java/bin/java -cp /tmp/yl-stream-batch-1.0-SNAPSHOT.jar com.yl.bigdata.app.base31.exportwarning.ora.CallMergeOracle "${tmp_allOrcDatabase}" "${dir_allOrcDatabase}" "${startDate}" "${execData}" "${dataColfield}"
else
  echo "failed"
fi

