#!/bin/sh

set -x

# Sync Hive data to Oracle (monthly network target summary export job).

# --- Job configuration: Hive source and Oracle target share the table name --
hiveDataBase="jms_dwm"
HiveTableName="dwm_overall_target_network_sum_month_dt"
oracleTable="dwm_overall_target_network_sum_month_dt"
# Execution date injected by the Airflow template engine (CST-localized ds).
execData="{{ execution_date | cst_ds }}"

# --- Derive the start of the reporting window -------------------------------
# NOTE(review): intended to catch the 1st *or 2nd* of the month (per the
# original comment), but `-lt 2` only matches day 1 — confirm whether day 2
# should also roll back to the previous month.
date_dd=$(date -d "${execData}" +%d)
echo ${date_dd}
day=$(expr "${date_dd}" + 0)   # strip the leading zero ("08" -> "8")
echo $day
if [ "${day}" -lt 2 ]; then
  # On the 1st, export the whole previous month.
  startDate=$(date -d "${execData} -1 month" +%Y-%m-01)
else
  # Otherwise export the current month from its 1st day.
  startDate=$(date -d "${execData}" +%Y-%m-01)
fi
echo ${startDate}

# Column list file for the target table (read later for logging only).
columnList="/usr/local/airflow/dags/jms_target_all/oracle/dwm_overall_target_network_sum_month_dt/dwm_overall_target_network_sum_month_dt.txt"
# Date column the Oracle merge step keys on.
dataColfield="stat_date"

echo ${startDate}

# Fully-qualified Hive source table plus Oracle staging/final table names.
hiveAllTableName="${hiveDataBase}.${HiveTableName}"
orcDatabase="jms_bigdata_report"
viewName="${HiveTableName}_view"
tmp_allOrcDatabase="${orcDatabase}.${oracleTable}_tmp"   # staging table
dir_allOrcDatabase="${orcDatabase}.${oracleTable}"       # final table
Database="jms_bigdata_report"

# Oracle JDBC connection settings.
# NOTE(review): credentials are hardcoded and `set -x` traces them into the
# job log — consider sourcing them from env vars or a secrets store.
oracleUrl="jdbc:oracle:thin:@//pro-bigdatareport-2023-readwrite-ora.yl.com:1521/orcl"
oracleUser="jms_bigdata_report"
oraclePassword="H15DCte03YPW3B2pEA"

# Spark SQL DDL: register the Oracle staging table as a temporary JDBC view
# so that a later INSERT OVERWRITE writes directly into Oracle.
# `truncate true` makes the overwrite TRUNCATE the staging table instead of
# dropping/recreating it; `batchsize 10000` sets the JDBC write batch size;
# `isolationLevel 'NONE'` disables transactional isolation for the write.
sqlText1="CREATE TEMPORARY VIEW ${viewName}
USING org.apache.spark.sql.jdbc
OPTIONS (
  url '${oracleUrl}',
  dbtable '${tmp_allOrcDatabase}',
  driver 'oracle.jdbc.driver.OracleDriver',
  user '${oracleUser}',
  password '${oraclePassword}',
  isolationLevel 'NONE',
  batchsize 10000,
  truncate true
);
"

# Unquoted on purpose? This collapses the statement onto one log line.
echo $sqlText1

# Load and log the expected column list for the target table.
# NOTE(review): ${columnList1} is only echoed here and never referenced
# again — the INSERT statement below hardcodes its own column list.
columnList1=$(cat ${columnList})
echo ${columnList1}

# Spark SQL DML: push the month-to-date rows (dt in [startDate, execData])
# from the Hive table into the Oracle staging view defined in sqlText1.
# update_time is stamped with the export wall-clock time.
# NOTE(review): the column order must match the Oracle staging table schema
# (presumably mirrored by the column list file) — verify if either changes.
sqlText2="INSERT OVERWRITE TABLE ${viewName}
SELECT stat_date
       ,network_type
       ,network_code
       ,network_name
       ,franchisee_code
       ,franchisee_name
       ,agent_code
       ,agent_name
       ,virt_code
       ,virt_name
       ,kpi_code
       ,kpi_name
       ,kpi_type
       ,kpi_fz
       ,kpi_fm
       ,kpi_values
       ,kpi_param
       ,agent_principal_name
       ,from_unixtime(unix_timestamp(),'yyyy-MM-dd HH:mm:ss')   as update_time
       ,franchisee_principal_name
       ,PRINCIPAL_NAME
from ${hiveAllTableName} where dt>='${startDate}' and dt<='${execData}'"

# Unquoted on purpose? This collapses the statement onto one log line.
echo $sqlText2

# Log the host's IPv4 address (diagnostic only; $ip is not used elsewhere).
# parse_ipv4 extracts the address field from `ifconfig` output on stdin;
# `tr -d 'addr:'` strips the "addr:" prefix emitted by older ifconfig
# versions (the characters a/d/r/: never occur in a dotted-quad, so newer
# "inet 192.168.x.x" output passes through untouched).
parse_ipv4() {
  grep inet | grep -v 127.0.0.1 | grep -v inet6 | awk '{print $2}' | tr -d 'addr:'
}
# Fix: the awk program was double-quoted (`awk "{print $2}"`), so the shell
# expanded $2 to an empty string and awk printed the whole line instead of
# the address field.
ip=$(ifconfig -a | parse_ipv4)

echo "-------ip是多少------------"$ip"------------------------"

# ---------------------------------------------------------------------------
# 1) Run Spark SQL: create the JDBC view and overwrite the Oracle staging
#    table with the month-to-date Hive data.
#    Fix: `spark.dynamicallocation.enabled` was miscased (Spark conf keys are
#    case-sensitive), so dynamic allocation was never actually enabled.
# ---------------------------------------------------------------------------
spark-sql --driver-memory 10G   --executor-cores 4  --num-executors 50  --executor-memory 4g   --conf spark.dynamicAllocation.maxExecutors=80  --conf spark.sql.sources.partitionOverwriteMode=dynamic --conf spark.dynamicAllocation.enabled=true   --conf spark.dynamicAllocation.cachedExecutorIdleTimeout=120 --conf spark.shuffle.service.enabled=true   --conf spark.driver.maxResultSize=12G  --hiveconf hive.exec.dynamic.partition=true   --hiveconf hive.exec.dynamic.partition.mode=nonstrict   --hiveconf hive.exec.max.dynamic.partitions.pernode=2000 --conf spark.sql.broadcastTimeout=3600 --conf spark.executor.memoryOverhead=1G --conf spark.sql.shuffle.partitions=500 --conf spark.sql.autoBroadcastJoinThreshold=104857600  --queue route --jars hdfs:///scheduler/jms/spark/oracle/ojdbc8-19.3.0.0.jar --name ${oracleTable} -e "${sqlText1}${sqlText2}"
# Fix: capture the Spark job's exit status immediately. The original tested
# `$?` only after two intervening `echo` commands, so the success check below
# was always true and the merge step ran even when the export had failed.
sparkStatus=$?

# ---------------------------------------------------------------------------
# 2) Ensure the merge jar is present locally, fetching it from HDFS on miss.
# ---------------------------------------------------------------------------
jarFile="/tmp/yl-stream-batch-1.0-SNAPSHOT.jar"
if [ -f ${jarFile} ]; then
  echo "${jarFile} exist"
else
  echo "/usr/bin/hadoop fs -get /scheduler/jms/spark/oracle/yl-stream-batch-1.0-SNAPSHOT.jar /tmp"
  /usr/bin/hadoop fs -get /scheduler/jms/spark/oracle/yl-stream-batch-1.0-SNAPSHOT.jar /tmp
  if [ -f ${jarFile} ]; then
    echo "${jarFile} get successful"
  else
    echo "${jarFile} get failed"
  fi
fi
echo `date "+%Y-%m-%d %H:%M:%S"`
echo ${sparkStatus}

# ---------------------------------------------------------------------------
# 3) Merge the Oracle staging table into the final table, but only when the
#    Spark export actually succeeded.
# ---------------------------------------------------------------------------
if [ ${sparkStatus} -eq 0 ]; then
  /usr/local/java/bin/java -cp /tmp/yl-stream-batch-1.0-SNAPSHOT.jar com.yl.bigdata.app.base31.exportwarning.ora.CallMergeOracle ${tmp_allOrcDatabase} ${dir_allOrcDatabase} ${startDate} ${execData} ${dataColfield}
else
  echo "failed"
fi

