#!/bin/bash


# Java environment for the JVM-based cluster tools this script invokes
# later (hadoop fs, impala-shell, the addax/datax job).
export JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$JAVA_HOME/bin:$PATH


# Positional arguments:
#   $1            target table (schema.table)
#   $2            site (only meaningful for rptpid fact tables)
#   last two args start / end timekey (YYYYmmddHHMMSS)
table_name=$1
site_tmp=$2

# The last two positional parameters are always the timekeys.
# Indirect expansion ${!n} replaces the former eval/let combination;
# the guard avoids a bad substitution when fewer than 2 args are given.
if (( $# >= 2 )); then
	starttimekeyNo=$(($# - 1))
	endtimekeyNo=$#
	starttimekey=${!starttimekeyNo}
	endtimekey=${!endtimekeyNo}
fi


# Format a 14-char timekey (YYYYmmddHHMMSS) as "YYYY-mm-dd HH:MM:SS".
fmt_timekey_as_time() {
	local tk=$1
	printf '%s-%s-%s %s:%s:%s' "${tk:0:4}" "${tk:4:2}" "${tk:6:2}" "${tk:8:2}" "${tk:10:2}" "${tk:12:2}"
}

# Truncate to 14 chars (was: echo | awk substr) and build "date time" strings.
start_timekey=${starttimekey:0:14}
start_time=$(fmt_timekey_as_time "${start_timekey}")

end_timekey=${endtimekey:0:14}
end_time=$(fmt_timekey_as_time "${end_timekey}")


# Lengths are validated later (was: echo | awk length).
start_timekey_length=${#start_timekey}
end_timekey_length=${#end_timekey}


# Validate the target-table format and the argument count for each
# table family; exit 1 with a usage hint on any violation.

if [[ $table_name != *.* ]]; then
	echo "目标表名格式不正确。format：schema.table_name , eg: psd.s_edat7_array_glass_t"
	exit 1
fi

case "$table_name" in
	*rptpid.i_f*)
		# rptpid fact table: table, site, start, end
		if (( $# != 4 )); then
			echo "参数非法，目标表是rptpid库事实表时，需要输入4个参数。eg: rptpid.i_f_t_mes_hist_pnl_inline_tat_mod t1 20210101073000 20210101083000"
			exit 1
		fi
		;;
	*rptpid.i_d*)
		# rptpid dimension table: table, start, end
		if (( $# != 3 )); then
			echo "参数非法，目标表是rptpid库的维度表时，需要输入3个参数。eg: rptpid.i_d_t_mes_prod 20210101073000 20210101083000"
			exit 1
		fi
		;;
	*)
		# any non-rptpid table: table, start, end
		if (( $# != 3 )); then
			echo "参数非法，目标表非rptpid库时，需要输入3个参数。eg: psd.s_edat7_array_defect_t 20210101073000 20210101083000"
			exit 1
		fi
		;;
esac

 
# Everything below only applies to non-dimension tables (dimension loads
# take no timekey-driven site/shift parameters).
if [[ $table_name != *rptpid.i_d* ]]; then
	# A full 14-digit timekey is mandatory here.
	if [ "${start_timekey_length}" -ne 14 ]; then
		echo "参数非法，开始时间参数长度不应小于14。"
		exit 1
	fi

	if [ "${end_timekey_length}" -ne 14 ]; then
		echo "参数非法，结束时间参数长度不应小于14。"
		exit 1
	fi

	# Uppercase the site (was: sed 's/[a-z]/\u&/g'; pure bash, no subprocess).
	site_tmp=${site_tmp^^}

	# Build both quoted site lists in one pass. The two original if-blocks
	# tested the same condition twice; also quote "$site_tmp" so an empty
	# value no longer makes [ ] fail with "unary operator expected".
	#   site     -> backslash-escaped quotes, passed through addax -D
	#   site_sql -> plainly quoted, passed to impala-shell --var
	if [ "${site_tmp}" == 'T6,T7' ]; then
		site="\'"'T6'"\'","\'"'T7'"\'"
		site_sql="'"'T6'"'","'"'T7'"'"
	else
		site="\'"${site_tmp}"\'"
		site_sql="'"${site_tmp}"'"
	fi

	# Map the HHMMSS part of a timekey to its shift start:
	#   [073000,193000) -> same day "073000"
	#   [193000,240000) -> same day "193000"
	#   [000000,073000) -> previous day "193000"
	# $1: 14-char timekey, $2: matching "YYYY-mm-dd HH:MM:SS" string.
	get_shift_timkey() {
		local tk=$1 time_str=$2
		local hms=$((10#${tk:8:6}))   # force base 10 (leading zeros)
		if (( hms >= 73000 && hms < 193000 )); then
			printf '%s 073000' "${tk:0:8}"
		elif (( hms >= 193000 && hms < 240000 )); then
			printf '%s 193000' "${tk:0:8}"
		elif (( hms >= 0 && hms < 73000 )); then
			printf '%s 193000' "$(date -d "1 day ago ${time_str}" +"%Y%m%d")"
		fi
	}

	start_shift_timkey=$(get_shift_timkey "${start_timekey}" "${start_time}")
	end_shift_timkey=$(get_shift_timkey "${end_timekey}" "${end_time}")
fi

	# Minimum shift start key: 10 days before the start time.
	min_start_shift_timkey=$(date -d "10 day ago ${start_time}" +"%Y%m%d%H%M%S")

# datax (addax) install path
datax_home="/opt/datax/addax-4.0.3/bin"
# json job definitions
jsonpath='/RPT/etlscript/datax/json'
# sql scripts
sqlpath='/RPT/etlscript/datax/sql'

# The json job and the hive table share one name:
# schema.table -> schema_table_datax  (was: echo | sed; pure bash now)
hive_table_name_tmp=${table_name//./_}
hive_table_name="${hive_table_name_tmp}_datax"

# Hive partition value depends on the target table family.
if [[ $table_name == *rptpid.i_f* ]]; then
	partition_value="${site_tmp}_${start_timekey}_${end_timekey}"
elif [[ $table_name == *rptpid.i_d* ]]; then
	partition_value="${hive_table_name}"
else
	partition_value="${start_timekey}_${end_timekey}"
fi


# Abort early when the json job definition is missing
# (-e replaces the deprecated -a file test; path quoted).
if [ -e "${jsonpath}/${hive_table_name}.json" ]; then
	echo "json存在"
else
	echo "json不存在"
	exit 1
fi


## Does this job write to HDFS? If so the hive partition must be
## (re)created before the run (was: cat | grep, useless use of cat).
writer_is_hdfs=$(grep "hdfswriter" "${jsonpath}/${hive_table_name}.json")

# Main retry loop: up to 4 attempts (initial run + 3 retries).
# Each attempt: (1) for hdfswriter jobs, drop & re-add the hive partition;
# (2) run the addax/datax job; (3) for hdfswriter jobs, run the sql script
# (per the original comment, this loads the hive data into kudu) and delete
# the staged HDFS files. Any failing step records $returncode and
# "continue"s into the next attempt.
for((i=0;i<=3;i++));
do

    if [ $i -ge 1 ] ; then
        echo "尝试第$i次重跑,等待30秒后执行"
        sleep 30

	returncode=0;
    fi

    # Step 1: recreate the target partition so a rerun starts clean.
    if [[ $writer_is_hdfs == *hdfswriter* ]] ; then
		impala-shell -i 172.25.6.200:21000 -l --auth_creds_ok_in_clear -u csot.rptadmin --ldap_password_cmd="/RPT/etlscript/sqoop/shell/sqoop_impala_shell_ldap_password_rptadmin.sh"   \
			-q "alter table rptptm.${hive_table_name} drop IF EXISTS PARTITION(partition_value='${partition_value}');   \
			alter table rptptm.${hive_table_name} add IF NOT EXISTS PARTITION(partition_value='${partition_value}');  \
			quit; "
    else echo "写入插件非hdfswriter的话，不需要中间hive表"
    fi 

    returncode=$?
    echo "returncode=$returncode" 
    if [ $returncode -ne 0 ];then
        ## echo "exit $returncode"
        ##exit  $returncode
        continue
    fi 

    # Step 2: run datax (addax) to launch the job.
    # The echo logs (approximately) the command line that follows; note the
    # quoting of the two lines differs slightly (-Dsite='…' vs -Dsite="…").
    echo ' python ' $datax_home'/addax.py -p " -Dsite='$site' -Dpartition_value='${partition_value}' -Dstart_shift_timkey='$start_shift_timkey' -Dend_shift_timkey='$end_shift_timkey' -Dstart_timekey='$start_timekey' -Dend_timekey='$end_timekey' -Dmin_start_shift_timkey='$min_start_shift_timkey'" --jvm="-Xms8G -Xmx8G" ${jsonpath}/${hive_table_name}.json'
    python ${datax_home}/addax.py -p " -Dsite="$site" -Dpartition_value='${partition_value}' -Dstart_shift_timkey='$start_shift_timkey' -Dend_shift_timkey='$end_shift_timkey' -Dstart_timekey='$start_timekey' -Dend_timekey='$end_timekey' -Dmin_start_shift_timkey='$min_start_shift_timkey'" --jvm="-Xms8G -Xmx8G" ${jsonpath}/${hive_table_name}.json

    returncode=$?
    echo "returncode=$returncode"
    if [ $returncode -ne 0 ];then
         ##echo "exit $returncode"
         ## exit  $returncode
         continue
    fi 

    hdfs_path=/data/db/rptptm

    if [[ $writer_is_hdfs == *hdfswriter* ]] ; then 
        ## Step 3: run the sql script (syncs the hive data into kudu).
	impala-shell -i 172.25.6.200:21000 -l --auth_creds_ok_in_clear -u csot.rptadmin --ldap_password_cmd="/RPT/etlscript/sqoop/shell/sqoop_impala_shell_ldap_password_rptadmin.sh" -B --var=site_sql=${site_sql} --var=partition_value=${partition_value} --var=start_timekey=${start_timekey} --var=end_timekey=${end_timekey} --var=site_tmp=${site_tmp} --var=start_shift_timkey="${start_shift_timkey}" --var=end_shift_timkey="${end_shift_timkey}" -f ${sqlpath}/${hive_table_name}.sql

	returncode=$? 
	if [ $returncode -ne 0 ];then
		  ##echo "exit $returncode"
		  ##exit  $returncode
		  continue
	fi 

	## Delete the staged HDFS partition data.
        hadoop fs -rm -r -skipTrash ${hdfs_path}/${hive_table_name}/partition_value=${partition_value} 

        returncode=$?
        if [ $returncode -ne 0 ];then
                  ##echo "exit $returncode"
                  ##exit  $returncode
                  continue
        fi 

     else echo "写入插件非hdfswriter的话，不需要中间hive表"
     fi 

     # NOTE(review): -ge 0 is always true, so this always breaks; every
     # failure path above already "continue"d, so reaching here means the
     # attempt succeeded. Effectively equivalent to a plain "break".
     if [ $returncode -ge 0 ];then
            ##echo "break for end....." 
            break
     fi

done

# All attempts exhausted: if the last one still failed, propagate its
# return code as the script's exit status.
if (( returncode != 0 )); then
	echo "第3次尝试重跑失败，ETL报错退出"
	echo "exit $returncode"
	exit $returncode
fi

