#!/bin/sh
# Single-instance guard: exit silently if another copy of
# run_single_protocol_transform.sh is already running.
# NOTE(review): pattern-matching ps output is best-effort; any process whose
# command line happens to contain the script name (or the current PID as a
# substring) also matches. pgrep -f would be more robust if available.
_process_name=$(ps -lef | grep run_single_protocol_transform.sh | grep -v grep | grep -v vi | grep -v $$)
# POSIX [ -n ... ]; the original used bash-only [[ ]] under a /bin/sh shebang.
[ -n "${_process_name}" ] && exit

# Default SPARK_HOME when the environment does not provide one.
# Quoted expansion: the original unquoted test breaks if the value is unset
# in some shells or contains whitespace.
if [ -z "${SPARK_HOME}" ];then
    export SPARK_HOME=/usr/hdp/2.6.4.0-91/spark2
fi
# Absolute directory containing this script (resolves relative invocation).
CUR_PATH=$(cd "$(dirname "$0")"; pwd)
# HDFS NameNode node(s); separate multiple entries with commas.
# NOTE(review): NAME_NODE is actually defined in the sourced protocol config
# below -- this comment may be a leftover from an earlier revision.

# Exactly two positional arguments are required:
#   $1 - hive database name
#   $2 - protocol config file to convert
if [ $# -ge 2 ]; then
    echo "Shell 传递参数$#个！"
    echo "执行的文件名：$0"
    # Fixed copy-paste leftover: the original messages described parameters
    # from a different script (HDFS source dir / HIVE temp dir) and
    # contradicted the usage text in the else branch below.
    echo "第一个参数【hive数据库】为：$1"
    echo "第二个参数【想要转换的协议】为：$2"
else
    echo "参数错误！"
    echo "执行的文件名：$0 第一个参数 第二个参数"
    echo "第一个参数【hive数据库】"
    echo "第二个参数【想要转换的协议】"
    exit 1
fi

hive_db=$1
protocol_config_file=$2

#######################################
# Submit the Spark job that converts one protocol's raw data for a database.
# Arguments:
#   $1 - hive database name (selects hql/<db>/<FILENAME> passed via --files)
#   $2 - path to the protocol config file
#        NOTE(review): assigned but never referenced inside this function;
#        the caller has already sourced that file before calling us.
# Globals (read): spark_* tuning knobs -- presumably defined in
#   spark_config/spark_task.config sourced below -- and NAME_NODE,
#   TARGET_TABLE, HDFS_OPR, FLUME_DATA_PATH, HIVE_TEMP_PATH, PARTITION_PARAM,
#   CURR_PROTOCOL, FILENAME, LIMITNUM, HANDLE_ERROR_DATE_DATA from the sourced
#   protocol config -- TODO confirm against those files.
# Returns: exit status of spark-submit (checked by the caller).
#######################################
submit_task(){
curr_db=$1
protocol_config_file=$2
$SPARK_HOME/bin/spark-submit \
--master ${spark_master} \
--queue ${spark_queue} \
--deploy-mode ${spark_deploy_mode} \
--files "$CUR_PATH/hql/${curr_db}/${FILENAME},${CUR_PATH}/log4j.properties" \
--py-files "$CUR_PATH/utils/hdfs_utils.py,$CUR_PATH/utils/obs_utils.py" \
--driver-memory ${spark_driver_memory} \
--num-executors ${spark_executor_num} \
--executor-cores ${spark_executor_cores} \
--executor-memory ${spark_executor_memory} \
--conf spark.dynamicAllocation.enabled=${dynamic_allocation_enable} \
--conf spark.blacklist.enabled=${spark_blacklist_enabled} \
--conf spark.sql.catalogImplementation=${spark_sql_catalogImplementation} \
--conf spark.hadoop.hive.exec.dynamic.partition.mode=nonstrict \
--conf spark.memory.fraction=${spark_memory_fraction} \
--conf spark.memory.storageFraction=${spark_memory_storageFraction} \
--conf spark.pyspark.driver.python="${spark_pyspark_driver_python}" \
--conf spark.pyspark.python="${spark_pyspark_python}" \
--conf spark.debug.maxToStringFields=${spark_debug_maxToStringFields} \
--conf spark.default.parallelism=${spark_default_parallelism} \
--conf spark.yarn.executor.memoryOverhead=${sparky_executor_memoryOverhead} \
--conf spark.scheduler.listenerbus.eventqueue.capacity=${spark_scheduler_listenerbus_eventqueue_capacity} \
--conf spark.executor.extraJavaOptions="-XX:+UseCompressedOops -XX:-UseGCOverheadLimit -Dlog4j.configuration=log4j.properties" \
--conf spark.driver.extraJavaOptions="-Dlog4j.configuration=file:${CUR_PATH}/log4j.properties" \
--jars ${dependency_jars} \
$CUR_PATH/data_converter.py ${NAME_NODE} $TARGET_TABLE $HDFS_OPR $FLUME_DATA_PATH $HIVE_TEMP_PATH $PARTITION_PARAM $CURR_PROTOCOL $FILENAME $LIMITNUM $curr_db ${HANDLE_ERROR_DATE_DATA}
}


# Load shared Spark submission settings (spark_master, spark_queue, memory
# sizes, monitoring_path, mail_server, ...) -- presumably defined in
# spark_task.config; TODO confirm.
source "${CUR_PATH}/spark_config/spark_task.config"
# The monitoring path is configurable; create it on first run.
# (Original note: QingQi plants excepted -- their monitoring files are fixed.)
if [ ! -d "${monitoring_path}" ];then
    mkdir -p "${monitoring_path}"
fi
# NOTE(review): monitoring_file_path is not referenced in this script; it may
# be consumed by a sourced file or a sibling script -- verify before removing.
monitoring_file_path=${monitoring_path}/monitor_hive_parquet.log

# Load the per-protocol configuration (defines NAME_NODE, TARGET_TABLE,
# HDFS_OPR, CURR_PROTOCOL, FILENAME, ... consumed by submit_task).
source "${CUR_PATH}/protocol_configs/${hive_db}/${protocol_config_file}"
echo "${NAME_NODE} ${TARGET_TABLE} ${HDFS_OPR} ${FLUME_DATA_PATH} ${HIVE_TEMP_PATH} ${PARTITION_PARAM} ${CURR_PROTOCOL} ${FILENAME} ${LIMITNUM} ${hive_db}"
_start=$(date +%s)
submit_task "${hive_db}" "${CUR_PATH}/protocol_configs/${hive_db}/${protocol_config_file}"
if [ $? -ne 0 ]; then
    _end=$(date +%s)
    time=$(( _end - _start ))
    echo "spark 任务执行失败，协议名为${CURR_PROTOCOL},时间为：$(date)"
    # Mail the on-call on failure. The original wrapped this in backticks,
    # which (uselessly) executed the ssh command's stdout as a command;
    # run it directly in the background instead.
    ssh root@${mail_server} "python ${mail_script_path}/send_mail.py '生产环境任务执行异常' '${CURR_PROTOCOL} ${TARGET_TABLE} 表协议落盘失败'" > /dev/null 2>&1 &
else
    _end=$(date +%s)
    time=$(( _end - _start ))
    # Alert when a successful run took longer than 90 minutes (5400 s).
    if [ "${time}" -gt 5400 ]; then
        ssh root@${mail_server} "python ${mail_script_path}/send_mail.py '生产环境任务执行异常' '${CURR_PROTOCOL} ${TARGET_TABLE} 表协议落盘时间过长'" > /dev/null 2>&1 &
    fi
fi

