#!/bin/bash

# Source init_metadata.sh to reuse some of its helper functions (e.g. exec_mysql)
. init_metadata.sh

# Fetch one file record with the given status flag(s) from MySQL
#######################################
# Fetch one pending file record from MySQL.
# Arguments:
#   $1 - comma-separated, quoted list of flag values, e.g. "'','failure'"
# Outputs:
#   one row "id full_file_path table_name" on stdout (via exec_mysql)
#######################################
function get_file_info_with_status(){
	local sql="select id, full_file_path, table_name from hive_load.load_file_infos where flag in ($1) limit 1;"
	# Quote the query so exec_mysql receives it as a single argument;
	# unquoted $sql would be word-split and glob-expanded by the shell.
	exec_mysql "$sql"
}

# Update the status flag of one file record
#######################################
# Update the status flag of one file record in MySQL.
# Arguments:
#   $1 - row id in hive_load.load_file_infos
#   $2 - new flag value (e.g. running / done / failure)
#######################################
function update_status_of_file_info(){
	local sql="update hive_load.load_file_infos set flag='$2' where id=$1;"
	# Quote the statement so it reaches exec_mysql as a single argument.
	exec_mysql "$sql"
}

# Execute a HiveQL statement, echoing its output; exit the script on failure
#######################################
# Run a HiveQL statement; print its output, abort the script on failure.
# Arguments:
#   $1 - HiveQL statement
# Outputs:
#   "V_RESULT: <hive output>" on stdout when hive printed anything
#   (expansion is deliberately unquoted: multi-line hive output is
#   flattened onto one line, as in the original)
# Exits:
#   9 when the hive command returns non-zero
#######################################
function hive_exec(){
	# NOTE(review): V_RESULT stays global as before - confirm nothing else
	# reads it after the call before making it local.
	if ! V_RESULT=$(hive -e "$1");then
		exit 9
	fi
	if [[ -n ${V_RESULT} ]];then
		echo V_RESULT: ${V_RESULT}
	fi
}

# Take one row from the MySQL table and dispatch it for processing
#######################################
# Claim one pending/failed file from MySQL and launch its load asynchronously.
# Globals:  BASE_PATH (read) - root dir for the launcher script and log files
# Returns:  2 when no more files are pending (caller treats this as "done")
#######################################
function process_one_file(){
	echo "process_one_file begin ..."
	# Pick a record whose flag is empty (new) or 'failure' (retry).
	# NOTE(review): the awk field-splitting below assumes full_file_path
	# contains no whitespace - confirm against whatever fills the table.
	file_info=`get_file_info_with_status "'','failure'"`
	echo $file_info
	# No row returned: everything has been handed out already.
	if [[ -z $file_info ]];then
		echo "[-] Can not found one file in table will be processed."
		return 2
	fi
	
	# Row format is "id full_file_path table_name" (SELECT column order).
	id=`echo $file_info | awk '{print $1}'`
	full_file_path=`echo $file_info | awk '{print $2}'`
	table_name=`echo $file_info | awk '{print $3}'`
	echo table_name: $table_name
	# Mark the row as running before handing it off, so other pollers skip it.
	update_status_of_file_info $id running
	file_name=$(basename $full_file_path)
	
	# Fire-and-forget: the launcher runs process_one_file_core in the
	# background; its output goes to a per-file log under $BASE_PATH/logs.
	nohup bash $BASE_PATH/shell/parallel_hive_process_launcher.sh process_one_file_core $id $full_file_path $table_name > $BASE_PATH/logs/${file_name}.log 2>&1 &

	echo "process_one_file end ..."
}

# Core processing flow: load one file into a Hive table partition
#######################################
# Load one local file into a Hive table partition and record the outcome.
# Globals:  D_DATE (read) - partition value for P_MONTH
# Arguments:
#   $1 - id of the row in hive_load.load_file_infos
#   $2 - absolute path of the data file
#   $3 - target hive table name
# Exits:
#   3 when any argument is missing
#######################################
function process_one_file_core(){
	echo "process_one_file_core begin ..."
	
	local id=$1
	local full_file_path=$2
	local table_name=$3
	echo id: $id
	echo full_file_path: $full_file_path
	echo table_name: $table_name
	
	if [[ -z $id || -z $full_file_path || -z $table_name ]];then
		echo "id、full_file_path、table_name can not be empty."
		# Only flag the row as failed when we actually know its id;
		# with an empty id the UPDATE would be malformed SQL (where id=;).
		if [[ -n $id ]];then
			update_status_of_file_info $id failure
		fi
		exit 3
	fi
	
	local hql="LOAD DATA LOCAL INPATH '$full_file_path' OVERWRITE INTO TABLE $table_name PARTITION (P_MONTH='$D_DATE');"
	if hive -e "$hql";then
		# Load succeeded: mark the row done.
		update_status_of_file_info $id done
	else
		# Load failed: mark the row for retry (picked up again as 'failure').
		update_status_of_file_info $id failure
	fi
	echo "process_one_file_core end ..."
}

# Process files in parallel, capped at PARALLEL_NUMBER concurrent loads
#######################################
# Main polling loop: keeps up to PARALLEL_NUMBER hive loads running at once.
# Globals:  PARALLEL_NUMBER, SLEEP_TIME - re-sourced from parallel_param.sh
#           on every iteration, so they can be tuned while running
# Returns:  0 once the queue is empty and every running load has finished
#######################################
function parallel_process_file(){
	echo "parallel_process_file begin ..."
	current_parallel_number=0
	is_last=0		# set to 1 once the table has no more files to hand out
	
	# Seed the parallelism counter from loads that are already running.
	# NOTE(review): these ps|grep patterns assume the hive command line shows
	# up in this exact form in ps output - confirm on the target host.
	count_hive_launch=`ps ax | grep 'hive -e LOAD DATA LOCAL INPATH' | grep -v grep | awk '{print $1}' | wc -l`
	count_load=`ps ax | grep 'hive-pbc-udf.jar -e LOAD DATA LOCAL INPATH' | grep -v grep | awk '{print $1}' | wc -l`
	((current_parallel_number=$count_hive_launch+$count_load))
	echo current_parallel_number: $current_parallel_number
	
	while true;do
		# Re-source tunables so PARALLEL_NUMBER/SLEEP_TIME can change live.
		. parallel_param.sh
					
		if [ $current_parallel_number -lt $PARALLEL_NUMBER ];then
			if [[ $is_last == 0 ]];then
				sleep 1
				process_one_file			# still below the cap and data left: dispatch one file
				if [[ $? == 2 ]];then
					is_last=1			# return code 2 means the queue is empty
				else
					((current_parallel_number++))		# dispatched: one more load in flight
				fi
			else
				if [[ $current_parallel_number -eq 0 ]];then
					echo "process is finished."			# queue empty and nothing running: done
					return 0
				else
					# Queue empty but loads still running: wait, then re-count.
					echo sleeping ...
					sleep $SLEEP_TIME
					count_hive_launch=`ps ax | grep 'hive -e LOAD DATA LOCAL INPATH' | grep -v grep | awk '{print $1}' | wc -l`
					count_load=`ps ax | grep 'hive-pbc-udf.jar -e LOAD DATA LOCAL INPATH' | grep -v grep | awk '{print $1}' | wc -l`
					((current_parallel_number=$count_hive_launch+$count_load))
				fi
			fi
		else
			# At the parallelism cap: sleep, then check whether loads finished.
			echo sleeping ...
			sleep $SLEEP_TIME
			count_hive_launch=`ps ax | grep 'hive -e LOAD DATA LOCAL INPATH' | grep -v grep | awk '{print $1}' | wc -l`
			echo count_hive_launch: $count_hive_launch
			count_load=`ps ax | grep 'hive-pbc-udf.jar -e LOAD DATA LOCAL INPATH' | grep -v grep | awk '{print $1}' | wc -l`
			echo count_load: $count_load
			((count=$count_hive_launch+$count_load))
			#echo count: $count
			
			# Only lower the counter when the live count dropped below the cap.
			if [ $count -lt $PARALLEL_NUMBER ];then
				current_parallel_number=$count
			fi
		fi
		
		echo current_parallel_number: $current_parallel_number
	done
	echo "parallel_process_file end ..."	# NOTE(review): unreachable - the loop only exits via 'return 0' above
}