#!/bin/bash
#########################################
#version 1.3

########################################################################
# Function  : InitEnv
# Purpose   : Initialise global variables (script name, install dir,
#             timestamps), build the log file paths, create the log
#             directory, and redirect stderr into the error log.
# Arguments : none
# Returns   : none
# Globals   : sets SCRIPT_NAME DIR_INSTALL DATE HOUR TIME UTC
#             DIR_LOG LOG_NORMAL LOG_ERROR
########################################################################
function InitEnv
{
        # Script name with the .sh suffix stripped (quoted: safe on paths with spaces)
        SCRIPT_NAME="$(basename "$0" .sh)"
        # Absolute directory the script lives in
        DIR_INSTALL="$(cd "$(dirname "$0")" && pwd)"
        # Run date/time stamps used in the log file names
        DATE=$(date +'%Y%m%d')
        HOUR=$(date +'%H')
        TIME=$(date +'%M%S')
        # Epoch seconds at start-up
        UTC=$(date +'%s')
        # Log directory (no trailing slash, so joins below do not produce '//')
        DIR_LOG="${DIR_INSTALL}/log"
        # Normal run log
        LOG_NORMAL="${DIR_LOG}/${SCRIPT_NAME}_${DATE}.log"
        # Error log; receives the whole script's stderr via exec below
        LOG_ERROR="${DIR_LOG}/${SCRIPT_NAME}_${DATE}.err"
        # Create the log directory before redirecting stderr into it
        mkdir -p "${DIR_LOG}"
        exec 2>"${LOG_ERROR}"
        # Pick up site-specific environment (e.g. hadoop/hdfs in PATH);
        # guarded so a missing rc file does not log a spurious error
        if [ -f ~/.bashrc ]; then
            source ~/.bashrc
        fi
}

########################################################################
# Function  : Echo
# Purpose   : Write a timestamped message to the run log and echo it
#             to stdout.
# Arguments : $1 - message text
#             $2 - log level: 1=INFO, 2=WARN, anything else=ERROR
# Returns   : none; logs a usage error if not called with exactly 2 args
# Globals   : reads SCRIPT_NAME LOG_NORMAL LOG_ERROR
########################################################################
function Echo
{
    local LOGTIME
    LOGTIME=$(date '+%Y-%m-%d %H:%M:%S')
    if (($# == 2));then
        local message=$1
        local logType=$2
        # Map the numeric level to its label with a single quoted case;
        # the original unquoted [ ${logType} == 1 ] was a test-syntax
        # error for empty $2 and silently fell through to ERROR.
        local level
        case "${logType}" in
            1) level="INFO" ;;
            2) level="WARN" ;;
            *) level="ERROR" ;;
        esac
        echo -e "$LOGTIME ${level} ${SCRIPT_NAME}:${message}" >> "${LOG_NORMAL}"
        echo "$LOGTIME ${level} ${SCRIPT_NAME}:${message}"
    else
        echo -e "Function[Echo] usage error" >> "${LOG_ERROR}"
    fi
}

########################################################################
# Function  : mv_files
# Purpose   : Pull CSV files for completed hours from HDFS down to the
#             local /data1../data3 disks, splitting multi-hour files
#             into per-hour files, using a FIFO-token-throttled pool of
#             background workers. Cleans up processed status files and
#             data older than 4 days on HDFS.
# Arguments : $1 - maximum number of concurrent workers
# Returns   : none; exits 2 on any fatal error, plain exit when there
#             is no status file to process
# Globals   : reads DISK_MONITOR_PATH DISK_TMP_PATH DISK_RENAME_PATH
#             LOCAL_STATUS_PATH LOCAL_DATA_PATH HDFS_STATUS_PATH
#             local_ip fileMaxCount
########################################################################
mv_files()
{
	# Maximum number of concurrent worker processes
	THREAD_NUM=$1
	# Bind fd 9 to a named pipe: it acts as a token bucket limiting
	# how many background workers may run at once
	mkfifo -m 777 /tmp/mvXdrTmp
	exec 9<>/tmp/mvXdrTmp
	if [ $? == 0 ];then
		Echo "创建状态文件mvXdrTmp成功，继续执行" 1
	else
		Echo "创建状态文件mvXdrTmp失败，退出" 2
		exit 2
	fi
	# Pre-fill the pipe with one newline per allowed worker;
	# each newline is one "run token"
	for ((i=0;i<$THREAD_NUM;i++))
	do
		echo -ne "\n" 1>&9
	done
	# fd 9 stays open, so the pipe node can be unlinked already
	rm -f /tmp/mvXdrTmp
  
	# Count CSVs already queued in each disk's monitor directory
	# (NOTE(review): ls exits non-zero and counts 0 when the glob
	# matches nothing — confirm that is the intended "empty" case)
	local countA=$(ls /data1/$DISK_MONITOR_PATH/*.csv|wc -l)
	local countB=$(ls /data2/$DISK_MONITOR_PATH/*.csv|wc -l)
	local countC=$(ls /data3/$DISK_MONITOR_PATH/*.csv|wc -l)
	echo "countA" + ${countA}
	echo "countB" + ${countB}
	echo "countC" + ${countC}
	# Proceed only if at least one disk still has capacity
	if [ $countA -lt ${fileMaxCount} ] || [ $countB -lt ${fileMaxCount} ] || [ $countC -lt ${fileMaxCount} ] ;then
		
		# Build the list of eligible target disks (< fileMaxCount CSVs
		# queued). NOTE(review): some entries carry a trailing slash
		# (/data3/) — harmless (produces '//' in joined paths) but
		# inconsistent; confirm before normalising.
		if [ $countA -lt ${fileMaxCount} ] && [ $countB -lt ${fileMaxCount} ] && [ $countC -lt ${fileMaxCount} ] ;then
			dirNum=3
			pathList=(/data1 /data2 /data3/)
		elif [ $countA -lt ${fileMaxCount} ] && [ $countB -lt ${fileMaxCount} ] && [ $countC -ge ${fileMaxCount} ] ;then
			dirNum=2
			pathList=(/data1 /data2)
		elif [ $countA -lt ${fileMaxCount} ] && [ $countB -ge ${fileMaxCount} ] && [ $countC -lt ${fileMaxCount} ] ;then
			dirNum=2
			pathList=(/data1 /data3/)
		elif [ $countA -lt ${fileMaxCount} ] && [ $countB -ge ${fileMaxCount} ] && [ $countC -ge ${fileMaxCount} ] ;then
			dirNum=1
			pathList=(/data1)
		elif [ $countA -ge ${fileMaxCount} ] && [ $countB -lt ${fileMaxCount} ] && [ $countC -lt ${fileMaxCount} ] ;then
			dirNum=2
			pathList=(/data2 /data3/)
		elif [ $countA -ge ${fileMaxCount} ] && [ $countB -lt ${fileMaxCount} ] && [ $countC -ge ${fileMaxCount} ] ;then
			dirNum=1
			pathList=(/data2)
		elif [ $countA -ge ${fileMaxCount} ] && [ $countB -ge ${fileMaxCount} ] && [ $countC -lt ${fileMaxCount} ] ;then
			dirNum=1
			pathList=(/data3)
		else
			exit 2
		fi
		# NOTE(review): head -n 1 limits this run to a single status
		# file even though the for-loop below can iterate — confirm
		# one-file-per-invocation is intentional
		filelist=$(hadoop fs -ls ${LOCAL_STATUS_PATH}|grep "status"| awk -F '/' '{print $NF}'|head -n 1)
		if [ "${filelist}" != "" ];then
			for file in ${filelist}
			do
				# Status file name encodes the hour it marks complete:
				# chars 0-7 = YYYYMMDD. NOTE(review): ${file:8:4} takes
				# four chars (HHMM?) yet is used as the hour path
				# component — confirm against actual file names.
				local DAY=${file:0:8}
				local HOUR=${file:8:4}
				local HDFS_DATA_PATH=${LOCAL_DATA_PATH}/${DAY}/${HOUR}/${local_ip}/
				hdfsFileList=$(hadoop fs -ls ${HDFS_DATA_PATH}|grep "log"| awk -F '/' '{print $NF}')
				if [ "${hdfsFileList}" != "" ];then
					local fileCount=0
					for fname in ${hdfsFileList}
					do
					{
						# Pick a random eligible disk for this file
						dir_num=$(expr ${RANDOM} % ${dirNum})
						path_aux=${pathList[${dir_num}]}
						# Take a token from fd 9; blocks while
						# THREAD_NUM workers are already running
						read -u 9
						{
						Echo "${HDFS_DATA_PATH}/$fname" 1
						USE_DISK_TMP_PATH=${path_aux}/$DISK_TMP_PATH/
						USE_DISK_MONITOR_PATH=${path_aux}/$DISK_MONITOR_PATH/
						USE_DISK_RENAME_PATH=${path_aux}/$DISK_RENAME_PATH/
						mkdir -p $USE_DISK_TMP_PATH
						mkdir -p $USE_DISK_MONITOR_PATH
						mkdir -p $USE_DISK_RENAME_PATH
						# Remove a leftover copy so -get cannot fail on an existing file
						if [ -f $USE_DISK_TMP_PATH/$fname ];then
							rm -f $USE_DISK_TMP_PATH/$fname
						fi
						
						/usr/bin/hdfs dfs -get $HDFS_DATA_PATH/$fname $USE_DISK_TMP_PATH
						if [ $? != 0 ];then
							Echo "从HDFS下载 $HDFS_DATA_PATH/$fname 失败,请检查,即将退出" 2
							exit 2
						else
							Echo "从HDFS下载 $HDFS_DATA_PATH/$fname 成功,下载到${USE_DISK_TMP_PATH},继续执行" 1
							chmod 777 $USE_DISK_TMP_PATH/$fname
						fi
						Echo "将文件从$USE_DISK_TMP_PATH/$fname 移动到$USE_DISK_MONITOR_PATH/,继续执行" 1
						fileName=$(basename $fname .csv)
						fileSplitCount=$(echo $fileName |awk -F '_' '{print NF}')
						# Names with exactly 6 '_'-separated fields are
						# already per-hour files: move them straight to
						# the monitor directory
						if [ $fileSplitCount -eq 6 ];then
							
							mv $USE_DISK_TMP_PATH/$fname $USE_DISK_MONITOR_PATH/
							if [ $? != 0 ];then
								Echo "将文件从$USE_DISK_TMP_PATH/$fname 移动到 $USE_DISK_MONITOR_PATH/ 失败,请检查,即将退出" 2
								exit 2
							else
								Echo "将文件从$USE_DISK_TMP_PATH/$fname 移动到 $USE_DISK_MONITOR_PATH/ 成功,继续执行" 1
							fi
						else
							# Otherwise the file may span several hours:
							# split rows by the hour in column 11 into
							# per-hour files covering now, -1h, -2h, +1h.
							fileType=$(echo $fileName|awk -F '_' '{print $1"_"$2"_"$3"_"$4"_"$5}')
							Echo $fileType 1
							fileDate=$(echo $fileName|awk -F '_' '{print $1}')
							fileTime=$(echo $fileName|awk -F '_' '{print $2}')
							fileTimeNow=$(date -d "$fileDate $fileTime" +'%Y%m%d%H')
							fileTimeOneHourAfter=$(date -d "$fileDate $fileTime - 1 hour" +'%Y%m%d%H')
							fileTimeTwoHourAfter=$(date -d "$fileDate $fileTime - 2 hour" +'%Y%m%d%H')
							fileTimeOneHourLa=$(date -d "$fileDate $fileTime + 1 hour" +'%Y%m%d%H')
							
							# NOTE(review): awk substr($11,0,10) — index 0
							# is treated like 1 by awk, so this reads the
							# first 10 chars (YYYYMMDDHH) — confirm field
							# 11's format matches.
							cat $USE_DISK_TMP_PATH/$fname | awk -F '|' -v t=$fileTimeNow '{if(substr($11,0,10) == t) {print $0}}' >> ${USE_DISK_RENAME_PATH}/${fileType}_$fileTimeNow.csv
							cat $USE_DISK_TMP_PATH/$fname | awk -F '|' -v t=$fileTimeOneHourAfter '{if(substr($11,0,10) == t) {print $0}}' >> ${USE_DISK_RENAME_PATH}/${fileType}_$fileTimeOneHourAfter.csv
							cat $USE_DISK_TMP_PATH/$fname | awk -F '|' -v t=$fileTimeTwoHourAfter '{if(substr($11,0,10) == t) {print $0}}' >> ${USE_DISK_RENAME_PATH}/${fileType}_$fileTimeTwoHourAfter.csv
							cat $USE_DISK_TMP_PATH/$fname | awk -F '|' -v t=$fileTimeOneHourLa '{if(substr($11,0,10) == t) {print $0}}' >> ${USE_DISK_RENAME_PATH}/${fileType}_$fileTimeOneHourLa.csv
							rm -f $USE_DISK_TMP_PATH/$fname
							mv ${USE_DISK_RENAME_PATH}/${fileType}_$fileTimeNow.csv $USE_DISK_MONITOR_PATH/
							mv ${USE_DISK_RENAME_PATH}/${fileType}_$fileTimeOneHourAfter.csv $USE_DISK_MONITOR_PATH/
							mv ${USE_DISK_RENAME_PATH}/${fileType}_$fileTimeTwoHourAfter.csv $USE_DISK_MONITOR_PATH/
							mv ${USE_DISK_RENAME_PATH}/${fileType}_$fileTimeOneHourLa.csv $USE_DISK_MONITOR_PATH/
						fi
						
						Echo "将文件从$USE_DISK_TMP_PATH/$fname 删除成功,继续执行" 1
						# Drop any zero-byte split outputs
						find $USE_DISK_MONITOR_PATH/ -name "*.csv" -size 0 |xargs rm -f 
						
						# Return the run token so the next worker can start
						echo -ne "\n" 1>&9
						} &
						# NOTE(review): fileCount is incremented here but
						# never read afterwards — confirm it can be removed
						fileCount=$(expr ${fileCount} + 1)
					}
					done
					
					# Barrier: wait for every background worker to finish
					wait
					
					# Delete the processed status file on HDFS
					su - hdfs -c "/usr/bin/hdfs dfs -rmr -skipTrash $HDFS_STATUS_PATH/${file}"
					if [ $? != 0 ];then
						Echo "从HDFS删除状态文件 $HDFS_STATUS_PATH/${file} 失败,请检查,即将退出" 2
						exit 2
					else
						Echo "从HDFS删除状态文件 $HDFS_STATUS_PATH/${file} 成功" 1
					fi
					
					# Purge HDFS data older than 4 days: either the hour
					# just processed (if already old) or the same hour
					# slot 4 days back
					local delDate=$(date -d '-4 day' +'%Y%m%d')
					if [ ${DAY} -le ${delDate} ];then
						su - hdfs -c "/usr/bin/hdfs dfs -rmr -skipTrash $HDFS_DATA_PATH/"
						if [ $? != 0 ];then
							Echo "从HDFS删除状态文件 $HDFS_DATA_PATH 失败,请检查,即将退出" 2
							exit 2
						else
							Echo "从HDFS删除状态文件 $HDFS_DATA_PATH 成功" 1
						fi
					else
						local HDFS_OLD_DATA_PATH=${LOCAL_DATA_PATH}/${delDate}/${HOUR}/${local_ip}/
						su - hdfs -c "/usr/bin/hdfs dfs -rmr -skipTrash $HDFS_OLD_DATA_PATH/"
						if [ $? != 0 ];then
							Echo "从HDFS删除状态文件 $HDFS_OLD_DATA_PATH 失败,请检查,即将退出" 2
							exit 2
						else
							Echo "从HDFS删除状态文件 $HDFS_OLD_DATA_PATH 成功" 1
						fi
					fi
					
					
					# Re-check capacity after the batch; abort if every disk is now full
					local countD=$(ls /data1/$DISK_MONITOR_PATH/*.csv|wc -l)
					local countE=$(ls /data2/$DISK_MONITOR_PATH/*.csv|wc -l)
					local countF=$(ls /data3/$DISK_MONITOR_PATH/*.csv|wc -l)
			
					if [ $countD -ge ${fileMaxCount} ] && [ $countE -ge ${fileMaxCount} ] && [ $countF -ge ${fileMaxCount} ] ;then
						Echo "目录： /data1/$DISK_MONITOR_PATH/下的文件数量超过 ${fileMaxCount}, 退出" 2
						exit 2
					fi
				else
					# Hour directory is empty: drop its status file and move on
					su - hdfs -c "/usr/bin/hdfs dfs -rmr -skipTrash $HDFS_STATUS_PATH/${file}"
					if [ $? != 0 ];then
						Echo "从HDFS删除状态文件 $HDFS_STATUS_PATH/${file} 失败,请检查,即将退出" 2
						exit 2
					else
						Echo "从HDFS删除状态文件 $HDFS_STATUS_PATH/${file} 成功" 1
					fi
					# NOTE(review): this Echo call omits the log-level
					# argument, so Echo logs a usage error instead of the
					# message — confirm and pass a level (e.g. 2)
					Echo "${HDFS_DATA_PATH} is not exits continue"
					continue
				fi
				
			done
		else
			# NOTE(review): level argument missing here as well — see note above
			Echo "${LOCAL_STATUS_PATH} is not exits break"
			exit 
		fi
	else
		Echo "目录： /data*/$DISK_MONITOR_PATH/下的文件数量都超过 ${fileMaxCount}, 退出" 2
		exit 2
	fi
  
	# Redundant with the unlink after pre-filling fd 9, but harmless
	rm -f /tmp/mvXdrTmp
}
########################################################################
########################################################################
InitEnv
# Remove any stale FIFO left behind by a previous crashed run
rm -f /tmp/mvXdrTmp
# Address of this host; also used as the destination (same machine here)
local_ip=10.225.20.4
dst_ip=10.225.20.4
# HDFS locations: the data files and the status files that mark complete hours
LOCAL_DATA_PATH=/xdrlog/jiake/output/upload/data/jzcc/log
LOCAL_STATUS_PATH=/xdrlog/jiake/output/upload/status/dpi/${local_ip}/
HDFS_STATUS_PATH=${LOCAL_STATUS_PATH}
# Path relative to each /dataN mount. Strip the fixed
# "/xdrlog/jiake/output/upload/" prefix by pattern instead of the old
# magic character offset ':27', so editing LOCAL_DATA_PATH cannot
# silently produce a wrong relative path.
DATA_REL_PATH=${LOCAL_DATA_PATH#*/upload/}
DISK_TMP_PATH=${DATA_REL_PATH}/${dst_ip}_tmp
DISK_RENAME_PATH=${DATA_REL_PATH}/${dst_ip}_re
DISK_MONITOR_PATH=${DATA_REL_PATH}/${dst_ip}
# Stop pulling new files once every monitor dir holds this many CSVs
fileMaxCount=2000

# Download/split/move with up to 3 parallel workers
mv_files 3

########################################################################
########################################################################