#!/bin/bash
# Author: 
# Func: 
# Preprocess user relation data: check upstream dependencies, run the
# streaming job, and publish a done flag on success.

set -e -x

curdir=$(dirname "$0")
# In production, "conf" dir should be parallel with the working dir which contains this running script.
export QITAN_CONF_PATH=${QITAN_CONF_PATH:-"$curdir/../conf"}
source "$QITAN_CONF_PATH/user_rec.conf"
# In production, "logutil.sh" file should be in the same working dir as current running script is.
# Provides helpers used below: level_name, is_run, is_hfile_exist.
source "$curdir/logutil.sh"

# get data path variables (overridable via environment / user_rec.conf)
hdfs_temp=${HDFS_TEMP_PATH:-"temp"}
ur_hdfs_data_path=${USER_REC_HDFS_DATA_PATH:-'user_rec/data'}
ur_hdfs_done_path=${USER_REC_HDFS_DONE_PATH:-'user_rec/done'}

# set parameters: optional $1 selects the processing day
# (any format GNU date -d understands); defaults to today.
if [ $# -ge 1 ]; then
    now_day=$(date -d"$1" '+%Y-%m-%d')
else # default: today
    now_day=$(date '+%Y-%m-%d')
fi
now_day_year=$(date -d"$now_day" '+%Y')
now_day_month=$(date -d"$now_day" '+%m')

# yesterday relative to now_day; dependency inputs are keyed by this date
yes_day=$(date -d"$now_day -1 day" '+%Y-%m-%d')
yes_day_year=$(date -d"$now_day -1 day" '+%Y')
yes_day_month=$(date -d"$now_day -1 day" '+%m')

queue_name=${QUEUE_NAME:-'temp'}

# from run_xxx.abc to xxx (level_name is provided by logutil.sh)
now_level_name=$(level_name "$0")

# define dependencies
dep_level_name1=$now_level_name
dep_level_name2=$(level_name run_preprocess_user_relation_data.sh)

dep_input1=${ur_hdfs_data_path}/${dep_level_name1}/${yes_day_year}/${yes_day_month}/${yes_day} # aggregated results
dep_input2=${ur_hdfs_data_path}/${dep_level_name2}/${yes_day_year}/${yes_day_month}/${yes_day} # incremental results

dep_done_path1=${ur_hdfs_done_path}/${dep_level_name1}/${yes_day_year}/${yes_day_month}/${yes_day}.done
dep_done_path2=${ur_hdfs_done_path}/${dep_level_name2}/${yes_day_year}/${yes_day_month}/${yes_day}.done

# index i of dep_done_array pairs with index i of dep_level_array
declare -a dep_level_array=("$dep_level_name1" "$dep_level_name2")
declare -a dep_done_array=("$dep_done_path1" "$dep_done_path2")

# define data for current job
output=${ur_hdfs_data_path}/${now_level_name}/${now_day_year}/${now_day_month}/${now_day}
outdone=${ur_hdfs_done_path}/${now_level_name}/${now_day_year}/${now_day_month}/${now_day}.done
pid_path=${USER_REC_LOCAL_PLOG_PATH:-"$curdir/../logs_pid"}
pid_file=$pid_path/$now_level_name

####################################################################
#
#    test whether this script is already executing (pid-file lock)
#
####################################################################
if [ -e "$pid_file" ]
then
    # is_run (from logutil.sh) reports "1" when the recorded pid is alive
    running=$(is_run "$pid_file")
    if [ "$running" = "1" ]
    then
        echo "$now_level_name is running"
        date
        exit 0
    fi
    # stale pid file left by a dead run: remove it and take over
    rm -f "$pid_file"
fi
pid=$$
mkdir -p "$pid_path"
echo $pid > "$pid_file"

####################################################################
#
#    test whether this job is done or not
#
####################################################################
# is_hfile_exist (from logutil.sh) reports "1" when the HDFS path exists
exist=$(is_hfile_exist "$outdone")
if [ "$exist" = "1" ]; then
    echo "$now_level_name in ${now_day} has been done successfully......"
    exit 0
else
    echo "$now_level_name in ${now_day} has not been done yet, now continuing......"
fi

####################################################################
#
#    test whether required data are ready
#
####################################################################
for ((i = 0; i < ${#dep_done_array[@]}; i++)); do
    done_path=${dep_done_array[$i]}
    lev_name=${dep_level_array[$i]}
    exist=$(is_hfile_exist "$done_path")
    if [ "$exist" = "0" ]; then
        # any missing done flag aborts the run; a later invocation retries
        echo "dep job $lev_name is not ready!"
        exit 1
    fi
done

##########################################################################
#
# preprocess raw user relation data
#
##########################################################################
# NOTE: the command must be the 'if' condition itself. Checking "$?" on a
# separate line is dead code under 'set -e' (L5): a non-zero exit would
# terminate the script before the check, so the error branch never ran.
if "$curdir/get_follow_list_stream.sh" -a "$dep_input1" -o "$output" -i "$dep_input2"; then
    echo "OK in $now_level_name in $now_day"
    # publish the done flag so downstream jobs (and re-runs) see success
    hadoop fs -touchz "$outdone"
else
    echo "Error in $now_level_name in $now_day"
    exit 1
fi

exit 0

