#!/bin/bash
# Aggregate all user recommendation results except pop user recommendation
set -e -x

# Resolve the directory this script lives in, so helper scripts and
# mapper/reducer files can be located relative to it.
curdir=$(dirname "$0")
# HDFS scratch directory for intermediate job outputs (override via HDFS_TEMP_PATH).
hdfs_temp=${HDFS_TEMP_PATH:-"temp"}
# Provides the is_hfile_exist and level_name helpers used below.
source "$curdir/logutil.sh"
# NOTE: value is intentionally a glob; it is expanded when passed unquoted
# to `hadoop jar` in each step below.
STREAMING_PATH=${STREAMING_PATH:-"/usr/lib/hadoop/contrib/streaming/hadoop-streaming-*.jar"}


# Mapper/reducer scripts for each pipeline step.
reducer13=$curdir/reducer_13.py
mapper14=$curdir/mapper_14.py
reducer14=$curdir/reducer_14.py
mapper15=$curdir/mapper_15.py
reducer15=$curdir/reducer_15.py
mapper16=$curdir/mapper_16.py
reducer16=$curdir/reducer_16.py

# Print a one-line usage summary for this script to stdout.
usage() {
    printf '%s -i raw_rec_result -f raw_feed -o output -q queue_name\n' "$(basename "$0")"
}
# Run date (used to namespace intermediate HDFS paths) and flag defaults;
# raw_rec/raw_feed/output are mandatory and filled in by getopts below.
date=$(date '+%Y-%m-%d')
raw_rec=""
raw_feed=""
output=""
queue_name="temp"

# Parse command-line flags; see usage() for their meaning.
while getopts i:f:o:q: opt
do
    case "$opt" in
    i) raw_rec="$OPTARG";;
    f) raw_feed="$OPTARG";;
    o) output="$OPTARG";;
    q) queue_name="$OPTARG";;
    ?) usage >&2
       exit 1;;
    esac
done

# -i, -f and -o are all mandatory; diagnostics belong on stderr so they
# are not mistaken for pipeline output.
if [ "$raw_rec" == "" ] || [ "$raw_feed" == "" ] || [ "$output" == "" ]; then
    echo "ERROR! raw_rec, raw_feed, output cannot be empty" >&2
    usage >&2
    exit 1
fi
shift $((OPTIND-1))

pid=$$
# Intermediate HDFS outputs, uniquified by run date and this script's PID.
tmpout1="$hdfs_temp/${date}_uu_rec_${pid}"         # reducer_13.py output: uid1, uid2, recommend reason
tmpout2="$hdfs_temp/${date}_user_uptime_${pid}"    # reducer_14.py output: recent update time of one user
tmpout3="$hdfs_temp/${date}_uurec_withtime_${pid}" # reducer_15.py output: uid1, uid2, recommend reason, time

# Remove any stale copies of every output path left over from a previous run.
# Elements and expansions are quoted so paths containing spaces stay intact.
declare -a allout=("$output" "$tmpout1" "$tmpout2" "$tmpout3")
for o in "${allout[@]}"; do
    exist=$(is_hfile_exist "$o")
    if [ "$exist" == "1" ]; then
        hadoop fs -rmr -skipTrash "$o" 1>/dev/null 2>&1
    fi
done

jobname=$(level_name "$0")
# step 1: reduce the raw recommendation results with reducer_13.py
#   -> $tmpout1: uid1, uid2, recommend reason
# $STREAMING_PATH is deliberately unquoted so its glob expands to the jar.
# The job runs inside `if !`: under `set -e` (top of file) the original
# `hadoop ...; if [ $? != 0 ]` pattern exited before the cleanup branch
# could ever run.
if ! hadoop jar $STREAMING_PATH \
    -input "$raw_rec" \
    -mapper "cat" \
    -reducer "$reducer13" \
    -output "$tmpout1" \
    -file "$reducer13" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=200" \
    -jobconf "mapred.job.name=${jobname} step1 $tmpout1" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    # Best-effort removal of the partial output before bailing out.
    hadoop fs -rmr -skipTrash "$tmpout1" || true
    echo "Error in ${jobname}" >&2
    exit 1
fi


# step 2: extract each user's most recent update time from the raw feed
# via mapper_14.py / reducer_14.py -> $tmpout2.
# Run under `if !` because `set -e` would abort before a post-hoc
# `if [ $? != 0 ]` check could clean up.
if ! hadoop jar $STREAMING_PATH \
    -input "$raw_feed" \
    -mapper "$mapper14" \
    -reducer "$reducer14" \
    -output "$tmpout2" \
    -file "$mapper14" \
    -file "$reducer14" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=50" \
    -jobconf "mapred.job.name=${jobname} step2 $tmpout2" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    # Best-effort removal of the partial output before bailing out.
    hadoop fs -rmr -skipTrash "$tmpout2" || true
    echo "Error in ${jobname}" >&2
    exit 1
fi

# step 3: join step 1 and step 2 outputs with mapper_15.py / reducer_15.py
# -> $tmpout3: uid1, uid2, recommend reason, time.
# Run under `if !` because `set -e` would abort before a post-hoc
# `if [ $? != 0 ]` check could clean up.
if ! hadoop jar $STREAMING_PATH \
    -input "$tmpout1,$tmpout2" \
    -mapper "$mapper15" \
    -reducer "$reducer15" \
    -output "$tmpout3" \
    -file "$mapper15" \
    -file "$reducer15" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=50" \
    -jobconf "mapred.job.name=${jobname} step3 $tmpout3" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    # Best-effort removal of the partial output before bailing out.
    hadoop fs -rmr -skipTrash "$tmpout3" || true
    echo "Error in ${jobname}" >&2
    exit 1
fi

# step 4: final aggregation of step 3 output with mapper_16.py /
# reducer_16.py into the user-supplied $output path.
# Run under `if !` because `set -e` would abort before a post-hoc
# `if [ $? != 0 ]` check could clean up.
if ! hadoop jar $STREAMING_PATH \
    -input "$tmpout3" \
    -mapper "$mapper16" \
    -reducer "$reducer16" \
    -output "$output" \
    -file "$mapper16" \
    -file "$reducer16" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=100" \
    -jobconf "mapred.job.name=${jobname} step4 $output" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    # Best-effort removal of the partial output before bailing out.
    hadoop fs -rmr -skipTrash "$output" || true
    echo "Error in ${jobname}" >&2
    exit 1
fi

exit 0

