#!/bin/bash
# Aggregate all user recommendation results except pop user recommendation.
#
# Required env (optional overrides):
#   HDFS_TEMP_PATH  - HDFS scratch directory (default: "temp")
#   STREAMING_PATH  - hadoop-streaming jar glob
set -e -x

curdir=$(dirname "$0")
hdfs_temp=${HDFS_TEMP_PATH:-"temp"}
# Provides helpers used below: is_hfile_exist, level_name.
source "$curdir/logutil.sh"
# NOTE: kept as a glob on purpose; it must stay unquoted where expanded.
STREAMING_PATH=${STREAMING_PATH:-"/usr/lib/hadoop/contrib/streaming/hadoop-streaming-*.jar"}


# Streaming scripts shipped to the cluster with each job.
mapper6=$curdir/mapper_6.py
mapper7=$curdir/mapper_7.py
reducer8=$curdir/reducer_8.py

# Print a one-line usage summary for this script to stdout.
usage()
{
    printf '%s -f user_feed -r feed -o output -q queue_name\n' "$(basename "$0")"
}
date=$(date '+%Y-%m-%d')
user_feed=''
feed=''
output=''
queue_name='temp'

# Parse command-line flags; see usage() for the contract.
# NOTE(review): 'd:' is accepted by getopts but never handled in the
# case statement — confirm whether -d was meant to set $date.
while getopts f:r:d:o:q: opt
do
    case "$opt" in
    f) user_feed="$OPTARG";;
    r) feed="$OPTARG";;
    o) output="$OPTARG";;
    q) queue_name="$OPTARG";;
    ?) usage
       exit 1;;
    esac
done

# All three of these flags are mandatory; diagnostics go to stderr.
if [ -z "$user_feed" ] || [ -z "$feed" ] || [ -z "$output" ]; then
    echo "ERROR! user_feed, feed and output cannot be empty" >&2
    usage
    exit 1
fi
shift $((OPTIND-1))

pid=$$
# Per-run temp dirs on HDFS, keyed by date and this script's PID.
tmpout1=$hdfs_temp/${date}_fed${pid} # output of mapper6: contentid \t tag (fed)
tmpout2=$hdfs_temp/${date}_feeding_${pid} # output of mapper7: contentid \t tag (feeding)

# Clean old data: remove the final output and both temp dirs if they
# already exist, so the streaming jobs can recreate them.
# All expansions quoted so paths with spaces/globs don't word-split.
declare -a allout=("$output" "$tmpout1" "$tmpout2")
for o in "${allout[@]}"; do
    exist=$(is_hfile_exist "$o")
    if [ "$exist" == "1" ]; then
        hadoop fs -rmr -skipTrash "$o" 1>/dev/null 2>&1
    fi
done

jobname=$(level_name "$0")
# step 1: map-only job over $user_feed with mapper_6.py, gzip output to $tmpout1.
# Run under 'if !' so the failure branch is reachable: with 'set -e' (L3),
# a bare failing command would abort the script before the old
# 'if [ $? != 0 ]' check ran, skipping the temp-dir cleanup entirely.
# $STREAMING_PATH is intentionally unquoted — it is a glob for the jar.
if ! hadoop jar $STREAMING_PATH \
    -input "$user_feed" \
    -mapper "$mapper6" \
    -reducer "NONE" \
    -output "$tmpout1" \
    -file "$mapper6" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=0" \
    -jobconf "mapred.job.name=${jobname} step1 $tmpout1" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    hadoop fs -rmr -skipTrash "$tmpout1"
    echo "Error in ${jobname}" >&2
    exit 1
fi


# step 2: map-only job over $feed with mapper_7.py, gzip output to $tmpout2.
# 'if !' keeps the cleanup branch reachable under 'set -e' (see step 1).
if ! hadoop jar $STREAMING_PATH \
    -input "$feed" \
    -mapper "$mapper7" \
    -reducer "NONE" \
    -output "$tmpout2" \
    -file "$mapper7" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=0" \
    -jobconf "mapred.job.name=${jobname} step2 $tmpout2" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    hadoop fs -rmr -skipTrash "$tmpout2"
    echo "Error in ${jobname}" >&2
    exit 1
fi

# step 3: merge both temp outputs and reduce with reducer_8.py into $output.
# 'if !' keeps the cleanup branch reachable under 'set -e' (see step 1).
if ! hadoop jar $STREAMING_PATH \
    -input "$tmpout1,$tmpout2" \
    -mapper "cat" \
    -reducer "$reducer8" \
    -output "$output" \
    -file "$reducer8" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=50" \
    -jobconf "mapred.job.name=${jobname} step3 $output" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    hadoop fs -rmr -skipTrash "$output"
    echo "Error in ${jobname}" >&2
    exit 1
fi

exit 0

