#!/bin/bash
# Hadoop-streaming driver: computes follow recommendations from a follow list.
# Optional env overrides: HDFS_TEMP_PATH (scratch dir), STREAMING_PATH (jar glob).

set -e -x

curdir=$(dirname "$0")
hdfs_temp=${HDFS_TEMP_PATH:-"temp"}
# Shared helpers used below: is_hfile_exist, level_name.
source "$curdir/logutil.sh"
# Deliberately left as a glob; it is expanded unquoted at the `hadoop jar` call.
STREAMING_PATH=${STREAMING_PATH:-"/usr/lib/hadoop/contrib/streaming/hadoop-streaming-*.jar"}

mapper=$curdir/mapper_5.py
reducer=$curdir/reducer_5.py

# Print a one-line usage synopsis for this script to stdout.
function usage
{
    # Quote the whole line so the script name cannot word-split or glob-expand.
    echo "$(basename "$0") -i follow_list -o follow_recommendation -q queue_name"
}

follow_recommendation=''
follow_list=''
queue_name='temp'

# NOTE(review): `d:` is kept in the optstring for backward compatibility with
# existing callers, but its value has no effect — confirm whether it can be
# retired.
while getopts d:o:q:i: opt
do
    case "$opt" in
    o) follow_recommendation="$OPTARG";;
    q) queue_name="$OPTARG";;
    i) follow_list="$OPTARG";;
    d) ;;  # legacy flag, intentionally ignored
    ?) usage
       exit 1;;
    esac
done

# Both the input listing and the output path are mandatory.
if [[ -z "$follow_recommendation" || -z "$follow_list" ]]; then
    echo "ERROR! follow_list and follow_recommendation cannot be empty" >&2
    usage
    exit 1
fi
shift $((OPTIND-1))

# Remove any pre-existing output path(s) so the streaming job can recreate them.
# NOTE: $follow_recommendation is deliberately left unquoted here so that a
# space-separated list of paths splits into one array element per path.
declare -a allout=($follow_recommendation)
for o in "${allout[@]}"; do
    exist=$(is_hfile_exist "$o")
    if [ "$exist" == "1" ]; then
        # Best-effort delete; output is discarded because the path was just
        # confirmed to exist.
        hadoop fs -rmr -skipTrash "$o" 1>/dev/null 2>&1
    fi
done

jobname=$(level_name "$0")
jobname="$jobname --> $follow_recommendation"

# step 1
# flatten click data and collect click count
# output: aid rec_aid UV|VV CLICK cnt
#
# The exit-status check must wrap the command itself: with `set -e` enabled
# above, a bare failing `hadoop jar` would terminate the script immediately,
# so a following `if [ $? != 0 ]` test could never run (dead code).
# $STREAMING_PATH stays unquoted on purpose so its glob resolves to the jar.
if ! hadoop jar $STREAMING_PATH \
    -input "$follow_list" \
    -mapper "$mapper" \
    -reducer "$reducer" \
    -output "$follow_recommendation" \
    -file "$mapper" \
    -file "$reducer" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=100" \
    -jobconf "mapred.job.name=${jobname}" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    # Drop the partial output; unquoted on purpose to match the multi-path
    # split convention used by the cleanup loop above.
    hadoop fs -rmr -skipTrash $follow_recommendation
    echo "Error in ${jobname}" >&2
    exit 1
fi

exit 0

