#!/bin/bash
#
# Three-step Hadoop streaming pipeline: preprocess raw user2feed and raw
# feed data, then join them to produce feed-related recommendations.
# Required env: none (HDFS_TEMP_PATH and STREAMING_PATH are optional overrides).

set -e -x

curdir=$(dirname "$0")
hdfs_temp=${HDFS_TEMP_PATH:-"temp"}
# logutil.sh provides is_hfile_exist and level_name used below.
source "$curdir/logutil.sh"
# NOTE: the default is a glob that must stay unquoted where it is used,
# so the shell expands it to the actual streaming jar on this cluster.
STREAMING_PATH=${STREAMING_PATH:-"/usr/lib/hadoop/contrib/streaming/hadoop-streaming-*.jar"}

mapper7=$curdir/mapper_7.py
mapper8=$curdir/mapper_8.py
reducer9=$curdir/reducer_9.py

# Print a one-line usage synopsis to stdout.
usage()
{
    printf '%s -r raw_user2feed -f raw_feed -o output -q queue_name\n' "$(basename "$0")"
}
# Run date used to namespace per-day temp paths on HDFS.
date=$(date '+%Y-%m-%d')
# Required CLI arguments (validated after getopts); queue has a default.
raw_user2feed=''
raw_feed=''
output=''
queue_name='temp'

# Parse -r/-f/-o/-q; any unknown flag prints usage and aborts.
while getopts r:f:o:q: opt
do
    case "$opt" in
    r) raw_user2feed="$OPTARG";;
    f) raw_feed="$OPTARG";;
    o) output="$OPTARG";;
    q) queue_name="$OPTARG";;
    \?) usage
        exit 1;;
    esac
done

# All three data paths are mandatory.
if [[ -z "$raw_user2feed" || -z "$raw_feed" || -z "$output" ]]; then
    echo "ERROR! raw_user2feed, raw_feed, output cannot be empty" >&2
    usage
    exit 1
fi
shift $((OPTIND-1))

# Per-run temp outputs on HDFS, namespaced by date and this script's PID
# so concurrent runs on the same day do not collide.
pid=$$
tmpout1=$hdfs_temp/${date}_user2feed_tmp_${pid}
tmpout2=$hdfs_temp/${date}_feed_tmp_${pid}

# Clean old data: remove any stale copies of the outputs before running.
declare -a allout=("$output" "$tmpout1" "$tmpout2")
for o in "${allout[@]}"; do
    exist=$(is_hfile_exist "$o")
    if [ "$exist" == "1" ]; then
        hadoop fs -rmr -skipTrash "$o" 1>/dev/null 2>&1
    fi
done

# level_name (from logutil.sh) derives the MapReduce job name from the script path.
jobname=$(level_name "$0")

# step 1: process raw user feed (map with mapper_7, identity reduce).
# Run inside `if !` so a failure does not abort via `set -e` before we can
# clean up the partial output; the original `if [ $? != 0 ]` was unreachable.
# $STREAMING_PATH is intentionally unquoted: its default is a glob.
if ! hadoop jar $STREAMING_PATH \
    -input "$raw_user2feed" \
    -mapper "$mapper7" \
    -reducer "cat" \
    -output "$tmpout1" \
    -file "$mapper7" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=20" \
    -jobconf "mapred.job.name=${jobname} --> $tmpout1" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    hadoop fs -rmr -skipTrash "$tmpout1"
    echo "Error in ${jobname}" >&2
    exit 1
fi

# step 2: process raw feed (map with mapper_8, identity reduce).
# Run inside `if !` so a failure does not abort via `set -e` before we can
# clean up the partial output; the original `if [ $? != 0 ]` was unreachable.
# $STREAMING_PATH is intentionally unquoted: its default is a glob.
if ! hadoop jar $STREAMING_PATH \
    -input "$raw_feed" \
    -mapper "$mapper8" \
    -reducer "cat" \
    -output "$tmpout2" \
    -file "$mapper8" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=20" \
    -jobconf "mapred.job.name=${jobname} --> $tmpout2" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    hadoop fs -rmr -skipTrash "$tmpout2"
    echo "Error in ${jobname}" >&2
    exit 1
fi

# step 3: get feed related recommendation — join the two intermediate
# outputs (identity map, reduce with reducer_9) into the final output.
# Run inside `if !` so a failure does not abort via `set -e` before we can
# clean up the partial output; the original `if [ $? != 0 ]` was unreachable.
# $STREAMING_PATH is intentionally unquoted: its default is a glob.
if ! hadoop jar $STREAMING_PATH \
    -input "$tmpout1,$tmpout2" \
    -mapper "cat" \
    -reducer "$reducer9" \
    -output "$output" \
    -file "$reducer9" \
    -file "$curdir/class_set.py" \
    -file "$curdir/helper.py" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=50" \
    -jobconf "mapred.job.name=${jobname} --> $output" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    hadoop fs -rmr -skipTrash "$output"
    echo "Error in ${jobname}" >&2
    exit 1
fi

exit 0

