#!/bin/bash

set -e -x

# Directory containing this script; quoted so paths with spaces survive.
curdir=$(dirname "$0")
# Scratch path on HDFS; overridable via HDFS_TEMP_PATH.
hdfs_temp=${HDFS_TEMP_PATH:-"temp"}
# Shared helpers (provides is_hfile_exist, level_name — defined in logutil.sh).
source "$curdir/logutil.sh"
# Streaming jar location; left as a glob, expanded unquoted at the call site.
STREAMING_PATH=${STREAMING_PATH:-"/usr/lib/hadoop/contrib/streaming/hadoop-streaming-*.jar"}

mapper=$curdir/mapper_10.sh
reducer=$curdir/reducer_10.sh

# Print a one-line usage summary for this script.
usage() {
    printf '%s -d date -i raw_user2feed -o content_count -q queue_name\n' "$(basename "$0")"
}

# Defaults: today's date, mandatory input/output paths empty, queue "temp".
date=$(date '+%Y-%m-%d')
content_count=''
raw_user2feed=''
queue_name='temp'

while getopts d:o:q:i: opt
do
    case "$opt" in
    d) date="$OPTARG";;
    o) content_count="$OPTARG";;
    q) queue_name="$OPTARG";;
    i) raw_user2feed="$OPTARG";;
    ?) usage
       exit 1;;
    esac
done
shift $((OPTIND-1))

# Both -i (input) and -o (output) are mandatory; fail fast if missing.
if [ -z "$content_count" ] || [ -z "$raw_user2feed" ] ; then
    echo "ERROR! raw_user2feed and content_count cannot be empty" >&2
    usage
    exit 1
fi

# Clean old data: delete any pre-existing output path before the job runs.
# NOTE(review): $content_count is deliberately left unquoted so a
# space-separated list of paths expands into multiple array entries —
# confirm callers rely on this before tightening the quoting.
# shellcheck disable=SC2206
declare -a allout=($content_count)
for o in "${allout[@]}"; do
    exist=$(is_hfile_exist "$o")
    if [ "$exist" == "1" ]; then
        # Permanently delete (skip trash); suppress hadoop chatter.
        hadoop fs -rmr -skipTrash "$o" 1>/dev/null 2>&1
    fi
done

jobname=$(level_name "$0")
jobname="$jobname --> $content_count"

# step 1
# flatten click data and collect click count
# output: aid rec_aid UV|VV CLICK cnt
#
# Run under `if !` so a streaming failure actually reaches the cleanup
# branch: with `set -e` in effect, a bare invocation followed by an
# `[ $? != 0 ]` check would abort the script before the check ran,
# leaving a partial output directory behind.
# $STREAMING_PATH is intentionally unquoted so the hadoop-streaming-*.jar
# glob expands.
if ! hadoop jar $STREAMING_PATH \
    -input "$raw_user2feed" \
    -mapper "$mapper" \
    -reducer "$reducer" \
    -output "$content_count" \
    -file "$mapper" \
    -file "$reducer" \
    -jobconf "mapred.output.compress=true" \
    -jobconf "mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec" \
    -jobconf "mapred.reduce.tasks=5" \
    -jobconf "mapred.job.name=${jobname}" \
    -jobconf "mapred.job.queue.name=$queue_name" \
    -jobconf "mapred.map.tasks=5"
then
    # Remove the (possibly partial) output so a re-run starts clean.
    hadoop fs -rmr -skipTrash "$content_count"
    echo "Error in ${jobname}" >&2
    exit 1
fi

exit 0

