#!/bin/sh
# Hadoop-streaming WordCount driver: builds sample input from locally
# installed README files, clears old HDFS paths for the detected Hadoop
# distribution, then submits mapper.py / reducer.py as a streaming job.

# Number of reduce tasks requested for the streaming job.
reduces=100

# Build the local wc-input directory of sample text files (one README per
# installed package) the first time this script runs; later runs reuse it.
if [ ! -d "wc-input" ]; then
    mkdir wc-input
    num=1
    for file in /usr/share/doc/*/README
    do
        # Quote the expansion so paths with spaces or glob characters
        # are copied correctly.
        cp "$file" "wc-input/README.$num"
        # POSIX arithmetic; the original $[ ... ] form is deprecated
        # and not portable under #!/bin/sh.
        num=$((num + 1))
    done
fi

# Detect which Hadoop distribution is installed and remove any previous
# job input/output from HDFS so the run starts clean (the rm commands
# simply report an error if the paths do not exist yet).
if [ -d /etc/hadoop/conf.cloudera.yarn1 ]; then
    echo "Cloudera hadoop"
    # Point the Hadoop clients at the Cloudera YARN configuration.
    export HADOOP_CONF_DIR=/etc/hadoop/conf.cloudera.yarn1
    # Modern "-rm -r" syntax; -skipTrash deletes immediately instead of
    # moving to the HDFS trash.
    hdfs dfs -rm -r -skipTrash wc-input
    hdfs dfs -rm -r -skipTrash wc-result
elif [ -d /opt/mapr ]; then
    echo "MapR hadoop, not supported"
    exit 1
else
    echo "HortonWorks hadoop"
    # Hadoop 1.x streaming jar; the version glob expands when the
    # variable is used unquoted at submission time.
    # NOTE(review): a later line in this script also assigns STREAMJAR —
    # confirm which jar is actually intended on HortonWorks.
    STREAMJAR=/usr/lib/hadoop/contrib/streaming/hadoop-streaming-1.*.jar
    # Legacy "-rmr" form accepted by older Hadoop CLIs.
    hdfs dfs -rmr -skipTrash wc-input
    hdfs dfs -rmr -skipTrash wc-result
fi

# Upload the sample data to HDFS (destination is relative to the user's
# HDFS home directory).
hdfs dfs -put wc-input wc-input

# Default streaming jar for Hadoop 2.x layouts (Cloudera). Use ":-" so a
# value already chosen by a detection branch above is preserved — the
# original line unconditionally clobbered the HortonWorks 1.x jar path.
STREAMJAR=${STREAMJAR:-/usr/lib/hadoop-mapreduce/hadoop-streaming-2.*.jar}

# $STREAMJAR is intentionally unquoted so the shell expands the version
# glob to the actual jar file. The -input pattern is quoted so HDFS (not
# the local shell, which has its own wc-input directory) resolves the glob.
yarn jar $STREAMJAR \
    -D mapreduce.job.name='WordCount 2.0' \
    -files mapper.py,reducer.py \
    -mapper mapper.py \
    -reducer reducer.py \
    -numReduceTasks "$reduces" \
    -input 'wc-input/*' -output wc-result

