#!/bin/sh
# Sample records from an HDFS input directory via a Hadoop streaming job.
# usage: <command> <input_dir> <sample_rate>

# Hadoop client binary (fixed deployment path).
hadoop=/home/work/luncai/hadoop-client/hadoop/bin/hadoop

# nanling cluster (alternate deployment; swap the commented pair below to use it)
#hadoop_conf=/home/work/luncai/hadoop-client/hadoop/conf/nanling.xml
#hpath_python="hdfs://nj01-nanling-hdfs.dmop.baidu.com:54310/app/ps/spider/spider-eye/tools/jumbo.tar.gz"
# dbuild cluster (currently active): cluster config + HDFS path of the
# bundled python runtime shipped to task nodes as a cache archive.
hadoop_conf=/home/work/luncai/hadoop-client/hadoop/conf/dbuild.xml
hpath_python="hdfs://szjjh-dbuild-hdfs.dmop.baidu.com:54310/user/rd/tuluncai/tools/jumbo.tar.gz"

# Directory containing this script (m_sample.py is expected beside it).
# NOTE: the original used $BASH_SOURCE, a bashism that is empty under the
# declared #!/bin/sh interpreter; $0 works in both sh and bash when the
# script is executed (not sourced). '--' guards against paths starting '-'.
f_dir=$(dirname -- "$0")

# Required positional arguments (see usage above): fail fast with a message
# instead of silently launching a job with empty paths.
if [ $# -ne 2 ]; then
    echo "usage: $0 <input_dir> <sample_rate>" >&2
    exit 1
fi
input_dir=$1
sample_rate=$2
# Output lands next to the input, suffixed "_sample".
output_dir=${input_dir}_sample
# Remove stale output so the streaming job does not abort on an existing dir.
# -rmr exits non-zero when the path is absent; that is expected, keep going.
$hadoop fs -conf "$hadoop_conf" -rmr "$output_dir"

# Launch the streaming job: each map task runs m_sample.py over its split
# (presumably emitting lines at the given sample rate — m_sample.py is not
# visible here, verify), and a single "cat" reducer funnels the sampled
# lines into one output file. The bundled python runtime is shipped to the
# task nodes via -cacheArchive and unpacked as ./jumbo.
$hadoop streaming -conf "$hadoop_conf" \
    -D mapred.job.groups="time" \
    -D mapred.job.priority=VERY_HIGH \
    -D mapred.job.name="process $input_dir" \
    -D mapred.job.map.capacity=2000 \
    -D mapred.job.reduce.capacity=2000 \
    -D mapred.map.tasks=2000 \
    -D mapred.reduce.tasks=1 \
    -D stream.num.map.output.key.fields=1 \
    -D num.key.fields.for.partition=1 \
    -D mapred.reduce.slowstart.completed.maps=0.8 \
    -input "$input_dir" \
    -output "$output_dir" \
    -mapper "export LD_LIBRARY_PATH=./jumbo/lib/; ./jumbo/bin/python m_sample.py $sample_rate" \
    -reducer "cat" \
    -file "$f_dir/m_sample.py" \
    -cacheArchive "${hpath_python}#jumbo" \
    -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner

#    -mapper "export LD_LIBRARY_PATH=./jumbo/lib/; ./jumbo/bin/python trans_query_to_url.py" \
#    -cacheArchive ${hpath_python}#jumbo \
