#!/bin/bash
# NOTE: bash (not sh) is required — the script uses the C-style
# 'for (( ... ))' loop and other bashisms below.

# Train the word rank model from "$corpus" and save it into 'output/word_rank.txt'
#
# Configure parameters in src/confs.py
#
# Configure environment below as you need
# Be sure hadoop is already running

# Hadoop installation layout and command shortcuts.
# $hdfs and $hadoop_streaming intentionally hold multi-word commands and
# are expanded unquoted at call sites so they word-split.
hadoop_home="/home/kelvin/hadoop"
hadoop="$hadoop_home/bin/hadoop"
hdfs="$hadoop dfs"
hadoop_streaming="$hadoop jar $hadoop_home/contrib/streaming/hadoop-streaming-1.0.0.jar"
hdfs_user_root="localhost:9000/user/kelvin"
hdfs_pwd="word_rank"            # HDFS working directory for all job inputs/outputs
python="pypy"                   # interpreter used by every streaming mapper/reducer

# corpus="../data/large.raw.txt"
corpus="../data/brent.raw.txt"
iterations=10                   # number of exterior-boundary-value passes

# check_hdfs PATH
# Returns 0 if PATH exists in HDFS, non-zero otherwise; all output is
# discarded — only the exit status is used.  POSIX function syntax
# (the 'function' keyword is a bashism).  $hdfs is left unquoted on
# purpose: it word-splits into "hadoop dfs".
check_hdfs() {
    $hdfs -ls "$1" >/dev/null 2>&1
}

# Optional first argument becomes a prefix on every Hadoop job name,
# e.g. './train.sh run42' labels jobs "[run42] ...".
job_name_prefix=""
if [ -n "$1" ]; then
    job_name_prefix="[$1] "
fi

# Ensure the HDFS working directory exists and holds the corpus; create
# and upload it on first run.  Abort if any setup step fails, because
# every later job reads from "$hdfs_pwd/corpus/".
echo "checking hdfs workspace.."
if check_hdfs "$hdfs_pwd"; then
    echo "workspace ok"
else
    echo "hdfs workspace not initialized"
    echo "initializing hdfs workspace.."
    $hdfs -mkdir "$hdfs_pwd/" || exit
    $hdfs -mkdir "$hdfs_pwd/corpus/" || exit
    $hdfs -put "$corpus" "$hdfs_pwd/corpus/" || exit
    echo "done"
fi
echo

echo "training begin"
echo

# Wall-clock start time, reported at the end; $( ) preferred over backticks.
t1=$(date)

echo "[background job] building mutual information model"
echo
# Run the mutual-information model job in the background: its result is
# not needed until the final word-rank step, so it overlaps with the
# link-structure pipeline.  All output goes to the log file.  '|| exit'
# only leaves the subshell; the failing status is picked up later via
# 'wait $pid1'.  (HDFS path keeps the historical "infomation" spelling
# to stay compatible with existing workspaces.)
(
    echo "building mutual information model.."
    if check_hdfs "$hdfs_pwd/mutual_infomation_model"; then
        echo "skip"
    else
        $hadoop_streaming \
            -D mapred.job.name="${job_name_prefix}build mutual information model" \
            -input "$hdfs_pwd/corpus/" \
            -output "$hdfs_pwd/mutual_infomation_model/" \
            -mapper "$python mutual_infomation_model_map.py" \
            -reducer "$python mutual_infomation_model_reduce.py" \
            -file "src/mutual_infomation_model_map.py" \
            -file "src/mutual_infomation_model_reduce.py" \
            -numReduceTasks 1 \
            || exit
        echo "mutual information model built"
    fi
    echo
) >output/mutual_information.log 2>&1 &

pid1=$!

echo "[background job] first stage of link structure construction"
echo
# The first link-construction stage only needs the raw corpus, so it
# runs concurrently with the word-retrieval jobs below.  '|| exit'
# leaves the subshell with the failing status, which 'wait $pid2'
# reports before the second stage starts.
(
    echo "first stage of link structure construction.."
    if check_hdfs "$hdfs_pwd/links_first"; then
        echo "skip"
    else
        $hadoop_streaming \
            -D mapred.job.name="${job_name_prefix}build link structure first stage" \
            -input "$hdfs_pwd/corpus/" \
            -output "$hdfs_pwd/links_first/" \
            -mapper "$python build_links_first_map.py" \
            -file "src/build_links_first_map.py" \
            -file "src/confs.py" \
            -numReduceTasks 0 \
            || exit
        echo "link structure construction first stage completed"
    fi
    echo
) >output/build_links_first.log 2>&1 &

pid2=$!

# Extract all candidate word hypotheses from the corpus.
echo "retrieving word hypotheses.."
if check_hdfs "$hdfs_pwd/words_all"; then
    echo "skip"
else
    $hadoop_streaming \
        -D mapred.job.name="${job_name_prefix}retrieve word hypotheses" \
        -input "$hdfs_pwd/corpus/" \
        -output "$hdfs_pwd/words_all/" \
        -mapper "$python retrieve_words_map.py" \
        -reducer "$python retrieve_words_reduce.py" \
        -file "src/retrieve_words_map.py" \
        -file "src/retrieve_words_reduce.py" \
        -file "src/confs.py" \
        || exit
    echo "word hypotheses retrieved"
fi
echo

# Filter the raw hypotheses down to plausible words (thresholds in confs.py).
echo "filtering word hypotheses.."
if check_hdfs "$hdfs_pwd/words_filtered"; then
    echo "skip"
else
    $hadoop_streaming \
        -D mapred.job.name="${job_name_prefix}filter word hypotheses" \
        -input "$hdfs_pwd/words_all/" \
        -output "$hdfs_pwd/words_filtered/" \
        -mapper "$python filter_words_map.py" \
        -reducer "$python filter_words_reduce.py" \
        -file "src/filter_words_map.py" \
        -file "src/filter_words_reduce.py" \
        -file "src/confs.py" \
        || exit
    echo "word hypotheses filtered"
fi
echo

# The second stage consumes links_first, so block here and abort if the
# background job failed ('wait' propagates its exit status; previously
# failures were silently ignored).
echo "waiting for first stage of link structure construction.."
wait "$pid2" || { echo "first stage failed, see output/build_links_first.log"; exit 1; }
echo "Done!"
echo

# Join the first-stage links with the filtered words (identity mapper;
# the real work happens in the reducer, which sees both inputs merged).
echo "second stage of link structure construction.."
if check_hdfs "$hdfs_pwd/links_second"; then
    echo "skip"
else
    $hadoop_streaming \
        -D mapred.job.name="${job_name_prefix}build link structure second stage" \
        -input "$hdfs_pwd/links_first/" \
        -input "$hdfs_pwd/words_filtered/" \
        -output "$hdfs_pwd/links_second/" \
        -mapper "cat" \
        -reducer "$python build_links_second_reduce.py" \
        -file "src/build_links_second_reduce.py" \
        -file "src/confs.py" \
        || exit
    echo "link structure construction second stage completed"
fi
echo

# Third link-construction stage: reduce over second-stage output.
echo "third stage of link structure construction.."
if check_hdfs "$hdfs_pwd/links_third"; then
    echo "skip"
else
    $hadoop_streaming \
        -D mapred.job.name="${job_name_prefix}build link structure third stage" \
        -input "$hdfs_pwd/links_second/" \
        -output "$hdfs_pwd/links_third/" \
        -mapper "cat" \
        -reducer "$python build_links_third_reduce.py" \
        -file "src/build_links_third_reduce.py" \
        || exit
    echo "link structure construction third stage completed"
fi
echo

# Final link-construction stage produces the link structure used by the
# EBV iterations below.
echo "fourth stage of link structure construction.."
if check_hdfs "$hdfs_pwd/links"; then
    echo "skip"
else
    $hadoop_streaming \
        -D mapred.job.name="${job_name_prefix}build link structure fourth stage" \
        -input "$hdfs_pwd/links_third/" \
        -output "$hdfs_pwd/links/" \
        -mapper "cat" \
        -reducer "$python build_links_fourth_reduce.py" \
        -file "src/build_links_fourth_reduce.py" \
        || exit
    echo "link structure built"
fi
echo

# Compute the initialization counts for the exterior-boundary-value
# iterations; a single reducer so the result is one part-00000 file
# (referenced by name in the iteration loop's -files option).
echo "exterior boundary values initialization counting.."
if check_hdfs "$hdfs_pwd/ebv_init_count"; then
    echo "skip"
else
    $hadoop_streaming \
        -D mapred.job.name="${job_name_prefix}exterior boundary values initialization counting" \
        -input "$hdfs_pwd/links/" \
        -output "$hdfs_pwd/ebv_init_count/" \
        -mapper "$python ebv_init_count_map.py" \
        -reducer "$python ebv_init_count_reduce.py" \
        -file "src/ebv_init_count_map.py" \
        -file "src/ebv_init_count_reduce.py" \
        -numReduceTasks 1 \
        || exit
    echo "init count saved"
fi
echo

# Iteratively propagate exterior boundary values: each pass reads the
# previous pass's output, and the init-count file is shipped to every
# task via the distributed cache (-files ...#ebv_init_count.txt symlink).
input="$hdfs_pwd/links/"
for (( i = 1; i <= iterations; i++ )); do
    echo "exterior boundary values iteration $i.."
    output="$hdfs_pwd/ebv_iteration_$i/"
    if check_hdfs "$output"; then
        echo "skip"
    else
        $hadoop_streaming \
            -D mapred.job.name="${job_name_prefix}exterior boundary values iteration $i" \
            -files "hdfs://$hdfs_user_root/$hdfs_pwd/ebv_init_count/part-00000#ebv_init_count.txt" \
            -input "$input" \
            -output "$output" \
            -mapper "$python ebv_iterate_map.py" \
            -reducer "$python ebv_iterate_reduce.py" \
            -file "src/ebv_iterate_map.py" \
            -file "src/ebv_iterate_reduce.py" \
            -file "src/ebv_iterate_utils.py" \
            -file "src/confs.py" \
            || exit
        echo "iteration $i completed"
    fi
    input=$output     # chain: next pass consumes this pass's output
    echo
done

# The word-rank step consumes the mutual-information model, so block
# here and abort if the background job failed (previously its exit
# status was ignored).
echo "waiting for Mutual Information.."
wait "$pid1" || { echo "mutual information job failed, see output/mutual_information.log"; exit 1; }
echo "Done!"
echo

# Combine the final EBV iteration with the mutual-information model
# (distributed-cache symlink mi_model.txt) to produce the word ranks,
# then download them locally.
echo "finalizing word rank calculation.."
if check_hdfs "$hdfs_pwd/word_rank"; then
    echo "skip"
else
    $hadoop_streaming \
        -D mapred.job.name="${job_name_prefix}word rank calculation" \
        -files "hdfs://$hdfs_user_root/$hdfs_pwd/mutual_infomation_model/part-00000#mi_model.txt" \
        -input "$hdfs_pwd/ebv_iteration_$iterations/" \
        -output "$hdfs_pwd/word_rank/" \
        -mapper "$python word_rank_map.py" \
        -file "src/word_rank_map.py" \
        -file "src/confs.py" \
        -numReduceTasks 0 \
        || exit
    echo "word rank calculated"
fi
# The part-* glob is expanded by HDFS, not the shell — keep it quoted.
$hdfs -cat "$hdfs_pwd/word_rank/part-*" > "output/word_rank.txt" || exit
echo "word rank values downloaded to 'output/word_rank.txt'"
echo

# Report wall-clock start/end times for the whole run.
t2=$(date)
echo "begin: $t1"
echo "end:   $t2"