#!/bin/bash

# Name of the Hadoop streaming job; also namespaces the HDFS output dir.
readonly job_name="hbase_client_demo"

# HDFS output path; will hold the HBase column data produced by the job.
readonly OUTPUT_PATH="/user/qykg/users/zhaoqi/temp/${job_name}"
# HDFS input path; each line is one HBase rowkey.
readonly INPUT_PATH="/user/qykg/users/zhaoqi/temp/demo_input_rowkey.txt"
# HBase table (namespace:qualifier) to read from and write to.
readonly TABLE_NAME="test:test_table"

# Ensure the HBase table exists; create it (with a 'data' column family)
# when it does not.
# The shell replies "does exist" / "does not exist" to the `exists` command;
# 'does exist' is not a substring of 'does not exist', so the match is exact.
if echo "exists '${TABLE_NAME}'" | hbase shell -n 2>&1 | grep -qF 'does exist'; then
    # Informational only — goes to stdout, not stderr.
    echo "${TABLE_NAME} already exists."
else
    if ! echo "create '${TABLE_NAME}', 'data'" | hbase shell -n; then
        echo "${TABLE_NAME} does not exist and create failed!" 1>&2
        exit 1
    fi
fi

# Hadoop entry points. HADOOP_HOME must point at the Hadoop installation.
HADOOP_BIN="${HADOOP_HOME}/bin/hadoop"
HADOOP_SHELL="${HADOOP_BIN} fs"
# Streaming jar; the version can be overridden via HADOOP_STREAMING_VERSION
# (defaults to 3.2.2, the jar shipped with this cluster).
HADOOP_STREAMING="${HADOOP_BIN} jar ${HADOOP_HOME}/share/hadoop/tools/lib/hadoop-streaming-${HADOOP_STREAMING_VERSION:-3.2.2}.jar"

# Write test rows into HBase and stream the generated rowkeys to HDFS as the
# job input. NB: ${HADOOP_SHELL} is intentionally unquoted — it must split
# into "hadoop fs".
# -f: don't complain when the path does not exist yet (e.g. first run).
${HADOOP_SHELL} -rm -r -f "${INPUT_PATH}"
# Fail fast if the input upload did not succeed; the job is useless without it.
if ! python write_hbase.py | ${HADOOP_SHELL} -put - "${INPUT_PATH}"; then
    echo "failed to generate job input at ${INPUT_PATH}" 1>&2
    exit 1
fi

# Remove any previous job output so the streaming job can create it afresh.
${HADOOP_SHELL} -rm -r -f "${OUTPUT_PATH}"

# Always delete the old dump first, to avoid mixing in stale meta.
rm -rf hbase_meta.sqlite
# Pre-load the HBase meta; multiple tables can be dumped into the same sqlite
# file by repeating this command per table.
# NOTE: HBase regions migrate over time, so re-dump before every launch.
if ! python ../dump_hbase_meta.py \
    --hbase_conf hbase.conf \
    --table_name ${TABLE_NAME} \
    --db_path ./hbase_meta.sqlite \
    --action "dump_hbase_meta";
then
    echo "dump hbase meta failed." 1>&2
    exit 1
fi

# Map-only streaming job: the mapper reads rowkeys from INPUT_PATH and emits
# the corresponding HBase column data to OUTPUT_PATH.
# Generic options (-files/-archives/-D) are placed before the streaming
# options, as hadoop-streaming requires.
# NOTE(review): with -reducer NONE the KeyFieldBasedPartitioner appears to
# have no effect — confirm it is intentional before removing.
if ! ${HADOOP_STREAMING} \
    -files "map-${job_name}.sh,map-${job_name}.py,hbase_meta.sqlite,hbase.conf" \
    -archives "hdfs://HDFS8000787/user/qykg/packages/conda_env.tar.gz#conda_env" \
    -D mapred.job.name="${job_name}" \
    -partitioner "org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner" \
    -input "${INPUT_PATH}" \
    -output "${OUTPUT_PATH}" \
    -mapper "sh -x map-${job_name}.sh" \
    -reducer "NONE";
then
    echo "hadoop run failed." 1>&2
    exit 1;
fi

# Dump the job output for inspection. The glob is quoted so that it is
# expanded by Hadoop against HDFS, never by the local shell against the cwd.
${HADOOP_SHELL} -text "${OUTPUT_PATH}/*"

# Clean up the local meta dump (plain file, so -r is unnecessary).
rm -f hbase_meta.sqlite
