#!/bin/sh
###############################################################################
##  Author    : xuezhouyi
##  Name      : edw_hivql_executor.sh
##  Functions : Construct the interface of hive
##  Purpose   : Construct the interface of hive
##  Revisions or Comments
##  VER        DATE        AUTHOR           DESCRIPTION
##---------  ----------  ---------------  ------------------------------------ 
##  1.0      2017-05-22  xuezhouyi        1. CREATED THIS SHELL.
###############################################################################

. ${0%`basename ${0}`}edw_parm.sh
. ${0%`basename ${0}`}edw_func.sh

# Print usage examples for this script (examples show the literal
# ${V_SHELL_HOME} placeholder on purpose; it is not expanded here).
function USAGE(){
    printf '\nHow to use this shell script!\n'
    printf '\n%s\n' '${V_SHELL_HOME}/edw_hivql_executor.sh -q "select current_date;"'
    printf '\n%s\n' '${V_SHELL_HOME}/edw_hivql_executor.sh -o 5Q01 -f a.HQL'
}

# Run sql statement
# Run the single HiveQL statement held in ${V_HIVE_QUERY} through hive -S.
# Globals : V_HIVE_QUERY (read) - statement to execute (trailing ';' appended)
# Outputs : hive's stdout on success, if non-empty
# Exits   : 9 when hive reports a non-zero status
function RUNSQL(){
V_RESULT=$(hive -S -e "
set mapred.job.priority=LOW;
set hive.groupby.skewindata=true;
set mapred.max.split.size=256000000;
set mapred.min.split.size.per.node=100000000;
set mapred.min.split.size.per.rack=100000000;
set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; 
SET hive.merge.size.per.task=256000000;
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
set hive.merge.smallfiles.avgsize=256000000;
set hive.exec.mode.local.auto=false;
set mapreduce.map.memory.mb=2048;
set mapreduce.reduce.memory.mb=2048;
set hive.auto.convert.join=false;
set hive.ignore.mapjoin.hint=false;
${V_HIVE_QUERY};
")
# $? is hive's exit status: a plain assignment preserves the status of the
# command substitution it contains.
if [[ $? -eq 0 ]];then
    if [[ -n "${V_RESULT}" ]];then
        # Quoted + printf so embedded newlines survive and a result that
        # happens to start with '-' is not eaten as an echo option.
        printf '%s\n' "${V_RESULT}"
    fi
else
    exit 9
fi
}

# Run a sql file
# Run a HiveQL script file through hive -S via 'source'.
# Arguments: $1 - path of the .HQL file to execute
# Outputs  : hive's stdout on success, if non-empty
# Exits    : 9 when hive reports a non-zero status
function RUNFILE(){
V_RESULT=$(hive -S -e "
set mapred.job.priority=LOW;
set hive.groupby.skewindata=true;
set mapred.max.split.size=256000000;
set mapred.min.split.size.per.node=100000000;
set mapred.min.split.size.per.rack=100000000;
set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; 
SET hive.merge.size.per.task=256000000;
set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
set hive.merge.smallfiles.avgsize=256000000;
set hive.exec.mode.local.auto=false;
set mapreduce.map.memory.mb=2048;
set mapreduce.reduce.memory.mb=2048;
set hive.auto.convert.join=false;
set hive.ignore.mapjoin.hint=false;
source ${1};
")
# $? is hive's exit status: a plain assignment preserves the status of the
# command substitution it contains.
if [[ $? -eq 0 ]];then
    if [[ -n "${V_RESULT}" ]];then
        # Quoted + printf so embedded newlines survive and a result that
        # happens to start with '-' is not eaten as an echo option.
        printf '%s\n' "${V_RESULT}"
    fi
else
    exit 9
fi
}

# get the parameter
while getopts :q:o:x:f: args;do
    case ${args} in
        q)
            V_HIVE_QUERY=${OPTARG}
        ;;
        o)
            V_DATA_SRC_ORG=${OPTARG}
        ;;
        x)
            V_EXP_DATA=${OPTARG}
        ;;
        f)
            V_HIVE_FILE=${OPTARG}
        ;;
        ?)
            USAGE
        ;;
    esac
done

# export data
function EXPDATA(){
hive -e "${V_EXP_DATA}"
}

# switch case
if [[ -f "${V_SHELL_PROC}/${V_HIVE_FILE}" ]];then
    # create the folder if not exists
    if [[ ! -d "${V_SHELL_PROC}/${V_DATA_SRC_ORG}/tmp" ]];then
        mkdir -p "${V_SHELL_PROC}/${V_DATA_SRC_ORG}/tmp"
    fi
    
    # Create a copy file
    cp "${V_SHELL_PROC}/${V_HIVE_FILE}" "${V_SHELL_PROC}/${V_DATA_SRC_ORG}/tmp/${V_HIVE_FILE}"
    while read line;do
        V_KEY=$(echo ${line} | awk -F'=' '{print $1}')
        V_VAL=$(echo ${line} | awk -F'=' '{print $2}')
        sed -i "s/${V_KEY}/${V_VAL}/g" "${V_SHELL_PROC}/${V_DATA_SRC_ORG}/tmp/${V_HIVE_FILE}"
    done < "${V_SHELL_PARM}/${V_DATA_SRC_ORG}/edw_parm.txt"
    
    # Runfile
    RUNFILE "${V_SHELL_PROC}/${V_DATA_SRC_ORG}/tmp/${V_HIVE_FILE}"
elif [[ -n ${V_HIVE_QUERY} ]];then
    RUNSQL
elif [[ -n ${V_EXP_DATA} ]];then
    EXPDATA
else
    USAGE
fi

exit 0
