#zxq 20191223 v0.1 --start
#zxq 20200204 v0.2 --end

# Parse positional arguments.
#   $1           : mode, 'add' or 'cover' (normalized to lower case)
#   $2           : target hive table name
#   last two args: start / end timekey (position depends on mode)
add_or_cover=$(echo "$1" | tr 'A-Z' 'a-z')
hive_table_name="$2"
# The timekeys are always the LAST two positional parameters, whatever
# the total count is (6 args for 'add', 5 for 'cover').
starttimekeyNo=$(($# - 1))
endtimekeyNo=$#
# Indirect expansion instead of eval: same result for numeric indices,
# but no shell re-parsing of argument text (injection-safe).
start_timekey=${!starttimekeyNo}
end_timekey=${!endtimekeyNo}

# Only the dedicated ETL account may run this script.
user=$(whoami)
if [ "${user}" != "csot.rptadmin" ] ; then
   echo -e "\n[ERROR] 当前用户 ${user} 没有权限，请使用 csot.rptadmin 执行。"
   exit 1
fi

#The first args must be 'add' or 'cover'
if [ "${add_or_cover}" = "add" ] ; then
   # Validate the argument count BEFORE consuming $3/$4, so a call with
   # missing args gets the usage message instead of half-initialized vars.
   if [ $# -ne 6 ] ; then
      echo -e "\n[ERROR] You must input 6 args for increment ETL! [add]"
      echo " Format:sh $0 [add] [target_table_name] [filter_column] [\"where_condition\"] [start_timekey] [end_timekey]"
      echo " Example:sh $0 add s_rptt1_dwr_gls_hist event_timekey \"where event_timekey >= 'LVS_START_TIMEKEY' and event_timekey < 'LVS_END_TIMEKEY' \" 20200101073000 20200101083000"
      exit 1
   fi
   filter_column=$3
   # Substitute the timekey placeholders in the caller-supplied where
   # clause; "$4" is quoted so whitespace runs and glob characters in the
   # condition survive intact (the old unquoted echo mangled both).
   where_condition=$(echo "$4" | sed -e "s/LVS_START_TIMEKEY/${start_timekey}/g" -e "s/LVS_END_TIMEKEY/${end_timekey}/g")
   # Partition value, e.g.: event_timekey~20200101073000~20200101083000
   partition_value=${filter_column}~${start_timekey}~${end_timekey}
elif [ "${add_or_cover}" = "cover" ] ; then
   if [ $# -ne 5 ] ; then
      echo -e "\n[ERROR] You must input 5 args for full ETL! [cover] "
      echo " Format:sh $0 [cover] [target_table_name] [\"where_condition\"] [start_timekey] [end_timekey]"
      echo " Example:sh $0 cover s_rptt1_dwd_oper \"where 1=1\" 20190101073000 20190102073000"
      exit 1
   fi
   where_condition=$3
   # Full loads land in a single fixed pseudo-partition.
   partition_value="none"
else
   echo -e "\n[ERROR] The first args must be 'add' or 'cover'!"
   exit 1
fi

# Validate the hive table name: must start with "s_" and contain at
# least two underscores, i.e. look like "s_<sys>_<table>".
where_tns_conf='/RPT/etlscript/sqoop/conf/source_db_tns.conf'

# Quoted expansion: an empty/missing table name now fails this check
# cleanly instead of producing a "[: =: unary operator expected" error.
if [ "${hive_table_name:0:2}" != "s_" ] ; then
   echo -e "\n[ERROR] The target table name(the 2nd args) must start with \"s_\""
   exit 1
else
   # Count "_"-separated fields; "s_sys_xxx" has at least 3.
   count_=$(echo "${hive_table_name}" | awk -F"_" '{print NF}')
   if [ "${count_}" -lt "3" ] ; then
      echo -e "\n[ERROR] The target table name must like \"s_sys_xxx\"."
      exit 1
   fi
fi

# The 2nd "_"-separated field of the hive table name is the source
# system short name (must match an entry in source_db_tns.conf).
soure_sys=$(echo ${hive_table_name} | awk -F"_" '{print$2}')

# Dropping the "s_<sys>_" prefix yields the table name as it exists on
# the source business system.
src_table_name=${hive_table_name#*${soure_sys}_}

# The TNS entry for the source system must exist in source_db_tns.conf
# and must be unique.
if [ ! -f "${where_tns_conf}" ] ; then
   # Guard: a missing conf file used to cascade into "[: integer
   # expression expected" errors further down.
   echo -e "\n[ERROR] The TNS conf file \"${where_tns_conf}\" is not exist , please check. "
   exit 1
fi
count_tns=$(grep -c "^${soure_sys}==" "${where_tns_conf}")
if [ "${count_tns}" -eq "0" ] ; then
   echo -e "\n[ERROR] The TNS of \"${soure_sys}\" is not in \"${where_tns_conf}\" , please add."
   exit 1
elif [ "${count_tns}" -gt "1" ] ; then
   echo -e "\n[ERROR] There is more than one TNS of \"${soure_sys}\" , please check  \"${where_tns_conf}\" . "
   exit 1
fi

# Fetch the TNS line; everything after "==" is the jdbc connect string.
connect_db_tns=$(grep "^${soure_sys}==" "${where_tns_conf}" | awk -F '==' '{print$2}')
# A valid value starts with "jdbc:" and has at least 3 ":"-separated parts.
count_maohao=$(echo "${connect_db_tns}" | awk -F":" '{print NF}')
# Quoted substring + explicit || instead of the deprecated "-o": the old
# unquoted test broke with a syntax error whenever the value was empty.
if [ "${connect_db_tns:0:5}" != "jdbc:" ] || [ "${count_maohao}" -lt "3" ] ; then
   echo -e "\n[ERROR] The TNS of \"${soure_sys}\" is not correct , please check  \"${where_tns_conf}\" . "
   exit 1
fi

# Pick, per source-DB type, the "load start time" SQL expression and the
# column-metadata query (currently oracle / mysql / postgresql; extend
# the case below for new types). The type is the 2nd ":"-field of the
# jdbc url, lower-cased.
type_tns=`echo ${connect_db_tns} | awk -F":" '{print$2}'|tr '[A-Z]' '[a-z]'`
case ${type_tns} in
oracle )
    load_data_start_time="to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') "
    # \$CONDITIONS is escaped so sqoop (not this shell) substitutes it.
    data_type_sql="select lower(table_name) as table_name, lower(column_name) as column_name, lower(data_type) as data_type \
                     from all_tab_columns \
                    where lower(table_name) = lower('${src_table_name}') and \$CONDITIONS order by column_id "
    ;;
mysql )
    # NOTE(review): the schema is parsed out of the jdbc url by splitting
    # on the literal "3306/", which assumes the default mysql port —
    # confirm before pointing this at a non-3306 instance.
    table_schema=`echo ${connect_db_tns} |awk -F"3306/" '{print$2}'|awk '{print$1}'`
    load_data_start_time="date_format(now(), '%Y-%m-%d %T') "
    data_type_sql="select lower(table_name) as table_name, lower(column_name) as column_name, lower(data_type) as data_type \
                     from information_schema.columns \
                     where lower(table_schema) = lower('${table_schema}') \
                       and lower(table_name) = lower('${src_table_name}') and \$CONDITIONS order by ordinal_position "
    ;;
postgresql )
    load_data_start_time="to_char(now(),'yyyy-mm-dd hh24:mi:ss') "
    data_type_sql="select lower(table_name) as table_name, lower(column_name) as column_name, lower(data_type) as data_type \
                     from information_schema.columns \
                    where lower(table_name) = lower('${src_table_name}') and \$CONDITIONS order by ordinal_position "
    ;;
esac

# Fetch the source table structure (table_name,column_name,data_type
# rows) via sqoop and cache it locally as <sys>/<table>.info; cached
# files are reused on later runs.
where_src_table_info="/RPT/etlscript/sqoop/conf/src_table_info/"
if [ ! -f "${where_src_table_info}${soure_sys}/${src_table_name}.info" ] ; then
   # -p: no error if the directory already exists (replaces test+mkdir).
   mkdir -p "${where_src_table_info}${soure_sys}"
   # java.security.egd: fixed to the canonical "file:/dev/./urandom"
   # workaround — the previous "file:/dev/./dev/urandom" path does not
   # exist, silently disabling the non-blocking SecureRandom source.
   sqoop import -D mapred.child.java.opts="-Djava.security.egd=file:/dev/./urandom" -D oraoop.jdbc.url.verbatim=true -D mapreduce.map.memory.mb=1024 \
                -D sqoop.parquet.logical_types.decimal.enable=false \
                --connect "${connect_db_tns}"  \
                --query "${data_type_sql}" -m 1 \
                --target-dir "/data/db/psd/tmp/${soure_sys}/${src_table_name}" --null-string '\\N' --null-non-string '\\N' \
                --delete-target-dir --as-textfile --direct-split-size 134217728
   if [ ! "$?" -eq "0" ] ; then
      echo -e "\n[ERROR] 获取表结构失败"
      exit 1
   fi
   # A zero-size result file means the table does not exist on the
   # source. (part* left unquoted on purpose: hadoop expands the glob.)
   hadoop fs -test -s /data/db/psd/tmp/${soure_sys}/${src_table_name}/part*
   if [ ! "$?" -eq "0" ] ; then
      hadoop fs -rm -r "/data/db/psd/tmp/${soure_sys}/${src_table_name}"
      echo -e "\n[ERROR] The table \"${src_table_name}\" is not exist in \"${soure_sys}\" , please check. "
      exit 1
   else
      # Stage via /tmp, then move into the cache dir and drop the HDFS copy.
      hadoop fs -get /data/db/psd/tmp/${soure_sys}/${src_table_name}/part* "/tmp/${src_table_name}.info"
      mv "/tmp/${src_table_name}.info" "${where_src_table_info}${soure_sys}/${src_table_name}.info"
      hadoop fs -rm -r "/data/db/psd/tmp/${soure_sys}/${src_table_name}"
   fi
fi

# Load hive/beeline/impala settings: every "key==value" line in the conf
# files becomes a shell variable named after the key.
where_create_hive_table="/RPT/etlscript/sqoop/conf/sql/.create_hive_table/"
where_hive_conf="/RPT/etlscript/sqoop/conf/tgt_properties/hive.conf"
where_impala_conf="/RPT/etlscript/sqoop/conf/tgt_properties/impala_shell.conf"
for hv in $(grep "==" "${where_hive_conf}" | awk -F'==' '{print$1}'); do
   # Anchored "^key==": the old unanchored grep "$hv" could match a key
   # that is a substring of another key (or appears inside a value) and
   # assign the wrong line. printf -v replaces eval — no re-parsing of
   # the value by the shell.
   printf -v "$hv" '%s' "$(grep "^${hv}==" "${where_hive_conf}" | awk -F"==" '{print$2}')"
done
for im in $(grep "==" "${where_impala_conf}" | awk -F'==' '{print$1}'); do
   printf -v "$im" '%s' "$(grep "^${im}==" "${where_impala_conf}" | awk -F"==" '{print$2}')"
done

# Probe the target table with "describe"; a non-zero exit means it does
# not exist yet and the DDL must be generated and executed.
if ! ${beeline_shell} --silent=true -n ${beeline_user} -p ${beeline_pwd} -e "describe ${schema}.${hive_table_name};" > /dev/null ; then
   echo -e "\n[INFO] The table \"${schema}.${hive_table_name}\" will be create below."
   if [ ! -d "${where_create_hive_table}${soure_sys}" ] ; then
      mkdir "${where_create_hive_table}${soure_sys}"
   fi
   ddl_file=${where_create_hive_table}${soure_sys}/${hive_table_name}.sql
   # Assemble the whole DDL file in one grouped redirect.
   {
      echo "${create_head} ${schema}.${hive_table_name} ( "
      # Every source column becomes a hive string, except booleans.
      awk -F"," '{if($3~"boolean") print"   `"$2"` boolean,";else  print"   `"$2"` string,"}' \
          ${where_src_table_info}${soure_sys}/${src_table_name}.info
      echo "   \`${add_column}\` string)"
      echo "partitioned by (\`${partitioned_by}\` string) "
      echo "row format delimited fields terminated by '\t' "
      echo "stored as ${file_format} "
      echo "location '${location}/${hive_table_name}';"
   } > ${ddl_file}
   ${beeline_shell} --silent=true -n ${beeline_user} -p ${beeline_pwd} -f "${ddl_file}" > /dev/null
fi

# Key logic: build the sqoop SELECT from the cached column list,
# converting each column to string per source-DB syntax.  ----begin----

# Per-DB conversion templates, e.g. date -> to_char(xxx,'yyyy-mm-dd hh24:mi:ss');
# "00_xx_00" in the template is the column-name placeholder.
where_db_conf="/RPT/etlscript/sqoop/conf/src_db_data_type/"${type_tns}".conf"
# Flatten the .info file into "col1|type1,col2|type2,..." (sed strips
# the trailing comma).
data_type_23=`cat ${where_src_table_info}${soure_sys}/${src_table_name}.info | awk -F"," '{printf"%s,",$2"|"$3}' | sed 's/.$//'`
# Switch the field separator so the for-loop splits on the list above.
# NOTE(review): "$," is not a variable expansion — IFS becomes the two
# literal characters '$' and ','. The comma is what matters here;
# presumably the '$' is accidental but harmless for this data — confirm.
OLD_IFS=${IFS}
IFS="$,"

# Scratch file collecting one "name|...|converted-expr|" line per column.
cat /dev/null > /tmp/zhongtai_${hive_table_name}.txt
for i in $data_type_23
   do
   # "|type|" (delimited, for exact template match) and the bare type.
   data_type=`echo $i|awk -F"|" '{print"|"$2"|"}'`
   data_type_tmp=`echo $i|awk -F"|" '{print$2}'`
   # "name|" (prefix for the template line) and the bare column name.
   column_name_sed=`echo $i|awk -F"|" '{print$1"|"}'`
   column_name=`echo $i|awk -F"|" '{print$1}'`
   # Is there a conversion template for this data type?
   cat ${where_db_conf} | grep -w ${data_type_tmp} > /dev/null
   if [ "$?" = "0" ] ; then
      # mysql column names are backtick-quoted when substituted in.
      if [ "${type_tns}" = "mysql" ] ; then
         cat ${where_db_conf} | sed "s/^/${column_name_sed}/g" | grep ${data_type} | sed "s/00_xx_00/\`${column_name}\`/g" >> /tmp/zhongtai_${hive_table_name}.txt
      else
         cat ${where_db_conf} | sed "s/^/${column_name_sed}/g" | grep ${data_type} | sed "s/00_xx_00/${column_name}/g" >> /tmp/zhongtai_${hive_table_name}.txt
      fi
   else
      # No template: select the column as-is.
      if [ "${type_tns}" = "mysql" ] ; then
         echo "\`${column_name}\`||\`${column_name}\`|" >> /tmp/zhongtai_${hive_table_name}.txt
      else
         echo "${column_name}||${column_name}|" >> /tmp/zhongtai_${hive_table_name}.txt
      fi
   fi
done
# NOTE(review): literal '$' plus space — space splitting is what the
# command substitution below relies on; the '$' looks accidental — confirm.
IFS="$ "
# "expr as name, " per column, plus the load_data_start_time audit column.
select_sql="select "`cat /tmp/zhongtai_${hive_table_name}.txt | awk -F"|" '{print$3" as "$1", "}'`${load_data_start_time}" as load_data_start_time "
from_sql=" from "${src_table_name}
where_sql=" ${where_condition}"
# Build the sqoop SELECT from column types  ----end----

# Binary-ish columns would break the import: build a
# "--map-column-java COL=String,..." option from the scratch-file lines
# tagged "map-column-java".
if [ "${type_tns}" = "oracle" ] ; then
   # oracle column names are upper-cased; 's/.$//' trims the trailing
   # comma, then the --map-column-java prefix is prepended.
   map_column_java=`cat /tmp/zhongtai_${hive_table_name}.txt| grep "map-column-java" | awk -F"|" '{print$NF","}' |awk '{printf$NF}' | tr 'a-z' 'A-Z'| \
                    sed -e 's/=STRING/=String/g' -e 's/.$//g' -e 's/^/--map-column-java /g'`
else
   # other DBs: lower-case names, same trailing-comma trim and prefix.
   map_column_java=`cat /tmp/zhongtai_${hive_table_name}.txt| grep "map-column-java" | awk -F"|" '{print$NF","}' |awk '{printf$NF}' | tr 'A-Z' 'a-z'| \
                    sed -e 's/=string/=String/g' -e 's/.$//g' -e 's/^/--map-column-java /g'`
fi
# Scratch file no longer needed.
rm -rf  /tmp/zhongtai_${hive_table_name}.txt

# Restore the original field separator.
IFS=${OLD_IFS}
# Create the target partition if it is not there yet.
${beeline_shell} --silent=true -n ${beeline_user} -p ${beeline_pwd} \
 -e "alter table ${schema}.${hive_table_name} add if not exists partition (${partitioned_by}='${partition_value}');"
# For redo: remove any files already in the partition, sqoop appends anew.
hadoop fs -rm -r /data/db/psd/${hive_table_name}/${partitioned_by}=${partition_value}/*

# Initial map-task memory; tripled before each retry.
memory_sqoop=4096

# Up to 3 import attempts.
for((i=0;i<3;i++))
do
# java.security.egd fixed to the canonical "file:/dev/./urandom" — the
# old "/dev/./dev/urandom" path does not exist.
sqoop import -D mapred.child.java.opts="-Djava.security.egd=file:/dev/./urandom" -D oraoop.jdbc.url.verbatim=true -D mapreduce.map.memory.mb=${memory_sqoop} \
             -D sqoop.parquet.logical_types.decimal.enable=false \
             --append --connect ${connect_db_tns} \
             --query "${select_sql} ${from_sql} ${where_sql} and \$CONDITIONS"  ${map_column_java} -m 1 \
             --target-dir  /data/db/psd/${hive_table_name}/${partitioned_by}=${partition_value}/ \
             --null-string '\\N' --null-non-string '\\N' --as-parquetfile \
             --fields-terminated-by '\t' --direct-split-size 134217728
returncode=$?

cnt=$((i + 1))

# BUGFIX: success is now checked FIRST. The old code tested
# "returncode==0 OR i==2" and then branched on i==2 alone, so a run that
# succeeded on the 3rd attempt printed the failure message, skipped the
# metadata cleanup and impala refresh, and still exited 0.
if [ ${returncode} -eq 0 ] ; then
    # delete sqoop side files .metadata-00000 and .signals-00000
    hadoop fs -rm -r /data/db/psd/${hive_table_name}/${partitioned_by}=${partition_value}/.*-00000

    # refresh impala metadata so the new partition is visible
    ${impala_shell} --quiet -q "invalidate metadata psd.${hive_table_name};" > /dev/null

    echo -e "\n--OK--"

    echo "returncode=$returncode"
    echo "exit $returncode"
    exit $returncode
fi

# Last attempt failed: report and propagate the sqoop exit code.
if [ $i -eq 2 ] ; then
   echo -e "-----------尝试 ${cnt} 次执行失败，请检查。-----------"
   echo -e "\n[ERROR] 抽取${src_table_name}的数据失败，请检查。"
   echo "returncode=$returncode"
   echo "exit $returncode"
   exit $returncode
fi

# Transient failure: wait, raise memory, retry.
echo "-----------第 ${cnt} 次执行失败-----------"
sleep 5
memory_sqoop=$((memory_sqoop * 3))

done


