#zxq 20191223 v0.1 --start
#zxq 20200204 v0.2 --end

# Parse positional arguments.
#   $1 - load mode: 'add' (incremental) or 'cover' (full), case-insensitive
#   $2 - target hive table name, case-insensitive
#   The last two arguments are always start_timekey and end_timekey.

add_or_cover=$(echo "$1" | tr 'A-Z' 'a-z')
hive_table_name=$(echo "$2" | tr 'A-Z' 'a-z')
# The timekeys sit at positions $#-1 and $#. Use arithmetic expansion and
# bash indirect expansion (${!n}) instead of the original `let` + `eval`.
starttimekeyNo=$(($# - 1))
endtimekeyNo=$#
start_timekey=${!starttimekeyNo}
end_timekey=${!endtimekeyNo}

# Only the dedicated ETL account may run this script.
user=$(whoami)
if [ ! "${user}" = "csot.rptadmin" ] ; then
   echo -e "\n[ERROR] 当前用户 ${user} 没有权限，请使用 csot.rptadmin 执行。"
   exit 1
fi

#The first args must be 'add' or 'cover'
if [ "${add_or_cover}" = "add" ] ; then
   # Incremental load: 6 args expected. Validate BEFORE using $3/$4
   # (the original consumed them first and only then checked the count).
   if [ ! $# -eq 6 ] ; then
      echo -e "\n[ERROR] You must input 6 args for increment ETL! [add]"
      echo " Format:sh $0 [add] [target_table_name] [filter_column] [\"where_condition\"] [start_timekey] [end_timekey]"
      echo " Example:sh $0 add s_mest1_producthistory timekey \"where timekey >= 'LVS_START_TIMEKEY' and timekey < 'LVS_END_TIMEKEY' \" 20200101073000 20200101083000"
      exit 1
   fi
   filter_column=$3
   # Substitute the timekey placeholders into the caller-supplied predicate.
   # "$4" is quoted so whitespace inside the condition is preserved
   # (the original unquoted `echo $4` collapsed runs of spaces).
   where_condition=$(echo "$4" | sed -e "s/LVS_START_TIMEKEY/${start_timekey}/g" -e "s/LVS_END_TIMEKEY/${end_timekey}/g")
   # Partition value, e.g. event_timekey~20200101073000~20200101083000
   partition_value=${filter_column}~${start_timekey}~${end_timekey}
elif [ "${add_or_cover}" = "cover" ] ; then
   # Full load: 5 args expected.
   if [ ! $# -eq 5 ] ; then
      echo -e "\n[ERROR] You must input 5 args for full ETL! [cover] "
      echo " Format:sh $0 [cover] [target_table_name] [\"where_condition\"] [start_timekey] [end_timekey]"
      echo " Example:sh $0 cover s_mest1_arc \"where 1=1\" 20190101073000 20190102073000"
      exit 1
   fi
   where_condition=$3
   partition_value="none"
else
   echo -e "\n[ERROR] The first args must be 'add' or 'cover'!"
   exit 1
fi

# Validate the hive table name: must start with "s_" and contain at least
# two underscores (i.e. look like "s_<sys>_<table>").
where_tns_conf='/RPT/etlscript/sqoop/conf/source_db_tns.conf'

# Quote the substring expansion: the original unquoted form made `[` see a
# malformed expression whenever the name was empty.
if [ ! "${hive_table_name:0:2}" = "s_" ] ; then
   echo -e "\n[ERROR] The target table name(the 2nd args) must start with \"s_\""
   exit 1
else
   # Count "_"-separated fields; "s_sys_xxx" yields at least 3.
   count_=$(echo "${hive_table_name}" | awk -F"_" '{print NF}')
   if [ "${count_}" -lt "3" ] ; then
      echo -e "\n[ERROR] The target table name must like \"s_sys_xxx\"."
      exit 1
   fi
fi

# Strip the leading "s_<sys>_" from the hive table name to get the source
# (business-system) table name, e.g. s_mest1_producthistory -> producthistory.
# The nested backtick builds the "<sys>_" pattern consumed by ${var#*pattern}.
src_table_name=${hive_table_name#*`echo ${hive_table_name} | awk -F"_" '{print$2"_"}'`}

# The 2nd "_"-field of the hive table name is the source-system short name
# (must match the keys used in source_db_tns.conf).
source_sys=`echo ${hive_table_name} | awk -F"_" '{print$2}' | tr '[A-Z]' '[a-z]'`

# The TNS entry ("<sys>==<url>" line) for this source system must exist in
# source_db_tns.conf, and it must be unique.
count_tns=`grep "^"${source_sys}"==" ${where_tns_conf} | wc -l`
if [ "${count_tns}" -eq "0" ] ; then
   echo -e "\n[ERROR] The TNS of \"${source_sys}\" is not in \"${where_tns_conf}\" , please add."
   exit 1
else
   if [ "${count_tns}" -gt "1" ] ; then
      echo -e "\n[ERROR] There is more than one TNS of \"${source_sys}\" , please check  \"${where_tns_conf}\" . "
      exit 1
   fi
fi

# Read the JDBC connection string for this source system
# (conf line format: <sys>==<jdbc url>).
connect_db_tns=$(grep "^${source_sys}==" "${where_tns_conf}" | awk -F '==' '{print$2}')
# A valid url looks like "jdbc:<type>:...": at least 3 ':'-separated fields.
count_maohao=$(echo "${connect_db_tns}" | awk -F":" '{print NF}')
# Reject urls that do not start with "jdbc:" OR have fewer than 3 ':' fields.
# [[ ... || ... ]] replaces the original `[ ! ... -o ... ]`, whose -o operator
# is deprecated/ambiguous and whose unquoted expansion broke on empty input.
if [[ ! "${connect_db_tns:0:5}" == "jdbc:" || "${count_maohao}" -lt 3 ]] ; then
   echo -e "\n[ERROR] The TNS of \"${source_sys}\" is not correct , please check  \"${where_tns_conf}\" . "
   exit 1
fi

# Build the per-database SQL that reads the table's column metadata
# (currently oracle / mysql / postgresql; extensible). Also sets
# load_data_start_time, the DB-side expression for the load timestamp.
# \$CONDITIONS is the placeholder sqoop requires in free-form queries.
type_tns=`echo ${connect_db_tns} | awk -F":" '{print$2}'|tr '[A-Z]' '[a-z]'`
case ${type_tns} in
oracle )
    load_data_start_time="to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') "
    # The nvl2(translate(...)) trick double-quotes column names that consist
    # entirely of digits (translate strips digits; an all-digit name becomes
    # NULL), so Oracle accepts them verbatim.
    data_type_sql="select lower(table_name) as table_name, nvl2(translate(lower(column_name),'\1234567890','\'),lower(column_name),'\"'||lower(column_name)||'\"') as column_name, lower(data_type) as data_type \
                     from all_tab_columns \
                    where lower(table_name) = lower('${src_table_name}') and \$CONDITIONS order by column_id "
    ;;
mysql )
    # mysql needs the schema name, parsed from the JDBC url after "3306/"
    # (assumes the standard port appears in the url -- TODO confirm).
    table_schema=`echo ${connect_db_tns} |awk -F"3306/" '{print$2}'|awk '{print$1}'`
    load_data_start_time="date_format(now(), '%Y-%m-%d %T') "
    data_type_sql="select lower(table_name) as table_name, lower(column_name) as column_name, lower(data_type) as data_type \
                     from information_schema.columns \
                    where lower(table_schema) = lower('${table_schema}') \
                      and lower(table_name) = lower('${src_table_name}') and \$CONDITIONS order by ordinal_position "
    ;;
postgresql )
    load_data_start_time="to_char(now(),'yyyy-mm-dd hh24:mi:ss') "
    data_type_sql="select lower(table_name) as table_name, lower(column_name) as column_name, lower(data_type) as data_type \
                     from information_schema.columns \
                    where lower(table_name) = lower('${src_table_name}') and \$CONDITIONS order by ordinal_position "
    ;;
esac

# Fetch and cache the source table structure as <table>.info, one
# "table,column,type" row per column, if it is not cached yet.
where_src_table_info="/RPT/etlscript/sqoop/conf/src_table_info/"
if [ ! -f "${where_src_table_info}${source_sys}/${src_table_name}.info" ] ; then
   if [ ! -d "${where_src_table_info}${source_sys}" ] ; then
      mkdir "${where_src_table_info}${source_sys}"
   fi
   # Dump the column metadata via sqoop into a temporary HDFS dir.
   # java.security.egd=file:/dev/../dev/urandom is the standard workaround for
   # slow /dev/random entropy in the JDBC driver; the original
   # "file:/dev/./dev/urandom" resolves to the nonexistent /dev/dev/urandom,
   # silently disabling the workaround.
   sqoop import -D mapred.child.java.opts="-Djava.security.egd=file:/dev/../dev/urandom" -D oraoop.jdbc.url.verbatim=true \
                -D sqoop.parquet.logical_types.decimal.enable=false -D mapreduce.map.memory.mb=1024 \
                --connect "${connect_db_tns}"  \
                --query "${data_type_sql}" -m 1 \
                --target-dir "/data/db/psd/tmp/${source_sys}/${src_table_name}" --null-string '\\N' --null-non-string '\\N' \
                --delete-target-dir --as-textfile --direct-split-size 134217728
   if [ ! "$?" -eq "0" ] ; then
      echo -e "\n[ERROR] 获取表结构失败"
      exit 1
   fi
   # Zero-size output means the table does not exist at the source.
   # (part* stays unquoted: hadoop expands the HDFS glob itself.)
   hadoop fs -test -s /data/db/psd/tmp/${source_sys}/${src_table_name}/part*
   if [ ! "$?" -eq "0" ] ; then
      hadoop fs -rm -r -skipTrash "/data/db/psd/tmp/${source_sys}/${src_table_name}"
      echo -e "\n[ERROR] The table \"${src_table_name}\" is not exist in \"${source_sys}\" , please check. "
      exit 1
   else
      # Cache the structure file locally for later runs, then clean up HDFS.
      hadoop fs -get /data/db/psd/tmp/${source_sys}/${src_table_name}/part* "/tmp/${src_table_name}.info"
      mv "/tmp/${src_table_name}.info" "${where_src_table_info}${source_sys}/${src_table_name}.info"
      hadoop fs -rm -r -skipTrash "/data/db/psd/tmp/${source_sys}/${src_table_name}"
   fi
fi


# Load hive/beeline/impala settings: every "key==value" line in the conf
# files becomes a shell variable of the same name (e.g. schema, beeline_shell,
# impala_shell, create_head, partitioned_by, file_format, location, ...).
where_create_hive_table="/RPT/etlscript/sqoop/conf/sql/.create_hive_table/"
where_hive_conf="/RPT/etlscript/sqoop/conf/tgt_properties/hive.conf"
where_impala_conf="/RPT/etlscript/sqoop/conf/tgt_properties/impala_shell.conf"
for hv in $(grep "==" "${where_hive_conf}" | awk -F'==' '{print$1}'); do
   # Anchor the lookup as "^key==" (same convention as the TNS lookups above)
   # so a key that is a substring of another key or of a value cannot match
   # the wrong line; the original unanchored `grep "$hv"` could.
   eval $hv='`grep "^${hv}==" ${where_hive_conf} | awk -F"==" '\''{print$2}'\''`' ;
done
for im in $(grep "==" "${where_impala_conf}" | awk -F'==' '{print$1}'); do
   eval $im='`grep "^${im}==" ${where_impala_conf} | awk -F"==" '\''{print$2}'\''`' ;
done

# Probe whether the target table already exists: "refresh" succeeds only when
# impala knows the table. (The beeline "describe" probe is kept for reference.)
#${beeline_shell} --silent=true -n ${beeline_user} -p ${beeline_pwd} -e "describe ${schema}.${hive_table_name};" > /dev/null
${impala_shell} --quiet -q "refresh ${schema}.${hive_table_name};" > /dev/null

if [ ! "$?" -eq "0" ] ; then
# Target table does not exist -> assemble the create-table DDL and run it.
   echo -e "\n[INFO] The table \"${schema}.${hive_table_name}\" will be create below."
   if [ ! -d "${where_create_hive_table}${source_sys}" ] ; then
      mkdir "${where_create_hive_table}${source_sys}"
   fi
   # DDL header, e.g. "create table <schema>.<table> (".
   echo "${create_head} ${schema}.${hive_table_name} ( " > ${where_create_hive_table}${source_sys}/${hive_table_name}.sql
   # One column per line from the cached structure file ($2=name, $3=type);
   # boolean keeps its type, everything else is stored as string.
   cat ${where_src_table_info}${source_sys}/${src_table_name}.info | awk -F"," '{if($3~"boolean") print"   `"$2"` boolean,";else  print"   `"$2"` string,"}' \
       >> ${where_create_hive_table}${source_sys}/${hive_table_name}.sql
   # Trailing audit column, partition column, and storage clauses.
   echo "   \`${add_column}\` string)" >> ${where_create_hive_table}${source_sys}/${hive_table_name}.sql
   echo "partitioned by (\`${partitioned_by}\` string) " >> ${where_create_hive_table}${source_sys}/${hive_table_name}.sql
   echo "row format delimited fields terminated by '\t' " >> ${where_create_hive_table}${source_sys}/${hive_table_name}.sql
   echo "stored as ${file_format} " >> ${where_create_hive_table}${source_sys}/${hive_table_name}.sql
   echo "location '${location}/${hive_table_name}';" >> ${where_create_hive_table}${source_sys}/${hive_table_name}.sql
   ${beeline_shell} --silent=true -n ${beeline_user} -p ${beeline_pwd} -f "${where_create_hive_table}${source_sys}/${hive_table_name}.sql" > /dev/null
fi

# Key logic: build the sqoop SELECT column list by data type  ----begin----

# Per-DB conf maps a data type to its to-string expression, with 00_xx_00 as
# the column-name placeholder (e.g. date -> to_char(00_xx_00,'yyyy-mm-dd hh24:mi:ss')).
where_db_conf="/RPT/etlscript/sqoop/conf/src_db_data_type/"${type_tns}".conf"
# "col|type" pairs for every column, comma-joined, trailing comma stripped.
data_type_23=`cat ${where_src_table_info}${source_sys}/${src_table_name}.info | awk -F"," '{printf"%s,",$2"|"$3}' | sed 's/.$//'`
# Switch the field separator so the for-loop splits data_type_23 on commas.
# NOTE(review): "$," has no expansion, so IFS becomes the two literal
# characters '$' and ','; this works only because tokens contain no '$' --
# presumably IFS="," was intended, confirm before changing.
OLD_IFS=${IFS}
IFS="$,"

cat /dev/null > /tmp/zhongtai_${hive_table_name}.txt
for i in $data_type_23
   do
   # Split each "col|type" token.
   data_type=`echo $i|awk -F"|" '{print"|"$2"|"}'`
   data_type_tmp=`echo $i|awk -F"|" '{print$2}'`
   column_name_sed=`echo $i|awk -F"|" '{print$1"|"}'`
   column_name=`echo $i|awk -F"|" '{print$1}'`
   # Does the per-DB conf define a conversion for this data type?
   cat ${where_db_conf} | grep -w ${data_type_tmp} > /dev/null
   if [ "$?" = "0" ] ; then
      # Known type: prefix every conf line with "col|", keep the matching
      # "|type|" line, and substitute the column name for the placeholder,
      # yielding "col|type|expr..." rows.
      cat ${where_db_conf} | sed "s/^/${column_name_sed}/g" | grep ${data_type} | sed "s/00_xx_00/${column_name}/g" >> /tmp/zhongtai_${hive_table_name}.txt
   else
      # Unknown type: select the column as-is.
      echo "${column_name}||${column_name}|" >> /tmp/zhongtai_${hive_table_name}.txt
   fi
done
# NOTE(review): sets IFS to literal '$' plus space (same quirk as above).
IFS="$ "
# Final projection: "<expr> as <col>, ..." plus the load timestamp column.
select_sql="select "`cat /tmp/zhongtai_${hive_table_name}.txt | awk -F"|" '{printf$3" as "$1", "}'`${load_data_start_time}" as load_data_start_time "
from_sql=" from "${src_table_name}
where_sql=" ${where_condition}"
# Build the sqoop SELECT column list by data type  ----end----

# Guard against binary columns failing: collect --map-column-java overrides
# (forcing those columns to String) from the conversion rows.
if [ "${type_tns}" = "oracle" ] ; then
   map_column_java=`cat /tmp/zhongtai_${hive_table_name}.txt| grep "map-column-java" | awk -F"|" '{print$NF","}' |awk '{printf$NF}' | tr 'a-z' 'A-Z'| \
                    sed -e 's/=STRING/=String/g' -e 's/.$//g' -e 's/^/--map-column-java /g'`
else
   map_column_java=`cat /tmp/zhongtai_${hive_table_name}.txt| grep "map-column-java" | awk -F"|" '{print$NF","}' |awk '{printf$NF}' | tr 'A-Z' 'a-z'| \
                    sed -e 's/=string/=String/g' -e 's/.$//g' -e 's/^/--map-column-java /g'`
fi
rm -rf  /tmp/zhongtai_${hive_table_name}.txt

# Restore the original field separator.
IFS=${OLD_IFS}
# Ensure the target partition exists before loading.

${beeline_shell} --silent=true -n ${beeline_user} -p ${beeline_pwd}  \
 -e "alter table ${schema}.${hive_table_name} add if not exists partition (${partitioned_by}='${partition_value}');"

#${impala_shell} --quiet -q "alter table ${schema}.${hive_table_name} add if not exists partition (${partitioned_by}='${partition_value}'); quit;"


# Extract the data with sqoop into the partition directory, retrying up to
# 3 times and tripling the mapper memory after each failure.
memory_sqoop=2048
for ((retry=0;retry<3;retry++))
   do
   # Redo-safety: clear files already in the partition, then --append anew.
   hadoop fs -rm -r -skipTrash /data/db/psd/${hive_table_name}/${partitioned_by}=${partition_value}/*
   # java.security.egd=file:/dev/../dev/urandom is the standard workaround for
   # slow /dev/random entropy in the JDBC driver; the original
   # "file:/dev/./dev/urandom" resolves to the nonexistent /dev/dev/urandom.
   # ${map_column_java} stays unquoted on purpose: it carries the whole
   # "--map-column-java a=String,..." option (or nothing) and must word-split.
   sqoop import -D mapred.child.java.opts="-Djava.security.egd=file:/dev/../dev/urandom" -D oraoop.jdbc.url.verbatim=true \
                -D sqoop.parquet.logical_types.decimal.enable=false -D mapreduce.map.memory.mb=${memory_sqoop} \
                --append --connect ${connect_db_tns} \
                --query "${select_sql} ${from_sql} ${where_sql} and \$CONDITIONS"  ${map_column_java} -m 1 \
                --target-dir  /data/db/psd/${hive_table_name}/${partitioned_by}=${partition_value}/ \
                --null-string '\\N' --null-non-string '\\N' --as-parquetfile \
                --fields-terminated-by '\t' --direct-split-size 134217728
   # (The original also tested "retry <= 2", which the loop bound guarantees.)
   if [ $? -eq 0 ] ; then
      break
   else
      # Triple the mapper memory before the next attempt
      # ($(( )) replaces the external `expr` call).
      memory_sqoop=$((memory_sqoop * 3))
      sleep 5
      if [ ${retry} -eq 2 ] ; then
         echo -e "\n[ERROR] 已尝试3次抽取${src_table_name}的数据失败，请检查。"
         exit 1
      fi
   fi
done

# Remove the kite artifacts .metadata-00000 and .signals-00000.
hadoop fs -rm -r -skipTrash /data/db/psd/${hive_table_name}/${partitioned_by}=${partition_value}/.*-00000

# Refresh impala metadata so the new files are visible.
${impala_shell} --quiet -q "invalidate metadata psd.${hive_table_name};" > /dev/null
echo -e "\n数据同步至psd.${hive_table_name}: OK\n"






####----Part 2----####

#20200602 psd -> rptpid

# Parse the column_mapping conf for this source table; it must exist and
# defines the kudu table name plus the psd->kudu column mapping.
where_column_mapping="/RPT/etlscript/sqoop/conf/column_mapping"
where_sql_psd2rptpid="/RPT/etlscript/sqoop/conf/sql/psd2rptpid"
if [ ! -f ${where_column_mapping}/${source_sys}/${src_table_name}".info" ] ; then
   echo -e "ERROR\n栏位映射关系(${where_column_mapping}/${source_sys}/${src_table_name}.info)不存在，请添加后再次执行。"
   echo -e "请参考/RPT/etlscript/sqoop/conf/column_mapping/mest1/arc.info\n"
   exit 1
fi

# Target kudu table; a 5th "_"-field equal to "rt" marks a realtime table.
kudu_table_name=`cat ${where_column_mapping}/${source_sys}/${src_table_name}".info" | grep kudu_table_name | awk -F"=" '{print$2}' | sed 's/ //g'`
is_rt=`echo ${kudu_table_name}|awk -F"_" '{print$5}'`
if [ "${add_or_cover}" = "add"  ] ; then
   # Kudu-side filter column mapped from the psd filter column
   # (conf format appears to be "<filter_column>:<kudu_column>|..." -- TODO confirm).
   kudu_filter_column=`cat ${where_column_mapping}/${source_sys}/${src_table_name}".info" | grep ${filter_column} | awk -F"${filter_column}:" '{print$2}' | awk -F"|" '{print$1}'`
fi

# Per-system directory for the generated psd->rptpid SQL files.
if [ ! -d "${where_sql_psd2rptpid}/${source_sys}" ] ; then
   mkdir "${where_sql_psd2rptpid}/${source_sys}"
fi

timestamp_1=`date +%Y%m%d%H%M%S`
# site_flag entries must be exactly 2 chars. Business systems with merged
# sites (e.g. oeet1t2) are not supported here; use
# sqoop_import_source_data_by_tables.sh and run_sql_file.sh for those.
site_flag="t1 t2 t6 t7 h0 h1 h2"
site_flag_cnt="0"
for i in `echo ${site_flag}`
   do
   # NOTE(review): =~ matches the site token anywhere inside source_sys
   # (regex), not only at the end as the error text below suggests -- confirm.
   if [[ ${source_sys} =~ $i ]] ; then
      site_flag_cnt="1"
      if [ ${#i} -eq 2 ] ; then
         SITE=`echo ${i}|tr 'a-z' 'A-Z'`
         # Build the psd->rptpid SQL template once per table, then cache it.
         if [ ! -f "${where_sql_psd2rptpid}/${source_sys}/${src_table_name}.sql" ] ; then
            if [ "${add_or_cover}" = "add" -a ! "${is_rt}" = "rt" ] ; then
               # Incremental, non-realtime table: delete/upsert keyed on
               # site + shift_timekey + the kudu filter column window.
               insert_head="site,shift_timekey,"
               select_head="'${SITE}',f_get_shift_timekey(regexp_replace(${filter_column},'[- /.:]','')),"
               delete_where=" where site = '${SITE}' \
 and shift_timekey >= f_get_shift_timekey('LVS_START_TIMEKEY') and shift_timekey <= f_get_shift_timekey('LVS_END_TIMEKEY') \
 and regexp_replace(${kudu_filter_column},'[- /.:]','')  >= 'LVS_START_TIMEKEY' \
 and regexp_replace(${kudu_filter_column},'[- /.:]','')   < 'LVS_END_TIMEKEY' "
            elif [ "${add_or_cover}" = "add" -a  "${is_rt}" = "rt" ] ; then
               # Incremental, realtime table: no shift_timekey column.
               insert_head="site,"
               select_head="'${SITE}',"
               delete_where=" where site = '${SITE}' \
 and regexp_replace(${kudu_filter_column},'[- /.:]','')  >= 'LVS_START_TIMEKEY' \
 and regexp_replace(${kudu_filter_column},'[- /.:]','')   < 'LVS_END_TIMEKEY' "
            else
               # Full load: delete everything for this site first.
               insert_head="site,"
               select_head="'${SITE}',"
               delete_where=" where site = '${SITE}'"
            fi
            # Assemble the SQL file: refresh; delete; upsert ... select.

            cat /dev/null > ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}"_1.sql"
            cat /dev/null > ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}"_2.sql"
            cat /dev/null > ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}".sql"
            # column_mapping value is "a:b|c:d|..."; _1 collects the left
            # (select) side, _2 the right (upsert column-list) side.
            for j in `cat ${where_column_mapping}/${source_sys}/${src_table_name}".info" | grep column_mapping | awk -F"=" '{print$2}'  | sed 's/ //g' | sed 's/|/ /g'`
               do
               echo $j|awk -F":" '{print"`"$1"`,"}'|sed -e "s/\`'/'/g" -e "s/'\`/'/g" >> ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}"_1.sql"
               echo $j|awk -F":" '{print"`"$2"`,"}'|sed -e "s/\`'/'/g" -e "s/'\`/'/g" >> ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}"_2.sql"
            done
            echo "refresh psd.${hive_table_name};" \
                 >> ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}".sql"
            echo "delete t from ${kudu_table_name} t ${delete_where};" \
                 >> ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}".sql"
            # The sed pair joins the per-column lines and drops the trailing comma.
            echo "upsert into ${kudu_table_name} (${insert_head} " \
                 `cat ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}"_2.sql" | sed ':a;N;s/\n//g;ta' | sed '$s/.$//'` ",interface_time)" \
                 >> ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}".sql"
            echo "select ${select_head} " \
                 `cat ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}"_1.sql" | sed ':a;N;s/\n//g;ta' | sed '$s/.$//'` ",substr(cast(now() as string),1,19)" \
                 >> ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}".sql"
            echo "from psd.${hive_table_name}" \
                 >> ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}".sql"
            echo "where ${partitioned_by}='LVS_PARTITION_VALUE'; quit; " \
                 >> ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}".sql"
            rm -rf ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}"_1.sql"
            rm -rf ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}"_2.sql"
         fi
         # Instantiate the cached template with this run's partition/timekeys.
         cat ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}.sql | sed  \
             -e "s/LVS_PARTITION_VALUE/${partition_value}/g"  \
             -e "s/LVS_START_TIMEKEY/${start_timekey}/g"  \
             -e "s/LVS_END_TIMEKEY/${end_timekey}/g"  \
           > ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}_${timestamp_1}.sql

         # Run the SQL via impala, retrying up to 3 times.
         for ((retry=0;retry<3;retry++))
         do
            ${impala_shell}  -f "${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}_${timestamp_1}.sql"
            impala_shell_ok=$?
            if [ ${impala_shell_ok} -eq 0  -a  ${retry} -le 2 ] ; then
               # On success for an incremental load, drop this cycle's psd
               # staging partition (the data now lives in kudu).
               if [ "${add_or_cover}" = "add" ] ; then
                  hadoop fs -rm -r -skipTrash /data/db/psd/${hive_table_name}/${partitioned_by}=${partition_value}
                  ${impala_shell} --quiet -q "alter table ${schema}.${hive_table_name} drop if exists partition (${partitioned_by}='${partition_value}');" > /dev/null
                  ${impala_shell} --quiet -q "refresh ${schema}.${hive_table_name};" > /dev/null
               fi
               break
            else
               sleep 5
            fi
         done

         rm -rf ${where_sql_psd2rptpid}/${source_sys}/.${src_table_name}_${timestamp_1}.sql
         if [ ! "${impala_shell_ok}" -eq "0" ] ; then
            echo -e "\n[ERROR] 执行SQL(${where_sql_psd2rptpid}/${source_sys}/${src_table_name}.sql)失败，请检查。\n"
            echo -e "\n[提示] 请检查配置项文件(${where_column_mapping}/${source_sys}/${src_table_name}.info)，将rptpid.${kudu_table_name}所有栏位(审计栏位除外)配置映射关系.\n"
            rm -rf ${where_sql_psd2rptpid}/${source_sys}/${src_table_name}.sql
            exit 1
         fi
      else
         echo -e "\n此脚本不支持从合并site的业务系统(${source_sys}) \
                  \n请使用 sqoop_import_source_data_by_tables.sh 和 run_sql_file.sh 加工数据至 ${kudu_table_name}\n"
         # NOTE(review): break leaves the loop first, so the exit 1 below is
         # unreachable and the script ends with status 0 in this branch --
         # confirm whether an error exit was intended.
         break
         exit 1
      fi
   fi
done

   # Fail when source_sys matched none of the supported site codes.
   # (${site_flag_cnt} is quoted so `[` never sees a malformed expression.)
   if [ "${site_flag_cnt}" -eq "0" ] ; then
      echo -e "\n${source_sys}超出预设的范围（目前支持结尾为\"${site_flag}\"），请修改脚本\"$0\"以\"site_flag\"开头的那一行。\
           \n请使用 run_sql_file.sh 加工数据至 ${kudu_table_name}"
      exit 1
   fi

