#!/bin/bash
# Dedup the day's raw app log into the ODS layer via Hive.
# Usage: script.sh [yyyy-MM-dd]   (defaults to yesterday, T-1)
# Requires: JDK, Hadoop and Hive installed at the paths below.

export JAVA_HOME=/opt/apps/jdk1.8.0_191
export HADOOP_HOME=/opt/apps/hadoop-3.1.1
export HIVE_HOME=/opt/apps/hive-3.1.2

# Target date: first CLI argument if supplied, otherwise T-1.
# ${1:-...} replaces the fragile unquoted `[ $1 ]` test, which breaks
# on arguments containing spaces or test-operator-like strings (e.g. "-n").
dt=${1:-$(date -d'-1 days' +%Y-%m-%d)}
echo "正准备去重${dt}日的数据到ODS............"
##################### function definitions
#######################################
# distinct: attach the day's raw HDFS log directory as a partition of
# tmp.cool_app_raw, then rewrite that partition through a GROUP BY to
# collapse duplicate lines.
# Globals:   dt (read), HIVE_HOME (read)
# Returns:   0 on success; non-zero if either hive invocation fails
#######################################
function distinct(){
  # Map /cool/${dt}/appLog onto the dt=${dt} partition of the temp table.
  local sql1="alter table tmp.cool_app_raw add if not exists partition(dt='${dt}') location '/cool/${dt}/appLog'"
  # Overwrite the partition with its own deduplicated contents
  # (group by line == distinct lines), gzip-compressing the output.
  local sql2="set hive.exec.compress.output = true;
  set mapred.output.compression.codec = org.apache.hadoop.io.compress.GzipCodec;
  insert overwrite table tmp.cool_app_raw partition(dt='${dt}')
  SELECT
  line
  FROM
  tmp.cool_app_raw
  where  dt = '${dt}'
  group by line"
  # Attach the partition; bail out before the destructive overwrite if
  # the mapping step failed (previously the overwrite ran regardless).
  ${HIVE_HOME}/bin/hive -e "${sql1}" || return 1
  # Dedup-overwrite the partition; our return status is this call's status.
  ${HIVE_HOME}/bin/hive -e "${sql2}"
}


# Decide whether the day's data was collected more than once by comparing
# the line count on HDFS against the source server's reported count.
# Quote the glob so it reaches hdfs verbatim instead of being subject to
# accidental local shell expansion.
hdfsCount=$(${HADOOP_HOME}/bin/hdfs dfs -text "/cool/${dt}/appLog/*" | wc -l)
# -s: suppress curl's progress output; only the response body is captured.
serverCount=$(curl -s http://windows:8080/api/getCountByDtAndType -X POST  -d"{\"logType\":\"appLog\" ,\"dt\":\"$dt\"}" -H "Content-Type: application/json")
echo "${dt}日HDFS上的日志数量是: ${hdfsCount}"
echo "${dt}日原始日志数量是: ${serverCount}"
# Guard: if either count is not a plain integer (e.g. curl failed and
# serverCount is empty), the old unquoted `[ -gt ]` test raised a syntax
# error and the script fell through unpredictably. Fail loudly instead.
if ! [[ "$hdfsCount" =~ ^[0-9]+$ && "$serverCount" =~ ^[0-9]+$ ]]; then
    echo "无法获取有效的日志数量: hdfs='${hdfsCount}' server='${serverCount}'" >&2
    exit 1
fi
if [ "$hdfsCount" -gt "$serverCount" ]; then
    # More lines on HDFS than at the source: duplicates exist, dedup needed.
    echo  "需要去重操作.............."
    if distinct; then
        echo  "去重成功................."
    else
        echo "去重失败 "
    fi
else
    echo  "不需要去重..........."
fi