#!/bin/bash
# Drops historical partitions (and their underlying data) from a Hive table.
# Only partitioned tables are handled. The {{ ... }} placeholders below are
# rendered by the Airflow/Jinja template engine before this script runs.
db='{{ params.db }}'              # database name
table='{{ params.table }}'            # table name
partition_key='{{ params.partition_key }}'   # partition column, e.g.: dt
partition_vaule='{{ params.partition_vaule }}' # partition value prefix to delete, e.g. 2020-06-06. NOTE(review): "vaule" typo matches the DAG-side param name — renaming only here would break template rendering; fix both sides together.
is_partition='{{ params.is_partition }}'    # 1 = partitioned table, 0 = not partitioned
hive_user='{{ params.hive_user }}'  # hive client user
hive_password='{{ params.hive_password }}'  # hive client password

# Main flow: only act when the caller explicitly marked the table as partitioned.
# String comparison via [[ ]] is used instead of the original `[ $var -eq 1 ]`,
# which printed "unary operator expected" errors when is_partition was empty.
if [[ "$is_partition" == "1" ]]; then
  # Enumerate every distinct partition value matching the requested prefix.
  # NOTE(review): db/table/partition params are interpolated directly into SQL —
  # this assumes trusted Airflow params; do not feed user-controlled input here.
  tt=$(spark-sql -e "select ${partition_key} from ${db}.${table} where ${partition_key} like '${partition_vaule}%'  group by ${partition_key};")

  # Build one DROP PARTITION statement per value. Word-splitting of $tt is
  # intentional: spark-sql prints one partition value per line/word.
  sql=""
  for dt in $tt; do
    drop_script="alter table ${db}.${table} drop partition( ${partition_key}='$dt');"
    sql="$sql $drop_script"
  done

  # Temporarily enable external.table.purge so DROP PARTITION also deletes the
  # data files of an external table, then switch it back off afterwards.
  # BUGFIX: the original string had a literal newline inside the word
  # "external" in the second SET statement, producing an invalid property name.
  drop_sql="alter table ${db}.${table} set tblproperties ('external.table.purge' = 'true'); $sql alter table ${db}.${table} set tblproperties ('external.table.purge' = 'false');"
  echo "$drop_sql"

  # NOTE(review): passing the password on argv leaks it via `ps`; consider
  # beeline's password-file/prompt options instead.
  if beeline -n "$hive_user" -p "$hive_password" -e "$drop_sql"; then
    echo "删除${db}.${table}分区$partition_key=${partition_vaule}成功!!!"
  else
    echo "删除${db}.${table}分区$partition_key=${partition_vaule}失败~~~"
  fi
elif [[ "$is_partition" == "0" ]]; then
  echo "非分区表，作死删他干啥！！！"
else
  echo "兄dei儿，分区参数是不是没给！！！"
fi
