#!/bin/bash
# author: onion
# date: 2025-07-07 15:30:33
# description: Export the local-table partition data behind distributed tables.
#              Must be run on every node of the cluster.


# ClickHouse connection settings
CK_USERNAME=default
CK_PASSWORD=SinoTruk!@#456  # NOTE(review): hardcoded credential; also visible in `ps` via argv — prefer env var or config file
CK_PORT=9000
CK_DBNAME=snc_aiops_performance
# NOTE(review): "CK_CLIEN" looks like a typo for CK_CLIENT — kept as-is since it is referenced throughout.
# Expanded unquoted on purpose so the options word-split into a command line.
CK_CLIEN="clickhouse-client --port ${CK_PORT} --user ${CK_USERNAME} --password ${CK_PASSWORD} --database ${CK_DBNAME}"
CLUSTER_NAME="cluster_3shards_1replicas"
# Backup destination directory
BACKUP_DIR="/root/backup"
# Export partitions dated within this many days
EXPORT_DAYS=1
# Log file path (timestamped per run)
LOG_FILE="${BACKUP_DIR}/info_$(date +%Y%m%d_%H%M%S).log"

# Table selection mode:
#   "false"          = back up every local table (names matching local_%)
#   "table1,table2"  = back up only the listed tables, e.g. "local_rum_session,local_rum_resource"
GD_TABLE="false"
#GD_TABLE="local_quick_trace,local_metric_text"

# Initialize logging: create the log directory and truncate/create the log
# file with a timestamped header line.
# Globals: LOG_FILE (read) — path of the log file to initialize.
# Note: the original version also defined a nested log() here that was
# byte-identical to the top-level log() — the duplicate has been removed.
setup_logging() {
    # Declaration split from assignment so a dirname failure is not masked;
    # 'local' keeps log_dir from leaking into the global scope.
    local log_dir
    log_dir=$(dirname "$LOG_FILE")
    mkdir -p "$log_dir"

    # Truncate (or create) the log file and write the run header.
    echo "===== ClickHouse 数据备份日志 ($(date '+%Y-%m-%d %H:%M:%S')) =====" > "$LOG_FILE"
}

# Log one message, prefixed with a timestamp, to stdout and append it to
# the log file.
# Globals: LOG_FILE (read) — destination log file.
# Arguments: $1 - message text.
log() {
    local msg="$1"
    local now
    now=$(date '+%Y-%m-%d %H:%M:%S')
    printf '[%s] %s\n' "$now" "$msg" | tee -a "$LOG_FILE"
}

# Create a directory (with parents) only when it does not already exist,
# logging the creation; an existing directory is left untouched and silent.
# Arguments: $1 - directory path to ensure.
safe_mkdir() {
    local dir="$1"
    [ -d "$dir" ] && return 0
    mkdir -p "$dir"
    log "  创建目录: $dir"
}

# Back up all active partitions of one local table to Parquet files.
# YYYYMMDD-shaped partition values are treated as dates and exported only
# when they fall inside the retention window (>= RETAIN_DATE); all other
# partition values are exported unconditionally. Tables with no partition
# info are dumped whole.
# Globals: CK_CLIEN, CK_DBNAME, RETAIN_DATE, LOG_FILE (via log/tee)
# Arguments: $1 - table name; $2 - destination directory.
backup_table_partitions() {
    local table="$1"
    local backup_dir="$2"
    log "正在处理表: ${table}..."

    # Make sure the destination exists.
    safe_mkdir "$backup_dir"

    # 1. Fetch the table's partition key (empty when unpartitioned).
    #    Declaration split from assignment so a client failure is not masked.
    local partition_key
    partition_key=$(${CK_CLIEN} -q "
        SELECT partition_key
        FROM system.tables
        WHERE database = '${CK_DBNAME}' AND name = '${table}'
    " | tr -d "'()")

    # 2. Collect the distinct partition values of active parts only.
    local partitions=""
    if [ -n "$partition_key" ]; then
        log "  表分区键: ${partition_key}"
        partitions=$(${CK_CLIEN} -q "
            SELECT DISTINCT partition
            FROM system.parts
            WHERE database = '${CK_DBNAME}' AND table = '${table}'
            AND active = 1  -- only consider active parts
        ")
    else
        log "  警告: 未检测到分区键"
    fi

    # 3. Export partition by partition, or the whole table when unpartitioned.
    if [ -n "$partitions" ]; then
        log "  找到分区数量: $(echo "$partitions" | wc -w)"

        local part part_timestamp
        for part in $partitions; do
            # Interpret an 8-digit value as YYYYMMDD and convert to a Unix
            # timestamp; on conversion failure fall back to 0 (the original
            # left part_timestamp empty here, which broke the -ne test below).
            part_timestamp=0
            if [[ $part =~ ^[0-9]{8}$ ]]; then
                part_timestamp=$(date -d "${part:0:4}-${part:4:2}-${part:6:2}" +%s 2>/dev/null) \
                    || part_timestamp=0
                part_timestamp=${part_timestamp:-0}
            fi

            if [ "$part_timestamp" -ne 0 ]; then
                # Date partition: export only inside the retention window.
                if [ "$part_timestamp" -ge "$RETAIN_DATE" ]; then
                    export_partition "$table" "$backup_dir" "$part" "date"
                else
                    log "  分区值 ${part} (日期: $(date -d "@$part_timestamp" '+%Y-%m-%d')) 超过保留期，不导出"
                fi
            else
                # Non-date partition value: always export.
                export_partition "$table" "$backup_dir" "$part" "non-date"
            fi
        done
    else
        # No partition info: dump the entire table.
        log "  无分区信息，导出整个表..."
        backup_file="${backup_dir}/${table}.full.Parquet"
        ${CK_CLIEN} -q "SELECT *
                     FROM ${CK_DBNAME}.${table}
                     INTO OUTFILE '${backup_file}'
                     FORMAT Parquet" 2>&1 | tee -a "$LOG_FILE"

        # Check the client's exit status via PIPESTATUS[0]; plain $? would
        # report tee's status and hide export failures.
        if [ "${PIPESTATUS[0]}" -eq 0 ]; then
            log "  表已导出到: ${backup_file}"
        else
            log "  错误: 导出表 ${table} 失败"
        fi
    fi
}

# Export a single partition of a table to a Parquet file and log the result.
# Globals: CK_CLIEN, CK_DBNAME, LOG_FILE (via tee/log)
# Arguments: $1 - table; $2 - destination directory; $3 - partition value;
#            $4 - "date" or "non-date" (controls the output filename).
# NOTE(review): the query filters on the _partition_id virtual column while
# $3 comes from system.parts.partition — these coincide for plain YYYYMMDD
# keys but can differ for other partition expressions; verify for non-date
# partitioned tables.
export_partition() {
    local table="$1"
    local backup_dir="$2"
    local part="$3"
    local part_type="$4"
    local backup_file file_size

    # Date partitions keep the raw value in the filename; other values are
    # reduced to alphanumerics so the filename stays filesystem-safe.
    if [ "$part_type" == "date" ]; then
        backup_file="${backup_dir}/${table}.${part}.Parquet"
    else
        backup_file="${backup_dir}/${table}.partition_$(echo "$part" | tr -dc 'a-zA-Z0-9').Parquet"
    fi

    log "  导出分区: ${part} 到 ${backup_file}"

    # Run the export, mirroring client output into the log file.
    ${CK_CLIEN} -q "SELECT *
                 FROM ${CK_DBNAME}.${table}
                 WHERE _partition_id = '${part}'
                 INTO OUTFILE '${backup_file}'
                 FORMAT Parquet" 2>&1 | tee -a "$LOG_FILE"

    # Check the client's exit status via PIPESTATUS[0]; the original tested
    # $? after the pipeline, which only reflected tee's (always 0) status.
    if [ "${PIPESTATUS[0]}" -eq 0 ] && [ -f "${backup_file}" ]; then
        file_size=$(du -h "${backup_file}" | cut -f1)
        log "  导出成功: ${backup_file} (大小: ${file_size})"
    else
        log "  错误: 分区导出失败: ${part}"
    fi
}

# ---- Main program ----
setup_logging
log "===== ClickHouse 数据备份脚本 ====="
log "数据库: ${CK_DBNAME}"
log "备份目录: ${BACKUP_DIR}"
log "日志文件: ${LOG_FILE}"

# Retention boundary as a Unix timestamp: date partitions on/after this
# moment are exported, older ones are skipped.
RETAIN_DATE=$(date -d "$EXPORT_DAYS days ago" +%s)
log "将保留最近 $EXPORT_DAYS 天的数据（包含日期: $(date -d "@$RETAIN_DATE" '+%Y-%m-%d')）"

# Decide which tables to back up.
declare -a tables_to_backup

if [ "$GD_TABLE" == "false" ]; then
    # Back up every local table (convention: names prefixed "local_").
    log "模式: 备份所有本地表"
    all_tables=$(${CK_CLIEN} --query "SHOW TABLES LIKE 'local_%'")
    # Intentional word-splitting: one table name per whitespace-separated word.
    tables_to_backup=($all_tables)
else
    # Back up only the comma-separated list from GD_TABLE.
    log "模式: 备份指定表: $GD_TABLE"
    IFS=',' read -ra tables_to_backup <<< "$GD_TABLE"
fi

# Announce the work list.
table_count=${#tables_to_backup[@]}
log "要导出 ${table_count} 个表:"
for table in "${tables_to_backup[@]}"; do
    log "  - $table"
done

# Record the start time.
start_time=$(date +%s)

# Process each table.
for table in "${tables_to_backup[@]}"; do
    # EXISTS TABLE prints "1"/"0"; an empty result means the client itself
    # failed, so skip in that case too. (The original numeric -eq test raised
    # "integer expression expected" on empty output and then proceeded anyway.)
    table_exists=$(${CK_CLIEN} -q "EXISTS TABLE ${CK_DBNAME}.${table}")
    if [ "$table_exists" != "1" ]; then
        log "警告: 表 ${CK_DBNAME}.${table} 不存在，跳过"
        continue
    fi

    # Per-table destination directory.
    table_backup_dir="${BACKUP_DIR}/${CK_DBNAME}/${table}"
    safe_mkdir "$table_backup_dir"

    # Back up the table's data and time the operation.
    backup_start=$(date +%s)
    backup_table_partitions "$table" "$table_backup_dir"
    backup_duration=$(($(date +%s) - backup_start))
    log "表 ${table} 处理完成，耗时: ${backup_duration}秒"
done

# Final summary.
end_time=$(date +%s)
total_duration=$((end_time - start_time))
log "备份完成! 所有本地表数据已导出到 ${BACKUP_DIR}"
log "总表数: ${table_count}"
log "总耗时: ${total_duration}秒"
log "详细日志已保存至: ${LOG_FILE}"