#!/bin/bash
#
# Ceph cluster backup/restore helper.
# Usage: script {backup|restore <backup_file>}

# ANSI color codes for log output; readonly since they are constants.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'

# Logging helpers: emit a colored, tagged message on stdout.
log_info() {
    printf "${GREEN}[INFO] %b${NC}\n" "$1"
}

# Emit a yellow [WARN]-tagged message on stdout.
log_warn() {
    printf "${YELLOW}[WARN] %b${NC}\n" "$1"
}

# Emit a red [ERROR]-tagged message.
# Fix: diagnostics go to stderr so error output can be separated from
# normal logs (and from captured stdout such as create_backup_dir's path).
log_error() {
    echo -e "${RED}[ERROR] $1${NC}" >&2
}

# Verify the Ceph cluster is reachable; exit 1 when `ceph status` fails.
check_ceph_status() {
    ceph status > /dev/null 2>&1 && return 0
    log_error "Ceph 服务未运行"
    exit 1
}

# Create a timestamped backup directory under /var/lib/ceph/backup.
# Prints the directory path on stdout (its only stdout output, so callers
# capture it with command substitution). Exits 1 if creation fails.
create_backup_dir() {
    # Split declaration from assignment so mkdir/date failures are visible.
    local backup_dir
    backup_dir="/var/lib/ceph/backup/$(date +%Y%m%d_%H%M%S)"
    # Fix: the original ignored mkdir failure and returned an unusable path.
    if ! mkdir -p "$backup_dir"; then
        log_error "无法创建备份目录: $backup_dir"
        exit 1
    fi
    echo "$backup_dir"
}

# Back up cluster-level configuration into directory $1.
# Each step is best-effort: a missing component (e.g. no MDS deployed)
# warns and continues instead of silently producing an incomplete backup.
backup_cluster_config() {
    local backup_dir="$1"
    log_info "开始备份集群配置..."

    # Main config file and admin keyring.
    cp /etc/ceph/ceph.conf "$backup_dir/ceph.conf" || log_warn "ceph.conf 备份失败"
    cp /etc/ceph/ceph.client.admin.keyring "$backup_dir/ceph.client.admin.keyring" || log_warn "密钥环备份失败"

    # Compiled (binary) CRUSH map; restore with `ceph osd setcrushmap -i`.
    ceph osd getcrushmap > "$backup_dir/crush.map" || log_warn "CRUSH map 备份失败"

    # Text dumps of the cluster maps, kept for reference/disaster recovery.
    ceph mon dump > "$backup_dir/mon.dump" || log_warn "mon dump 失败"
    ceph osd dump > "$backup_dir/osd.dump" || log_warn "osd dump 失败"
    # NOTE(review): `ceph mds dump` is deprecated/removed on recent Ceph
    # releases (`ceph fs dump` replaces it) — tolerate failure here.
    ceph mds dump > "$backup_dir/mds.dump" || log_warn "mds dump 失败"

    # RGW root pool settings.
    ceph osd pool get .rgw.root all > "$backup_dir/rgw.config" || log_warn "RGW 配置备份失败"

    log_info "集群配置备份完成"
}

# Back up per-pool configuration (and RBD images of the conventional
# "rbd" pool) into directory $1.
backup_pool_data() {
    local backup_dir="$1"
    log_info "开始备份存储池数据..."

    # Split declaration from assignment so a failing `ceph` is not masked
    # by `local`'s own (always zero) exit status.
    local pools
    pools=$(ceph osd pool ls)

    local pool image images
    for pool in $pools; do
        log_info "备份存储池: $pool"

        mkdir -p "$backup_dir/pools/$pool"

        # Full settings dump; quote $pool defensively (fix: was unquoted).
        ceph osd pool get "$pool" all > "$backup_dir/pools/$pool/config"

        # Only the pool literally named "rbd" gets image-level export here.
        if [[ $pool == "rbd" ]]; then
            images=$(rbd ls "$pool")
            for image in $images; do
                log_info "备份镜像: $image"
                rbd export "$pool/$image" "$backup_dir/pools/$pool/$image"
            done
        fi
    done

    log_info "存储池数据备份完成"
}

# Back up CephFS contents into directory $1 via a temporary kernel mount.
# Returns 0 on success or when no CephFS exists; 1 when the mount fails.
backup_cephfs_data() {
    local backup_dir="$1"
    log_info "开始备份 CephFS 数据..."

    # Skip entirely when no CephFS filesystem is reported.
    if ! ceph fs ls | grep -q "cephfs"; then
        log_warn "未找到 CephFS，跳过备份"
        return 0
    fi

    mkdir -p "$backup_dir/cephfs"

    # NOTE(review): monitor address is hard-coded — confirm it matches the
    # cluster, or derive it from ceph.conf.
    local mount_point="/mnt/cephfs_backup"
    mkdir -p "$mount_point"

    # Fix: previously rsync/umount ran even when the mount failed,
    # silently "backing up" an empty directory.
    if ! mount -t ceph ceph-mds1:6789:/ "$mount_point"; then
        log_error "CephFS 挂载失败，跳过备份"
        rmdir "$mount_point"
        return 1
    fi

    rsync -av "$mount_point/" "$backup_dir/cephfs/"

    umount "$mount_point"
    rmdir "$mount_point"

    log_info "CephFS 数据备份完成"
}

# Back up RGW (object gateway) metadata into directory $1.
backup_rgw_data() {
    local backup_dir="$1"
    log_info "开始备份 RGW 数据..."

    # Nothing to do when the cluster status reports no RGW daemon.
    if ! ceph -s | grep -q "rgw"; then
        log_warn "未找到 RGW，跳过备份"
        return
    fi

    mkdir -p "$backup_dir/rgw"

    # User and bucket listings (JSON, as emitted by radosgw-admin).
    radosgw-admin user list > "$backup_dir/rgw/users.json"
    radosgw-admin bucket list > "$backup_dir/rgw/buckets.json"

    log_info "RGW 数据备份完成"
}

# Compress backup directory $1 into $1.tar.gz and remove the original.
# Fix: only delete the uncompressed directory when tar succeeds —
# previously a failed tar still deleted the sole copy of the backup.
compress_backup() {
    local backup_dir="$1"
    log_info "开始压缩备份数据..."

    if tar -czf "$backup_dir.tar.gz" -C "$(dirname "$backup_dir")" "$(basename "$backup_dir")"; then
        rm -rf "$backup_dir"
        log_info "备份数据压缩完成: $backup_dir.tar.gz"
    else
        log_error "备份压缩失败，保留目录: $backup_dir"
        return 1
    fi
}

# Restore cluster configuration from an archive created by compress_backup.
# $1 - path to the backup .tar.gz. Returns 1 when extraction fails.
restore_cluster_config() {
    local backup_file="$1"
    local restore_dir="/tmp/ceph_restore_$(date +%Y%m%d_%H%M%S)"

    log_info "开始恢复集群配置..."

    mkdir -p "$restore_dir"
    if ! tar -xzf "$backup_file" -C "$restore_dir"; then
        log_error "备份文件解压失败: $backup_file"
        rm -rf "$restore_dir"
        return 1
    fi

    # Fix: the archive contains a top-level timestamped directory (see
    # compress_backup), so the backed-up files live one level down —
    # the original read them from the wrong path.
    local data_dir
    data_dir=$(find "$restore_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)
    [ -n "$data_dir" ] || data_dir="$restore_dir"

    cp "$data_dir/ceph.conf" /etc/ceph/
    cp "$data_dir/ceph.client.admin.keyring" /etc/ceph/

    # Fix: `ceph osd setcrushmap -i` expects the *compiled* map exactly as
    # saved by `ceph osd getcrushmap`; the original decompiled it with
    # crushtool first and piped text, which always fails.
    ceph osd setcrushmap -i "$data_dir/crush.map"

    # NOTE(review): `ceph mon/osd/mds restore` are not real Ceph commands;
    # mon.dump/osd.dump/mds.dump are informational text and cannot be
    # replayed via the CLI — they are kept for manual disaster recovery.
    log_warn "mon/osd/mds dump 仅供参考，无法通过 CLI 自动恢复"

    # NOTE(review): `ceph osd pool set <pool> all <blob>` is invalid;
    # restoring .rgw.root settings requires parsing rgw.config per key.
    log_warn "RGW 配置 (rgw.config) 需手动恢复"

    rm -rf "$restore_dir"

    log_info "集群配置恢复完成"
}

# Restore per-pool configuration (and RBD images) from a backup archive.
# $1 - path to the backup .tar.gz. Returns 1 when extraction fails.
restore_pool_data() {
    local backup_file="$1"
    local restore_dir="/tmp/ceph_restore_$(date +%Y%m%d_%H%M%S)"

    log_info "开始恢复存储池数据..."

    mkdir -p "$restore_dir"
    if ! tar -xzf "$backup_file" -C "$restore_dir"; then
        log_error "备份文件解压失败: $backup_file"
        rm -rf "$restore_dir"
        return 1
    fi

    # Fix: the archive nests everything under a timestamped directory
    # (see compress_backup); the original looked one level too high.
    local data_dir
    data_dir=$(find "$restore_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)
    [ -n "$data_dir" ] || data_dir="$restore_dir"

    local pool_config pool image image_name key value
    for pool_config in "$data_dir/pools"/*/config; do
        [ -f "$pool_config" ] || continue
        pool=$(basename "$(dirname "$pool_config")")
        log_info "恢复存储池: $pool"

        # Fix: exact-line match; a plain `grep -q rbd` would treat the
        # pool as existing when only e.g. "rbd-ssd" exists.
        if ! ceph osd pool ls | grep -qx "$pool"; then
            ceph osd pool create "$pool" 128 128
        fi

        # Fix: `ceph osd pool get <pool> all` emits "key: value" lines,
        # so the old `^[0-9]+` filter matched nothing and no setting was
        # ever restored. Not every reported key is settable, so failures
        # are downgraded to warnings.
        while IFS=' ' read -r key value; do
            key=${key%:}
            [ -n "$key" ] && [ -n "$value" ] || continue
            ceph osd pool set "$pool" "$key" "$value" \
                || log_warn "无法恢复 $pool 的参数 $key"
        done < "$pool_config"

        # Re-import exported RBD images for the conventional rbd pool.
        if [[ $pool == "rbd" ]]; then
            for image in "$data_dir/pools/$pool"/*; do
                [ -f "$image" ] || continue
                image_name=$(basename "$image")
                # Fix: the original also imported the "config" file as an image.
                [ "$image_name" = "config" ] && continue
                log_info "恢复镜像: $image_name"
                rbd import "$image" "$pool/$image_name"
            done
        fi
    done

    rm -rf "$restore_dir"

    log_info "存储池数据恢复完成"
}

# Restore CephFS contents from a backup archive via a temporary mount.
# $1 - path to the backup .tar.gz. Returns 1 when extraction fails.
restore_cephfs_data() {
    local backup_file="$1"
    local restore_dir="/tmp/ceph_restore_$(date +%Y%m%d_%H%M%S)"

    log_info "开始恢复 CephFS 数据..."

    mkdir -p "$restore_dir"
    if ! tar -xzf "$backup_file" -C "$restore_dir"; then
        log_error "备份文件解压失败: $backup_file"
        rm -rf "$restore_dir"
        return 1
    fi

    # Fix: the archive nests everything under a timestamped directory
    # (see compress_backup); the original rsync'd from the wrong path.
    local data_dir
    data_dir=$(find "$restore_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)
    [ -n "$data_dir" ] || data_dir="$restore_dir"

    if ceph fs ls | grep -q "cephfs"; then
        local mount_point="/mnt/cephfs_restore"
        mkdir -p "$mount_point"
        # Fix: abort instead of rsync-ing onto an unmounted directory.
        if mount -t ceph ceph-mds1:6789:/ "$mount_point"; then
            rsync -av "$data_dir/cephfs/" "$mount_point/"
            umount "$mount_point"
            rmdir "$mount_point"
            log_info "CephFS 数据恢复完成"
        else
            log_error "CephFS 挂载失败，跳过恢复"
            rmdir "$mount_point"
        fi
    else
        log_warn "未找到 CephFS，跳过恢复"
    fi

    rm -rf "$restore_dir"
}

# Restore RGW users and buckets from a backup archive.
# $1 - path to the backup .tar.gz. Returns 1 when extraction fails.
# NOTE(review): only names recorded in the listings are recreated; keys,
# quotas and bucket contents are not part of this backup.
restore_rgw_data() {
    local backup_file="$1"
    local restore_dir="/tmp/ceph_restore_$(date +%Y%m%d_%H%M%S)"

    log_info "开始恢复 RGW 数据..."

    mkdir -p "$restore_dir"
    if ! tar -xzf "$backup_file" -C "$restore_dir"; then
        log_error "备份文件解压失败: $backup_file"
        rm -rf "$restore_dir"
        return 1
    fi

    # Fix: the archive nests everything under a timestamped directory
    # (see compress_backup); the original read from the wrong path.
    local data_dir
    data_dir=$(find "$restore_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)
    [ -n "$data_dir" ] || data_dir="$restore_dir"

    if ceph -s | grep -q "rgw"; then
        local user bucket
        # Fix: `radosgw-admin user list` emits a JSON array, not one name
        # per line; extract the quoted names before iterating.
        while read -r user; do
            [ -n "$user" ] || continue
            radosgw-admin user create --uid="$user" --display-name="$user"
        done < <(sed -n 's/^[[:space:]]*"\([^"]*\)".*$/\1/p' "$data_dir/rgw/users.json")

        # NOTE(review): `radosgw-admin bucket create` may not exist on all
        # releases (buckets are normally created via the S3/Swift API) —
        # confirm against the deployed version.
        while read -r bucket; do
            [ -n "$bucket" ] || continue
            radosgw-admin bucket create --bucket="$bucket"
        done < <(sed -n 's/^[[:space:]]*"\([^"]*\)".*$/\1/p' "$data_dir/rgw/buckets.json")

        log_info "RGW 数据恢复完成"
    else
        log_warn "未找到 RGW，跳过恢复"
    fi

    rm -rf "$restore_dir"
}

# Entry point: dispatch to backup or restore based on $1.
# Usage: $0 backup | $0 restore <backup_file>
main() {
    # Abort early if the cluster is unreachable.
    check_ceph_status

    case "${1:-}" in
        backup)
            # Fix: `exit 1` inside $(create_backup_dir) only terminates
            # the command-substitution subshell, so a failed directory
            # creation previously proceeded with an empty path.
            local backup_dir
            backup_dir=$(create_backup_dir)
            if [ -z "$backup_dir" ]; then
                log_error "备份目录创建失败"
                exit 1
            fi

            backup_cluster_config "$backup_dir"
            backup_pool_data "$backup_dir"
            backup_cephfs_data "$backup_dir"
            backup_rgw_data "$backup_dir"

            compress_backup "$backup_dir"
            ;;
        restore)
            if [ -z "${2:-}" ]; then
                log_error "请指定备份文件路径"
                exit 1
            fi

            restore_cluster_config "$2"
            restore_pool_data "$2"
            restore_cephfs_data "$2"
            restore_rgw_data "$2"
            ;;
        *)
            log_error "用法: $0 {backup|restore <backup_file>}"
            exit 1
            ;;
    esac
}

# Run the entry point with all script arguments.
main "$@" 