#!/bin/bash
#
# Ceph cluster management helper: bootstrap a cephadm cluster, manage
# node/disk lifecycle, and apply performance tuning.
#
# Strict-ish mode: abort on command errors and failed pipeline stages.
# (`-u` is intentionally omitted: main reads "$1" to print usage when
# the script is invoked with no arguments.)
set -eo pipefail

# ANSI color codes for log output (constants).
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'

# Logging helpers: colorized, level-tagged messages on stdout.
log_info() {
    printf '%b\n' "${GREEN}[INFO] $1${NC}"
}

# Yellow-tagged warning message on stdout.
log_warn() {
    printf '%b\n' "${YELLOW}[WARN] $1${NC}"
}

# Red-tagged error message. Diagnostics go to stderr so stdout stays
# clean for pipelines and command substitution.
log_error() {
    echo -e "${RED}[ERROR] $1${NC}" >&2
}

# Abort with an error unless the given command exists on PATH.
# Arguments: $1 - command name to look up
check_command() {
    # Quote "$1" so names with unusual characters can't word-split/glob.
    if ! command -v "$1" &> /dev/null; then
        log_error "未找到命令: $1"
        exit 1
    fi
}

# Verify the Ceph cluster is reachable before doing any work;
# abort with an error message otherwise.
check_ceph_status() {
    if ceph status > /dev/null 2>&1; then
        return 0
    fi
    log_error "Ceph 服务未运行"
    exit 1
}

# Bootstrap a new Ceph cluster with cephadm on the current host.
# Arguments:
#   $1 - bootstrap node name (informational only; bootstrap runs here)
#   $2 - IP for the first monitor daemon
#   $3 - public network CIDR
#   $4 - cluster (replication) network CIDR
init_cluster() {
    local bootstrap_node=$1
    local mon_ip=$2
    local public_network=$3
    local cluster_network=$4

    log_info "开始初始化 Ceph 集群..."
    log_info "引导节点: $bootstrap_node"

    # Install cephadm — abort early if the download fails, instead of
    # chmod/executing a missing or truncated file.
    if ! curl --silent --remote-name --location https://github.com/ceph/ceph/raw/quincy/src/cephadm/cephadm; then
        log_error "cephadm 下载失败"
        exit 1
    fi
    chmod +x cephadm
    ./cephadm add-repo --release quincy
    ./cephadm install

    # Bootstrap the cluster.
    # NOTE(review): the dashboard password and registry credentials below
    # are hard-coded placeholders — override them before production use.
    cephadm bootstrap --mon-ip "$mon_ip" \
        --initial-dashboard-user admin \
        --initial-dashboard-password admin \
        --allow-fqdn-hostname \
        --registry-url registry.cn-hangzhou.aliyuncs.com \
        --registry-username your-username \
        --registry-password your-password

    # Configure the public and cluster networks on the monitors.
    ceph config set mon public_network "$public_network"
    ceph config set mon cluster_network "$cluster_network"

    # Enable the dashboard over TLS with a self-signed certificate.
    ceph dashboard enable-ssl
    ceph dashboard create-self-signed-cert

    log_info "Ceph 集群初始化完成"
}

# Add a host to the cluster and deploy the requested services on it.
# Arguments:
#   $1 - host name
#   $2 - host IP
#   $3 - comma-separated role labels (mon,mgr,osd,rgw,mds)
add_node() {
    local node_name=$1
    local node_ip=$2
    local roles=$3

    log_info "开始添加节点: $node_name ($node_ip)"

    # Check SSH reachability as root: cephadm manages hosts as root and
    # the cluster key below is installed for root, so probing as the
    # invoking user would test the wrong account.
    if ! ssh -o BatchMode=yes -o ConnectTimeout=5 "root@$node_ip" echo &> /dev/null; then
        log_error "无法连接到节点: $node_ip"
        exit 1
    fi

    # Install the cluster's public SSH key for the orchestrator.
    ssh-copy-id -f -i /etc/ceph/ceph.pub "root@$node_ip"

    # Register the host; the labels drive the placements below.
    ceph orch host add "$node_name" "$node_ip" --labels "$roles"

    # Deploy services by label rather than pinning this host name:
    # --placement="$node_name" would shrink the service spec to just
    # this node, evicting daemons already running on other hosts.
    if [[ $roles == *"mon"* ]]; then
        ceph orch apply mon --placement="label:mon"
    fi
    if [[ $roles == *"mgr"* ]]; then
        ceph orch apply mgr --placement="label:mgr"
    fi
    if [[ $roles == *"osd"* ]]; then
        ceph orch apply osd --all-available-devices --host="$node_name"
    fi
    if [[ $roles == *"rgw"* ]]; then
        ceph orch apply rgw rgw --placement="label:rgw"
    fi
    if [[ $roles == *"mds"* ]]; then
        ceph orch apply mds cephfs --placement="label:mds"
    fi

    log_info "节点 $node_name 添加完成"
}

# Remove a node and every OSD it hosts from the cluster.
# Arguments: $1 - host name to remove
remove_node() {
    local node_name=$1

    log_info "开始移除节点: $node_name"

    # Ensure the host is part of the cluster. -w avoids matching
    # "node1" as a substring of "node10".
    if ! ceph orch host ls | grep -qw "$node_name"; then
        log_error "节点 $node_name 不在集群中"
        exit 1
    fi

    # Evacuate and purge every OSD on this node. `ceph osd ls` prints
    # bare numeric OSD ids with no host info, so grepping it for a host
    # name could never match; `ceph osd ls-tree` lists the OSD ids under
    # the host's CRUSH bucket.
    local osd
    for osd in $(ceph osd ls-tree "$node_name"); do
        ceph osd out "$osd"
        ceph osd purge "$osd" --yes-i-really-mean-it
    done

    # Drop the host (and any remaining daemons) from the orchestrator.
    ceph orch host rm "$node_name" --force

    log_info "节点 $node_name 移除完成"
}

# Create an OSD on a specific device of a cluster host.
# Arguments:
#   $1 - host name
#   $2 - device path on that host (e.g. /dev/sdb)
add_disk() {
    local node_name=$1
    local disk_path=$2
    local before after

    log_info "开始为节点 $node_name 添加磁盘: $disk_path"

    # Ensure the host is part of the cluster (whole-word match).
    if ! ceph orch host ls | grep -qw "$node_name"; then
        log_error "节点 $node_name 不在集群中"
        exit 1
    fi

    # Snapshot the OSD count so creation can actually be verified:
    # `ceph osd tree` lists osd.N entries, never device paths, so the
    # old `grep $disk_path` check could never succeed.
    before=$(ceph osd ls | wc -l)

    # Create the OSD on the given device.
    ceph orch daemon add osd "$node_name:$disk_path"

    # Give the orchestrator time to deploy the daemon, then re-count.
    sleep 10
    after=$(ceph osd ls | wc -l)
    if [ "$after" -gt "$before" ]; then
        log_info "磁盘 $disk_path 已成功添加到节点 $node_name"
    else
        log_error "磁盘添加失败"
        exit 1
    fi
}

# Drain and remove the OSD backed by a failed disk.
# Arguments:
#   $1 - host name
#   $2 - failed device path on that host
handle_failed_disk() {
    local node_name=$1
    local disk_path=$2

    log_info "开始处理节点 $node_name 的故障磁盘: $disk_path"

    # Ensure the host is part of the cluster (whole-word match).
    if ! ceph orch host ls | grep -qw "$node_name"; then
        log_error "节点 $node_name 不在集群中"
        exit 1
    fi

    # Resolve the OSD id backed by the device.
    # NOTE(review): `ceph osd tree` normally shows osd.N names rather
    # than device paths — confirm this lookup; `ceph device ls` may be
    # the reliable source for the device-to-OSD mapping.
    local osd_id
    osd_id=$(ceph osd tree | grep "$disk_path" | awk '{print $1}')

    if [ -z "$osd_id" ]; then
        log_error "未找到对应的 OSD"
        exit 1
    fi

    # Stop placing data on the OSD and let recovery rebalance.
    ceph osd out "$osd_id"

    # Wait until all PGs are migrated off and the OSD is safe to remove.
    # (The old `getcrushmap | grep` loop was meaningless: getcrushmap
    # emits a binary CRUSH map, not text containing OSD ids.)
    while ! ceph osd safe-to-destroy "osd.$osd_id" &> /dev/null; do
        sleep 10
    done

    # `purge` removes the OSD, its CRUSH entry and its auth key in one
    # step, so the old follow-up `crush remove` / `auth del` calls were
    # redundant and would only produce errors.
    ceph osd purge "$osd_id" --yes-i-really-mean-it

    log_info "故障磁盘 $disk_path 处理完成"
}

# Apply opinionated performance tuning to the cluster.
optimize_cluster() {
    log_info "开始优化集群配置..."

    # Global OSD tuning. (The previous version set osd_max_write_size
    # and osd_client_message_size_cap twice; duplicates removed.)
    ceph config set global osd_max_write_size 512
    ceph config set global osd_client_message_size_cap 1G
    ceph config set global osd_deep_scrub_stride 131072
    ceph config set global osd_map_cache_size 1024
    ceph config set global osd_map_cache_bl_size 128
    ceph config set global osd_target_transaction_size 30
    # NOTE(review): osd_op_threads / osd_disk_threads were removed from
    # Ceph long before Quincy and `ceph config set` rejects unknown
    # options, so those lines have been dropped.
    # Filestore tuning — only meaningful if filestore OSDs still exist;
    # cephadm-deployed Quincy clusters use BlueStore by default.
    ceph config set global filestore_merge_threshold 40
    ceph config set global filestore_split_multiple 8

    # RBD client-side cache tuning.
    ceph config set client rbd_cache true
    ceph config set client rbd_cache_size 1G
    ceph config set client rbd_cache_max_dirty 256M
    ceph config set client rbd_cache_target_dirty 192M
    ceph config set client rbd_cache_writethrough_until_flush true

    # CephFS metadata server cache tuning.
    ceph config set mds mds_cache_memory_limit 4G
    ceph config set mds mds_cache_reservation 0.05
    ceph config set mds mds_health_cache_threshold 1.5

    # RGW tuning. NOTE(review): targets the hard-coded daemon name
    # client.rgw.rgw1 — adjust if your RGW daemons are named differently.
    # (rgw_num_rados_handles was removed in recent releases; dropped.)
    ceph config set client.rgw.rgw1 rgw_thread_pool_size 512
    ceph config set client.rgw.rgw1 rgw_max_chunk_size 4M
    ceph config set client.rgw.rgw1 rgw_obj_stripe_size 4M

    log_info "集群优化完成"
}

# CLI dispatcher: validates required tools per command, then runs it.
main() {
    local cmd=${1:-}

    # ssh is needed by every node-touching path and is cheap to check.
    check_command ssh

    # Only non-init commands may demand an existing ceph/cephadm
    # install: `init` is what downloads and installs cephadm, so the
    # old unconditional check made bootstrapping a fresh host
    # impossible (chicken-and-egg). Cluster-touching commands also get
    # an early reachability check via check_ceph_status (previously
    # defined but never called).
    case "$cmd" in
        add-node|remove-node|add-disk|handle-failed-disk|optimize)
            check_command ceph
            check_command cephadm
            check_ceph_status
            ;;
        init)
            check_command curl
            ;;
    esac

    case "$cmd" in
        "init")
            if [ $# -ne 5 ]; then
                log_error "用法: $0 init <bootstrap_node> <mon_ip> <public_network> <cluster_network>"
                exit 1
            fi
            init_cluster "$2" "$3" "$4" "$5"
            ;;
        "add-node")
            if [ $# -ne 4 ]; then
                log_error "用法: $0 add-node <node_name> <node_ip> <roles>"
                exit 1
            fi
            add_node "$2" "$3" "$4"
            ;;
        "remove-node")
            if [ $# -ne 2 ]; then
                log_error "用法: $0 remove-node <node_name>"
                exit 1
            fi
            remove_node "$2"
            ;;
        "add-disk")
            if [ $# -ne 3 ]; then
                log_error "用法: $0 add-disk <node_name> <disk_path>"
                exit 1
            fi
            add_disk "$2" "$3"
            ;;
        "handle-failed-disk")
            if [ $# -ne 3 ]; then
                log_error "用法: $0 handle-failed-disk <node_name> <disk_path>"
                exit 1
            fi
            handle_failed_disk "$2" "$3"
            ;;
        "optimize")
            optimize_cluster
            ;;
        *)
            log_error "用法: $0 {init|add-node|remove-node|add-disk|handle-failed-disk|optimize}"
            exit 1
            ;;
    esac
}

# Entry point: dispatch to main with all CLI arguments.
main "$@" 