#!/bin/bash
#
# Ceph cluster tuning script: applies system (limits/sysctl), OSD, RBD,
# CephFS, RGW, network, CRUSH-map and pool optimizations. Must be run as
# root on a node with admin access to the cluster.
#
# Fail loudly instead of silently continuing a half-applied tuning run:
# exit on command errors, unset variables, and mid-pipeline failures.
set -euo pipefail

# ANSI color codes used by the log helpers (constants, hence readonly).
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'

# 日志函数
# Print an informational message to stdout, colored green.
log_info() {
    printf '%b\n' "${GREEN}[INFO] $1${NC}"
}

# Print a warning message to stdout, colored yellow.
log_warn() {
    printf '%b\n' "${YELLOW}[WARN] $1${NC}"
}

# Print an error message to stdout, colored red.
log_error() {
    printf '%b\n' "${RED}[ERROR] $1${NC}"
}

# 检查 Ceph 服务状态
# Verify the cluster answers `ceph status`; otherwise log and abort the
# whole script (exit, not return — nothing else is safe to run).
check_ceph_status() {
    ceph status &> /dev/null && return 0
    log_error "Ceph 服务未运行"
    exit 1
}

# 优化系统参数
# Write Ceph-friendly ulimits and sysctl settings, then apply the sysctl
# file. Requires root (writes under /etc). Returns non-zero if the kernel
# parameters fail to apply (previously the sysctl exit status was ignored
# and the function always reported success).
optimize_system_params() {
    log_info "开始优化系统参数..."

    # Raise per-process file-descriptor and process limits for Ceph daemons.
    # Quoted delimiter: the content is literal, no expansion intended.
    cat > /etc/security/limits.d/ceph.conf << 'EOF'
* soft nofile 65535
* hard nofile 65535
* soft nproc 65535
* hard nproc 65535
EOF

    # Kernel parameters: network buffers/timeouts, async-IO and fd limits,
    # and dirty-page writeback tuning.
    # NOTE(review): fs.file-max = 65535 is very low for a Ceph node (it is a
    # system-wide limit, commonly set to millions) — verify intent.
    cat > /etc/sysctl.d/ceph.conf << 'EOF'
# 网络优化
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 87380 16777216
net.ipv4.tcp_mem = 8388608 8388608 16777216
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30

# 文件系统优化
fs.file-max = 65535
fs.aio-max-nr = 1048576

# 内存优化
vm.swappiness = 10
vm.dirty_ratio = 40
vm.dirty_background_ratio = 10
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
EOF

    # Apply immediately and propagate failure instead of ignoring it.
    if ! sysctl -p /etc/sysctl.d/ceph.conf; then
        log_error "内核参数应用失败"
        return 1
    fi

    log_info "系统参数优化完成"
}

# 优化 OSD 参数
# Apply a fixed set of config options to every OSD in the cluster.
# Returns non-zero if the OSD list cannot be fetched.
optimize_osd_params() {
    log_info "开始优化 OSD 参数..."

    # Split declaration from assignment so a failing `ceph osd ls` is not
    # masked by `local`'s own (always-zero) exit status.
    local osds
    osds=$(ceph osd ls) || return 1

    # "key value" pairs applied to each OSD, in the original order.
    # NOTE(review): filestore_* options are no-ops on BlueStore OSDs, and
    # osd_op_threads/osd_disk_threads were removed in recent Ceph releases —
    # verify against the cluster's Ceph version.
    local -a settings=(
        'bluestore_cache_size 4G'
        'bluestore_cache_kv_ratio 0.4'
        'bluestore_cache_meta_ratio 0.4'
        'bluestore_cache_data_ratio 0.2'
        'osd_op_threads 8'
        'osd_disk_threads 4'
        'osd_max_write_size 512'
        'osd_client_message_size_cap 1G'
        'osd_deep_scrub_stride 131072'
        'osd_map_cache_size 1024'
        'osd_map_cache_bl_size 128'
        'filestore_merge_threshold 40'
        'filestore_split_multiple 8'
        'osd_target_transaction_size 30'
    )

    local osd entry key value
    for osd in $osds; do   # OSD ids are plain integers, one per word
        log_info "优化 OSD.$osd 参数..."
        for entry in "${settings[@]}"; do
            read -r key value <<< "$entry"
            ceph config set "osd.$osd" "$key" "$value"
        done
    done

    log_info "OSD 参数优化完成"
}

# 优化 RBD 参数
# Enable and size the client-side RBD cache. Same options, values and
# order as before, driven from a small table instead of repeated calls.
optimize_rbd_params() {
    log_info "开始优化 RBD 参数..."

    local option value
    while read -r option value; do
        ceph config set client "$option" "$value"
    done <<'EOF'
rbd_cache true
rbd_cache_size 1G
rbd_cache_max_dirty 256M
rbd_cache_target_dirty 192M
rbd_cache_writethrough_until_flush true
EOF

    log_info "RBD 参数优化完成"
}

# 优化 CephFS 参数
# Tune MDS cache sizing for CephFS. Identical options, values and order
# to the previous implementation, expressed as a data table.
optimize_cephfs_params() {
    log_info "开始优化 CephFS 参数..."

    local key val
    while read -r key val; do
        ceph config set mds "$key" "$val"
    done <<'EOF'
mds_cache_memory_limit 4G
mds_cache_reservation 0.05
mds_health_cache_threshold 1.5
EOF

    log_info "CephFS 参数优化完成"
}

# 优化 RGW 参数
# Tune one RGW instance. The instance name was hard-coded as "rgw1";
# it is now an optional first argument defaulting to "rgw1", so existing
# callers are unaffected.
#   $1 - (optional) RGW instance name, default "rgw1"
optimize_rgw_params() {
    local instance=${1:-rgw1}
    local target="client.rgw.${instance}"

    log_info "开始优化 RGW 参数..."

    # Thread / RADOS-handle pool sizing.
    # NOTE(review): rgw_num_rados_handles is deprecated/removed in newer
    # Ceph releases — verify against the cluster's version.
    ceph config set "$target" rgw_num_rados_handles 512
    ceph config set "$target" rgw_thread_pool_size 512

    # Object chunk and stripe sizes.
    ceph config set "$target" rgw_max_chunk_size 4M
    ceph config set "$target" rgw_obj_stripe_size 4M

    log_info "RGW 参数优化完成"
}

# 优化网络参数
# Raise messenger TCP read/write timeouts and the dispatch throttle on
# the global config section — same settings and order as before.
optimize_network_params() {
    log_info "开始优化网络参数..."

    local -r section=global

    # Messenger TCP timeouts (seconds).
    ceph config set "$section" ms_tcp_read_timeout 900
    ceph config set "$section" ms_tcp_write_timeout 900

    # In-flight message throttle (bytes).
    ceph config set "$section" ms_dispatch_throttle_bytes 104857600

    log_info "网络参数优化完成"
}

# 优化 CRUSH map
# Decompile the CRUSH map, insert a host-level chooseleaf rule after each
# "step take default", recompile and apply it.
# Fixes vs. previous version: temp files now live in a private mktemp dir
# (the old fixed /tmp names were clobber-able and a symlink-attack risk),
# and every step is checked so a failed decompile/compile never leads to
# applying a truncated or unmodified map.
# NOTE(review): the sed edit appends the chooseleaf/emit steps on EVERY
# run — it is not idempotent and repeated runs will duplicate rule steps;
# verify this is intended before scheduling this script.
optimize_crush_map() {
    log_info "开始优化 CRUSH map..."

    local tmpdir
    tmpdir=$(mktemp -d) || { log_error "创建临时目录失败"; return 1; }

    local map="$tmpdir/crush.map"
    local txt="$tmpdir/crush.map.txt"
    local newmap="$tmpdir/crush.map.new"

    if ceph osd getcrushmap > "$map" \
        && crushtool -d "$map" -o "$txt" \
        && sed -i 's/step take default/step take default\nstep chooseleaf firstn 0 type host\nstep emit/' "$txt" \
        && crushtool -c "$txt" -o "$newmap" \
        && ceph osd setcrushmap -i "$newmap"; then
        rm -rf -- "$tmpdir"
        log_info "CRUSH map 优化完成"
    else
        rm -rf -- "$tmpdir"
        log_error "CRUSH map 优化失败"
        return 1
    fi
}

# 优化存储池参数
# Apply PG, compression and HitSet settings to every pool.
# Fixes vs. previous version: the pool list's exit status is no longer
# masked by `local`, pool names are read line-by-line (surviving unusual
# names instead of word-splitting), and every expansion is quoted.
optimize_pool_params() {
    log_info "开始优化存储池参数..."

    local pools
    pools=$(ceph osd pool ls) || return 1

    local pool
    while IFS= read -r pool; do
        [ -n "$pool" ] || continue
        log_info "优化存储池 $pool 参数..."

        # NOTE(review): forcing pg_num/pgp_num to 128 on every pool ignores
        # pool size and usage — confirm against a PG calculator first.
        ceph osd pool set "$pool" pg_num 128
        ceph osd pool set "$pool" pgp_num 128

        # Enable snappy compression on all writes.
        ceph osd pool set "$pool" compression_algorithm snappy
        ceph osd pool set "$pool" compression_mode aggressive

        # HitSet tracking.
        # NOTE(review): hit_set_* settings normally apply to cache-tier
        # pools and may be rejected on regular pools — verify.
        ceph osd pool set "$pool" hit_set_type bloom
        ceph osd pool set "$pool" hit_set_count 4
        ceph osd pool set "$pool" hit_set_period 1200
        ceph osd pool set "$pool" hit_set_grade_decay_rate 20
        ceph osd pool set "$pool" hit_set_search_last_n 1
    done <<< "$pools"

    log_info "存储池参数优化完成"
}

# 主函数
# Entry point: verify the cluster is reachable, then run every tuning
# step in the same fixed order as before.
main() {
    check_ceph_status

    local step
    for step in \
        optimize_system_params \
        optimize_osd_params \
        optimize_rbd_params \
        optimize_cephfs_params \
        optimize_rgw_params \
        optimize_network_params \
        optimize_crush_map \
        optimize_pool_params; do
        "$step"
    done

    log_info "Ceph 集群优化完成"
}

# Run the entry point, forwarding all CLI arguments.
main "$@" 