#!/usr/bin/env bash

###############################################################
# File Name: install_kafka_kraft.sh
# Version: V1.0
# Author: Brian Hsiung
# Created Time : 2025-01-21 9:37:53
# Description: Kafka部署脚本
##############################################################

# Debug tracing (-e: exit on error, -u: treat unset variables as errors,
# -x: trace mode — print each command to stderr before running it)
# set -eux
# set -e

# OS account the Kafka service runs as
declare -r KAFKA_USER=kafka
# Kafka client (broker) listener port
declare -r KAFKA_CLIENT_PORT=9092
# Kafka KRaft controller listener port
declare -r KAFKA_CONTROLLER_PORT=9093
# Topic message retention: 3 days
declare -r LOG_KEEP_HOURS=72
# JVM heap size, applied to both -Xmx and -Xms (Kafka's own default is 1G)
declare -r KAFKA_HEAP_OPTS=2G
# Kafka version; adjust as needed (e.g. 2.13-3.7.0, 2.13-3.7.2, 2.13-3.8.1, 2.13-3.9.0)
declare -r KAFKA_VERSION=2.13-3.6.2
# Kafka installation directory
declare -r KAFKA_HOME=/data/kafka
# Kafka topic/data directory (log.dirs)
declare -r KAFKA_LOGDIR="$KAFKA_HOME"/kafka_logs
# JDK installation directory
declare -r JDK_HOME=/usr/local/java/jdk8
# Aliyun OSS base URL used to download the JDK/Kafka packages
declare -r OSS_BASE_URL="https://brianhsiung.oss-cn-hangzhou.aliyuncs.com"
# IP address of the current host (first address reported by `hostname -I`)
HOST_IP=$(hostname -I|awk '{print $1}')
# Kafka cluster IP list; adjust as needed (use an odd node count: 3, 5, 7)
KAFKA_IPS=(172.26.85.157 172.26.85.158 172.26.85.159)

# Resolve the script's own directory and run everything from there
WORK_DIR="$(cd "$(dirname "$0")" && pwd)"
cd "$WORK_DIR" || exit

### Logging helpers: colorized, level-tagged output.
### INFO/SUCCESS go to stdout; WARNING/ERROR go to stderr so they are not
### swallowed when stdout is captured or redirected. log_error aborts the script.
log_info()    { echo -e "\033[34m[INFO] $*\033[0m"; }
log_warn()    { echo -e "\033[33m[WARNING] $*\033[0m" >&2; }
log_success() { echo -e "\033[32m[SUCCESS] $*\033[0m"; }
log_error()   { echo -e "\033[31m[ERROR] $*\033[0m" >&2; exit 1; }

##############################################
# Verify that required external commands are available.
# Globals:  none
# Outputs:  progress via log_info; aborts via log_error when anything is missing
# Returns:  0 when all dependencies are present
##############################################
check_dependency() {

    local -a required_cmds=("wget" "tar" "groupadd" "useradd")
    local -a missing=()
    local cmd

    log_info "检查依赖命令: ${required_cmds[*]}"
    for cmd in "${required_cmds[@]}"; do
        if ! command -v "$cmd" &>/dev/null; then
            missing+=("$cmd")
        fi
    done

    # Explicit if: the previous bare `(( ... )) && log_error` made the
    # function return status 1 on the SUCCESS path (nothing missing),
    # which would abort the script if `set -e` were ever enabled.
    if ((${#missing[@]})); then
        log_error "缺失依赖命令: ${missing[*]}"
    fi
    return 0
}

##############################################
# Ensure Kafka's listener ports are free and, when firewalld is active,
# open them permanently.
# Globals:  KAFKA_CLIENT_PORT, KAFKA_CONTROLLER_PORT (read)
# Returns:  0 when both ports are free; aborts via log_error otherwise
##############################################
check_ports() {

    local -a ports=("$KAFKA_CLIENT_PORT" "$KAFKA_CONTROLLER_PORT")
    local -a used_ports=()
    local port

    for port in "${ports[@]}"; do
        # grep -q is already silent; the old `&>/dev/null` was redundant
        if ss -lntu | grep -q ":$port "; then
            used_ports+=("$port")
        fi
    done

    # Explicit if: the previous bare `(( ... )) && log_error` made the
    # function return status 1 when no port was occupied.
    if ((${#used_ports[@]})); then
        log_error "端口 ${used_ports[*]} 被占用"
    fi

    if systemctl is-active firewalld &>/dev/null; then
        for port in "${ports[@]}"; do
            firewall-cmd --zone=public --add-port="$port"/tcp --permanent
        done
        firewall-cmd --reload
        log_info "防火墙已开启，开放端口 $KAFKA_CLIENT_PORT $KAFKA_CONTROLLER_PORT"
    fi

    return 0
}

#############################################
# Walk every parent directory of $1 up to (but excluding) "/" and
# ensure each is mode 755, skipping protected system paths.
# Arguments: $1 - target directory (must already exist)
#############################################
set_parent_permissions() {
    ### Some hardened (MLPS level-3) servers create directories as 700 and files
    ### as 600 by default, which stops the service from starting; so traverse the
    ### parent directories and set each of them to 755.
    # Resolve symlinks to the real path
    local target_dir
    target_dir=$(realpath "$1")
    # Start from the immediate parent
    local current_dir
    current_dir=$(dirname "$target_dir")

    # Sanity check so the walk below cannot spin on a bogus path
    [ -d "$target_dir" ] || log_error "目录不存在 [$target_dir]"

    # System directories that must never be chmod'ed by this script
    local -a exclude_dirs
    exclude_dirs=("/usr" "/usr/local" "/bin" "/sbin" "/lib" "/etc" "/dev" "/var" "/root" "/usr/share")

    while [[ "$current_dir" != "/" ]]; do

        # Skip protected system directories
        if printf '%s\n' "${exclude_dirs[@]}" | grep -qx "$current_dir"; then
            log_info "跳过系统保护目录: $current_dir"
            current_dir=$(dirname "$current_dir")
            continue
        fi

        # Skip directories that do not exist
        if [[ ! -d "$current_dir" ]]; then
            log_warn "警告: 目录不存在 [$current_dir]"
            current_dir=$(dirname "$current_dir")
            continue
        fi

        # Read the directory's current octal permission bits
        local perm_oct
        perm_oct=$(stat -c '%a' "$current_dir")
        log_info "目录 $current_dir 的原始权限为: $perm_oct"

        if [[ "$perm_oct" != "755" ]]; then
            log_info "正在设置权限: [755] -> $current_dir"
            if ! chmod 755 "$current_dir"; then
                log_error "错误: 无法设置目录权限 [$current_dir]"
            fi
        else
            log_success "权限正确 [755]: $current_dir"
        fi

        # Move up one level
        current_dir=$(dirname "$current_dir")
    done
}

##############################################
# Prepare the host: sanity checks plus creation of the Kafka
# service group and user when absent.
##############################################
init() {

    # We must know our own IP and be root before touching anything.
    if [[ -z "$HOST_IP" ]]; then
        log_error "请设置主机IP"
    fi
    if [[ "$(id -u)" -ne 0 ]]; then
        log_error "请使用root账号执行此脚本"
    fi

    # Verify required commands and free ports
    check_dependency
    check_ports

    # Create the service group/user only when they do not exist yet
    getent group "$KAFKA_USER" &>/dev/null || {
        groupadd "$KAFKA_USER"
        log_info "添加用户组 $KAFKA_USER"
    }

    getent passwd "$KAFKA_USER" &>/dev/null || {
        useradd -g "$KAFKA_USER" "$KAFKA_USER"
        log_info "添加用户 $KAFKA_USER"
    }

}

##############################################
# Download (when OSS is reachable) and install the JDK into JDK_HOME.
# Globals: JDK_HOME, OSS_BASE_URL (read); access_mode (read, may be unset)
# Aborts via log_error on unsupported arch, download or install failure.
##############################################
install_jdk() {

    log_info "开始安装JDK"

    local jdk_pkg
    local arch_type
    # `arch` was previously assigned without `local`, leaking a global
    local arch

    arch=$(uname -m)
    case "$arch" in
        aarch64|arm64)
            arch_type=aarch64 ;;
        amd64|x86_64)
            arch_type=x64 ;;
        *)
            log_error "不支持的架构: $arch" ;;
    esac

    jdk_pkg="OpenJDK8U-jdk_${arch_type}_linux_hotspot_8u442b06.tar.gz"

    # Download only when external access was detected and no local copy exists.
    # ${access_mode:-} stays safe if the variable is unset (e.g. under set -u).
    if [ "${access_mode:-}" = "external" ] && [ ! -f "$jdk_pkg" ] ; then
        log_info "通过阿里云OSS下载 $jdk_pkg"
        wget -q "$OSS_BASE_URL/deploy/jdk/$jdk_pkg" || log_error "下载JDK失败"
    fi

    [ -f "$jdk_pkg" ] || log_error "此目录下不存在 $jdk_pkg"

    # Extract into a fresh directory, or into an existing-but-empty one
    if [ ! -d "$JDK_HOME" ]; then
        mkdir -p "$JDK_HOME"
        tar -zxf "$jdk_pkg" -C "$JDK_HOME" --strip-components=1 --no-same-owner
        find "$JDK_HOME/bin" -type f -exec chmod 755 {} \;
    elif [ ! "$(ls -A "$JDK_HOME")" ]; then
        log_info "如果目录存在且为空"
        tar -zxf "$jdk_pkg" -C "$JDK_HOME" --strip-components=1 --no-same-owner
        find "$JDK_HOME/bin" -type f -exec chmod 755 {} \;
    fi

    if [ ! -f "$JDK_HOME/bin/java" ] || [ ! -x "$JDK_HOME/bin/java" ]; then
        log_error "Java可执行文件不存在或无执行权限，JDK安装失败"
    fi

    # Some servers create directories as 700 by default; open up to 755
    log_info "设置目录权限"
    chmod 755 "$JDK_HOME"
    set_parent_permissions "$JDK_HOME"

    log_success "JDK安装成功"
    "$JDK_HOME/bin/java" -version
}
##############################################
# Install Kafka under KAFKA_HOME, create the version-independent
# "kafka" symlink and generate the KRaft server.properties for this node.
# Globals: access_mode, KAFKA_VERSION, KAFKA_HOME, KAFKA_LOGDIR, KAFKA_IPS,
#          HOST_IP, KAFKA_CLIENT_PORT, KAFKA_CONTROLLER_PORT,
#          KAFKA_HEAP_OPTS, LOG_KEEP_HOURS, KAFKA_USER (read);
#          NODE_ID, CONTROLLER_QUORUM_VOTERS (written)
##############################################
install_kafka() {

    log_info "开始安装Kafka"

    local kafka_pkg="kafka_$KAFKA_VERSION.tgz"

    # Download Kafka only when the OSS endpoint was reachable
    if [ "$access_mode" = "external" ] && [ ! -f "$kafka_pkg" ]; then
        log_info "通过阿里云OSS下载 $kafka_pkg"
        wget -q "$OSS_BASE_URL/deploy/kafka/$kafka_pkg" || log_error "下载 $kafka_pkg 失败"
    fi

    # The package must exist locally at this point
    [ -f "$kafka_pkg" ] || log_error "当前目录无 kafka_$KAFKA_VERSION.tgz"

    # Create the install and data directories
    if [ ! -d "$KAFKA_HOME" ] ; then
        log_info "创建目录 $KAFKA_HOME"
        mkdir -p "$KAFKA_HOME"
    fi
    if [ ! -d "$KAFKA_LOGDIR" ]; then
        log_info "创建目录 $KAFKA_LOGDIR"
        mkdir -p "$KAFKA_LOGDIR"
    elif [ "$(ls -A "$KAFKA_LOGDIR")" ]; then
        # Refuse a non-empty data directory (likely a previous install)
        log_error "Kafka目录 $KAFKA_LOGDIR 已经存在且不为空"
    fi

    # Open up parent directories to 755 so the kafka user can traverse them
    set_parent_permissions "$KAFKA_HOME"

    log_info "解压 $kafka_pkg 到 $KAFKA_HOME"
    tar -zxf "$kafka_pkg" -C "$KAFKA_HOME" --no-same-owner || log_error "解压 $kafka_pkg 失败"
    cd "$KAFKA_HOME" || exit

    # Maintain a version-independent "kafka" symlink to the extracted release
    if [ -L "kafka" ]; then
        log_warn "已经存在软链接 kafka, 更新软连接..."
        ln -sfn kafka_"$KAFKA_VERSION" kafka || log_error "软链接更新失败"
    elif [ -e "kafka" ]; then
        log_error "当前目录下存在名为 kafka 的文件或目录"
    else
        log_info "创建软链接 kafka"
        ln -s kafka_"$KAFKA_VERSION" kafka || log_error "软链接创建失败"
    fi

    # Map each cluster IP to its 1-based index, which becomes the node.id
    declare -A IP_INDEX_MAP
    for i in "${!KAFKA_IPS[@]}"; do
        IP_INDEX_MAP["${KAFKA_IPS[i]}"]=$((i + 1))  # key: IP, value: NODE_ID
    done

    if [[ -n "${IP_INDEX_MAP[$HOST_IP]}" ]]; then
        NODE_ID=${IP_INDEX_MAP[$HOST_IP]}
        log_info "设置node.id，当前节点node.id的值为: $NODE_ID"
    else
        log_error "错误：当前主机IP $HOST_IP 不在KAFKA_IPS列表中"
    fi

    [ -z "$NODE_ID" ] && log_error "未设置node.id"

    # Build the controller.quorum.voters string: "1@ip1:port,2@ip2:port,..."
    CONTROLLER_QUORUM_VOTERS=""
    for (( i=0; i < "${#KAFKA_IPS[@]}"; i++ )); do
        voter_id=$((i + 1))
        ip="${KAFKA_IPS[i]}"
        CONTROLLER_QUORUM_VOTERS+="$voter_id@$ip:$KAFKA_CONTROLLER_PORT,"
    done
    # Drop the trailing comma
    CONTROLLER_QUORUM_VOTERS=${CONTROLLER_QUORUM_VOTERS%,}

    # controller.quorum.voters=1@${KAFKA_IPS[0]}:$KAFKA_CONTROLLER_PORT,2@${KAFKA_IPS[1]}:$KAFKA_CONTROLLER_PORT,3@${KAFKA_IPS[2]}:$KAFKA_CONTROLLER_PORT
# Write the combined broker+controller KRaft configuration
cat > "$KAFKA_HOME"/kafka/config/kraft/server.properties << EOF
process.roles=broker,controller
node.id=$NODE_ID
controller.quorum.voters=$CONTROLLER_QUORUM_VOTERS
listeners=PLAINTEXT://$HOST_IP:$KAFKA_CLIENT_PORT,CONTROLLER://$HOST_IP:$KAFKA_CONTROLLER_PORT
inter.broker.listener.name=PLAINTEXT
advertised.listeners=PLAINTEXT://$HOST_IP:$KAFKA_CLIENT_PORT
controller.listener.names=CONTROLLER
listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=$KAFKA_LOGDIR
num.partitions=6
default.replication.factor=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=2
log.retention.hours=$LOG_KEEP_HOURS
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
leader.imbalance.per.broker.percentage=10
leader.imbalance.check.interval.seconds=300
EOF

    # Set the JVM heap (-Xmx/-Xms) in the start script
    sed -i "s/KAFKA_HEAP_OPTS=.*/KAFKA_HEAP_OPTS=\"-Xmx$KAFKA_HEAP_OPTS -Xms$KAFKA_HEAP_OPTS\"/g" "$KAFKA_HOME"/kafka/bin/kafka-server-start.sh
    # Pin JAVA_HOME in kafka-run-class.sh.
    # NOTE(review): both the path and the insertion line number (16) are
    # hard-coded; presumably this should track $JDK_HOME — verify before
    # changing JDK_HOME or the Kafka version.
    if ! grep 'JAVA_HOME=/usr/local/java/jdk8' "$KAFKA_HOME"/kafka/bin/kafka-run-class.sh >/dev/null 2>&1; then
        log_info "在 $KAFKA_HOME/kafka/bin/kafka-run-class.sh 中设置JDK路径"
        sed -i "16i JAVA_HOME=/usr/local/java/jdk8" "$KAFKA_HOME"/kafka/bin/kafka-run-class.sh
    fi
    # Hand the whole tree to the kafka service user
    chown "$KAFKA_USER":"$KAFKA_USER" -R "$KAFKA_HOME"
}

# delete_remote_file() {
#     local ip=$1
#     log_info "正在检查并删除远程服务器 ($ip) 上的文件 $WORK_DIR/uuid"
#     # shellcheck disable=SC2029
#     if ssh root@"$ip" "if [ -f '$WORK_DIR/uuid' ]; then rm -f '$WORK_DIR/uuid'; fi" 2> /dev/null; then
#         log_success "成功删除远程服务器 ($ip) 上的文件 $WORK_DIR/uuid"
#     else
#         log_error "删除远程服务器 ($ip) 上的文件 $WORK_DIR/uuid 失败"
#     fi
# }

##############################################
# Generate one cluster UUID on the first node, distribute it to the
# other nodes via scp, then format the local KRaft storage with it.
# NOTE(review): non-first nodes poll forever until the uuid file
# arrives — there is no timeout; confirm this is acceptable.
##############################################
random_uuid() {

    log_info "将在第一台服务器 ${KAFKA_IPS[0]} 生成uuid"
    if [ "${KAFKA_IPS[0]}" = "$HOST_IP" ]; then
        "$KAFKA_HOME"/kafka/bin/kafka-storage.sh random-uuid > "$WORK_DIR"/uuid
        for (( i=1; i < ${#KAFKA_IPS[@]}; i++ )); do
            # Delete any stale uuid file on the remote server first (a leftover
            # from a failed install would break cluster association)
            #delete_remote_file "${KAFKA_IPS[i]}"
            log_info "拷贝 uuid 至 ${KAFKA_IPS[i]}:$WORK_DIR 目录"
            scp "$WORK_DIR"/uuid root@"${KAFKA_IPS[i]}":"$WORK_DIR"
        done
    fi

    # Poll until the shared uuid file exists, then format storage once
    while :
    do
        echo "等待生成统一uuid..."
        sleep 5
        if [ -f "$WORK_DIR"/uuid ]; then
            KAFKA_UUID=$(cat "$WORK_DIR"/uuid)
            log_info "生成的uuid为: $KAFKA_UUID"
            #su - "$KAFKA_USER" -c "$KAFKA_HOME/bin/kafka-storage.sh format -t $KAFKA_UUID -c $KAFKA_HOME/config/kraft/server.properties"
            "$KAFKA_HOME"/kafka/bin/kafka-storage.sh format -t "$KAFKA_UUID" -c "$KAFKA_HOME"/kafka/config/kraft/server.properties
            break
        fi
    done

    # Formatting runs as root; give ownership back to the service user
    chown "$KAFKA_USER":"$KAFKA_USER" -R "$KAFKA_HOME"
}

##############################################
# Write a SysV init script for Kafka, register it with chkconfig
# and start the service.
# NOTE(review): relies on legacy /etc/rc.d/init.d + chkconfig/service —
# presumably an EL-style distro; confirm on systemd-only hosts.
##############################################
install_service() {
# Heredoc is deliberately unindented; its body is written to the init
# script verbatim ($KAFKA_HOME/$KAFKA_USER expand now, \$ escapes defer
# expansion to service runtime).
cat > /etc/rc.d/init.d/kafka << EOF
#!/bin/bash
#chkconfig:2345 25 75
#description:kafka
#processname:kafka

KAFKA_HOME="$KAFKA_HOME"

case \$1 in
    start)
        su - "$KAFKA_USER" -c "\$KAFKA_HOME/kafka/bin/kafka-server-start.sh -daemon \$KAFKA_HOME/kafka/config/kraft/server.properties"
        ;;
    stop)
        su - "$KAFKA_USER" -c "\$KAFKA_HOME/kafka/bin/kafka-server-stop.sh"
        ;;
    status)
        jps
        ;;
    restart)
        \$0 stop
        \$0 start
        ;;
    *)
        echo "require start|stop|status|restart"
        ;;
esac
EOF

    chmod +x /etc/rc.d/init.d/kafka
    chkconfig --add kafka
    chkconfig kafka on
    log_info "启动kafka服务"
    service kafka start
}

##############################################
# Entry point: detect connectivity, then run every install stage.
# Globals: OSS_BASE_URL, KAFKA_VERSION, JDK_HOME (read); access_mode (written)
##############################################
main() {

    # Probe the OSS endpoint to decide between online download and
    # offline (pre-staged package) installation. Default to offline so
    # access_mode is never unset for the install_* functions.
    # Fix: --connect-timeout must not exceed --max-time — the old
    # "--max-time 5 --connect-timeout 10" made the 10s connect timeout
    # unreachable behind the 5s overall deadline.
    access_mode="local"
    if curl -sIfo /dev/null --connect-timeout 5 --max-time 10 "$OSS_BASE_URL"/health; then
        access_mode="external"
    fi
    log_info "开始安装Kafka集群 (版本: ${KAFKA_VERSION})"
    init
    # Skip the JDK stage when a java binary is already in place
    if [ -d "$JDK_HOME" ] && [ -f "$JDK_HOME/bin/java" ]; then
        log_info "JDK已安装，跳过安装步骤, 版本如下："
        "$JDK_HOME/bin/java" -version
    else
        install_jdk
    fi
    install_kafka
    random_uuid
    install_service
    log_success "Kafka集群安装完成"
}

# Forward the script's arguments to main (previously dropped)
main "$@"


# [Unit]
# Description=Apache Kafka Server
# Documentation=http://kafka.apache.org/documentation.html
# After=network.target remote-fs.target

# [Service]
# Type=simple
# User=kafka  # replace with the actual Kafka service account (this script uses KAFKA_USER=kafka)
# ExecStart=/path/to/kafka/bin/kafka-server-start.sh /path/to/kafka/config/kraft/server.properties
# ExecStop=/path/to/kafka/bin/kafka-server-stop.sh
# Restart=on-abnormal
# SuccessExitStatus=143

# [Install]
# WantedBy=multi-user.target