#!/bin/bash
#
# Install big-data components (Kafka 2.8.1 + Spark 3.2.1) under /opt/bigdata,
# start Zookeeper/Kafka, create the "orders" topic, and emit a restart script.
# Requires: sudo privileges and network access to archive.apache.org.

# Fail a pipeline if any stage fails. Full `set -eu` is deliberately not used
# here: later steps (sourcing ~/.bashrc, best-effort service starts) are not
# safe under blanket -e/-u, so critical commands are checked explicitly instead.
set -o pipefail

echo "=== 开始安装大数据组件 ==="

# Create the application directory and work from there; abort if we cannot,
# otherwise the downloads/extractions below would land in the wrong directory.
sudo mkdir -p /opt/bigdata
cd /opt/bigdata || { echo "ERROR: cannot cd to /opt/bigdata" >&2; exit 1; }

# 1. Install Kafka (idempotent: skipped when the directory already exists).
echo "安装Kafka..."
kafka_dist="kafka_2.13-2.8.1"
if [ ! -d "$kafka_dist" ]; then
    # Kafka 2.8.1 is an archived release: downloads.apache.org serves only
    # current versions, so it must be fetched from archive.apache.org.
    wget "https://archive.apache.org/dist/kafka/2.8.1/${kafka_dist}.tgz" \
        || { echo "ERROR: Kafka download failed" >&2; exit 1; }
    tar -xzf "${kafka_dist}.tgz" \
        || { echo "ERROR: Kafka extraction failed" >&2; exit 1; }
    # NOTE(review): target user 'ubuntu' is hard-coded — confirm it matches
    # the account that will run the services.
    sudo chown -R ubuntu:ubuntu "$kafka_dist"
fi

# 2. Install Spark (idempotent: skipped when the directory already exists).
echo "安装Spark..."
spark_dist="spark-3.2.1-bin-hadoop3.2"
if [ ! -d "$spark_dist" ]; then
    # Spark 3.2.1 is an archived release: downloads.apache.org serves only
    # current versions, so it must be fetched from archive.apache.org.
    wget "https://archive.apache.org/dist/spark/spark-3.2.1/${spark_dist}.tgz" \
        || { echo "ERROR: Spark download failed" >&2; exit 1; }
    tar -xzf "${spark_dist}.tgz" \
        || { echo "ERROR: Spark extraction failed" >&2; exit 1; }
    # NOTE(review): target user 'ubuntu' is hard-coded — confirm it matches
    # the account that will run the services.
    sudo chown -R ubuntu:ubuntu "$spark_dist"
fi

# 3. Configure environment variables.
echo "配置环境变量..."
# Guard the append: an unconditional '>>' would duplicate this block in
# ~/.bashrc every time the installer is re-run.
if ! grep -q 'KAFKA_HOME=/opt/bigdata/kafka_2.13-2.8.1' ~/.bashrc 2>/dev/null; then
cat >> ~/.bashrc << 'EOF'
export KAFKA_HOME=/opt/bigdata/kafka_2.13-2.8.1
export SPARK_HOME=/opt/bigdata/spark-3.2.1-bin-hadoop3.2
export PATH=$PATH:$KAFKA_HOME/bin:$SPARK_HOME/bin
EOF
fi

# Export directly for the remainder of this script. Sourcing ~/.bashrc from a
# non-interactive shell is typically a no-op (most .bashrc files return early
# when not interactive), which would leave KAFKA_HOME unset below.
export KAFKA_HOME=/opt/bigdata/kafka_2.13-2.8.1
export SPARK_HOME=/opt/bigdata/spark-3.2.1-bin-hadoop3.2
export PATH="$PATH:$KAFKA_HOME/bin:$SPARK_HOME/bin"

# 4. Start Zookeeper.
echo "启动Zookeeper..."
# Fall back to the install path in case the environment-variable step above
# did not leave KAFKA_HOME set in this shell.
KAFKA_HOME="${KAFKA_HOME:-/opt/bigdata/kafka_2.13-2.8.1}"
# Quote and check the cd: an unset/empty KAFKA_HOME would otherwise silently
# change to $HOME and the service starts below would fail confusingly.
cd "$KAFKA_HOME" || { echo "ERROR: cannot cd to $KAFKA_HOME" >&2; exit 1; }
nohup bin/zookeeper-server-start.sh config/zookeeper.properties > /tmp/zookeeper.log 2>&1 &
sleep 5

# 5. Start Kafka (after giving Zookeeper a few seconds to come up).
echo "启动Kafka..."
nohup bin/kafka-server-start.sh config/server.properties > /tmp/kafka.log 2>&1 &
sleep 10

# 6. Create the topic. --if-not-exists keeps re-runs of this installer from
# failing once the topic already exists.
echo "创建Kafka Topic..."
bin/kafka-topics.sh --create --if-not-exists --topic orders --bootstrap-server localhost:9092 --partitions 3 --replication-factor 1 \
    || { echo "ERROR: topic creation failed (is Kafka up? see /tmp/kafka.log)" >&2; exit 1; }

# 7. Write a standalone restart script for subsequent boots.
# The quoted 'EOF' delimiter keeps $KAFKA_HOME etc. literal in the output file.
cat > /opt/bigdata/start_services.sh << 'EOF'
#!/bin/bash
# Restart Zookeeper and Kafka installed under /opt/bigdata.
set -o pipefail
export KAFKA_HOME=/opt/bigdata/kafka_2.13-2.8.1
export SPARK_HOME=/opt/bigdata/spark-3.2.1-bin-hadoop3.2

echo "启动Zookeeper..."
# Quote and check the cd so a missing install fails loudly instead of
# launching the daemons from the wrong directory.
cd "$KAFKA_HOME" || { echo "ERROR: cannot cd to $KAFKA_HOME" >&2; exit 1; }
nohup bin/zookeeper-server-start.sh config/zookeeper.properties > /tmp/zookeeper.log 2>&1 &

sleep 5
echo "启动Kafka..."
nohup bin/kafka-server-start.sh config/server.properties > /tmp/kafka.log 2>&1 &

sleep 10
echo "检查Topic..."
bin/kafka-topics.sh --list --bootstrap-server localhost:9092

echo "所有服务已启动！"
EOF

chmod +x /opt/bigdata/start_services.sh

# Print the installation summary in one pass.
printf '%s\n' \
    "=== 大数据组件安装完成 ===" \
    "Kafka控制台: /opt/bigdata/kafka_2.13-2.8.1" \
    "Spark控制台: /opt/bigdata/spark-3.2.1-bin-hadoop3.2" \
    "启动脚本: /opt/bigdata/start_services.sh"