#!/bin/bash
#
# Start the full big-data pipeline: Zookeeper -> Kafka -> topic setup ->
# Java producer -> Spark streaming consumer.
# Requires the Kafka and Spark distributions installed under /opt/bigdata.

# Safety: error on unset variables; a pipeline fails if any stage fails.
# NOTE: '-e' is deliberately omitted — pkill (cleanup below) returns
# non-zero when nothing matched, which would abort the whole script.
set -uo pipefail

echo "🚀 启动完整大数据系统"

# Locations of the Kafka and Spark distributions.
export KAFKA_HOME=/opt/bigdata/kafka_2.13-2.8.0
export SPARK_HOME=/opt/bigdata/spark-3.1.2-bin-hadoop3.2
export PATH="$PATH:$KAFKA_HOME/bin:$SPARK_HOME/bin"

# Kill any previously running Zookeeper/Kafka processes before restarting.
# '|| true' because pkill exits non-zero when no process matched — that is
# the normal first-boot case, not an error.
echo "清理现有进程..."
pkill -f zookeeper || true
pkill -f kafka || true

# Wait (up to 15s) for the old processes to actually terminate, instead of
# hoping a fixed 3-second sleep is long enough.
for _ in {1..15}; do
    pgrep -f 'zookeeper|kafka' > /dev/null || break
    sleep 1
done

# Start Zookeeper, then Kafka. Instead of fixed sleeps, poll each service's
# TCP port (via bash's /dev/tcp) with a bounded timeout so the script
# proceeds as soon as the service is actually accepting connections.
echo "1. 启动Zookeeper..."
cd "$KAFKA_HOME" || { echo "ERROR: cannot cd to KAFKA_HOME: $KAFKA_HOME" >&2; exit 1; }
nohup bin/zookeeper-server-start.sh config/zookeeper.properties > /tmp/zookeeper.log 2>&1 &

# Wait up to 30s for Zookeeper's client port (2181).
for _ in {1..30}; do
    if (echo > /dev/tcp/localhost/2181) 2>/dev/null; then break; fi
    sleep 1
done

echo "2. 启动Kafka..."
nohup bin/kafka-server-start.sh config/server.properties > /tmp/kafka.log 2>&1 &

# Wait up to 60s for the broker port (9092 — matches the bootstrap server
# used for topic creation below).
for _ in {1..60}; do
    if (echo > /dev/tcp/localhost/9092) 2>/dev/null; then break; fi
    sleep 1
done

# Create the 'orders' topic (idempotent thanks to --if-not-exists), then
# list topics to confirm the broker is reachable. Absolute tool paths are
# used so this step does not depend on the current working directory.
echo "3. 创建Kafka Topic..."
"$KAFKA_HOME/bin/kafka-topics.sh" --create --topic orders \
    --bootstrap-server localhost:9092 \
    --partitions 3 --replication-factor 1 --if-not-exists \
    || echo "WARN: topic creation failed — check /tmp/kafka.log" >&2

echo "4. 验证Topic创建..."
"$KAFKA_HOME/bin/kafka-topics.sh" --list --bootstrap-server localhost:9092

# Start the real-time order producer in the background.
echo "5. 启动实时数据生产者..."
# NOTE(review): assumes the Maven project (pom.xml, target/classes) lives
# in $HOME — confirm against the deployment layout.
cd ~ || exit 1

# Resolve the runtime classpath up front. Previously the mvn substitution
# ran invisibly inside the nohup command line, so a failure there silently
# produced a broken classpath.
producer_cp="target/classes:$(mvn dependency:build-classpath -Dmdep.outputFile=/dev/stdout -q)" \
    || echo "WARN: mvn classpath resolution failed — producer may not start" >&2

nohup java -cp "$producer_cp" \
    com.orderstats.producer.RealTimeOrderProducer > /tmp/producer.log 2>&1 &

sleep 5

# Start the Spark structured-streaming consumer in the background.
echo "6. 启动Spark实时处理..."
# BUG FIX: spark-3.1.2-bin-hadoop3.2 is built against Scala 2.12, so the
# Kafka connector must be the _2.12 artifact. The previous _2.13 suffix
# (apparently copied from the Kafka *broker's* Scala version in
# kafka_2.13-2.8.0) has no 3.1.2 release, so dependency resolution failed.
# NOTE(review): passing the 'target/classes' directory as the application
# resource is unusual — spark-submit normally takes a jar; confirm this
# works, or point it at the packaged jar from 'mvn package'.
nohup "$SPARK_HOME/bin/spark-submit" \
    --class com.orderstats.spark.RealTimeOrderConsumer \
    --master 'local[*]' \
    --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2 \
    target/classes > /tmp/spark_consumer.log 2>&1 &

sleep 10

# Final summary: completion banner, access info, log-file locations, and a
# live snapshot of the pipeline's processes. Quoted heredocs keep the text
# literal (no expansion) and produce exactly the same output as before.
cat <<'EOF'
================================
✅ 大数据系统启动完成！

📊 数据流: 生产者 → Kafka → Spark → 数据库 → Web界面
🔗 访问地址: http://106.75.5.247
👤 登录账号: admin / admin123

📝 日志文件:
  - Zookeeper: tail -f /tmp/zookeeper.log
  - Kafka: tail -f /tmp/kafka.log
  - 数据生产者: tail -f /tmp/producer.log
  - Spark处理: tail -f /tmp/spark_consumer.log

📈 查看进程状态:
EOF
ps aux | grep -E 'kafka|zookeeper|java.*RealTime|spark-submit' | grep -v grep

cat <<'EOF'

🎯 实时数据监控:
tail -f /tmp/producer.log | head -10
EOF