#!/bin/bash
# Launches the complete big-data order-statistics stack:
# Zookeeper -> Kafka -> topic creation -> Maven build -> producer -> Spark consumer.
# All component logs are collected under /tmp/bigdata_logs.

set -u  # fail fast on use of an unset variable (all PIDs/paths below are assigned before use)

echo "🚀 启动完整的大数据订单统计系统"
echo "================================"

# Host-specific install locations for Kafka and Spark.
export KAFKA_HOME=/opt/bigdata/kafka_2.13-2.8.1
export SPARK_HOME=/opt/bigdata/spark-3.2.1-bin-hadoop3.2
export PATH="$PATH:$KAFKA_HOME/bin:$SPARK_HOME/bin"

# Central log directory for every component started below; abort if it
# cannot be created, since all redirections depend on it.
mkdir -p /tmp/bigdata_logs || { echo "无法创建日志目录 /tmp/bigdata_logs" >&2; exit 1; }

echo "1. 启动Zookeeper..."
# Abort if the Kafka install directory is missing rather than launching the
# relative bin/ scripts from whatever the current directory happens to be.
cd "$KAFKA_HOME" || { echo "目录不存在: $KAFKA_HOME" >&2; exit 1; }
nohup bin/zookeeper-server-start.sh config/zookeeper.properties > /tmp/bigdata_logs/zookeeper.log 2>&1 &
ZOOKEEPER_PID=$!
# Give Zookeeper time to bind its port before Kafka tries to register with it.
sleep 8

echo "2. 启动Kafka..."
nohup bin/kafka-server-start.sh config/server.properties > /tmp/bigdata_logs/kafka.log 2>&1 &
KAFKA_PID=$!
# Broker start-up is slower; wait before issuing topic commands against it.
sleep 15

echo "3. 创建Kafka Topic..."
# Create the 'orders' topic (3 partitions, single replica); --if-not-exists
# makes this idempotent across re-runs. Absolute paths via $KAFKA_HOME resolve
# to the same scripts the relative invocation used after the earlier cd.
"$KAFKA_HOME/bin/kafka-topics.sh" --create \
    --topic orders \
    --bootstrap-server localhost:9092 \
    --partitions 3 \
    --replication-factor 1 \
    --if-not-exists

echo "4. 验证Topic创建..."
# List all topics on the local broker so the operator can see 'orders' exists.
"$KAFKA_HOME/bin/kafka-topics.sh" --list --bootstrap-server localhost:9092

echo "5. 编译Java项目..."
# Prefer the deployed webapp if it is a Maven project; otherwise fall back to
# the checkout in the home directory. The original tested pom.xml without
# checking that cd succeeded, so a missing webapp dir could probe the wrong
# directory; a failed compile was also silently ignored.
if cd /var/lib/tomcat9/webapps/ROOT 2>/dev/null && [ -f "pom.xml" ]; then
    mvn clean compile -DskipTests || { echo "编译失败" >&2; exit 1; }
else
    echo "在项目根目录编译..."
    cd "$HOME" || exit 1
    mvn clean compile -DskipTests || { echo "编译失败" >&2; exit 1; }
fi

echo "6. 启动数据生产者..."
cd "$HOME" || exit 1
# Resolve the runtime classpath once, up front. The original embedded this
# mvn call inside the java -cp command substitution, so a Maven failure
# produced a garbled classpath for the JVM instead of a visible error.
DEP_CLASSPATH=$(mvn dependency:build-classpath -Dmdep.outputFile=/dev/stdout -q) || {
    echo "无法解析依赖classpath" >&2
    exit 1
}
nohup java -cp "target/classes:$DEP_CLASSPATH" \
    com.orderstats.producer.RealTimeOrderProducer > /tmp/bigdata_logs/producer.log 2>&1 &
PRODUCER_PID=$!

# Let the producer connect to Kafka before the consumer is submitted.
sleep 5

echo "7. 启动Spark消费者..."
# NOTE: spark-3.2.1-bin-hadoop3.2 is the Scala 2.12 distribution, so the
# Kafka connector must be the _2.12 artifact — the original _2.13 coordinate
# is binary-incompatible with this Spark build (the _2.13 build is the
# separately named ...-scala2.13 tarball).
# NOTE(review): spark-submit normally takes an application jar; passing the
# target/classes directory is unusual — confirm this is intended.
nohup "$SPARK_HOME/bin/spark-submit" \
    --class com.orderstats.spark.RealTimeOrderConsumer \
    --master "local[*]" \
    --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.2.1 \
    target/classes > /tmp/bigdata_logs/spark.log 2>&1 &
SPARK_PID=$!

echo "8. 系统状态检查..."
# Give all components a moment to settle before printing the summary.
sleep 10

# Status summary: identical text to the original echo sequence, emitted as a
# single here-doc (with variable expansion) so the report reads as one unit.
cat <<EOF
================================
✅ 大数据系统启动完成！
📊 数据流: Kafka Producer → Kafka → Spark → SQLite → Web界面

🔗 访问地址: http://106.75.5.247
👤 登录账号: admin / admin123

📈 进程状态:
  - Zookeeper PID: $ZOOKEEPER_PID
  - Kafka PID: $KAFKA_PID
  - 数据生产者 PID: $PRODUCER_PID
  - Spark消费者 PID: $SPARK_PID

📝 日志文件:
  - Zookeeper: /tmp/bigdata_logs/zookeeper.log
  - Kafka: /tmp/bigdata_logs/kafka.log
  - 数据生产者: /tmp/bigdata_logs/producer.log
  - Spark消费者: /tmp/bigdata_logs/spark.log
EOF

# Write a companion shutdown script next to this one. The quoted 'EOF'
# delimiter keeps the body literal — nothing expands at generation time.
cat > stop_system.sh << 'EOF'
#!/bin/bash
echo "🛑 停止大数据系统..."

# Stop the application JVMs (producer and Spark consumer).
pkill -f "RealTimeOrderProducer"
pkill -f "RealTimeOrderConsumer"

# Stop the broker before its Zookeeper. The cd is quoted and checked so the
# stop scripts are never invoked from the wrong directory.
export KAFKA_HOME=/opt/bigdata/kafka_2.13-2.8.1
cd "$KAFKA_HOME" || exit 1
bin/kafka-server-stop.sh
bin/zookeeper-server-stop.sh

echo "系统已停止"
EOF

chmod +x stop_system.sh

# Final operator hints, then stream the producer log. printf '%s\n' emits
# exactly the same lines the original echo calls did.
printf '%s\n' "🎯 使用 './stop_system.sh' 停止系统"
printf '%s\n' "📊 实时监控订单数据: tail -f /tmp/bigdata_logs/producer.log"
printf '\n'

# Blocks here until the user interrupts; Ctrl+C only detaches from the log —
# the background services keep running.
printf '%s\n' "正在显示数据生产者日志 (Ctrl+C退出):"
tail -f /tmp/bigdata_logs/producer.log