#!/bin/bash

# Cloud deployment script for the order statistics system.
# Supports deployment to UCloud, Alibaba Cloud, Tencent Cloud and other
# mainstream cloud servers.  Must be run as root (e.g. via sudo).

# Fail fast: abort on command errors, unset variables and failed pipeline
# stages, so a failed download or install does not silently leave the host
# half-deployed (the original script ignored all errors).
set -euo pipefail

echo "=== 订单信息统计系统云部署脚本 ==="

# Package installation and systemd changes below require root.
if [ "$EUID" -ne 0 ]; then
    echo "请使用sudo运行此脚本"
    exit 1
fi

# 设置变量
SERVER_IP="106.75.5.247"
APP_NAME="OrderStatsSystem"
APP_PORT="8080"
KAFKA_PORT="9092"
ZOOKEEPER_PORT="2181"

echo "目标服务器: $SERVER_IP"
echo "应用名称: $APP_NAME"
echo "应用端口: $APP_PORT"

# 1. Refresh the package index and upgrade installed packages.
echo "1. 更新系统软件包..."
apt-get update && apt-get upgrade -y

# 2. Install OpenJDK 8 (required by Kafka, Spark and the application).
echo "2. 安装Java 8..."
apt-get install -y openjdk-8-jdk
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
# Persist JAVA_HOME, but only once: the previous unguarded append
# duplicated this line on every re-run of the script.
# NOTE(review): under sudo, ~ resolves to root's home — confirm intended.
grep -qF 'export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64' ~/.bashrc || \
    echo 'export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64' >> ~/.bashrc

# 3. Install Maven so the project can be built on the server.
echo "3. 安装Maven..."
apt-get install -y maven

# 4. Install Tomcat 9 plus its admin web applications.
echo "4. 安装Tomcat 9..."
apt-get install -y tomcat9 tomcat9-admin

# Start Tomcat now and enable it at boot.
systemctl start tomcat9
systemctl enable tomcat9

# 5. Install Kafka (with its bundled Zookeeper) under /opt/kafka.
echo "5. 安装Kafka和Zookeeper..."
cd /opt || exit 1

# Download and unpack only when not already installed, so re-running the
# script does not fail on the 'mv' over an existing /opt/kafka, and a
# failed download aborts instead of continuing with a missing archive.
if [ ! -d /opt/kafka ]; then
    wget https://archive.apache.org/dist/kafka/2.5.0/kafka_2.13-2.5.0.tgz || {
        echo "Kafka download failed" >&2
        exit 1
    }
    tar -xzf kafka_2.13-2.5.0.tgz
    mv kafka_2.13-2.5.0 kafka
fi
# NOTE(review): assumes an 'ubuntu' user exists on the host — confirm.
chown -R ubuntu:ubuntu kafka

# Create a systemd unit for Zookeeper so it is supervised and started at
# boot.  It runs Kafka's bundled zookeeper-server-start.sh with -daemon
# (the wrapper forks, hence Type=forking) as user 'ubuntu'.
# NOTE(review): Requires=network.target is unusual (Wants=/After= is the
# common pattern), and the 'ubuntu' user is assumed to exist — confirm
# both against the target host.  The quoted 'EOF' delimiter keeps the
# unit content literal (no shell expansion).
cat > /etc/systemd/system/zookeeper.service << 'EOF'
[Unit]
Description=Apache Zookeeper server
Documentation=http://zookeeper.apache.org
Requires=network.target remote-fs.target
After=network.target remote-fs.target

[Service]
Type=forking
User=ubuntu
Group=ubuntu
Environment=JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
ExecStart=/opt/kafka/bin/zookeeper-server-start.sh -daemon /opt/kafka/config/zookeeper.properties
ExecStop=/opt/kafka/bin/zookeeper-server-stop.sh
Restart=on-abnormal

[Install]
WantedBy=multi-user.target
EOF

# Create a systemd unit for the Kafka broker.  Requires= alone pulls in
# zookeeper.service but does NOT order the two units; without an After=
# line systemd may start Kafka before Zookeeper is up.  The missing
# ordering dependency is added here.
cat > /etc/systemd/system/kafka.service << 'EOF'
[Unit]
Description=Apache Kafka Server
Documentation=http://kafka.apache.org/documentation.html
Requires=zookeeper.service
After=zookeeper.service

[Service]
Type=forking
User=ubuntu
Group=ubuntu
Environment=JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
ExecStart=/opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
ExecStop=/opt/kafka/bin/kafka-server-stop.sh
Restart=on-abnormal

[Install]
WantedBy=multi-user.target
EOF

# Reload systemd so the new units are visible, then start Zookeeper first
# and the Kafka broker after it.
systemctl daemon-reload
systemctl start zookeeper
systemctl enable zookeeper
sleep 10    # crude wait for Zookeeper — TODO: poll port 2181 instead
systemctl start kafka
systemctl enable kafka

# 6. Create the Kafka topics produced/consumed by the application.
echo "6. 创建Kafka主题..."
sleep 15    # crude wait for the broker — TODO: poll port 9092 instead

# create_topic NAME PARTITIONS
# Creates a topic as the 'ubuntu' user; --if-not-exists makes re-runs of
# this script idempotent (the original six copy-pasted commands errored
# on existing topics).
create_topic() {
    local topic=$1 partitions=$2
    su - ubuntu -c "/opt/kafka/bin/kafka-topics.sh --create --if-not-exists --topic $topic --bootstrap-server localhost:9092 --partitions $partitions --replication-factor 1"
}

create_topic orders 3                          # raw order stream
create_topic order_valid_invalid_count 1
create_topic product_order_stats 1
create_topic category_order_counts 1
create_topic category_valid_invalid_counts 1
create_topic category_product_order_stats 1

# 7. Install Apache Spark 3.2.1 (Hadoop 3.2 build) under /opt/spark.
echo "7. 安装Apache Spark..."
cd /opt || exit 1
# Skip download/unpack when already installed so re-runs don't fail on
# the 'mv' over an existing /opt/spark.
if [ ! -d /opt/spark ]; then
    wget https://archive.apache.org/dist/spark/spark-3.2.1/spark-3.2.1-bin-hadoop3.2.tgz || {
        echo "Spark download failed" >&2
        exit 1
    }
    tar -xzf spark-3.2.1-bin-hadoop3.2.tgz
    mv spark-3.2.1-bin-hadoop3.2 spark
fi
chown -R ubuntu:ubuntu spark

# Export the Spark variables directly for the rest of this script, and
# persist them for future logins (guarded against duplicate appends).
# The original 'source ~/.bashrc' re-ran the rc file in a non-interactive
# shell, which does not affect any other session.
export SPARK_HOME=/opt/spark
export PATH="$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin"
grep -qF 'export SPARK_HOME=/opt/spark' ~/.bashrc || {
    echo 'export SPARK_HOME=/opt/spark' >> ~/.bashrc
    echo 'export PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin' >> ~/.bashrc
}

# 8. Open the firewall for SSH (22), the web app (8080), Kafka (9092),
# Zookeeper (2181) and the Spark UI (4040), then turn ufw on.
echo "8. 配置防火墙..."
for port in 22 8080 9092 2181 4040; do
    ufw allow "$port"
done
ufw --force enable

# 9. Create the application directory owned by the runtime user.
echo "9. 创建应用目录..."
mkdir -p /opt/orders-system
chown -R ubuntu:ubuntu /opt/orders-system

# 10. Write the launcher scripts that the systemd services run.
echo "10. 创建应用启动脚本..."

# Launcher for the Kafka data producer.
cat > /opt/orders-system/start-producer.sh << 'EOF'
#!/bin/bash
cd /opt/orders-system
java -cp "target/classes:target/lib/*" Real_time_Producer
EOF

# Launcher for the Spark Streaming job.
# FIX: the Spark 3.2.1 prebuilt distribution is compiled against Scala
# 2.12, so the Kafka connector must be the _2.12 artifact; the previous
# _2.13 coordinate fails with binary incompatibilities at runtime.  (The
# broker tarball kafka_2.13-* is unrelated — that is the server's Scala.)
# NOTE(review): spark-submit normally expects an application JAR, not a
# 'target/classes' directory — verify the build produces a jar to pass.
cat > /opt/orders-system/start-spark-streaming.sh << 'EOF'
#!/bin/bash
cd /opt/orders-system
/opt/spark/bin/spark-submit \
  --class RealTimeOrderStatsApp \
  --master local[*] \
  --packages org.apache.spark:spark-streaming-kafka-0-10_2.12:3.2.1 \
  target/classes
EOF

chmod +x /opt/orders-system/*.sh

# Create systemd units for the two application processes so they restart
# automatically (Restart=always) and start after the Kafka broker.
# NOTE(review): After= only orders startup; there is no Requires=/Wants=
# on kafka.service, so these units can start even if Kafka failed —
# confirm whether that is intended.

# Unit for the order data producer (runs start-producer.sh as 'ubuntu').
cat > /etc/systemd/system/orders-producer.service << 'EOF'
[Unit]
Description=Orders Data Producer
After=kafka.service

[Service]
Type=simple
User=ubuntu
Group=ubuntu
WorkingDirectory=/opt/orders-system
ExecStart=/opt/orders-system/start-producer.sh
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

# Unit for the Spark Streaming job (runs start-spark-streaming.sh).
cat > /etc/systemd/system/orders-spark.service << 'EOF'
[Unit]
Description=Orders Spark Streaming
After=kafka.service

[Service]
Type=simple
User=ubuntu
Group=ubuntu
WorkingDirectory=/opt/orders-system
ExecStart=/opt/orders-system/start-spark-streaming.sh
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

# Make the new units visible to systemd (they are started manually later,
# per the instructions printed at the end of this script).
systemctl daemon-reload

echo "11. 安装Nginx反向代理..."
apt-get install -y nginx

# Write the Nginx site: proxy / to Tomcat (8080) and /spark/ to the Spark
# UI (4040).  The heredoc delimiter is unquoted so $SERVER_IP expands
# now, while the \$-escaped nginx variables stay literal in the config.
cat > /etc/nginx/sites-available/orders-system << EOF
server {
    listen 80;
    server_name $SERVER_IP;

    location / {
        proxy_pass http://localhost:8080;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
    
    location /spark/ {
        proxy_pass http://localhost:4040/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
    }
}
EOF

# Enable the site.  -sf makes the link idempotent: the original plain
# 'ln -s' failed on every re-run once the symlink already existed.
ln -sf /etc/nginx/sites-available/orders-system /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Validate the configuration before restarting so a bad config does not
# take Nginx down.
nginx -t
systemctl restart nginx
systemctl enable nginx

# Final summary for the operator: deployment info, remaining manual
# steps, and handy service-management commands.  A single heredoc emits
# exactly the same lines the original echo chain produced.
cat << EOF
=== 部署脚本执行完成 ===

📋 部署信息：
  🌐 Web访问地址: http://$SERVER_IP
  📊 Spark UI: http://$SERVER_IP/spark/
  📁 应用目录: /opt/orders-system
  🗄️ 数据库文件: /opt/orders-system/orders_system.db

📝 下一步操作：
  1. 将编译好的WAR文件复制到 /var/lib/tomcat9/webapps/
  2. 将项目源码复制到 /opt/orders-system/
  3. 编译项目: cd /opt/orders-system && mvn clean compile
  4. 初始化数据库: java -cp target/classes com.example.util.DatabaseInitializer
  5. 启动数据生产者: systemctl start orders-producer
  6. 启动Spark流处理: systemctl start orders-spark

🔧 服务管理命令：
  systemctl status kafka
  systemctl status tomcat9
  systemctl status nginx
  systemctl status orders-producer
  systemctl status orders-spark
EOF