package com.supermarket.spark.batch

import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import java.util.Properties

object LowStockAlertJob {

  /** Default threshold below which a product is considered "low stock".
    * Previously hard-coded in the SQL; kept as the default so a zero-arg
    * invocation behaves exactly as before.
    */
  private val DefaultThreshold = 120

  /** Batch job: reads tab-separated supermarket records from HDFS, selects
    * products whose `in_stock` is below a threshold, and overwrites the
    * MySQL table `low_stock_alerts` with the result.
    *
    * @param args optional: args(0) = stock threshold (integer). Falls back to
    *             [[DefaultThreshold]] when absent or not parseable.
    */
  def main(args: Array[String]): Unit = {

    // Hadoop home is required for the winutils binary when running on Windows.
    System.setProperty("hadoop.home.dir", "D:\\tmp\\hadoop")

    // Threshold is now configurable; non-numeric / missing arg keeps the old behavior.
    val threshold: Int =
      args.headOption.flatMap(_.toIntOption).getOrElse(DefaultThreshold)

    // Initialize the Spark session (local mode, all cores).
    val spark = SparkSession.builder
      .appName("Low Stock Alerts")
      .master("local[*]")
      .getOrCreate()

    // Ensure the session is always stopped, even when the read/SQL/JDBC
    // write fails — the original leaked the session on any exception.
    try {
      import spark.implicits._

      // Explicit schema: the Flume-delivered files are headerless TSV.
      val schema = new StructType()
        .add("user_id", IntegerType)
        .add("product_id", IntegerType)
        .add("product_name", StringType)
        .add("in_stock", IntegerType)
        .add("favor_level", IntegerType)
        .add("sold", IntegerType)
        .add("status", StringType)

      // Read the raw records from HDFS.
      val flumeDF = spark.read
        .option("sep", "\t")
        .schema(schema)
        .csv("hdfs://niit01:8020/user/flume/supermarket00/*")

      // Register as a temporary view for SQL access.
      flumeDF.createOrReplaceTempView("supermarket_raw")

      // Select products whose stock is below the threshold.
      // (The original comment claimed "< 50" while the query used 120 —
      // the threshold is now a single, named value.)
      val lowStockDF = spark.sql(s"""
        SELECT
          product_id,
          product_name,
          in_stock,
          sold,
          status
        FROM supermarket_raw
        WHERE in_stock < $threshold
        ORDER BY in_stock ASC
      """)

      // Show the first 10 rows for debugging.
      lowStockDF.limit(10).show(false)

      // MySQL connection settings.
      // NOTE(review): credentials are hard-coded in source — move them to
      // environment variables or a secrets store before production use.
      val jdbcUrl = "jdbc:mysql://43.140.205.103:3306/supermarket?" +
        "useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true&characterEncoding=UTF-8"

      val connectionProperties = new Properties()
      connectionProperties.put("user", "supermarket")
      connectionProperties.put("password", "a7NrdbX8hiAZ8Nxb")
      connectionProperties.put("driver", "com.mysql.cj.jdbc.Driver")

      // Overwrite the MySQL table low_stock_alerts with the current result.
      // SaveMode "overwrite" drops and recreates the target table each run.
      lowStockDF.write
        .mode("overwrite")
        .jdbc(jdbcUrl, "low_stock_alerts", connectionProperties)
    } finally {
      spark.stop()
    }
  }
}