package com.supermarket.spark.streaming

import java.util.Properties

import org.apache.spark.sql._
import org.apache.spark.sql.functions._

import scala.util.control.NonFatal

/**
 * Structured Streaming job: consumes tab-separated supermarket records from
 * Kafka, computes three running aggregates (status counts, per-product counts,
 * per-category counts) and upserts each aggregate into MySQL via foreachBatch.
 */
object StreamingApp {
  def main(args: Array[String]): Unit = {
    // hadoop.home.dir is only needed on Windows dev machines (winutils.exe).
    System.setProperty("hadoop.home.dir", "D:\\tmp\\hadoop")

    // Initialize the SparkSession (local mode for development).
    val spark = SparkSession.builder
      .appName("KafkaSupermarketStreaming")
      .master("local[*]")
      .getOrCreate()

    println("✅ Spark Session 已创建，开始消费 Kafka 数据...")

    // 1. Read the raw stream from Kafka; each record value is a
    //    tab-separated line of 7 fields.
    val kafkaDF = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "niit01:9092")
      .option("subscribe", "supermarket00")
      .load()
      .selectExpr("CAST(value AS STRING)")

    // 2. Split the line and project it into typed columns.
    //    NOTE(review): malformed lines with fewer than 7 fields yield nulls
    //    from the out-of-range index / failed cast — confirm that is acceptable.
    val parsedDF = kafkaDF.select(
      split(col("value"), "\t").alias("fields")
    ).select(
      col("fields")(0).cast("int").alias("user_id"),
      col("fields")(1).cast("int").alias("product_id"),
      col("fields")(2).alias("product_name"),
      col("fields")(3).cast("int").alias("in_stock"),
      col("fields")(4).cast("int").alias("favor_level"),
      col("fields")(5).cast("int").alias("sold"),
      col("fields")(6).alias("status")
    )

    // Console sink for debugging the parsed stream (show() is not usable on
    // a streaming DataFrame).
    val queryDebug = parsedDF.writeStream
      .outputMode("append")
      .format("console")
      .option("truncate", "false")
      .start()

    // 3. Count listed/delisted products, translating the raw status into a
    //    Chinese label first.
    //    BUG FIX: in the original code the when(...).when(...) chain was a
    //    dangling statement after the val assignment — it built a Column that
    //    was discarded and the labels were never applied. Fold the mapping
    //    into withColumn before grouping; unknown statuses pass through as-is.
    val upDownCount = parsedDF
      .withColumn("status",
        when(col("status") === "up", "上架")
          .when(col("status") === "down", "下架")
          .otherwise(col("status")))
      .groupBy("status")
      .count()
      .withColumnRenamed("count", "total")

    // 4. Count records per product id.
    val productCount = parsedDF.groupBy("product_id").count().withColumnRenamed("count", "total")

    // 5. Map product names to Chinese category labels via keyword matching.
    val categorizedDF = parsedDF.withColumn("category",
      when(col("product_name").contains("Milk") || col("product_name").contains("Cheese") || col("product_name").contains("Butter"), "乳制品")
        .when(col("product_name").contains("Juice") || col("product_name").contains("Water") || col("product_name").contains("Cola"), "饮料类")
        .when(col("product_name").contains("Chips") || col("product_name").contains("Chocolate"), "零食类")
        .when(col("product_name").contains("Apple") || col("product_name").contains("Banana"), "水果类")
        .when(col("product_name").contains("Carrot") || col("product_name").contains("Tomato"), "蔬菜类")
        .when(col("product_name").contains("Detergent") || col("product_name").contains("Disinfectant"), "清洁用品")
        .when(col("product_name").contains("Shampoo") || col("product_name").contains("Toothpaste"), "个人护理")
        .otherwise("其他")
    )

    val categoryCount = categorizedDF.groupBy("category").count().withColumnRenamed("count", "total")

    // 6. MySQL connection settings.
    //    SECURITY(review): credentials are hardcoded — move them to a config
    //    file / environment variables before deploying.
    val jdbcUrl = "jdbc:mysql://43.140.205.103:3306/supermarket?useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true"
    val connectionProperties = new Properties()
    connectionProperties.put("user", "supermarket")
    connectionProperties.put("password", "a7NrdbX8hiAZ8Nxb")
    connectionProperties.put("driver", "com.mysql.cj.jdbc.Driver")

    /**
     * Write one micro-batch to the given MySQL table (overwrite), with
     * logging and non-fatal error handling so a transient DB failure does
     * not kill the streaming query.
     *
     * The batch DataFrame is persisted because it is consumed three times
     * (count, show, write); without caching each action would recompute it.
     */
    def writeStreamToMySQL(df: DataFrame, epochId: Long, table: String): Unit = {
      try {
        df.persist()
        try {
          val count = df.count()
          if (count > 0) {
            println(s"💾 正在写入表 $table，记录数: $count")
            df.show(false)
            df.write
              .mode("overwrite")
              .jdbc(jdbcUrl, table, connectionProperties)
          } else {
            println(s"🟡 表 $table 当前无更新数据，跳过写入。Epoch: $epochId")
          }
        } finally {
          df.unpersist()
        }
      } catch {
        // NonFatal lets OutOfMemoryError / InterruptedException propagate.
        case NonFatal(e) =>
          e.printStackTrace()
          println(s"❌ 写入表 $table 失败: ${e.getMessage}")
      }
    }

    // 8. Start the three aggregate sinks (update mode: only changed groups
    //    are delivered to foreachBatch each micro-batch).
    val query1 = upDownCount.writeStream
      .outputMode("update")
      .foreachBatch { (batchDF: DataFrame, batchId: Long) =>
        writeStreamToMySQL(batchDF, batchId, "status_count")
      }
      .start()

    val query2 = productCount.writeStream
      .outputMode("update")
      .foreachBatch { (batchDF: DataFrame, batchId: Long) =>
        writeStreamToMySQL(batchDF, batchId, "product_count")
      }
      .start()

    val query3 = categoryCount.writeStream
      .outputMode("update")
      .foreachBatch { (batchDF: DataFrame, batchId: Long) =>
        writeStreamToMySQL(batchDF, batchId, "category_count")
      }
      .start()

    // BUG FIX: the original awaited only query1, ignoring failures in the
    // debug/product/category queries. Block until ANY of the four queries
    // terminates (normally or with an exception).
    spark.streams.awaitAnyTermination()
  }
}