import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka010._

import scala.util.Try

object SupermarketStreaming {

  /** One supermarket event, parsed from a tab-separated Kafka record:
    * productCode \t productName \t stockQuantity \t soldQuantity \t status
    */
  final case class Product(productCode: String, productName: String, stockQuantity: Int, soldQuantity: Int, status: String)

  def main(args: Array[String]): Unit = {
    // 1. Build a single SparkSession, then derive the StreamingContext from its
    //    SparkContext. Creating a StreamingContext from a raw SparkConf and a
    //    SparkSession separately risks two competing context initializations.
    val spark = SparkSession.builder
      .master("local[*]")
      .appName("SupermarketStats")
      .getOrCreate()
    spark.sparkContext.setLogLevel("ERROR")
    val ssc = new StreamingContext(spark.sparkContext, Seconds(2)) // 2-second micro-batches

    // Implicit conversions for rdd.toDF()
    import spark.implicits._

    // Shared broker list for both the consumer and the Kafka sink.
    val bootstrapServers = "192.168.88.131:9092,192.168.88.132:9092,192.168.88.133:9092"

    // Kafka consumer configuration. Auto-commit is disabled, so we must commit
    // offsets ourselves after each batch is fully processed (at-least-once
    // delivery) — the original code never committed, so the consumer group's
    // progress was never recorded.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> bootstrapServers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "supermarket_group",
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Direct stream subscribed to the source topic.
    val topics = Array("supermarket")
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    // JDBC sink configuration — loop-invariant, so built once instead of per batch.
    // NOTE(review): "supermaket" looks like a typo for "supermarket", but it is the
    // live database name — confirm against the MySQL server before renaming.
    val jdbcUrl = "jdbc:mysql://192.168.88.131:3306/supermaket"
    val jdbcProps = new java.util.Properties()
    jdbcProps.setProperty("driver", "com.mysql.cj.jdbc.Driver")
    jdbcProps.setProperty("user", "root")
    jdbcProps.setProperty("password", "123456")

    /** Writes `df` to the given Kafka topic as JSON values.
      *
      * @param df    rows to publish; every column is folded into one JSON value
      * @param topic destination Kafka topic
      * @param key   constant message key identifying which statistic this is
      *              (the original hard-coded 'statusCount' for all four datasets)
      */
    def writeToKafka(df: DataFrame, topic: String, key: String): Unit = {
      df.selectExpr(s"CAST('$key' AS STRING) AS key", "to_json(struct(*)) AS value")
        .write
        .format("kafka")
        .option("kafka.bootstrap.servers", bootstrapServers)
        .option("topic", topic)
        .save()
    }

    // Process each micro-batch. We iterate on the RAW Kafka stream (not a mapped
    // DStream) because only the first-hand RDD carries HasOffsetRanges — mapping
    // first would lose the offset metadata needed for the commit below.
    stream.foreachRDD { rdd =>
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Parse tab-separated records defensively: a short or non-numeric line is
      // dropped instead of throwing and killing the whole streaming job.
      val products = rdd.flatMap { record =>
        val parts = record.value().split("\t")
        if (parts.length >= 5)
          Try(Product(parts(0), parts(1), parts(2).toInt, parts(3).toInt, parts(4))).toOption
        else
          None
      }

      if (!products.isEmpty()) {
        // Convert to DataFrame once and cache it — it feeds four aggregations below.
        val productDF = products.toDF()
        productDF.cache()

        // 2) Count of records per status (on-shelf vs off-shelf).
        val statusCountDF = productDF.groupBy("status").agg(count("*").as("count"))

        // 3) Count of records per product code.
        val productIdCountDF = productDF.groupBy("productCode").agg(count("*").as("count"))

        // 4) Count of records per product name (category).
        val productCategoryCountDF = productDF.groupBy("productName").agg(count("*").as("count"))

        // 5) Spark SQL: on/off-shelf counts per product code and name.
        productDF.createOrReplaceTempView("products")
        val productStatusCountDF = spark.sql("""
          SELECT productCode, productName, status, COUNT(*) AS count
          FROM products
          GROUP BY productCode, productName, status
        """)

        // 6) The same statistic computed with the Spark Core / RDD API,
        //    then converted to a DataFrame for the JDBC sink.
        val productStatusCountRDDDF = products
          .map(p => ((p.productCode, p.productName, p.status), 1))
          .reduceByKey(_ + _)
          .map { case ((code, name, status), cnt) => (code, name, status, cnt) }
          .toDF("productCode", "productName", "status", "count")

        // Publish each non-empty statistic to Kafka. Each dataset gets its own
        // message key so downstream consumers can tell the statistics apart
        // (previously every write reused the key 'statusCount').
        if (!statusCountDF.isEmpty) {
          writeToKafka(statusCountDF, "SupermarketData", "statusCount")
        }
        if (!productIdCountDF.isEmpty) {
          writeToKafka(productIdCountDF, "SupermarketData", "productIdCount")
        }
        if (!productCategoryCountDF.isEmpty) {
          writeToKafka(productCategoryCountDF, "SupermarketData", "productCategoryCount")
        }
        if (!productStatusCountDF.isEmpty) {
          writeToKafka(productStatusCountDF, "SupermarketData", "productStatusCount")
        }

        // Persist the RDD-based result to MySQL.
        if (!productStatusCountRDDDF.isEmpty) {
          productStatusCountRDDDF.write.mode(SaveMode.Append).jdbc(jdbcUrl, "product_status_counts", jdbcProps)
        }

        productDF.unpersist()
      }

      // All sinks for this batch succeeded — record the consumed offsets so the
      // consumer group makes durable progress (at-least-once semantics).
      stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    // Start the streaming job and block until it is stopped externally.
    ssc.start()
    ssc.awaitTermination()
  }
}