package org.huel.dataprocessing

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import java.util.Properties

/**
 * Spark Streaming job: consumes attendance records from the Kafka topic
 * "attendance", maintains running per-class counts of "L" (late) and "A"
 * (absent) statuses, and persists the totals to the MySQL table `LAByClass`.
 *
 * @author Liweijian
 * @since 2024/11/29
 */
object runRDD {
  def main(args: Array[String]): Unit = {

    // Spark configuration; local[*] uses one worker thread per available core.
    val conf = new SparkConf()
      .setAppName("runrdd")
      .setMaster("local[*]")
      //.set("spark.driver.memory", "100m")
      //.set("spark.executor.memory", "100m")
    val spark = SparkSession.builder.config(conf).getOrCreate()
    import spark.implicits._ // required for .toDF(...) below

    // 6-second micro-batches on top of the existing SparkContext.
    val ssc = new StreamingContext(spark.sparkContext, Seconds(6))
    ssc.sparkContext.setLogLevel("error")

    // updateStateByKey requires a checkpoint directory to persist state
    // between batches.
    ssc.checkpoint("checkpoint")

    // Kafka consumer configuration.
    // NOTE(review): the broker address is hard-coded — move to config/args.
    val kafkaParams = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "43.143.125.94:9092",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.GROUP_ID_CONFIG -> "sparkrdd",
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "earliest",
      // Offsets are recovered from the Spark checkpoint, so disable Kafka's
      // own auto-commit to avoid conflicting offset bookkeeping (per the
      // Spark Streaming + Kafka integration guide).
      ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> (false: java.lang.Boolean)
    )

    val topics = Array("attendance")

    // Direct Kafka DStream: one Kafka partition maps to one RDD partition.
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    // Debug tap: print up to 10 raw records of each batch.
    stream.foreachRDD { rdd =>
      println("=== Kafka 消费的原始数据 ===")
      rdd.map(record => record.value()).take(10).foreach(println)
    }

    // State update: add this batch's occurrences to the running total.
    val updateCount = (newValues: Seq[Int], state: Option[Int]) => {
      Some(newValues.sum + state.getOrElse(0))
    }

    // Parse each tab-separated record into ((classId, status), 1).
    // Field 0 is the class id and field 4 the attendance status; records
    // with fewer than 5 fields are dropped as malformed.
    val processedStream = stream.map(record => record.value())
      .map(_.split("\t"))
      .filter(_.length >= 5)
      .map(fields => ((fields(0), fields(4)), 1))

    // Keep only "L" (late) and "A" (absent) and maintain running counts.
    val stateStream = processedStream
      .filter { case ((_, status), _) => status == "L" || status == "A" }
      .updateStateByKey(updateCount)

    stateStream.foreachRDD { rdd =>

      // Pivot per-(class, status) counts into one (classId, lCount, aCount)
      // row per class; a missing status defaults to 0.
      val classCounts = rdd.map {
          case ((classId, status), totalCount) =>
            (classId, (status, totalCount))
        }.groupByKey()
        .map { case (classId, counts) =>
          val countMap = counts.toMap
          val lCount = countMap.getOrElse("L", 0)
          val aCount = countMap.getOrElse("A", 0)
          (classId, lCount, aCount)
        }.collect()

      // Always report these three classes, with (0, 0) for any class that
      // has produced no matching records yet.
      val classIdSet = Set("220713", "220714", "220715")
      val finalCounts = classIdSet.map { classId =>
        classCounts.collectFirst {
          case (id, lCount, aCount) if id == classId => (id, lCount, aCount)
        }.getOrElse((classId, 0, 0))
      }.toSeq

      // MySQL connection settings.
      // SECURITY(review): credentials are hard-coded in source — move them to
      // environment variables or a secrets store before deploying.
      val jdbcUrl = "jdbc:mysql://43.143.125.94:3306/attendance?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai"
      val dbTable = "LAByClass"
      val connectionProperties = new Properties()
      connectionProperties.put("user", "root")
      connectionProperties.put("password", "Lwj378$$")

      val finalDataFrame = finalCounts.toDF("classId", "lCount", "aCount")

      // BUG FIX: println(finalDataFrame) only printed the DataFrame's
      // object reference/schema; show() prints the actual rows.
      finalDataFrame.show()

      // "overwrite" keeps the table as a snapshot of the latest running
      // totals (the counts are cumulative via updateStateByKey).
      finalDataFrame.write.mode("overwrite").jdbc(jdbcUrl, dbTable, connectionProperties)
    }

    // Start the streaming computation and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }
}