import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.sql.{SparkSession, functions => F}

object Semester_Class_G {
  /**
   * Spark Streaming job: consumes tab-separated student records from the Kafka
   * topic "lol" and, for every 2-second micro-batch, counts students per
   * (semester, class_id, gender), prints the result and appends it to the
   * MySQL table `semester_class`.
   *
   * Expected record layout (7 tab-separated fields):
   *   class_id, name, gender, birthDate, studentId, semester, status
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration: local mode, one micro-batch every 2 seconds.
    val conf = new SparkConf().setMaster("local[*]").setAppName("KafkaSparkStream")
    val ssc = new StreamingContext(conf, Seconds(2))

    ssc.sparkContext.setLogLevel("ERROR") // canonical upper-case level name

    // Kafka consumer configuration. Auto-commit is disabled, so delivery is
    // at-least-once: on restart, re-processed batches may append duplicate
    // rows to MySQL.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.244.11:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "hael5",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("lol")
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    // Parse each record into its 7 fields. split(_, -1) keeps trailing empty
    // fields (the default limit of 0 would drop them), and short/malformed
    // lines are filtered out instead of crashing the batch with an
    // ArrayIndexOutOfBoundsException, which would terminate the whole job.
    val studentData = stream
      .map(_.value().split("\t", -1))
      .filter(_.length >= 7)
      .map(f => (f(0), f(1), f(2), f(3), f(4), f(5), f(6))) // class_id, name, gender, birthDate, studentId, semester, status

    // Reuse the streaming context's configuration for Spark SQL work.
    val spark = SparkSession.builder.config(conf).getOrCreate()
    import spark.implicits._

    // Per-batch processing: aggregate and persist non-empty batches only.
    studentData.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Tuple RDD -> DataFrame with named columns.
        val df = rdd.toDF("class_id", "name", "gender", "birthDate", "studentId", "semester", "status")

        // Head-count per (semester, class, gender).
        val Semester_class_G_Count = df
          .groupBy("semester", "class_id", "gender")
          .agg(F.count("gender").alias("count"))

        // Show the aggregate for this batch on the driver console.
        Semester_class_G_Count.show()

        // NOTE(review): database credentials are hard-coded; move them to
        // external configuration before deploying.
        val jdbcUrl = "jdbc:mysql://localhost:3306/stats?serverTimezone=Asia/Shanghai"
        val dbProperties = new java.util.Properties()
        dbProperties.put("user", "root")
        dbProperties.put("password", "123456")

        // Append this batch's counts; rows accumulate across batches.
        Semester_class_G_Count.write
          .mode("append")
          .jdbc(jdbcUrl, "semester_class", dbProperties)
      }
    }

    // Start streaming and block the driver until termination.
    ssc.start()
    ssc.awaitTermination()
  }
}
