package app
// Streams student records from Kafka and computes per-class male/female headcounts.
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types._

import java.util

object SQL {
  /** One tab-separated student record as received from the `stuInfo` topic. */
  case class StudentInfo(classNum: String, name: String, gender: String, birthDate: String, studentId: Long, semester: Integer, enrollmentStatus: String)

  // Number of tab-separated fields a well-formed record must carry.
  private val FieldCount = 7

  /**
   * Entry point: consumes student records from the Kafka topic `stuInfo`,
   * aggregates male/female counts per class with Spark SQL every 2 seconds,
   * and publishes the counts as CSV lines to the Kafka topic `stuInfo22`.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("KafkaToDataFrame")
    val ssc = new StreamingContext(conf, Seconds(2))
    ssc.sparkContext.setLogLevel("error")
    ssc.checkpoint("./checkpoint")

    // Kafka consumer configuration.
    val kfkParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.65.128:9092",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "group.id" -> "use_a_separate_group_id_for_each_stream",
      "auto.offset.reset" -> "latest"
    )

    val topicName = Array("stuInfo")
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topicName, kfkParams)
    )

    val spark = SparkSession.builder.config(conf).getOrCreate()

    // Kafka producer configuration (used on the driver only — see below).
    val property = new util.HashMap[String, Object]()
    property.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.65.128:9092")
    property.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    property.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")

    // The schema never changes between micro-batches, so build it once
    // instead of re-allocating it inside foreachRDD.
    val schema = StructType(Array(
      StructField("classNum", StringType, true),
      StructField("name", StringType, true),
      StructField("gender", StringType, true),
      StructField("birthDate", StringType, true),
      StructField("studentId", LongType, true),
      StructField("semester", IntegerType, true),
      StructField("enrollmentStatus", StringType, true)
    ))

    // For each micro-batch: parse records, register a temp view, run the
    // SQL aggregation, and forward the counts to Kafka.
    stream.foreachRDD { rdd =>
      // Parse TSV records defensively: a single malformed line (too few
      // fields or a non-numeric studentId/semester) is skipped instead of
      // killing the whole batch with an uncaught exception.
      val rowRDD = rdd.flatMap { record =>
        val values = record.value().split("\t", -1) // -1: keep trailing empty fields
        if (values.length >= FieldCount)
          scala.util.Try(
            Row(values(0), values(1), values(2), values(3), values(4).toLong, Integer.valueOf(values(5)), values(6))
          ).toOption
        else
          None
      }

      val studentInfoDF = spark.createDataFrame(rowRDD, schema)
      studentInfoDF.createOrReplaceTempView("student_info")

      // Count students per (class, gender) pair.
      val genderCountQuery = spark.sql(
        """
          |SELECT classNum, gender, COUNT(*) as count
          |FROM student_info
          |GROUP BY classNum, gender
        """.stripMargin)
      genderCountQuery.show()

      // The aggregate is tiny (one row per class/gender pair), so collect it
      // to the driver and publish with a single producer, rather than
      // constructing and closing one KafkaProducer per row on the executors
      // as before. flush() before close() guarantees buffered sends go out.
      val rows = genderCountQuery.collect()
      if (rows.nonEmpty) {
        val kfkProducer = new KafkaProducer[String, String](property)
        try {
          rows.foreach { row =>
            val formattedString =
              s"${row.getAs[String]("classNum")},${row.getAs[String]("gender")},${row.getAs[Long]("count")}"
            kfkProducer.send(new ProducerRecord[String, String]("stuInfo22", formattedString))
          }
          kfkProducer.flush()
        } finally {
          kfkProducer.close()
        }
      }
    }

    // Start receiving data and block until the streaming context terminates.
    ssc.start()
    ssc.awaitTermination()
  }
}