package niit.sparkstreaming
import java.util.Properties
import org.apache.spark.sql.{DataFrame, SparkSession}

// Count of male/female students per class -> grouped by (classId, gender)
object streamSql {

  /**
   * One student record parsed from a tab-separated Kafka message.
   *
   * @param classId          class the student belongs to
   * @param name             student name
   * @param gender           gender code (int; encoding not visible here — presumably 0/1, verify with the producer)
   * @param brithDate        birth date string (field name keeps the original typo to preserve the public schema)
   * @param studentId        student identifier
   * @param semester         current semester
   * @param enrollmentStatus enrollment status string
   */
  case class Student(classId: String,
                     name: String,
                     gender: Int,
                     brithDate: String,
                     studentId: String,
                     semester: String,
                     enrollmentStatus: String
                    )

  def main(args: Array[String]): Unit = {
    // Windows-only workaround so Spark can locate winutils.exe.
    System.setProperty("hadoop.home.dir", "E:/hadoop")

    val spark = SparkSession.builder()
      .appName("streamSql")
      .master("local[*]")
      .getOrCreate()

    // Read the raw student stream from the Kafka topic.
    val kafkaStream = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "192.168.29.115:9092") // replace with the real broker address
      .option("subscribe", "stuInfo")                           // replace with the real topic name
      .option("startingOffsets", "earliest")                    // consume from the beginning of the topic
      .load()

    import spark.implicits._

    // Parse each Kafka record (one tab-separated line) into a Student.
    // split(..., -1) keeps trailing empty fields, so a blank enrollmentStatus
    // no longer shrinks the array and throws ArrayIndexOutOfBoundsException;
    // rows with fewer than 7 fields are dropped instead of crashing the query.
    val studentsStream = kafkaStream
      .selectExpr("CAST(value AS STRING)")
      .as[String]
      .map(_.split("\t", -1))
      .filter(_.length >= 7)
      .map { fields =>
        // NOTE(review): fields(2).toInt still throws on non-numeric gender — confirm producer guarantees this.
        Student(fields(0), fields(1), fields(2).toInt, fields(3), fields(4), fields(5), fields(6))
      }

    // Dataset -> DataFrame, registered as a temp view so it can be queried with SQL.
    val studentDF = studentsStream.toDF()
    studentDF.createOrReplaceTempView("students")

    // Count male/female students in each class.
    val genderCountDF = spark.sql(
      "SELECT classId, gender, COUNT(*) AS count " +
        "FROM students " +
        "GROUP BY classId, gender"
    )

    // JDBC settings are loop-invariant: build them once rather than on every micro-batch.
    val jdbcUrl = "jdbc:mysql://localhost:3306/studentInfo"
    val tableName = "student"
    val connectionProperties = new Properties()
    // SECURITY: credentials are hard-coded; move them to external configuration/secrets for real deployments.
    connectionProperties.put("user", "root")
    connectionProperties.put("password", "123456")

    // "complete" output mode re-emits the full aggregate every trigger, so each
    // batch overwrites the target table with the latest per-class counts.
    val query = genderCountDF.writeStream
      .outputMode("complete")
      .foreachBatch { (batchDF: DataFrame, batchId: Long) =>
        batchDF.write
          .mode("overwrite")
          .jdbc(jdbcUrl, tableName, connectionProperties)
      }
      .start()

    // Block until the streaming query terminates, then shut down Spark.
    query.awaitTermination()
    spark.stop()
  }
}
