package niit.sparkstreaming

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

import java.util.Properties
import scala.util.Random
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions.lit
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

// Count of male/female students per class per semester, keyed by (semester, classId, gender)
/**
 * Streaming job: reads tab-separated student records from the Kafka topic
 * `stuInfo`, counts students grouped by (semester, classId, gender) using
 * Spark SQL, and writes each micro-batch result to a MySQL table via JDBC.
 */
object streamRdd {
  /** One parsed student record: semester label, class id, gender code (as int). */
  case class Student(
                      semester: String,
                      classId: String,
                      gender: Int
                    )

  def main(args: Array[String]): Unit = {
    // Needed on Windows so Spark can locate winutils.exe.
    System.setProperty("hadoop.home.dir", "E:/hadoop")
    val spark = SparkSession.builder()
      .appName("streamRdd")
      .master("local[*]")
      .getOrCreate()

    // Read the raw record stream from Kafka.
    val df = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "192.168.29.115:9092") // replace with the actual Kafka broker address
      .option("subscribe", "stuInfo") // replace with the actual Kafka topic name
      .option("startingOffsets", "earliest") // start from the earliest available offset
      .load()

    import spark.implicits._
    // Kafka's `value` column is binary; cast it to a string per record.
    val lines = df.selectExpr("CAST(value AS STRING)").as[String]

    // Parse each tab-separated line into a Student.
    // Malformed lines (too few fields, non-numeric gender) previously threw
    // ArrayIndexOutOfBoundsException / NumberFormatException and killed the
    // whole streaming query; now they are skipped via flatMap + Try.
    val genderBySemesterAndClass = lines
      .flatMap { line =>
        val fields = line.split("\t")
        if (fields.length >= 6)
          scala.util.Try(Student(fields(5), fields(0), fields(2).toInt)).toOption
        else
          None
      }
    val studentDF = genderBySemesterAndClass.toDF()
    studentDF.createOrReplaceTempView("students")
    // ORDER BY is allowed here because the sink runs in "complete" output mode.
    val genderCountDF = spark.sql(
      "SELECT semester,classId,gender, COUNT(*) AS count FROM students " +
        "GROUP BY semester,classId,gender" +
        " ORDER BY semester"
    )

    // Sink: overwrite the MySQL table with the full aggregate on every batch.
    val query = genderCountDF.writeStream
      .outputMode("complete")
      .foreachBatch { (batchDF: DataFrame, batchId: Long) =>
        val jdbcUrl = "jdbc:mysql://localhost:3306/studentInfo"
        val tableName = "student1"
        // NOTE(review): credentials are hard-coded; move them to configuration
        // or environment variables before deploying.
        val connectionProperties = new Properties()
        connectionProperties.put("user", "root")
        connectionProperties.put("password", "123456")
        batchDF.write
          .mode("overwrite") // "overwrite" pairs with complete mode: each batch holds the full result
          .jdbc(jdbcUrl, tableName, connectionProperties)
      }
      .start()
    // Block until the streaming query is stopped or fails.
    query.awaitTermination()
    spark.stop()
  }
}
