package Sparksql

import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

import java.util.concurrent.{Executors, TimeUnit}
import java.util.{Arrays, Properties}

import scala.collection.JavaConverters._
import scala.util.control.NonFatal

object CourseAttendance {

  // One attendance event parsed from a tab-separated Kafka message.
  // NOTE(review): the index mapping used below (arr(2)=course, arr(1)=name,
  // arr(3)=studentId, arr(4)=status; arr(0) unused) is kept from the original
  // code — confirm it against the producer's actual message layout.
  case class attendance(course: String, name: String, studentId: String, status: String)

  /**
   * Entry point: every 5 seconds, polls attendance events from Kafka,
   * aggregates late ('L') / absent ('A') counts per course with Spark SQL,
   * and upserts the results into MySQL.
   */
  def main(args: Array[String]): Unit = {
    // Spark setup (local mode; the legacy SQLContext API is kept as-is).
    val conf = new SparkConf().setMaster("local[*]").setAppName("CourseAttendance")
    val sc = new SparkContext(conf)
    val sqlCon = new SQLContext(sc)
    sc.setLogLevel("error")

    // Kafka consumer configuration.
    val properties = new Properties()
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.23.128:9092") // replace with your Kafka broker address
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "course-attendance-consumer-group")
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")

    val kafkaConsumer = new KafkaConsumer[String, String](properties)
    kafkaConsumer.subscribe(Arrays.asList("attendance123"))

    // Single-threaded scheduler: KafkaConsumer is not thread-safe, so every
    // poll() must happen on this one thread.
    val scheduler = Executors.newScheduledThreadPool(1)

    val task = new Runnable {
      def run(): Unit = {
        // FIX: an exception escaping run() would silently cancel all future
        // executions scheduled by scheduleAtFixedRate, so catch and report
        // instead of letting one bad batch kill the whole job.
        try {
          pollAndProcess(sc, sqlCon, kafkaConsumer)
        } catch {
          case NonFatal(e) =>
            System.err.println(s"attendance batch failed: ${e.getMessage}")
            e.printStackTrace()
        }
      }
    }

    // FIX: release the consumer and the scheduler when the JVM exits
    // (the original leaked both).
    sys.addShutdownHook {
      scheduler.shutdown()
      scheduler.awaitTermination(10, TimeUnit.SECONDS)
      kafkaConsumer.close()
    }

    // Run the batch immediately, then every 5 seconds.
    scheduler.scheduleAtFixedRate(task, 0, 5, TimeUnit.SECONDS)
  }

  /** Polls one batch from Kafka, aggregates it with Spark SQL and persists the stats. */
  private def pollAndProcess(sc: SparkContext,
                             sqlCon: SQLContext,
                             kafkaConsumer: KafkaConsumer[String, String]): Unit = {
    val records = kafkaConsumer.poll(1000).asScala
    // FIX: split with limit -1 so trailing empty fields (e.g. an empty status)
    // are kept, and records with fewer than 5 fields are skipped instead of
    // throwing ArrayIndexOutOfBoundsException.
    val data = records
      .map(_.value().split("\t", -1))
      .collect { case arr if arr.length >= 5 => attendance(arr(2), arr(1), arr(3), arr(4)) }
      .toList

    if (data.nonEmpty) {
      import sqlCon.implicits._

      // Turn the batch into a temp view so it can be queried with SQL.
      val newDF = sc.parallelize(data).toDF()
      newDF.createOrReplaceTempView("attendance1")

      // Count late ('L') and absent ('A') marks per course; COUNT ignores the
      // NULLs produced when the CASE does not match.
      val statsDF = sqlCon.sql(
        """SELECT course,
          |       COUNT(CASE WHEN status='L' THEN 1 END) AS lCount,
          |       COUNT(CASE WHEN status='A' THEN 1 END) AS aCount
          |FROM attendance1
          |GROUP BY course""".stripMargin)

      statsDF.show()
      writeToMysql(statsDF)
      println("执行成功")
    } else {
      println("没有获取到数据")
    }
  }

  /** Upserts per-course counts into MySQL, one connection per partition. */
  private def writeToMysql(statsDF: DataFrame): Unit = {
    val jdbcUrl = "jdbc:mysql://localhost:3306/attendance?serverTimezone=GMT%2B8"
    val dbTable = "course_attendance"
    val connectionProperties = new Properties()
    connectionProperties.put("user", "root")
    connectionProperties.put("password", "123456")

    statsDF.foreachPartition { (partition: Iterator[Row]) =>
      val connection = java.sql.DriverManager.getConnection(jdbcUrl, connectionProperties)
      try {
        // FIX: parameterized statement instead of interpolating row values into
        // the SQL text — the original broke on quotes in course names and was
        // open to SQL injection. The statement is also prepared once per
        // partition instead of being rebuilt for every row.
        val upsertSql =
          s"""INSERT INTO $dbTable (course, lCount, aCount)
             |VALUES (?, ?, ?)
             |ON DUPLICATE KEY UPDATE
             |lCount = lCount + VALUES(lCount),
             |aCount = aCount + VALUES(aCount)""".stripMargin
        val statement = connection.prepareStatement(upsertSql)
        try {
          partition.foreach { row =>
            statement.setString(1, row.getString(0))
            statement.setLong(2, row.getLong(1))
            statement.setLong(3, row.getLong(2))
            statement.executeUpdate()
          }
        } finally {
          statement.close()
        }
      } finally {
        connection.close()
      }
    }
  }
}
