package org.huel.processing

import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

import java.time.Duration
import java.util.concurrent.{Executors, TimeUnit}
import java.util.{Arrays, Properties}

import scala.jdk.CollectionConverters.IterableHasAsScala
import scala.util.control.NonFatal

object runSQL {

  /**
   * One attendance record parsed from a tab-separated Kafka message.
   *
   * Source message layout (by index): field 0 is unused here — presumably a
   * timestamp or record id, TODO confirm against the producer; 1 = name,
   * 2 = course, 3 = studentId, 4 = status ('L' = late, 'A' = absent per the
   * aggregation query below).
   */
  case class Attendance(course: String, name: String, studentId: String, status: String)

  /*
   * Pipeline:
   *   1. Consume attendance records from Kafka (topic "attendance").
   *   2. Convert them into a Spark SQL DataFrame.
   *   3. Aggregate late ('L') and absent ('A') counts per course via SQL.
   *   4. UPSERT the aggregates into MySQL (counts are accumulated on conflict).
   *   5. Repeat every 5 seconds.
   */
  def main(args: Array[String]): Unit = {
    // Spark setup: local mode using all available cores.
    val conf = new SparkConf().setAppName("SparkSql").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val sqlCon = new SQLContext(sc)
    sc.setLogLevel("error")

    // Kafka consumer configuration: read String key/value pairs from the
    // beginning of the topic on first run (group "sparksql").
    val properties: Properties = new Properties
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "43.143.125.94:9092")
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "sparksql")
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    // Single consumer, polled only from the one scheduler thread below —
    // KafkaConsumer is not thread-safe, so the pool size of 1 matters.
    val kfkconsumer: KafkaConsumer[String, String] = new KafkaConsumer[String, String](properties)
    kfkconsumer.subscribe(Arrays.asList("attendance"))

    // Scheduler that drives the poll/aggregate/upsert cycle in the background.
    val scheduler = Executors.newScheduledThreadPool(1)

    // Release external resources on JVM exit (Ctrl-C, SIGTERM).
    Runtime.getRuntime.addShutdownHook(new Thread(() => {
      scheduler.shutdown()
      kfkconsumer.close()
    }))

    // The periodic task. NOTE: scheduleAtFixedRate permanently cancels all
    // future executions if run() throws, so the body is guarded — a single
    // malformed record or transient DB outage must not kill the pipeline.
    val task = new Runnable {
      def run(): Unit = {
        try {
          processBatch()
        } catch {
          case NonFatal(e) =>
            // Log and keep the schedule alive; fatal errors still propagate.
            System.err.println(s"Batch failed: ${e.getMessage}")
            e.printStackTrace()
        }
      }

      /** Polls one batch from Kafka, aggregates it, and upserts into MySQL. */
      private def processBatch(): Unit = {
        // poll(Duration) replaces the deprecated poll(long) overload.
        val records = kfkconsumer.poll(Duration.ofMillis(1000)).asScala
        val data = records
          .map(record => record.value().split("\t"))
          // Skip malformed lines instead of crashing on a short split result.
          .collect { case arr if arr.length >= 5 => Attendance(arr(2), arr(1), arr(3), arr(4)) }
          .toList

        if (data.nonEmpty) {
          // Lift the batch into a DataFrame so it can be queried with SQL.
          val attendanceRDD = sc.parallelize(data)
          import sqlCon.implicits._
          val attendanceDF = attendanceRDD.toDF()

          // Temp view backing the aggregation query below.
          attendanceDF.createOrReplaceTempView("attendance")

          // Per-course late ('L') and absent ('A') counts.
          val statsDF = sqlCon.sql("""
            SELECT course,
                   COUNT(CASE WHEN status = 'L' THEN 1 END) AS lCount,
                   COUNT(CASE WHEN status = 'A' THEN 1 END) AS aCount
            FROM attendance
            GROUP BY course
          """)

          // Show the batch aggregates on the driver console.
          statsDF.show()

          // MySQL connection settings.
          // NOTE(review): credentials are hardcoded — move to config/env.
          val jdbcUrl = "jdbc:mysql://43.143.125.94:3306/attendance?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai"
          val dbTable = "LAByCourse"
          val connectionProperties = new Properties()
          connectionProperties.put("user", "root")
          connectionProperties.put("password", "Lwj378$$")

          // UPSERT each partition: on duplicate course key, ACCUMULATE the
          // new counts onto the stored ones (running totals across batches).
          statsDF.foreachPartition { (partition: Iterator[Row]) =>
            val connection = java.sql.DriverManager.getConnection(jdbcUrl, connectionProperties)
            try {
              // Bind parameters instead of string interpolation: course names
              // come from Kafka and must not be spliced into the SQL text
              // (SQL-injection / quoting bugs). $dbTable is a local constant.
              val statement = connection.prepareStatement(
                s"""INSERT INTO $dbTable (course, lCount, aCount)
                   |VALUES (?, ?, ?)
                   |ON DUPLICATE KEY UPDATE
                   |  lCount = lCount + VALUES(lCount),
                   |  aCount = aCount + VALUES(aCount)""".stripMargin)
              try {
                partition.foreach { row =>
                  statement.setString(1, row.getString(0))
                  statement.setLong(2, row.getLong(1))
                  statement.setLong(3, row.getLong(2))
                  statement.executeUpdate()
                }
              } finally {
                statement.close()
              }
            } finally {
              connection.close()
            }
          }
          println("Data processed and stored")
        } else {
          println("No data")
        }
      }
    }

    // Run the task immediately, then every 5 seconds.
    scheduler.scheduleAtFixedRate(task, 0, 5, TimeUnit.SECONDS)
  }
}
