package main.scala.org.huel.dataprocessing

import java.sql.{Connection, DriverManager, PreparedStatement}
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter

import scala.util.Try

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * @author Liweijian
 * @Description 实时统计所有课程的数量
 * course_count  总课程数量
 * course_attendance  每个课程出勤缺勤统计
 * @date 2025/06/13
 */
object CourseStatisticsApp {
  def main(args: Array[String]): Unit = {
    // Spark configuration: local mode; graceful shutdown lets in-flight
    // batches finish before the context stops.
    val sparkConf = new SparkConf()
      .setAppName("CourseStatisticsApp")
      .setMaster("local[*]")  // local run mode
      .set("spark.streaming.stopGracefullyOnShutdown", "true")

    // SparkSession for DataFrame/SQL operations; shares the SparkContext below.
    val spark = SparkSession.builder
      .config(sparkConf)
      .getOrCreate()

    import spark.implicits._

    // Reduce log noise.
    spark.sparkContext.setLogLevel("WARN")

    // StreamingContext with 5-second micro-batches.
    val ssc = new StreamingContext(spark.sparkContext, Seconds(5))

    // Kafka consumer settings. Auto-commit is disabled: offsets are committed
    // manually once a batch has been processed (at-least-once semantics).
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "43.143.125.94:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "course_statistics_group",
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Subscribed Kafka topics.
    val topics = Array("attendance")

    // Direct stream (one Kafka partition per RDD partition).
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    // Per-batch processing.
    stream.foreachRDD { rdd =>
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      if (!rdd.isEmpty()) {
        // Timestamp for the report header.
        val formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
        val formattedTime = LocalDateTime.now().format(formatter)

        println(s"\n=========================================================")
        println(s"| 课程统计报告 - ${formattedTime} |")
        println(s"=========================================================")

        // Parse tab-separated records into typed tuples.
        // FIX: the original called `.toInt` unguarded, so a single record with
        // a non-numeric score/is_absent field threw NumberFormatException on an
        // executor and killed the streaming job. Malformed rows (wrong column
        // count or unparsable ints) are now dropped via flatMap + Try.
        val attendanceDF = rdd
          .map(record => record.value())
          .flatMap { line =>
            val fields = line.split("\t")
            if (fields.length == 6) {
              Try((
                fields(0),       // class_id
                fields(1),       // student_name
                fields(2),       // course_name
                fields(3),       // student_id
                fields(4).toInt, // score
                fields(5).toInt  // is_absent
              )).toOption       // None when either numeric field is malformed
            } else {
              None // malformed record: wrong number of columns
            }
          }
          .filter(_._5 >= 0) // keep original guard: drop negative-score sentinels
          .toDF("class_id", "student_name", "course_name", "student_id", "score", "is_absent")

        // Register as a temp view for SQL queries below.
        attendanceDF.createOrReplaceTempView("attendance")

        // 1. Count distinct courses seen in this batch.
        val courseCountDF = spark.sql("""
          SELECT
            COUNT(DISTINCT course_name) AS course_count
          FROM attendance
        """)

        println("\n【总课程数量】")
        courseCountDF.withColumnRenamed("course_count", "课程总数").show(false)

        // 2. Per-course attendance detail (present/absent counts and rate).
        val courseDetailDF = spark.sql("""
          SELECT
            course_name,
            SUM(CASE WHEN is_absent = 0 THEN 1 ELSE 0 END) AS present_count,
            SUM(CASE WHEN is_absent = 1 THEN 1 ELSE 0 END) AS absent_count,
            COUNT(*) AS total_records,
            ROUND(SUM(CASE WHEN is_absent = 0 THEN 1 ELSE 0 END) / COUNT(*) * 100, 2) AS attendance_rate
          FROM attendance
          GROUP BY course_name
          ORDER BY course_name
        """)

        // Rename columns for display only; courseDetailDF keeps original names
        // because saveCourseAttendance reads them by name.
        val displayCourseDetail = courseDetailDF
          .withColumnRenamed("course_name", "课程名称")
          .withColumnRenamed("present_count", "出勤记录数")
          .withColumnRenamed("absent_count", "缺勤记录数")
          .withColumnRenamed("total_records", "总记录数")
          .withColumnRenamed("attendance_rate", "出勤率(%)")

        println("\n【课程详细统计】")
        displayCourseDetail.show(100, false)

        // 3. Distribution of each course across classes.
        val coursesPerClassDF = spark.sql("""
          SELECT
            course_name,
            class_id,
            COUNT(DISTINCT student_id) AS student_count,
            COUNT(*) AS record_count
          FROM attendance
          GROUP BY course_name, class_id
          ORDER BY course_name, class_id
        """)

        // Display-only renames.
        val displayCoursesPerClass = coursesPerClassDF
          .withColumnRenamed("course_name", "课程名称")
          .withColumnRenamed("class_id", "班级号")
          .withColumnRenamed("student_count", "学生人数")
          .withColumnRenamed("record_count", "记录数")

        println("\n【课程班级分布】")
        displayCoursesPerClass.show(100, false)

        // Persist results to MySQL. Failures are logged but do not stop the
        // stream (best-effort persistence, intentional).
        try {
          // 1. Total distinct course count.
          val totalCourses = courseCountDF.head().getLong(0)
          saveTotalCourseCount(totalCourses)

          // 2. Per-course attendance counts.
          saveCourseAttendance(courseDetailDF)

          println("\n✓ 课程统计数据已保存到MySQL数据库")
        } catch {
          case e: Exception =>
            println(s"\n✗ 保存数据到MySQL时出错: ${e.getMessage}")
            e.printStackTrace()
        }
      }

      // Commit Kafka offsets for every batch.
      // FIX: previously this sat inside the `!rdd.isEmpty()` branch, so the
      // consumer group's committed offsets never advanced across idle batches
      // and lagged behind the actually-processed position.
      stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    // Start the streaming context and block until terminated.
    ssc.start()
    println("\n✓ Spark Streaming 课程统计程序已启动")
    println("✓ 正在监听Kafka数据流...")
    ssc.awaitTermination()
  }

  /**
   * Upserts the total distinct course count into the `course_count` table.
   *
   * JDBC errors are caught and logged (best-effort write); resources are
   * released in `finally`.
   *
   * @param totalCourses number of distinct courses in the current batch
   */
  def saveTotalCourseCount(totalCourses: Long): Unit = {
    var connection: Connection = null
    var statement: PreparedStatement = null

    try {
      connection = createConnection()
      statement = connection.prepareStatement(
        """
          |INSERT INTO course_count (total_courses, last_updated)
          |VALUES (?, NOW())
          |ON DUPLICATE KEY UPDATE
          |total_courses = VALUES(total_courses),
          |last_updated = NOW()
          |""".stripMargin)

      statement.setLong(1, totalCourses)
      statement.executeUpdate()

    } catch {
      case e: Exception =>
        println(s"保存课程总数时出错: ${e.getMessage}")
        e.printStackTrace()
    } finally {
      if (statement != null) statement.close()
      if (connection != null) connection.close()
    }
  }

  /**
   * Upserts per-course present/absent counts into `course_attendance`.
   *
   * Runs on the executors via `foreachPartition`: one JDBC connection and one
   * batched statement per non-empty partition. Expects the DataFrame to carry
   * columns `course_name` (string), `present_count` and `absent_count` (long),
   * as produced by the aggregation query in `main`.
   *
   * @param courseStatsDF per-course aggregation result
   */
  def saveCourseAttendance(courseStatsDF: org.apache.spark.sql.DataFrame): Unit = {
    // Explicit Iterator[Row] type disambiguates the Scala/Java foreachPartition overloads.
    courseStatsDF.foreachPartition { (partition: Iterator[Row]) =>
      if (partition.nonEmpty) {
        var connection: Connection = null
        var statement: PreparedStatement = null

        try {
          connection = createConnection()
          statement = connection.prepareStatement(
            """
              |INSERT INTO course_attendance
              |(course_name, present_count, absent_count, last_updated)
              |VALUES (?, ?, ?, NOW())
              |ON DUPLICATE KEY UPDATE
              |present_count = VALUES(present_count),
              |absent_count = VALUES(absent_count),
              |last_updated = NOW()
              |""".stripMargin)

          // Accumulate all rows of the partition into one JDBC batch.
          partition.foreach { row =>
            val courseName = row.getAs[String]("course_name")
            val presentCount = row.getAs[Long]("present_count")
            val absentCount = row.getAs[Long]("absent_count")

            statement.setString(1, courseName)
            statement.setLong(2, presentCount)
            statement.setLong(3, absentCount)
            statement.addBatch()
          }

          statement.executeBatch()

        } catch {
          case e: Exception =>
            println(s"保存课程出勤统计时出错: ${e.getMessage}")
            e.printStackTrace()
        } finally {
          if (statement != null) statement.close()
          if (connection != null) connection.close()
        }
      }

      // Explicit Unit so the closure matches the ForeachPartitionFunction shape.
      ()
    }
  }

  /**
   * Opens a MySQL connection to the `attendance_new` database.
   *
   * NOTE(review): credentials are hard-coded in source; move them to
   * configuration (args/env/properties file) before sharing or deploying.
   *
   * @return an open JDBC connection; caller is responsible for closing it
   */
  def createConnection(): Connection = {
    // Load the MySQL JDBC driver.
    Class.forName("com.mysql.cj.jdbc.Driver")

    DriverManager.getConnection(
      "jdbc:mysql://43.143.125.94:3306/attendance_new?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai",
      "root",
      "Lwj378$$"
    )
  }
}