import java.util.Properties
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.types._
import scala.collection.JavaConverters._
// log4j is imported so the log level can be forced to WARN before Spark starts
import org.apache.log4j.{Level, Logger}

/**
 * Streams student-activity records from a Kafka topic, appends each
 * micro-batch to the MySQL base table `tb_teaching`, and then refreshes a set
 * of aggregate ("ads_*") analysis tables from the accumulated data.
 *
 * Record format on the wire: one CSV line per Kafka message value, with the
 * 17 fields described by `schema` (in that order).
 */
object SparkKafkaStreaming {
  def main(args: Array[String]): Unit = {
    // Quiet framework logging. Must happen BEFORE SparkSession is created,
    // otherwise Spark's own log4j initialisation wins.
    Logger.getLogger("org").setLevel(Level.WARN)                // all org.* packages: WARN+
    Logger.getLogger("org.apache.kafka").setLevel(Level.ERROR)  // Kafka client: ERROR only
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)   // Spark: WARN+

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("kafka_accumulate_full")
      // Tiny local job: one shuffle partition avoids 200 near-empty tasks.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    println(s"Spark Version: ${spark.version}")

    // --- JDBC configuration -------------------------------------------------
    // SECURITY: credentials/URL are taken from the environment when present.
    // The hard-coded defaults are kept only for backward compatibility with
    // the original local setup and should not be used outside a dev machine.
    val jdbcUrl = sys.env.getOrElse(
      "TEACHING_DB_URL",
      "jdbc:mysql://localhost:3306/db_teaching?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Hongkong&allowPublicKeyRetrieval=true&allowMultiQueries=true"
    )
    val jdbcUser     = sys.env.getOrElse("TEACHING_DB_USER", "root")
    val jdbcPassword = sys.env.getOrElse("TEACHING_DB_PASSWORD", "20040429lk")
    val jdbcDriver   = "com.mysql.cj.jdbc.Driver"

    val jdbcProps = new Properties()
    jdbcProps.setProperty("user", jdbcUser)
    jdbcProps.setProperty("password", jdbcPassword)
    jdbcProps.setProperty("driver", jdbcDriver)

    /** Appends a micro-batch to the base table `tb_teaching`. */
    def saveToMysqlTable(batchDF: org.apache.spark.sql.DataFrame): Unit =
      batchDF.write
        .mode("append")
        .jdbc(jdbcUrl, "tb_teaching", jdbcProps)

    /** Reads the full accumulated base table back for analysis. */
    def readMysqlTable(): org.apache.spark.sql.DataFrame =
      spark.read.jdbc(jdbcUrl, "tb_teaching", jdbcProps)

    /**
     * Overwrites an analysis table with `data`.
     *
     * @param data    result rows to persist
     * @param dbtable target MySQL table name
     * @param field   column DDL passed to `createTableColumnTypes` so MySQL
     *                gets sensible types instead of Spark's TEXT defaults
     */
    def saveAnalysisResult(data: org.apache.spark.sql.DataFrame, dbtable: String, field: String): Unit = {
      println(dbtable)
      data.write
        .format("jdbc")
        .option("url", jdbcUrl)
        .option("driver", jdbcDriver)
        .option("dbtable", dbtable)
        .option("user", jdbcUser)         // single source of truth for credentials
        .option("password", jdbcPassword) // (previously duplicated hard-coded literals)
        .option("createTableColumnTypes", field)
        .mode("overwrite")
        .save()
    }

    /**
     * Shared scaffold for every analysis: read the base table, apply the
     * aggregation, overwrite the target table.
     */
    def runAnalysis(dbtable: String, ddl: String)(
        transform: org.apache.spark.sql.DataFrame => org.apache.spark.sql.DataFrame): Unit =
      saveAnalysisResult(transform(readMysqlTable()), dbtable, ddl)

    // Schema of one record; field order must match the CSV field order in the
    // Kafka message value. Mixed-case names (NationalITy, VisITedResources)
    // mirror the source dataset's column names on purpose.
    val schema = StructType(Seq(
      StructField("gender", StringType, true),
      StructField("NationalITy", StringType, true),
      StructField("PlaceofBirth", StringType, true),
      StructField("StageID", StringType, true),
      StructField("GradeID", StringType, true),
      StructField("SectionID", StringType, true),
      StructField("Topic", StringType, true),
      StructField("Semester", StringType, true),
      StructField("Relation", StringType, true),
      StructField("raisedhands", IntegerType, true),
      StructField("VisITedResources", IntegerType, true),
      StructField("AnnouncementsView", IntegerType, true),
      StructField("Discussion", IntegerType, true),
      StructField("ParentAnsweringSurvey", StringType, true),
      StructField("ParentschoolSatisfaction", StringType, true),
      StructField("StudentAbsenceDays", StringType, true),
      StructField("Class", StringType, true)
    ))

    // Top 10 student relations by total raised hands.
    def analyzeTop10Relation(): Unit =
      runAnalysis("ads_relation_raisedhands_top10", "Relation varchar(255), total_raisedhands int") { df =>
        df.groupBy("Relation")
          .agg(sum("raisedhands").as("total_raisedhands"))
          .orderBy(desc("total_raisedhands"))
          .limit(10)
      }

    // Total resource visits per class.
    def analyzeResourceByClass(): Unit =
      runAnalysis("ads_Class_resource_stats", "Class varchar(255), total_resources int") { df =>
        df.groupBy("Class").agg(sum("VisITedResources").as("total_resources"))
      }

    // Average discussion participation per education stage (2 decimals).
    def analyzeDiscussionByStage(): Unit =
      runAnalysis("ads_stage_discussion_stats", "StageID varchar(255), avg_discussion double") { df =>
        df.groupBy("StageID")
          .agg(round(avg("Discussion"), 2).as("avg_discussion"))
      }

    // Average raised hands per grade, highest first (2 decimals).
    def analyzeRaisedHandsByGrade(): Unit =
      runAnalysis("ads_grade_raisedhands_stats", "GradeID varchar(255), avg_raisedhands double") { df =>
        df.groupBy("GradeID")
          .agg(round(avg("raisedhands"), 2).as("avg_raisedhands"))
          .orderBy(desc("avg_raisedhands"))
      }

    // Top 5 topics by total resource visits.
    def analyzeTop5TopicsByResources(): Unit =
      runAnalysis("ads_topic_resources_top5", "Topic varchar(255), total_resources int") { df =>
        df.groupBy("Topic")
          .agg(sum("VisITedResources").as("total_resources"))
          .orderBy(desc("total_resources"))
          .limit(5)
      }

    // Distribution of parent school-satisfaction responses.
    def analyzeParentSatisfaction(): Unit =
      runAnalysis("ads_parent_satisfaction_dist", "ParentschoolSatisfaction varchar(255), student_count int") { df =>
        df.groupBy("ParentschoolSatisfaction")
          .agg(count(lit(1)).as("student_count"))
      }

    // Average online-resource visits per grade (2 decimals).
    def analyzeResourcesByGradeID(): Unit =
      runAnalysis("ads_gradeID_resources_stats", "GradeID varchar(255), avg_resources double") { df =>
        df.groupBy("GradeID")
          .agg(round(avg("VisITedResources"), 2).as("avg_resources"))
      }

    /**
     * foreachBatch sink: persists the micro-batch to the base table and then
     * refreshes every analysis table from the accumulated data.
     */
    def outputFullData(microBatchDF: org.apache.spark.sql.DataFrame, batchId: Long): Unit = {
      // Cache the micro-batch: it is consumed twice (count + parse). Without
      // this, each action re-reads the batch from Kafka.
      val batch = microBatchDF.persist()
      try {
        val newRows = batch.count()
        println(s"\n=== 第${batchId + 1}次批次处理（${newRows}条新数据） ===")

        // Empty batches (no new Kafka data within the trigger interval) change
        // nothing, so skip the write and the seven analysis recomputations.
        if (newRows > 0) {
          // Split the comma-separated value into the 17 schema fields and cast
          // each to its declared type (the four counter columns become Int).
          val fieldCols = schema.fields.zipWithIndex.map { case (f, i) =>
            col("fields").getItem(i).cast(f.dataType).as(f.name)
          }
          val parsedDF = batch
            .selectExpr("split(cast(value as string), ',') as fields")
            .select(fieldCols: _*)

          // DataFrameWriter.jdbc is synchronous, so the rows are committed
          // before the analyses below re-read the table. (The original code
          // also slept 1s here "to let data land" — unnecessary, removed.)
          saveToMysqlTable(parsedDF)

          analyzeTop10Relation()
          analyzeResourceByClass()
          analyzeDiscussionByStage()
          analyzeRaisedHandsByGrade()
          analyzeTop5TopicsByResources()
          analyzeParentSatisfaction()
          analyzeResourcesByGradeID()
        }
      } finally {
        batch.unpersist()
      }
    }

    // Kafka source. NOTE(review): no checkpointLocation is configured and
    // startingOffsets=latest, so a restart silently skips anything produced
    // while the job was down — add a checkpoint dir if at-least-once matters.
    val kafkaDF = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")
      .option("subscribe", "topic_test001") // change here to switch topics
      .option("startingOffsets", "latest")
      .load()

    // Process accumulated messages every 5 seconds until terminated.
    val streamingQuery = kafkaDF.writeStream
      .outputMode("append")
      .foreachBatch(outputFullData _)
      .trigger(Trigger.ProcessingTime("5 seconds"))
      .start()

    streamingQuery.awaitTermination()
  }
}