package org.example

import org.apache.spark.sql.{SparkSession, functions}
import org.apache.spark.sql.types.DoubleType

object SQL {
  /** Entry point: reads a GBK-encoded grade CSV, reports the max/average of
    * the "平时成绩" (regular-grade) column, and lists students scoring below 60.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("GradeAnalysis")
      .getOrCreate()
    spark.sparkContext.setLogLevel("WARN")

    try {
      // The source file uses Chinese column headers and GBK encoding;
      // the header row supplies the column names.
      val df = spark.read
        .option("header", "true")
        .option("encoding", "GBK")
        .csv("src/main/resources/23yun1.csv")

      // CSV columns arrive as strings; cast the score column to Double.
      // Unparseable values become null and are filtered out below.
      val gradeDF = df.withColumn("平时成绩", functions.col("平时成绩").cast(DoubleType))

      gradeDF.createOrReplaceTempView("student_grades")

      // Aggregate statistics over non-null scores only.
      val statsDF = spark.sql(
        """
          |SELECT
          |  ROUND(MAX(`平时成绩`), 2) AS max_score,
          |  ROUND(AVG(`平时成绩`), 2) AS avg_score
          |FROM student_grades
          |WHERE `平时成绩` IS NOT NULL
          |""".stripMargin)

      // Failing students (< 60), highest score first. SQL null semantics
      // already exclude rows whose score failed to parse.
      val failDF = spark.sql(
        """
          |SELECT
          |  `学生姓名`,
          |  `平时成绩`
          |FROM student_grades
          |WHERE `平时成绩` < 60
          |ORDER BY `平时成绩` DESC
          |""".stripMargin)

      // Global aggregation always yields exactly one row.
      val stats = statsDF.first()
      val maxScore = stats.getAs[Double]("max_score")
      val avgScore = stats.getAs[Double]("avg_score")

      val failStudents = failDF.collect()
      val failCount = failStudents.length
      val failNames = failStudents.map(_.getAs[String]("学生姓名")).mkString("\n  • ")

      println("================ 成绩分析报告 ================")
      println(f"最高分：$maxScore%.2f")
      println(f"平均分：$avgScore%.2f")
      println("---------------------------------------------")
      println(s"不及格人数：$failCount 人")
      println("不及格学生名单：")
      // Only print the bulleted list when there is at least one failing
      // student; otherwise the report would show a stray empty bullet.
      if (failCount > 0) {
        println(s"  • $failNames")
      }
      println("=============================================")
    } finally {
      // Always release the SparkSession, even when reading the CSV or
      // running the SQL fails. (Previously spark.stop() was the last
      // statement inside the try body and was skipped on any exception.)
      spark.stop()
    }
  }
}