import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DoubleType

object aa {
  /**
   * Score-analysis entry point: reads a CSV of student scores and prints the
   * maximum score, average score, count of failing students (< 60), and the
   * list of failing students' names.
   */
  def main(args: Array[String]): Unit = {
    // Initialize a local SparkSession.
    val spark = SparkSession.builder()
      .appName("ScoreAnalysis")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", "8") // small local job: keep shuffle partition count low
      .getOrCreate()

    try {
      // Load and pre-process the score data.
      // NOTE(review): the original read with header=true but then selected the
      // positional columns _c0/_c2, which Spark only generates when header=false
      // — that combination fails at runtime with unresolved column errors.
      // Reading headerless here to match the positional selects; confirm
      // whether 23yun1.csv actually has a header row.
      // inferSchema is dropped: it forces an extra full pass over the file and
      // the score column is cast explicitly below anyway.
      val scoreDF = spark.read
        .option("header", "false")
        .csv("src/main/resources/23yun1.csv")
        .select(
          col("_c0").as("name"),
          col("_c2").cast(DoubleType).as("score")
        )
        .filter(col("score").isNotNull) // drop rows whose score is missing or non-numeric
        .cache() // cache the cleaned data (reused if more analyses are added)

      // Compute all metrics in a single aggregation pass.
      val result = scoreDF.agg(
        max("score").as("max_score"),
        format_number(avg("score"), 2).as("avg_score"),
        // count() ignores the NULLs that when() yields for passing scores.
        count(when(col("score") < 60, 1)).as("fail_count"),
        // Bug fix: the original collect_set(name) gathered EVERY student's
        // name, not just failures. when() yields NULL for passing students
        // and collect_set skips NULLs, so only failing names are collected.
        collect_set(when(col("score") < 60, col("name"))).as("fail_names")
      )

      // Print the formatted summary (Chinese labels are user-facing output).
      result.collect().foreach { row =>
        println(
          s"""最高分: ${row.getAs[Double]("max_score")}
             |平均分: ${row.getAs[String]("avg_score")}
             |不及格人数: ${row.getAs[Long]("fail_count")}
             |名单: ${row.getAs[Seq[String]]("fail_names").mkString("[", ", ", "]")}""".stripMargin)
      }
    } finally {
      spark.stop() // always release Spark resources, even on failure
    }
  }
}

