package cn.whuc.test

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Computes, per subject, the average score and the number of passing
 * students (score >= 60) over a small in-memory dataset, then prints
 * each `(subject, (avgScore, passCount))` pair to stdout.
 */
object Demo03 {
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkContext. Run locally on all cores; give the job a
    //    real name so it is identifiable in the Spark UI (was blank before).
    val sc: SparkContext = new SparkContext(
      new SparkConf()
        .setMaster("local[*]")
        .setAppName("Demo03")
    )

    // 2. Build an RDD of (studentName, subject, score) records.
    val studentScoresRDD = sc.parallelize(Seq(
      ("Alice", "Math", 70),
      ("Bob", "Math", 85),
      ("Charlie", "English", 60),
      ("David", "English", 75),
      ("Emily", "Math", 90),
      ("Frank", "English", 55)
    ))

    // Re-key by subject, carrying (score, passFlag) where passFlag is
    // 1 when the score is a pass (>= 60) and 0 otherwise.
    val subjectScoreRDD: RDD[(String, (Int, Int))] = studentScoresRDD.map {
      case (_, subject, score) => (
        subject, (score, if (score >= 60) 1 else 0)
      )
    }

    // Aggregate per subject into (totalScore, passCount, studentCount).
    // aggregateByKey combines partially on the map side, so unlike
    // groupByKey it never ships a full list of scores per subject across
    // the shuffle, and each value is visited exactly once.
    val resultRDD: RDD[(String, (Double, Int))] = subjectScoreRDD
      .aggregateByKey((0, 0, 0))(
        // seqOp: fold one (score, passFlag) into a partition-local accumulator
        { case ((total, pass, count), (score, passed)) =>
          (total + score, pass + passed, count + 1)
        },
        // combOp: merge accumulators from different partitions
        { case ((t1, p1, c1), (t2, p2, c2)) =>
          (t1 + t2, p1 + p2, c1 + c2)
        }
      )
      // Finalize: (averageScore, passCount). Guard against a zero count,
      // although aggregateByKey only emits keys that were seen at least once.
      .mapValues { case (total, pass, count) =>
        (if (count > 0) total.toDouble / count else 0.0, pass)
      }

    resultRDD.collect().foreach(println)

    // 3. Shut down the SparkContext.
    sc.stop()
  }
}
