package org.example
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}


object sparkSQL_148 {
  /**
   * Entry point for a Spark SQL exercise.
   *
   * Reads student/score files in several ways (raw RDD, Dataset[String],
   * header-less CSV), then queries `23yun1.csv` (GBK-encoded, with header)
   * for the max score, average score, the count of students below 60, and
   * a comma-joined list of their names.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkBase")
      .getOrCreate()
    val sc = spark.sparkContext

    // Raw RDD read: each element is one line of the file.
    val studentRDD: RDD[String] = sc.textFile("src/main/resources/studen.txt")
//    studentRDD.foreach(println)
    // SQL-style read: textFile yields a Dataset[String] with a single column.
    val studentSQL: Dataset[String] = spark.read.textFile("src/main/resources/studen.txt")
    // Print the data schema (column names and types).
//    studentSQL.printSchema()
    // Inspect the data.
//    studentSQL.show()
    // CSV read without a header: columns are auto-named _c0, _c1, ...
    val studentFrame: DataFrame = spark.read.csv("src/main/resources/studen.txt")
    studentFrame.printSchema()
    studentFrame.show()
// Create a temporary view so the DataFrame can be queried with SQL.
//    studentFrame.createTempView("score")
//    val res = spark.sql(
//      """
//        |select
//        | _c2 as term,
//        | avg(_c3) as avg_score,
//        | max(_c3) as max_score,
//        | sum(_c3) as sum_score
//        | from score
//        | group by _c2
//        |""".stripMargin
//    )
//    res.show()
//    val res1 = spark.sql(
//      """
//        |select
//        | _c1
//        | from score
//        | where _c3 > 90
//        |""".stripMargin
//    )
//    res1.show()
// In-class exercise: for our class's Spark regular grades, find the highest
// score, the average score, and the count and names of students below 60.
//    val sparkScore = spark.read.option("encoding","GBK").option("header","true").csv("src/main/resources/23yun1.csv")
//    sparkScore.printSchema()
//    sparkScore.show()
//    sparkScore.createTempView("score")
//    val res2 = spark.sql(
//      """
//        |select
//        | _c1 as term,
//        | avg(_c3) as avg_score,
//        | max(_c3) as max_score
//        | from score
//        | where _c3 < 60
//        |""".stripMargin
//    )
//    res2.show()



    // Read the graded CSV: has a header row and is GBK-encoded.
    val sparkScoreDF: DataFrame = spark.read
      .option("header", "true")
      .option("encoding", "GBK")
      .csv("src/main/resources/23yun1.csv")

    sparkScoreDF.createOrReplaceTempView("spark_scores")

    // Single aggregate query: CASE WHEN ... ELSE NULL lets COLLECT_LIST
    // gather only the names of students below 60 (NULLs are skipped).
    val resultDF = spark.sql(
      """
      SELECT
      MAX(`平时成绩`) AS max_score,
      AVG(`平时成绩`) AS avg_score,
      SUM(CASE WHEN `平时成绩` < 60 THEN 1 ELSE 0 END) AS count_below_60,
      CONCAT_WS(', ', COLLECT_LIST(CASE WHEN `平时成绩` < 60 THEN `学生姓名` ELSE NULL END)) AS names_below_60
      FROM spark_scores
      """
    )
    // truncate = false so long name lists are printed in full.
    resultDF.show(truncate = false)

    // Stop the session (also stops the underlying SparkContext);
    // preferable to sc.stop(), which leaves the session half-closed.
    spark.stop()
  }
}
