package org.example

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{avg, col, max}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}


/**
 * Demo: loads the same student file three ways (RDD, Dataset, DataFrame),
 * runs aggregation queries through Spark SQL, then computes summary
 * statistics (average, maximum, fail count) over a second graded CSV.
 *
 * Side effects: reads two files under src/main/resources and prints
 * schemas, rows and statistics to stdout.
 */
object sparkData2_SQL1 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("sparkData2_SQL1")
      .master("local[*]")
      .getOrCreate()
    // try/finally so the session is stopped even if a query fails.
    try {
      val sc = spark.sparkContext

      // Unstructured load: plain RDD of text lines.
      val student1: RDD[String] = sc.textFile("src/main/resources/student.txt")
      student1.foreach(println)

      // Typed load: Dataset[String], one element per line.
      val studentSet: Dataset[String] =
        spark.read.textFile("src/main/resources/student.txt")
      studentSet.printSchema()

      // Structured load: CSV parsed into a DataFrame. With no header the
      // columns are auto-named _c0, _c1, _c2, _c3, ...
      val studentFrame: DataFrame =
        spark.read.csv("src/main/resources/student.txt")
      // Print the inferred schema (column names and types).
      studentFrame.printSchema()
      studentFrame.show()

      // Register a temporary view so the data can be queried with SQL.
      studentFrame.createTempView("score")

      // Per-term (_c2) aggregation over the score column (_c3).
      val res: DataFrame = spark.sql(
        """
          |select
          |  _c2 as term,
          |  avg(_c3) as avg_score,
          |  max(_c3) as max_score,
          |  sum(_c3) as sum_score
          |  from score
          |  group by _c2
          |""".stripMargin
      )
      // BUG FIX: spark.sql only builds a plan — without an action the
      // query never executed in the original. show() forces it to run.
      res.show()

      // Names (_c1) of rows scoring above 85.
      val res1 = spark.sql(
        """
          |select
          |_c1
          |from score
          |where _c3 > 85
          |""".stripMargin
      )
      res1.show()

      // Load the regular-grade CSV (GBK encoded, with a header row).
      val data2: DataFrame = spark.read
        .option("encoding", "GBK")
        .option("header", "true")
        .csv("src/main/resources/23data2.csv")
      data2.printSchema()
      data2.show(60)

      // Class statistics: average score, highest score, and number of
      // failing students. CSV columns are strings, so cast explicitly.
      // NOTE(review): assumes the header names a "score" column — confirm
      // against 23data2.csv.
      val scoreCol = col("score").cast("double")
      val stats = data2.agg(avg(scoreCol), max(scoreCol)).first()
      // A score strictly below 60 counts as failing.
      val failCount = data2.filter(scoreCol < 60).count()

      println(s"平均分: ${stats.get(0)}")
      println(s"最高分: ${stats.get(1)}")
      println(s"不及格人数: $failCount")
    } finally {
      // BUG FIX: the original called sc.stop() despite intending to stop
      // the SparkSession; spark.stop() also stops the SparkContext.
      spark.stop()
    }
  }
}
