package org.example
import org.apache.spark.sql.{DataFrame,SparkSession}
object data1_SQL1 {

  /**
   * Exercise: over the GBK-encoded CSV `23data01.csv`, compute for the
   * `平时成绩` (regular-grade) column the average, the maximum, the count of
   * failing scores (< 60), and a comma-separated list of the failing
   * students' names (`学生姓名`), then print the result.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()

    // header=true promotes the first CSV row to column names; the source
    // file is GBK-encoded, so the encoding option is required to read the
    // Chinese headers correctly.
    val dataSet2: DataFrame = spark.read
      .option("encoding", "GBK")
      .option("header", "true")
      .csv("src/main/resources/23data01.csv")
    dataSet2.printSchema()
    dataSet2.show()

    // NOTE: "table" is a SQL keyword — use a descriptive, unambiguous view
    // name instead so the query below cannot clash with the parser.
    dataSet2.createTempView("scores")

    // Scores arrive as strings (schema is not inferred), hence the explicit
    // CAST to DOUBLE before each aggregate. COUNT_IF requires Spark 3.0+.
    val result: DataFrame = spark.sql(
      """
        |SELECT
        |  AVG(CAST(`平时成绩` AS DOUBLE)) AS avg_table,
        |  MAX(CAST(`平时成绩` AS DOUBLE)) AS max_table,
        |  COUNT_IF(CAST(`平时成绩` AS DOUBLE) < 60) AS fail_table,
        |  CONCAT_WS(', ', COLLECT_LIST(
        |    CASE WHEN CAST(`平时成绩` AS DOUBLE) < 60 THEN `学生姓名` ELSE NULL END
        |  )) AS failed
        |FROM scores
      """.stripMargin
    )
    result.show(truncate = false)

    // Stop the session (also stops the underlying SparkContext).
    spark.stop()
  }
}
