package org.example

import org.apache.spark.sql.{DataFrame, SparkSession}

object data1_SQL1 {
  /**
   * Demo of reading score data with Spark and querying it via Spark SQL.
   *
   * Reads `data.txt` both as a raw RDD and as a structured CSV, then reads a
   * GBK-encoded, headered CSV of student scores once and runs two queries on it:
   *   1. rounded average and maximum of the regular-grade column;
   *   2. names and grades of students scoring below 60.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Plain RDD read: each line is an unparsed string (no schema).
    val data1 = sc.textFile("src/main/resources/data.txt")
    // NOTE: on a real cluster foreach(println) prints on the executors,
    // not the driver; fine here because master is local[*].
    data1.foreach(println)

    // Structured read: csv() splits on the delimiter and maps the data to
    // columns _c0, _c1, ... (spark.read.textFile would give a single `value` column).
    val dataSet1: DataFrame = spark.read.csv("src/main/resources/data.txt")

    // Read the GBK-encoded CSV with a header row ONCE and share it between both
    // queries (it was previously read twice into identical DataFrames).
    val scores: DataFrame = spark.read
      .option("encoding", "GBK")
      .option("header", true)
      .csv("src/main/resources/23data01.csv")

    // Inspect the inferred schema and a sample of the data.
    scores.printSchema()
    scores.show()
    scores.createTempView("score1")

    // Query 1: average (rounded to an integer) and maximum regular grade.
    val res1: DataFrame = spark.sql(
      """
        |select
        |      cast(round(avg(`平时成绩`)) as integer) as avg_score,
        |      max(`平时成绩`) as max_score
        |  from score1
        |""".stripMargin
    )

    // Query 2: students whose regular grade is below 60.
    val res2: DataFrame = spark.sql(
      """
        |select
        |      `学生姓名`,
        |      `平时成绩`
        |  from score1
        |  where `平时成绩`<60
        |""".stripMargin
    )

    res1.show()
    res2.show()

    // Stop the whole SparkSession, not just the underlying SparkContext.
    spark.stop()
  }
}
