package org.example

import org.apache.spark.sql.{DataFrame, SparkSession}

import org.apache.spark.sql.SparkSession

object data_SQL1 {
  def main(args: Array[String]): Unit = {
    // Spark session in local mode (uses all available cores).
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Read the raw file as an RDD of lines and print each line.
    val data1 = sc.textFile("src/main/resources/daat.txt")
    data1.foreach(println)

    // Read the same file as a DataFrame: csv() maps it onto a tabular
    // schema with default positional column names _c0, _c1, ... — unlike
    // text(), which yields a single "value" column.
    val dataSet1: DataFrame = spark.read.csv("src/main/resources/daat.txt")
    // Print the inferred schema.
    dataSet1.printSchema()
    // Preview the data.
    dataSet1.show()
    // Register as a temp view so it can be queried with SQL.
    // createOrReplaceTempView avoids an AnalysisException when the view
    // already exists (e.g. re-running in the same session).
    dataSet1.createOrReplaceTempView("score")

    // Per-term (_c2) average / max / total of the score column (_c3).
    val res: DataFrame = spark.sql(
      """
        |select
        |  _c2 as term,
        |  avg(_c3) as avg_score,
        |  max(_c3) as max_score,
        |  sum(_c3) as sum_score
        |from score
        |group by _c2
        |""".stripMargin
    )
    // Names (_c1) of rows whose score is above 90.
    val res1 = spark.sql(
      """
        |select
        |  _c1
        |from score
        |where _c3 > 90
        |""".stripMargin
    )
    res.show()
    res1.show()

    // Exercise: per-student average and max of the regular grades, plus
    // the names of students whose average is below 60.
    val dataSet2: DataFrame = spark.read
      .option("encoding", "GBK")
      .option("header", "true")
      .csv("src/main/resources/23data01.csv")
    // Print the inferred schema.
    dataSet2.printSchema()
    // Preview the data.
    dataSet2.show()
    dataSet2.createOrReplaceTempView("score1")
    // NOTE(review): header=true names columns from the CSV header row, so
    // the positional names _c0/_c3 below only resolve if the header row
    // literally contains them — verify against the actual file.
    // Bug fix: the original query had a trailing comma before FROM
    // ("max_score1,"), which is a SQL syntax error (ParseException).
    val res2 = spark.sql(
      """
        |select
        |  _c0 as name,
        |  avg(_c3) as avg_score1,
        |  max(_c3) as max_score1
        |from score1
        |group by _c0
        |""".stripMargin
    )
    // Bug fix: the original selected "from res2", but res2 was only a local
    // DataFrame val — never registered as a view — so Spark would fail with
    // "table or view not found". It also filtered on _c3, a column that does
    // not exist in res2 (its columns are name / avg_score1 / max_score1).
    // Register the aggregate as a view, then select the names whose average
    // is below 60, matching the stated exercise.
    res2.createOrReplaceTempView("res2")
    val res3 = spark.sql(
      """
        |select
        |  name
        |from res2
        |where avg_score1 < 60
        |""".stripMargin
    )
    res3.show()
    res2.show()

    // Stop the whole session (this also stops the underlying SparkContext).
    spark.stop()
  }
}
