import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Small Spark SQL demo: reads two local CSV score files, registers them as
 * temp views, and runs aggregate queries (per-term stats, students scoring
 * over 90, and pass/fail statistics on the GBK-encoded file).
 *
 * NOTE(review): input paths are hard-coded Windows paths; parameterize via
 * `args` if this ever runs outside the author's machine.
 */
object kk {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Hoisted paths: the same file was previously spelled out twice.
    val scorePath = "F:\\Spark\\karry\\karry\\src\\main\\resources\\data.csv"
    val totalPath = "F:\\Spark\\karry\\karry\\src\\main\\resources\\23data01.csv"

    // Debug-only duplicate read of the score file as raw text; in local[*]
    // mode the println output appears on the driver console.
    val data1 = sc.textFile(scorePath)
    data1.foreach(println)

    // Headerless CSV: columns come in as _c0, _c1, _c2 (term), _c3 (score).
    // Spark implicitly casts the string _c3 for the numeric aggregates below.
    val dataSet1: DataFrame = spark.read.csv(scorePath)
    dataSet1.printSchema()
    dataSet1.show()
    // createOrReplaceTempView is idempotent, so re-running in the same
    // session no longer throws TempTableAlreadyExistsException.
    dataSet1.createOrReplaceTempView("score")

    // Per-term average / max / total score.
    val res: DataFrame = spark.sql(
      """
        |select
        |  _c2 as term,
        |  avg(_c3) as avg_score,
        |  max(_c3) as max_score,
        |  sum(_c3) as sum_score
        |  from score
        |  group by _c2
        |""".stripMargin
    )
    // Names (_c1) of students with a score above 90.
    val res1 = spark.sql(
      """
        |select
        |  _c1
        |  from score
        |  where _c3 > 90
        |""".stripMargin
    )
    res.show()
    res1.show()

    // Second file is GBK-encoded with a header row of Chinese column names.
    val dataSet2: DataFrame = spark.read.option("encoding", "GBK").option("header", "true").csv(totalPath)
    dataSet2.printSchema()
    dataSet2.createOrReplaceTempView("total")
    dataSet2.show()

    // Class-wide stats on the "平时成绩" (regular-grade) column plus a
    // comma-joined list of students scoring below 60.
    val res2: DataFrame = spark.sql(
      """
        |SELECT
        |  AVG(CAST(`平时成绩` AS DOUBLE)) AS avg_total,
        |  MAX(CAST(`平时成绩` AS DOUBLE)) AS max_total,
        |  COUNT_IF(CAST(`平时成绩` AS DOUBLE) < 60) AS file_total,
        |  CONCAT_WS(', ', COLLECT_LIST(
        |    CASE WHEN CAST(`平时成绩` AS DOUBLE) < 60 THEN `学生姓名` ELSE NULL END
        |  )) AS failed
        |FROM total
      """.stripMargin
    )
    res2.show(truncate = false)

    // Stop the session (which also stops the underlying SparkContext),
    // rather than stopping only `sc` and leaving the session half-open.
    spark.stop()
  }
}