package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession}

object Demo6Stu {
  /**
   * Demo: pivot (rows -> columns) and unpivot (columns -> rows) over
   * student score data using Spark SQL DataFrame operations.
   *
   * Reads `spark/data/stu.txt` (CSV: name,item,score), pivots each
   * student's subject scores into `math`/`english` columns, prints the
   * result, then unpivots it back to (name, item, score) rows.
   */
  def main(args: Array[String]): Unit = {

    // 1. Create the Spark SQL session (local mode for this demo).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("sql")
      // Default is 200 shuffle partitions on a cluster; 1 is enough locally.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Read the comma-separated input with an explicit schema so `score`
    // arrives as DOUBLE rather than STRING.
    val stu: DataFrame = spark.read
      .schema("name STRING ,item STRING,score DOUBLE")
      .option("sep", ",")
      .csv("spark/data/stu.txt")

    // Pivot: one row per student. when/otherwise zeroes out non-matching
    // subjects so sum() collapses each group to that subject's score.
    val result: DataFrame = stu
      .groupBy($"name")
      .agg(
        sum(when($"item" === "数学", $"score").otherwise(0)) as "math",
        sum(when($"item" === "英语", $"score").otherwise(0)) as "english"
      )

    result.show()

    // Unpivot: build a literal-keyed map {subject -> score} and explode it
    // back into (item, score) rows. lit(...) is the idiomatic way to create
    // a literal column (equivalent to, but clearer than, expr("'...'")).
    result
      .select($"name", explode(map(lit("数学"), $"math", lit("英语"), $"english")) as Array("item", "score"))
      .show()

    // Release the session's resources (was missing in the original).
    spark.stop()
  }

}
