package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, DataFrameReader, SparkSession}

object Demo10Stu {

  /**
   * Demo: pivot long-format student scores into wide format with
   * conditional aggregation, then unpivot back using map + explode.
   *
   * Input rows (name, item, score):
   *   张三,数学,33
   *   张三,英语,77
   *   李四,数学,66
   *   李四,英语,78
   *
   * Wide form (name, math, english):
   *   张三,33,77
   *   李四,66,78
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("stu")
      // Number of partitions produced by a shuffle; 1 is enough for this
      // tiny local demo (the default of 200 would just add overhead).
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Read the CSV with an explicit schema (the file has no header row).
    val df1: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("name STRING ,item STRING ,score DOUBLE")
      .load("data/stu_sco.txt")

    // Pivot: sum + when adds a condition inside the aggregation, so each
    // subject's scores land in their own column (non-matching rows count 0).
    val df2: DataFrame = df1
      .groupBy($"name")
      .agg(
        sum(when($"item" === "数学", $"score").otherwise(0)) as "math",
        sum(when($"item" === "英语", $"score").otherwise(0)) as "english"
      )
    df2.show()

    // Unpivot: build a subject -> score map per row, then explode it back
    // into (item, score) rows. lit(...) is the idiomatic way to create a
    // literal column (clearer than parsing a SQL snippet via expr("'...'")).
    df2
      .select(
        $"name",
        explode(map(lit("数学"), $"math", lit("英语"), $"english")) as Array("item", "score")
      ).show()

    // Release the local Spark resources before the JVM exits.
    spark.stop()
  }
}
