package com.shujia.sql

import org.apache.spark.sql._

object Demo12Stu {

  /**
   * Demo of row/column reshaping in Spark SQL on a (name, course, score) dataset:
   *   1. rows -> columns via conditional aggregation (SQL CASE ... GROUP BY),
   *   2. rows -> columns via the DataFrame `pivot` API and the SQL PIVOT clause,
   *   3. columns -> rows via `explode(map(...))` (un-pivot),
   *   4. DataFrame -> RDD conversion with typed field access.
   *
   * Reads `spark/data/stu.txt` (comma-separated: name,cou,score) and prints to stdout.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .config("spark.sql.shuffle.partitions", 2) // small local data set: keep shuffle partitions low
      .master("local[4]")
      .appName("stu")
      .getOrCreate()

    import spark.implicits._

    // Explicit schema avoids an extra pass for schema inference.
    val stuDF: DataFrame = spark
      .read
      .option("sep", ",")
      .schema("name STRING,cou STRING,score INT")
      .csv("spark/data/stu.txt")


    stuDF.createOrReplaceTempView("stu")


    // Rows -> columns the manual way: one SUM(CASE ...) per target column.
    spark.sql(
      """
        |
        |select
        |name,
        |sum(case cou when '数学' then score else 0 end) as math,
        |sum(case cou when '英语' then score else 0 end) as en
        |from stu
        |group by name
        |
        |
      """.stripMargin) //.show()


    import org.apache.spark.sql.functions._


    // Rows -> columns with the DataFrame pivot API; the explicit value list
    // ("数学", "英语") avoids an extra distinct-values job and fixes column order.
    val stu2: DataFrame = stuDF
      .groupBy($"name")
      .pivot($"cou", List("数学", "英语")) // pivot (rows to columns)
      .agg(sum($"score").as("score"))
    //.select($"name", $"数学" as "math", $"英语" as "en")
    //.show()


    // Same pivot expressed with the SQL PIVOT clause.
    spark.sql(
      """
        |
        |select name,`数学` as math, `英语` as en  from stu
        |pivot(sum(score) for cou in ('数学','英语'))
        |
        |
      """.stripMargin) //.show()


    // Columns -> rows (un-pivot): build a map of course-name -> score and explode it.
    // NOTE: the multi-column alias must be applied to the RESULT of explode (a
    // generator), not to the map column itself — aliasing a non-generator column
    // with multiple names fails analysis. `lit` is the idiomatic literal builder.
    stu2.select($"name", explode(map(lit("math"), $"数学", lit("en"), $"英语")).as(Array("cou", "score")))
      .show()


    // DataFrame -> RDD: access fields by name with the types declared in the schema.
    stuDF.rdd.map(row => {
      val name: String = row.getAs[String]("name")
      val cou: String = row.getAs[String]("cou")
      val score: Int = row.getAs[Int]("score")

      (name, cou, score)
    }).foreach(println)

    // Release the session's resources now that the demo is done.
    spark.stop()
  }

}
