package com.shujia.sql

import org.apache.spark.sql.{Column, DataFrame, SparkSession}

object Demo8Stu {

  /**
    * Demonstrates row/column transposition on a student-score dataset:
    * long format -> wide format via conditional aggregation and the Spark SQL
    * PIVOT clause, then wide format -> long format via `map` + `explode`.
    *
    * Input: `spark/data/stu.txt`, CSV with one row per (name, subject, score).
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("spark")
      .master("local")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      // Long-format source table: one row per (student, subject, score).
      val stuDF: DataFrame = spark.read
        .format("csv")
        .option("sep", ",")
        .schema("name STRING, item STRING, score INT")
        .load("spark/data/stu.txt")

      /**
        * 1. Row/column transposition
        *
        * Table 1 (long format) — columns: name (student), item (subject), score
        * 张三,数学,33
        * 张三,英语,77
        * 李四,数学,66
        * 李四,英语,78
        *
        * Table 2 (wide format) — columns: name, math, english
        * 张三,33,77
        * 李四,66,78
        *
        * Task 1: convert table 1 into table 2 (pivot)
        * Task 2: convert table 2 back into table 1 (unpivot)
        */
      stuDF.createOrReplaceTempView("stu")

      // Task 1a: pivot via conditional aggregation (CASE WHEN + GROUP BY).
      // NOTE: `else 0` fills a student's missing subject with 0; the PIVOT
      // variant below yields NULL for a missing subject instead.
      val df: DataFrame = spark.sql(
        """
          |select
          |name,
          |sum(case when item='数学' then score else 0 end) as math,
          |sum(case when item='英语' then score else 0 end) as english
          |from stu
          |group by name
          |
        """.stripMargin)

      // Task 1b: same pivot using the SQL PIVOT clause (Spark 2.4+).
      spark.sql(
        """
          |
          |select name, `数学` as a , `英语` as b  from
          |stu
          |pivot(sum(score) for item in ('数学','英语'))
          |
        """.stripMargin).show(100)

      /**
        * 2. Convert table 2 back into table 1 (unpivot)
        */

      // Build a {subject -> score} map column, then explode it so each
      // map entry becomes its own (item, score) row.
      // `lit(...)` is the idiomatic literal column; avoids expr("'...'").
      val subjectToScore: Column = map(
        lit("数学"), $"math",
        lit("英语"), $"english"
      )

      df.show(1000)
      df.select($"name", explode(subjectToScore).as(Seq("item", "score")))
        .show(1000)
    } finally {
      // Always release the local Spark context, even if a query fails.
      spark.stop()
    }
  }

}
