package com.shujia.sql

import org.apache.spark.sql.{Column, DataFrame, SparkSession}

/**
 * 表1
 * 姓名,科目,分数
 * name,item,score
 * 张三,数学,33
 * 张三,英语,77
 * 李四,数学,66
 * 李四,英语,78
 *
 *
 * 表2
 * 姓名,数学,英语
 * name,math,english
 * 张三,33,77
 * 李四,66,78
 */
object Test3 {
  /**
   * Demo of pivoting between long and wide table layouts with Spark SQL:
   * 1. long -> wide: one row per (name, subject, score) becomes one row per
   *    name with a column per subject (conditional sum aggregation).
   * 2. wide -> long: the wide row is unpivoted back via map + explode.
   */
  def main(args: Array[String]): Unit = {
    // Local session; a single shuffle partition is enough for this tiny demo dataset.
    val ss: SparkSession = SparkSession.builder()
      .master("local")
      .appName("spark sql练习2")
      .config("spark.sql.shuffle.partitions", "1")
      .getOrCreate()

    import ss.implicits._
    import org.apache.spark.sql.functions._

    // Long-format input: one row per (name, subject, score), see table 1 above.
    val df1: DataFrame = ss.read
      .format("csv")
      .schema("name STRING,item STRING,score INT")
      .load("spark/data/t2.txt")

    // Rows -> columns (行转列, long to wide): pivot subjects into columns.
    // NOTE(review): the original comment here said 列转行 — the two labels were swapped.
    // sum(when(...)) picks out each subject's score per name; otherwise(0) keeps
    // the aggregate well-defined when a subject row is absent for a student.
    val resDF1: DataFrame = df1.groupBy($"name")
      .agg(
        sum(when($"item" === "数学", $"score").otherwise(0)) as "math",
        sum(when($"item" === "英语", $"score").otherwise(0)) as "english"
      )

    // Columns -> rows (列转行, wide back to long): build a subject->score map,
    // then explode it into (item, score) pairs.
    // lit(...) is the idiomatic constant-Column constructor (original used expr("'数学'")).
    val m: Column = map(
      lit("数学"), $"math",
      lit("英语"), $"english"
    )

    val resDF2: DataFrame = resDF1.select($"name", explode(m) as Array("item", "score"))
    resDF2.show()

    // Release session resources before the JVM exits (was missing in the original).
    ss.stop()
  }
}
