package com.xiaohu.sql

import org.apache.spark.sql.{Column, DataFrame, SparkSession}

/**
 * Row/column transposition demo (pivot and unpivot).
 *
 * Table 1 (long format):
 *   name, item,  score
 *   张三, 数学, 33
 *   张三, 英语, 77
 *   李四, 数学, 66
 *   李四, 英语, 78
 *
 * Table 2 (wide format):
 *   name, math, english
 *   张三, 33,   77
 *   李四, 66,   78
 *
 * Tasks:
 *   1. Convert Table 1 into Table 2 (long -> wide, i.e. pivot).
 *   2. Convert Table 2 back into Table 1 (wide -> long, i.e. unpivot).
 */
object Demo11Student {

  /**
   * Entry point: reads the long-format student scores CSV, pivots it to
   * wide format (one column per subject), then unpivots it back to long
   * format and prints the result.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("行转列 列转行案例演示")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import sparkSession.implicits._

    // If HDFS configuration files are present, paths resolve against HDFS
    // by default; otherwise they resolve against the local filesystem.
    val df1: DataFrame = sparkSession.read
      .format("csv")
      .schema("name STRING,item STRING,score INT")
      .load("/bigdata30/stu.txt")

    // Long -> wide (pivot): one row per name, one column per subject.
    // Conditional sum acts as a manual pivot: for each name, pick the score
    // whose item matches the target subject (0 contributes nothing).
    val resDF: DataFrame = df1.groupBy($"name")
      .agg(
        sum(when($"item" === "数学", $"score").otherwise(0)) as "math",
        sum(when($"item" === "英语", $"score").otherwise(0)) as "english"
      )

    // Map from subject label back to its score column; exploding this map
    // yields one (item, score) row per subject.
    // lit(...) is the idiomatic way to build a literal column (was expr("'数学'")).
    val m: Column = map(
      lit("数学"), $"math",
      lit("英语"), $"english"
    )

    // Wide -> long (unpivot): explode the map into (item, score) pairs.
    resDF.select($"name", explode(m) as Array("item", "score")).show()

    // Release local Spark resources before the JVM exits.
    sparkSession.stop()
  }
}
