import org.apache.spark.sql.{RelationalGroupedDataset, SparkSession}
import org.apache.spark.sql.types.{DoubleType, IntegerType, LongType, StructField, StructType, StringType}
import org.junit.Test

class 聚合操作_groupby {
  val spark = SparkSession.builder()
    .master("local[6]")
    .appName("Test")
    .getOrCreate()

  import spark.implicits._
  import org.apache.spark.sql.functions._

  // Shared schema for beijing_pm_final.csv, used by the multi-dimensional
  // aggregation tests below (source, year, month, day, hour, season, pm).
  private val pmFinalSchema = StructType(
    Seq(
      StructField("source", StringType),
      StructField("year", IntegerType),
      StructField("month", IntegerType),
      StructField("day", IntegerType),
      StructField("hour", IntegerType),
      StructField("season", IntegerType),
      StructField("pm", DoubleType)
    )
  )

  /** Reads the beijing_pm_final.csv dataset with the shared schema (side-effecting I/O). */
  private def readPmFinal() =
    spark.read
      .option("header", true)
      .schema(pmFinalSchema)
      .csv("C:\\Users\\HR\\Desktop\\beijing_pm_final.csv")

  @Test
  def groupByTest01(): Unit = {

    // Schema of the raw BeijingPM_Nan.csv file (row number, timestamp parts, pm).
    val schema = StructType(
      Seq(
        StructField("No", LongType),
        StructField("year", IntegerType),
        StructField("month", IntegerType),
        StructField("day", IntegerType),
        StructField("hour", IntegerType),
        StructField("season", IntegerType),
        StructField("pm", DoubleType)
      )
    )
    val source = spark.read
      .option("header", true)
      .schema(schema)
      .csv("E:\\PHY\\Pandas\\使用Pandas进行数据预处理\\resource\\BeijingPM_Nan.csv")

    // Drop rows whose pm is NaN or null; isnan + isNotNull is clearer and
    // null-safe compared to comparing against Double.NaN directly.
    val clean = source.where(!isnan('pm) && 'pm.isNotNull)

    // Requirement: average PM value per (year, month).
    val group: RelationalGroupedDataset = clean.groupBy('year, 'month)

    // Aggregate via a function expression — avg defines the aggregation operation.
    group.agg(avg('pm) as "pm_avg")
      .orderBy('pm_avg.desc)
      .show()

    // Aggregate via the RelationalGroupedDataset API.
    // Bug fix: Spark is lazy — without an action this query was never executed,
    // so call show() to actually run and display it.
    group.avg("pm")
      .select($"avg(pm)" as "avg_pm")
      .orderBy('avg_pm.desc)
      .show()
  }

  /**
    * Multi-dimensional aggregation.
    * Builds one result set that contains both the per-(source, year) detail
    * rows and the per-source subtotal rows.
    */
  @Test
  def mutlgroupBy(): Unit = {
    val source = readPmFinal()

    // Requirement 1: average PM per (source, year).
    val postAndYearDF = source.groupBy('source, 'year)
      .agg(avg('pm) as "pm")

    // Requirement 2: average PM per source over the whole dataset.
    // Bug fix: cast the placeholder null to IntegerType so the subtotal rows'
    // `year` column matches postAndYearDF's schema for the positional union.
    val postDF = source.groupBy('source)
      .agg(avg('pm) as "pm")
      .select('source, lit(null).cast(IntegerType) as "year", 'pm)

    // Merge both aggregation levels into a single result set.
    postAndYearDF.union(postDF)
      .sort('source, 'year, 'pm)
      .show()
  }

  @Test
  def rollup(): Unit = {
    val sales = Seq(
      ("Beijing", 2016, 100),
      ("Beijing", 2017, 200),
      ("Shanghai", 2015, 50),
      ("Shanghai", 2016, 150),
      ("Guangzhou", 2017, 50)
    ).toDF("city", "year", "amount")
    // Requirement 1: sales amount per city per year
    // Requirement 2: total sales per city
    // Requirement 3: grand total over everything

    // rollup(A, B) produces the grouping sets (A, B), (A), and () —
    // i.e. detail rows, per-A subtotals, and the global total (all-null row).
    sales.rollup('city, 'year)
      .agg(sum('amount) as "amount")
      .sort('city.asc_nulls_last, 'year.asc_nulls_last)
      .show()
  }

  @Test
  def rollup1(): Unit = {
    val source = readPmFinal()

    // Requirements: average PM per (source, year), per source,
    // and the overall average (the all-null rollup row).
    source.rollup('source, 'year)
      .agg(avg('pm) as "pm")
      .sort('source.asc_nulls_last, 'year.asc_nulls_last)
      .show()
  }

  @Test
  def cuteTest(): Unit = {
    val source = readPmFinal()

    // cube(A, B) produces every grouping-set combination:
    // (A, B), (A), (B), and () — a full cross-dimensional aggregation.
    source.cube('source, 'year)
      .agg(avg('pm) as "pm")
      .sort('source.asc_nulls_last, 'year.asc_nulls_last)
      .show()
  }

  @Test
  def cuteSQLTest(): Unit = {
    val source = readPmFinal()
    // Register a temporary view so the same cube can be expressed in SQL.
    source.createOrReplaceTempView("pm_final")
    // Bug fix: the concatenated fragments were missing a separating space,
    // yielding "...())order by ..." — an unparseable statement.
    val result = spark.sql(
      "select source, year, avg(pm) as pm from pm_final " +
        "group by source, year grouping sets((source, year), (source), (year), ()) " +
        "order by source asc nulls last, year asc nulls last")
    result.show()
  }
}
