package SQL_L

import org.apache.spark.sql.{DataFrame, RelationalGroupedDataset, SparkSession}
import org.apache.spark.sql.types.{DoubleType, IntegerType, LongType, StringType, StructField, StructType}
import org.junit.Test

class AggProcessor {
  // Shared SparkSession for every test in this class (local mode, 6 threads).
  val spark: SparkSession = SparkSession.builder()
    .master("local[6]")
    .appName("aggProcessor")
    .getOrCreate()

  // Implicit conversions: enables 'col / $"col" column syntax and Seq.toDF.
  import spark.implicits._
  // SQL functions: avg, sum, max, lit, ...
  import org.apache.spark.sql.functions._

  /**
   * Demonstrates three equivalent ways to aggregate after a groupBy:
   * 1. `agg` with a `functions` aggregate (avg),
   * 2. the `RelationalGroupedDataset` shortcut API (`avg`, `max`),
   * then orders and shows each result.
   */
  @Test
  def groupBy(): Unit = {
    // Schema for the Beijing PM2.5 dataset.
    val schema = StructType(
      List(
        StructField("id", LongType),
        StructField("year", IntegerType),
        StructField("month", IntegerType),
        StructField("day", IntegerType),
        StructField("hour", IntegerType),
        // Fixed typo: was "session"; every other schema in this file calls
        // this seasonal indicator column "season".
        StructField("season", IntegerType),
        StructField("pm", DoubleType)
      )
    )
    // Read the CSV with an explicit schema (header row skipped, not inferred).
    val sourceDF: DataFrame = spark.read
      .option("header", true)
      .schema(schema)
      .csv("data/beijingpm_with_nan.csv")
    // Drop invalid pm readings. NOTE: unlike IEEE 754, Spark SQL defines
    // NaN = NaN as true, so `=!= Double.NaN` is false for NaN rows and
    // null for null rows — `where` discards both.
    val cleanDF = sourceDF.where('pm =!= Double.NaN)
    // Group by year and month; the grouped set is aggregated twice below.
    val groupDF: RelationalGroupedDataset = cleanDF.groupBy('year, 'month)
    // Way 1: aggregate via the `functions` API.
    groupDF.agg(avg("pm") as 'pm_avg)
      .orderBy('pm_avg.desc) // `.desc` instead of postfix `desc` (no postfixOps warning)
      .show()
    // Way 2: aggregate via RelationalGroupedDataset's shortcut methods.
    // The generated column is named "avg(pm)".
    groupDF.avg("pm")
      .orderBy("avg(pm)")
      .show()

    groupDF.max("pm")
      .select($"max(pm)" as "max_pm")
      .orderBy('max_pm)
      .show()
  }

  /**
   * Multi-dimensional aggregation by hand.
   * Requirements:
   * 1. Average PM per source per year.
   * 2. Average PM per source over the whole dataset.
   * 3. Union both result sets into a single result.
   */
  @Test
  def multiAgg(): Unit = {
    val schemaFinal = StructType(
      List(
        StructField("source", StringType),
        StructField("year", IntegerType),
        StructField("month", IntegerType),
        StructField("day", IntegerType),
        StructField("hour", IntegerType),
        StructField("season", IntegerType),
        StructField("pm", DoubleType)
      )
    )
    val pmFinal = spark.read
      .schema(schemaFinal)
      .option("header", value = true)
      .csv("data/pm_final.csv")

    // Requirement 1: average PM per (source, year).
    val postAndYearDF = pmFinal.groupBy('source, 'year)
      .agg(avg('pm) as "pm")

    // Requirement 2: average PM per source; inject a null `year` column so
    // the schema lines up with requirement 1's result for the union below.
    val postDF = pmFinal.groupBy('source)
      .agg(avg('pm) as "pm")
      .select('source, lit(null) as "year", 'pm)

    // Requirement 3: union the two result sets; null years (the totals)
    // sort last within each source.
    postAndYearDF.union(postDF)
      .sort('source, 'year.asc_nulls_last, 'pm)
      .show()
  }

  /**
   * `rollup` on a small in-memory dataset: produces per-(city, year) sums,
   * per-city subtotals (year = null) and a grand total (city = null).
   */
  @Test
  def rollUp(): Unit = {
    val sales = Seq(
      ("Beijing", 2016, 100),
      ("Beijing", 2017, 200),
      ("Shanghai", 2015, 50),
      ("Shanghai", 2016, 150),
      ("Guangzhou", 2017, 50)
    ).toDF("city", "year", "amount")

    sales.rollup("city", "year")
      .agg(sum("amount") as "amount")
      .sort($"city".desc_nulls_last, $"year".asc_nulls_last)
      .show()
  }

  /**
   * `rollup` on the PM dataset: replaces the manual union from `multiAgg`
   * with a single call — per-(source, year) averages, per-source subtotals
   * and a grand total in one pass.
   */
  @Test
  def rollUPTest(): Unit = {
    val schemaFinal = StructType(
      List(
        StructField("source", StringType),
        StructField("year", IntegerType),
        StructField("month", IntegerType),
        StructField("day", IntegerType),
        StructField("hour", IntegerType),
        StructField("season", IntegerType),
        StructField("pm", DoubleType)
      )
    )
    val pmFinal = spark.read
      .schema(schemaFinal)
      .option("header", value = true)
      .csv("data/pm_final.csv")

    pmFinal.rollup('source, 'year)
      .agg(avg('pm) as "pm")
      .sort('source, 'year.asc_nulls_last, 'pm)
      .show()
  }

  /**
   * `cube` on the PM dataset: like `rollup`, but also emits the
   * per-year-across-all-sources combination (source = null, year set) —
   * every subset of the grouping columns.
   */
  @Test
  def cube(): Unit = {
    val schemaFinal = StructType(
      List(
        StructField("source", StringType),
        StructField("year", IntegerType),
        StructField("month", IntegerType),
        StructField("day", IntegerType),
        StructField("hour", IntegerType),
        StructField("season", IntegerType),
        StructField("pm", DoubleType)
      )
    )
    val pmFinal = spark.read
      .schema(schemaFinal)
      .option("header", value = true)
      .csv("data/pm_final.csv")
    pmFinal.cube('source, 'year)
      .agg(avg('pm) as "pm")
      .sort('source, 'year.asc_nulls_last, 'pm)
      .show()
  }
}
