package SQL

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{DoubleType, IntegerType, LongType, StringType, StructField, StructType}
import org.junit.Test

/**
  * Demonstrates Spark SQL aggregation operators: groupBy, multi-level
  * aggregation, rollup and cube. Each test reads or builds a small dataset
  * and shows the aggregated result.
  */
class 聚合操作 {
  // Shared local SparkSession for all tests; 6 worker threads.
  val spark = SparkSession.builder()
    .master("local[6]")
    .appName("Test")
    .getOrCreate()

  import spark.implicits._
  import org.apache.spark.sql.functions._

  @Test
  def groupBy(): Unit = {
    /**
      * Compute the average PM value for each (year, month) pair.
      */
    val schema = StructType(
      Seq(
        StructField("id", LongType),
        StructField("year", IntegerType),
        StructField("month", IntegerType),
        StructField("day", IntegerType),
        StructField("hour", IntegerType),
        StructField("season", IntegerType),
        StructField("pm", DoubleType)
      )
    )
    val source = spark.read
      .schema(schema)
      .option("header", value = true)
      .csv("E:\\data\\spark数据\\Spark_data\\Beijing.csv")
    // Spark SQL (unlike Java) treats NaN = NaN as true, so `=!= Double.NaN`
    // drops NaN rows; rows where pm is null are also dropped because the
    // comparison evaluates to null. `not(isnan('pm))` would be more explicit.
    source.where('pm =!= Double.NaN)
      .groupBy('year, 'month)
      // FIX: alias the aggregate RESULT, not the input column. The original
      // `avg('pm as "avg(pm)")` renamed the column inside avg(), yielding an
      // output column named `avg(pm AS avg(pm))`.
      .agg(avg('pm) as "avg(pm)")
      .orderBy('year, 'month)
      .show()
  }

  @Test
  def multiAgg(): Unit = {
    val schema = StructType(
      Seq(
        StructField("id", StringType),
        StructField("year", IntegerType),
        StructField("month", IntegerType),
        StructField("day", IntegerType),
        StructField("hour", IntegerType),
        StructField("season", IntegerType),
        StructField("pm", DoubleType)
      )
    )
    val source = spark.read
      .schema(schema)
      .option("header", value = true)
      .csv("E:\\data\\spark数据\\Spark_data\\Beijing_final.csv")

    /**
      * 1. Average PM value per source (id) per year.
      * 2. Average PM value per source over the whole dataset.
      */
    // FIX: transformations are lazy — without an action this aggregation was
    // never executed. Trigger it with show() like the other demos.
    source.groupBy('id, 'year)
      .agg(avg('pm))
      .show()

    source.groupBy('id)
      .agg(avg('pm) as "pm")
      .show()
  }

  @Test
  def rollup(): Unit = {
    val sales = Seq(
      ("Beijing", 2016, 100),
      ("Beijing", 2017, 200),
      ("Shanghai", 2015, 50),
      ("Shanghai", 2016, 150),
      ("Guangzhou", 2017, 50)
    ).toDF("city", "year", "amount")
    // Requirement 1: sales amount per city per year.
    // Requirement 2: total sales amount per city.
    // Requirement 3: grand total of all sales.

    // rollup(A, B) produces hierarchical grouping sets:
    // (A, B), (A), and () — the global total (both keys NULL).
    sales.rollup('city, 'year)
      .agg(sum('amount) as "amount")
      // asc_nulls_last keeps the subtotal/grand-total rows (NULL keys) at the end.
      .sort('city.asc_nulls_last, 'year.asc_nulls_last)
      .show()
  }

  @Test
  def cube(): Unit = {
    /**
      * cube(A, B) produces ALL grouping-set combinations:
      * group by (A, B)
      * group by (A)
      * group by (B)
      * group by ()  — the global total
      */
    val sales = Seq(
      ("Beijing", 2016, 100),
      ("Beijing", 2017, 200),
      ("Shanghai", 2015, 50),
      ("Shanghai", 2016, 150),
      ("Guangzhou", 2017, 50)
    ).toDF("city", "year", "amount")
    // Requirement 1: sales amount per city per year.
    // Requirement 2: total sales amount per city.
    // Requirement 3: grand total of all sales.

    // FIX: the original comment here was copy-pasted from rollup() and
    // described rollup semantics. cube additionally emits the (B)-only
    // grouping — here the per-year totals across all cities.
    sales.cube('city, 'year)
      .agg(sum('amount) as "amount")
      .sort('city.asc_nulls_last, 'year.asc_nulls_last)
      .show()
  }
}
