package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo07Burks {

  /**
   * Demo: unpivot 12 monthly amount columns of the Hive table `db01.burks`
   * into rows, then run two window queries over the result:
   *  1. a running (cumulative) monthly total per (burk, year);
   *  2. year-over-year growth per month (each month vs. the same month last year).
   */
  def main(args: Array[String]): Unit = {

    // Local SparkSession with Hive support so we can read the Hive table below.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName(this.getClass.getSimpleName.replace("$", ""))
      // Number of partitions for SparkSQL shuffle operations. The default is 200
      // (i.e. 200 tasks would be launched); 2 is plenty for this local demo.
      .config("spark.sql.shuffle.partitions", "2")
      // On Windows the run configuration needs the VM option:
      // -Djava.library.path="D:\soft\hadoop-3.2.0"
      .enableHiveSupport()
      .getOrCreate()

    try {
      val burkDF: DataFrame = spark.table("db01.burks")

      import spark.implicits._
      import org.apache.spark.sql.functions._

      // Unpivot the 12 monthly columns (tsl01..tsl12) into rows.
      // posexplode yields two columns: "pos" (0-based month index) and "col" (the amount).
      val newBurkDF: DataFrame = burkDF
        .select(
          $"burk"
          , $"year"
          , posexplode(array($"tsl01", $"tsl02", $"tsl03", $"tsl04", $"tsl05", $"tsl06", $"tsl07", $"tsl08", $"tsl09", $"tsl10", $"tsl11", $"tsl12"))
        )

      // Cached because it feeds two independent window queries below.
      newBurkDF.cache()

      // Query 1: cumulative total of the amount within each (burk, year), ordered by month.
      // NOTE: the .show() action is commented out, so under Spark's lazy evaluation this
      // query is never actually executed as written — re-enable .show() to run it.
      newBurkDF
        .withColumn("sum_amount", sum($"col") over Window.partitionBy($"burk", $"year").orderBy($"pos"))
        .select(
          $"burk"
          , $"year"
          , $"pos" + 1 as "mon"
          , $"col" as "amount"
          , $"sum_amount"
        )
      //      .show()

      // Query 2: year-over-year growth — compare each month's amount with the same month
      // of the previous year (lag over (burk, pos) ordered by year).
      newBurkDF
        .withColumn("last_amount", lag($"col", 1) over Window.partitionBy($"burk", $"pos").orderBy($"year"))
        // Growth percentage rounded to 8 decimals; lit(0) when there is no previous
        // year (first year of a series), where lag yields null.
        .withColumn("incr", coalesce(round($"col" / $"last_amount" - 1, 8) * 100, lit(0)))
        .select(
          $"burk"
          , $"year"
          , $"pos" + 1 as "mon"
          , coalesce($"last_amount", lit(0)) as "last_amount"
          , $"col" as "amount"
          , $"incr"
        )
        .show()

      newBurkDF.unpersist()
    } finally {
      // Release the SparkSession (executors, Hive client, UI) even if the job fails.
      spark.stop()
    }
  }

}
