package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo05Burks {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.replace("$", ""))
      .master("local")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      // Each row: company code, year, and twelve monthly revenue columns (tsl01..tsl12).
      val burksDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("burk String,year String,tsl01 Int,tsl02 Int,tsl03 Int,tsl04 Int,tsl05 Int,tsl06 Int,tsl07 Int,tsl08 Int,tsl09 Int,tsl10 Int,tsl11 Int,tsl12 Int")
        .load("spark/data/burks.csv")

      //    burksDF.show()

      // 1. Cumulative monthly revenue per company per year.
      // Output: company code, year, month, monthly revenue, cumulative revenue.

      // Unpivot: turn the 12 monthly columns into 12 rows, deriving the month
      // number from the position emitted by posexplode (0-based, hence + 1).
      val newBurksDF: DataFrame = burksDF
        .select($"burk", $"year", posexplode(array($"tsl01", $"tsl02", $"tsl03", $"tsl04", $"tsl05", $"tsl06", $"tsl07", $"tsl08", $"tsl09", $"tsl10", $"tsl11", $"tsl12")) as Array("pos", "amount"))
        .select($"burk", $"year", $"pos" + 1 as "month", $"amount")

      // Reused by both queries below, so keep it cached between actions.
      newBurksDF.cache()

      // Running total within each (company, year) ordered by month; with orderBy,
      // the default window frame (unbounded preceding .. current row) yields the
      // cumulative sum.
      newBurksDF
        .withColumn("sum_amount", sum($"amount") over Window.partitionBy($"burk", $"year").orderBy($"month"))
        .show()

      // 2. Year-over-year growth rate per company per month.
      // Output: company code, year, month, same-month revenue last year,
      // current monthly revenue, growth rate.
      newBurksDF
        // Previous row within (company, month) ordered by year = same month last year.
        // NOTE(review): lag(1) assumes consecutive years exist in the data; a
        // missing year would silently compare against an older one — confirm.
        .withColumn("last_amount", lag($"amount", 1) over Window.partitionBy($"burk", $"month").orderBy($"year"))
        // First year has no prior row -> null ratio; default growth rate to 0.
        // lit(0) instead of expr("0"): same constant without a SQL-parse detour.
        .withColumn("tb", coalesce(round($"amount" / $"last_amount" - 1, 2), lit(0)))
        .show()

      newBurksDF.unpersist()
    } finally {
      // Fix: the original never released the SparkSession; stop it even if a
      // query above throws.
      spark.stop()
    }
  }

}
