package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{Column, DataFrame, SparkSession}

object Demo7Burk {

  /**
   * Reads per-company monthly revenue rows (one row per company/year, twelve
   * month columns tsl01..tsl12), unpivots them into (burk, year, month, pic)
   * rows, and prints two reports:
   *   1. cumulative revenue per company per year, accumulated by month;
   *   2. growth rate of each month versus the same month of the previous year.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("burk")
      // Tiny local data set — one shuffle partition is enough.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      // Input schema: company code, year, then one revenue column per month.
      val burksDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("burk STRING,year STRING,tsl01 DOUBLE,tsl02 DOUBLE,tsl03 DOUBLE,tsl04 DOUBLE,tsl05 DOUBLE,tsl06 DOUBLE,tsl07 DOUBLE,tsl08 DOUBLE,tsl09 DOUBLE,tsl10 DOUBLE,tsl11 DOUBLE,tsl12 DOUBLE")
        .load("data/burks.txt")

      // Build the map literal {1 -> tsl01, ..., 12 -> tsl12} programmatically
      // instead of spelling out all 24 key/value arguments by hand. Keys are
      // integer literals, identical to the original expr("1")..expr("12").
      val kv: Column = map(
        (1 to 12).flatMap(m => Seq(lit(m), col(f"tsl$m%02d"))): _*
      )

      // Unpivot: one row per (burk, year, month) carrying that month's revenue.
      val tempDF: DataFrame = burksDF
        .select($"burk", $"year", explode(kv) as Array("month", "pic"))

      // The same DataFrame feeds both queries below — cache it once.
      tempDF.cache()

      // 1. Cumulative monthly revenue per company per year.
      tempDF
        .withColumn("sumPic", sum($"pic") over Window.partitionBy($"burk", $"year").orderBy($"month"))
        .show(1000)

      // 2. Growth rate versus the same month of the previous year.
      //    lag defaults to 0.0 for the first year; pic / 0.0 yields null in
      //    Spark SQL, so coalesce falls back to 1.0 for rows with no prior year.
      tempDF
        .withColumn("lastPic", lag($"pic", 1, 0.0) over Window.partitionBy($"burk", $"month").orderBy($"year"))
        .withColumn("p", round(coalesce(($"pic" / $"lastPic") - 1, expr("1.0")), 7))
        .show()

      // Release the cached data once both reports have been produced.
      tempDF.unpersist()
    } finally {
      // Fix: the original never stopped the session, leaking its resources
      // (local Spark context, UI server, executor threads).
      spark.stop()
    }
  }

}
