package Test

import org.apache.spark
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession}

object homework1 {

  /**
   * Reads per-year monthly income rows (columns tsl01..tsl12, one column per month),
   * unpivots them into one row per (company, year, month), and computes the
   * cumulative (year-to-date) income per company and year.
   *
   * Output columns: burk (company code), year, n (month 1-12),
   * monthIncome (income of month n), monthSumIncome (running total through month n).
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("homework1")
      .master("local")
      // tiny local dataset — a single shuffle partition avoids pointless task overhead
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    import spark.implicits._

    val dataFrame = spark.read
      .schema("burk String,year STRING,tsl01 INT,tsl02 INT,tsl03 INT,tsl04 INT,tsl05 INT,tsl06 INT,tsl07 INT,tsl08 INT,tsl09 INT,tsl10 INT,tsl11 INT,tsl12 INT")
      .csv("spark/data/Income.csv")

    // One row per month number 1..12. Replaces the hand-built
    // "select 1 as n union all ..." SQL string assembled via StringBuilder.
    val months = (1 to 12).toDF("n")

    // Explicit Cartesian product: every input row paired with each of the 12 month
    // numbers, then the matching tslXX column is selected as that month's income.
    // (crossJoin makes the intent clear; a bare join() with no condition relies on
    // implicit cross joins being enabled.)
    val unpivoted: DataFrame = dataFrame.crossJoin(months)
      .withColumn("monthIncome", expr(
        """case n
          | when 1 then tsl01
          | when 2 then tsl02
          | when 3 then tsl03
          | when 4 then tsl04
          | when 5 then tsl05
          | when 6 then tsl06
          | when 7 then tsl07
          | when 8 then tsl08
          | when 9 then tsl09
          | when 10 then tsl10
          | when 11 then tsl11
          | when 12 then tsl12
          | end
          |""".stripMargin))
      // Keep only: company code, year, month, monthly income.
      // (The original computed this select but discarded the result.)
      .select($"burk", $"year", $"n", $"monthIncome")

    // Running (cumulative) sum of income within each company/year, ordered by month.
    val res = unpivoted.withColumn(
      "monthSumIncome",
      sum("monthIncome").over(Window.partitionBy("burk", "year").orderBy("n"))
    )
    res.show(100)

    spark.stop()
  }
}