package Test

import org.apache.spark.sql.functions.{collect_list, expr, round, year}
import org.apache.spark.sql.{DataFrame, SparkSession}


// Company code, year, month, growth rate (current-month income / same month of previous year - 1)
object homework2 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("homework2")
      .master("local")
      // Bug fix: the key is "spark.sql.shuffle.partitions" (plural). The
      // misspelled "spark.sql.shuffle.partition" was silently ignored, so
      // shuffles ran with the default 200 partitions instead of 1.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // One row per company per year; tsl01..tsl12 hold the incomes of months 1..12.
    val dataFrame = spark.read
      .schema("burk String,year INT,tsl01 INT,tsl02 INT,tsl03 INT,tsl04 INT,tsl05 INT,tsl06 INT,tsl07 INT,tsl08 INT,tsl09 INT,tsl10 INT,tsl11 INT,tsl12 INT")
      .csv("spark/data/Income.csv")

    import spark.implicits._

    // Rows 1..12, one per month. spark.range replaces the previous
    // hand-built "select 1 as n union all ..." SQL string.
    val months = spark.range(1, 13).selectExpr("cast(id as int) as n")

    // Unpivot: cross join with the 12 month rows and pick the matching tslNN
    // column, yielding one (burk, year, month, monthIncome) row per month.
    val tmp: DataFrame = dataFrame.join(months).withColumn("monthIncome", expr(
      """case n
        | when 1 then tsl01
        | when 2 then tsl02
        | when 3 then tsl03
        | when 4 then tsl04
        | when 5 then tsl05
        | when 6 then tsl06
        | when 7 then tsl07
        | when 8 then tsl08
        | when 9 then tsl09
        | when 10 then tsl10
        | when 11 then tsl11
        | when 12 then tsl12
        | end
        |""".stripMargin))
      .select($"burk", $"year", $"n" as "month", $"monthIncome")
    tmp.cache() // reused twice below: the self-join and the final left join

    // Self-join: pair each row with the same company/month one year later.
    // "nextyear" = year + 1, so nextMonthIncome is the later year's income.
    val rate = tmp.join(
        tmp.select($"burk", $"year" as "nextyear", $"month", $"monthIncome" as "nextMonthIncome"),
        List("burk", "month"))
      .where("year = nextyear - 1")

    // Growth rate of year N = income(N) / income(N-1) - 1, rounded to 4 places.
    // A zero or null previous-year income yields a null rate (Spark SQL semantics).
    val rate1 = rate.withColumn("rate", round($"nextMonthIncome" / $"monthIncome" - 1, 4))
      .select("burk", "nextyear", "month", "rate")
      .withColumnRenamed("nextyear", "year")

    // Final result: every (burk, month, year) row, with the rate where the
    // previous year exists — left join keeps first-year rows with a null rate.
    tmp.join(rate1, List("burk", "month", "year"), joinType = "left")
      .orderBy("burk", "year", "month")
      .show(100)

    // Release the cached dataset and shut the session down cleanly.
    tmp.unpersist()
    spark.stop()
  }

}
