package com.shujia.batch

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Daily accident metrics batch job (dws.dws_app_acd_mrjq).
 *
 * Reads one `ds` partition of the accident fact table
 * `dwd.dwd_base_acd_file`, aggregates per accident date (tjrq), derives
 * year-total and year-over-year columns for each metric, writes the result
 * as caret-separated CSV into the partition directory, and registers the
 * partition with the Hive metastore.
 *
 * Usage: first program argument is the partition value `ds`.
 */
object DwsPppAcdMrjq {

  def main(args: Array[String]): Unit = {

    // Partition value passed as the first command-line argument.
    val ds: String = args.head

    /**
     * Create the Spark SQL session.
     */
    val spark: SparkSession = SparkSession
      .builder()
      .appName("DwsPppAcdMrjq")
      .enableHiveSupport() // enable Hive metastore support
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    /**
     * 1. Read the accident fact table, restricted to the requested partition.
     */
    val acdFile: DataFrame = spark
      .table("dwd.dwd_base_acd_file")
      .where($"ds" === ds)

    // 2. Daily base aggregates, keyed by accident date.
    //    Naming convention: `…sgs` = accident count, `…rs` = person count.
    val baseDF: DataFrame = acdFile
      .withColumn("tjrq", date_format($"sgfssj", "yyyy-MM-dd"))
      .groupBy($"tjrq")
      .agg(
        // accidents that day
        count($"tjrq") as "dr_sgs",
        // fatal accidents that day (at least one death within 30 days)
        sum(when($"swrs30" =!= 0, 1).otherwise(0)) as "dr_swsgs",
        // deaths that day
        sum($"swrs30") as "dr_swrs",
        // injured persons that day
        // FIX: previously counted accidents having injuries
        // (sum(when(ssrs =!= 0, 1).otherwise(0))); the metric is a person
        // count, consistent with dr_swrs = sum(swrs30).
        sum($"ssrs") as "dr_ssrs",
        // direct property loss that day
        sum($"zjccss") as "dr_zjccss"
      )

    // Metric suffixes; each gets the same set of derived columns.
    val indexs = List("sgs", "swsgs", "swrs", "ssrs", "zjccss")

    /**
     * 3. Derive year-total / year-over-year columns for every metric.
     *    foldLeft threads the DataFrame through comIndex once per metric,
     *    replacing the previous var + for-loop accumulation.
     */
    val enriched: DataFrame =
      indexs.foldLeft(baseDF)((df, indexName) => comIndex(df, spark, indexName))

    // 4. Final column order: tjrq, then (dr_, jn_, tb_, tb_…_bj) per metric —
    //    generated from `indexs` instead of a hand-written 21-column list.
    val outCols = $"tjrq" +: indexs.flatMap(n =>
      Seq($"dr_$n", $"jn_$n", $"tb_$n", $"tb_${n}_bj"))
    val resultDF: DataFrame = enriched.select(outCols: _*)

    // Save the data as caret-separated CSV into the partition directory.
    resultDF
      .write
      .format("csv")
      .option("sep", "^")
      .mode(SaveMode.Overwrite)
      .save(s"/daas/motl/dws/dws_app_acd_mrjq/ds=$ds")

    // Register the new partition with the Hive metastore.
    spark.sql(
      s"""
         |alter table dws.dws_app_acd_mrjq add IF NOT EXISTS  partition  (ds='$ds')
         |
         |""".stripMargin)

  }

  /**
   * Adds the derived columns for one daily metric `dr_<indexName>`:
   *
   *  - `jn_<indexName>`    : total of the daily value over the calendar year
   *                          (window partitioned by year, no ordering, so
   *                          every row of a year carries the full-year sum);
   *  - `qn_dr_<indexName>` : the daily value on the same calendar day (MM-dd)
   *                          of the previous year, via lag over years;
   *  - `tb_<indexName>`    : year-over-year change in percent, 2 decimals;
   *  - `tb_<indexName>_bj` : direction flag, "上升" (up) or "下降" (down).
   *
   * @param df        DataFrame containing `tjrq` and `dr_<indexName>`
   * @param spark     active session (needed for implicits/functions)
   * @param indexName metric suffix, e.g. "sgs"
   * @return the input DataFrame with the four derived columns appended
   */
  def comIndex(df: DataFrame, spark: SparkSession, indexName: String): DataFrame = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Year total of the metric.
    df.withColumn(s"jn_$indexName",
        sum($"dr_$indexName") over Window.partitionBy(year($"tjrq")))
      // Previous year's value for the same calendar day.
      .withColumn(s"qn_dr_$indexName",
        lag($"dr_$indexName", 1) over Window
          .partitionBy(date_format($"tjrq", "MM-dd"))
          .orderBy(year($"tjrq")))
      // Year-over-year percentage change.
      .withColumn(s"tb_$indexName",
        round(($"dr_$indexName" - $"qn_dr_$indexName") / $"qn_dr_$indexName" * 100, 2))
      // Direction flag for the change.
      .withColumn(s"tb_${indexName}_bj",
        when($"dr_$indexName" > $"qn_dr_$indexName", "上升").otherwise("下降"))
  }
}
