package com.shujia.batch

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object DwsAppCcdMrjq {

  /**
   * Daily traffic-accident metrics job (dws layer).
   *
   * Reads the accident fact table `dwd.dwd_base_acd_file` for the given
   * partition date, aggregates per-day counts (accidents, fatal accidents,
   * deaths, injuries, direct property loss), derives year-to-date totals and
   * year-over-year growth for each metric, writes the result as a
   * "^"-separated CSV under the dws HDFS path, and registers the partition
   * in Hive.
   *
   * @param args args(0) is the partition date (ds) to process
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("DwsAppCcdMrjq")
      // small daily aggregate: one shuffle partition keeps output compact
      .config("spark.sql.shuffle.partitions", 1)
      .enableHiveSupport() // enable Hive metastore support
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // partition argument
    val ds: String = args.head

    // Tables already registered in the Hive metastore can be read directly
    // through Spark SQL.

    // 1. Read the accident table, restricted to the requested partition.
    val baseAcdFile: DataFrame = spark
      .table("dwd.dwd_base_acd_file")
      .where($"ds" === ds)

    val baseDF: DataFrame = baseAcdFile
      // accident date (day granularity) taken from the accident timestamp
      .withColumn("tjrq", date_format($"sgfssj", "yyyy-MM-dd"))
      // group by date
      .groupBy($"tjrq")
      .agg(
        // daily accident count
        count($"tjrq") as "dr_sgs",
        // daily fatal-accident count (an accident counts if swrs30 != 0)
        sum(when($"swrs30" === 0, 0).otherwise(1)) as "dr_swsgs",
        // daily death count
        sum($"swrs30") as "dr_swrs",
        // daily injury count
        sum($"ssrs30") as "dr_ssrs",
        // daily direct property loss
        sum($"zjccss") as "dr_zjccss"
      )

    // metric name suffixes; each gets jn_/qn_dr_/tb_/tb_..._bj columns
    val indexList = List("sgs", "swsgs", "swrs", "ssrs", "zjccss")

    // Derive the extra columns for every metric; foldLeft threads the
    // growing DataFrame through each step (replaces a mutable var + loop).
    val enriched: DataFrame =
      indexList.foldLeft(baseDF)((df, indexName) => comIndex(df, spark, indexName))

    // keep only the output columns, in table order
    val resultDf: DataFrame = enriched.select(
      "tjrq",
      "dr_sgs",
      "jn_sgs",
      "tb_sgs",
      "tb_sgs_bj",
      "dr_swsgs",
      "jn_swsgs",
      "tb_swsgs",
      "tb_swsgs_bj",
      "dr_swrs",
      "jn_swrs",
      "tb_swrs",
      "tb_swrs_bj",
      "dr_ssrs",
      "jn_ssrs",
      "tb_ssrs",
      "tb_ssrs_bj",
      "dr_zjccss",
      "jn_zjccss",
      "tb_zjccss",
      "tb_zjccss_bj"
    )

    // write the partition directory as caret-separated CSV
    resultDf
      .write
      .format("csv")
      .option("sep", "^")
      .mode(SaveMode.Overwrite)
      .save(s"/daas/motl/dws/dws_app_acd_mrjq/ds=$ds")

    // register the new partition in the Hive metastore
    spark.sql(
      s"""
         |alter table dws.dws_app_acd_mrjq add IF NOT EXISTS  partition  (ds='$ds')
         |
         |""".stripMargin)
  }

  /**
   * Adds the derived columns for one metric, so the same window logic is
   * not repeated per metric:
   *
   *  - jn_<name>      : year-to-date total of dr_<name>
   *  - qn_dr_<name>   : dr_<name> for the same calendar day last year (0 if absent)
   *  - tb_<name>      : year-over-year growth rate in percent, 2 decimals
   *                     (null when last year's value is 0 — Spark division by zero)
   *  - tb_<name>_bj   : growth flag, "上升" (up) / "下降" (down)
   *
   * @param df        input DataFrame (must contain tjrq and dr_<indexName>)
   * @param spark     SparkSession (for implicits)
   * @param indexName metric name suffix, e.g. "sgs"
   * @return df with the four derived columns appended
   */
  def comIndex(df: DataFrame, spark: SparkSession, indexName: String): DataFrame = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    df
      // year-to-date total: sum over all rows of the same year
      .withColumn(s"jn_$indexName", sum($"dr_$indexName") over Window.partitionBy(date_format($"tjrq", "yyyy")))
      // same day last year: partition by month-day ("MM-dd" — lowercase dd is
      // day-of-month; uppercase "DD" is day-of-year and would mismatch across
      // leap years), order by year, take the previous row (default 0)
      .withColumn(s"qn_dr_$indexName", lag($"dr_$indexName", 1, 0) over Window.partitionBy(date_format($"tjrq", "MM-dd")).orderBy(date_format($"tjrq", "yyyy")))
      // year-over-year growth rate (%)
      .withColumn(s"tb_$indexName", round(($"dr_$indexName" - $"qn_dr_$indexName") / $"qn_dr_$indexName" * 100, 2))
      // growth flag ("下降" fixes the original "下将" typo)
      .withColumn(s"tb_${indexName}_bj", when($"dr_$indexName" > $"qn_dr_$indexName", "上升").otherwise("下降"))

  }

}
