package com.shujia.batch.car

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object DwsAppAcdMrjq {

  /**
    * Daily traffic-accident summary job (target table: dws.dws_app_acd_mrjq).
    *
    * Reads one partition of the accident fact table (dwd.dwd_base_acd_file),
    * aggregates per-day metrics (accidents, fatal accidents, deaths, injuries,
    * property loss), derives year-to-date totals and year-over-year growth for
    * each metric, and overwrites the matching partition of the result table.
    *
    * Run with:
    *   spark-submit --master yarn-client \
    *     --class com.shujia.batch.car.DwsAppAcdMrjq \
    *     --conf spark.sql.shuffle.partitions=1 \
    *     batch-1.0.jar [ds]
    *
    * @param args optional first argument: partition date in yyyyMMdd form;
    *             defaults to "20220627" so existing invocations are unchanged.
    */
  def main(args: Array[String]): Unit = {

    // Partition date: taken from the command line when supplied, otherwise
    // the previously hard-coded default (backward compatible).
    val ds: String = args.headOption.getOrElse("20220627")

    // 1. Spark SQL session with Hive metastore support.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("DwsAppAcdMrjq")
      .enableHiveSupport() // needed to resolve dwd/dws Hive tables
      .getOrCreate()
    import spark.implicits._

    // 2. Accident fact table, filtered to the requested partition.
    val baseAcdFileDF: Dataset[Row] = spark
      .table("dwd.dwd_base_acd_file")
      .where($"ds" === ds)

    // 3. Per-day base metrics, grouped by accident date "tjrq"
    //    (first 10 chars of sgfssj, i.e. "yyyy-MM-dd").
    val dailyDF: DataFrame = baseAcdFileDF
      .withColumn("tjrq", substring($"sgfssj", 1, 10))
      .groupBy($"tjrq")
      .agg(
        count($"sgfssj") as "dr_sgs",                           // accidents that day
        sum(when($"swrs30" > 0, 1).otherwise(0)) as "dr_swsgs", // fatal accidents that day
        sum($"swrs30") as "dr_swrs",                            // deaths that day
        sum($"ssrs7") as "dr_ssrs",                             // injuries that day
        sum($"zjccss") as "dr_zjccss"                           // property loss that day
      )

    // 4. For every metric, append year-to-date total, last-year same-day
    //    value, YoY growth rate and an up/down flag. foldLeft threads the
    //    DataFrame through genIndex without a mutable variable.
    val metrics = List("sgs", "swsgs", "swrs", "ssrs", "zjccss")
    val indexDF: DataFrame = metrics.foldLeft(dailyDF)((df, m) => genIndex(df, spark, m))

    // 5. Project output columns in the order the target table expects:
    //    tjrq, then (dr_, jn_, tb_, tb_*_bj) for each metric. Deriving the
    //    list from `metrics` keeps it in lockstep with step 4.
    val outputCols = col("tjrq") +: metrics.flatMap { m =>
      Seq(col(s"dr_$m"), col(s"jn_$m"), col(s"tb_$m"), col(s"tb_${m}_bj"))
    }
    val resultDF: DataFrame = indexDF.select(outputCols: _*)

    // 6. Overwrite the target partition for the same date.
    resultDF.createOrReplaceTempView("tmp")
    spark.sql(
      s"""
         |insert overwrite table dws.dws_app_acd_mrjq partition (ds='$ds')
         |select * from tmp
         |
       """.stripMargin)
  }

  /**
    * Appends the derived columns for one metric to the per-day DataFrame.
    *
    * Given the daily column "dr_&lt;index&gt;" this adds:
    *  - "jn_&lt;index&gt;":    year-to-date total (sum over the year of tjrq)
    *  - "qntq_&lt;index&gt;":  value on the same month-day of the previous year
    *                     (lag over a "MM-dd" partition ordered by year;
    *                     defaults to 1 when no previous year exists, which
    *                     also avoids division by zero below — NOTE(review):
    *                     confirm this default is the intended business rule)
    *  - "tb_&lt;index&gt;":    year-over-year growth rate in percent, 2 decimals
    *  - "tb_&lt;index&gt;_bj": "上升" (up) when the growth rate is positive,
    *                     otherwise "下降" (down)
    *
    * @param df    input DataFrame containing "tjrq" and "dr_&lt;index&gt;"
    * @param spark active session, needed for the $-string implicits
    * @param index metric suffix, e.g. "sgs"
    * @return df with the four derived columns appended
    */
  def genIndex(df: DataFrame, spark: SparkSession, index: String): DataFrame = {
    import spark.implicits._
    df
      // year-to-date total for the metric
      .withColumn(s"jn_$index", sum($"dr_$index") over Window.partitionBy(year($"tjrq")))
      // same-day value one year earlier: substring(tjrq, 6, 5) is "MM-dd"
      .withColumn(s"qntq_$index", lag($"dr_$index", 1, 1) over Window.partitionBy(substring($"tjrq", 6, 5)).orderBy(year($"tjrq")))
      // year-over-year growth rate (%)
      .withColumn(s"tb_$index", round(($"dr_$index" - $"qntq_$index") / $"qntq_$index" * 100, 2))
      // Up/down flag consistent with the growth rate. FIX: the original
      // compared the year-to-date total (jn_) against last year's single-day
      // value (qntq_), mixing cumulative and daily figures; the flag now
      // follows the sign of the rate computed above.
      .withColumn(s"tb_${index}_bj", when($"tb_$index" > 0, "上升").otherwise("下降"))
  }
}
