package com.shujia.car.batch

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object DwsAppVioZqzf {

  /**
   * Daily batch job computing traffic-violation enforcement metrics into
   * `dws.dws_app_vio_zqzf`.
   *
   * Reads the on-site (`dwd.base_vio_force`) and off-site (`dwd.base_vio_surveil`)
   * violation tables for partition `ds`, unions them into (id, wfsj, wfxw), then per
   * violation date computes, for each metric key in `indexs`: the daily count
   * (`dr_*_wfs`), the calendar-year total (`jn_*_wfs`) and a previous-row ratio
   * (`*_tb`), finally overwriting the target partition.
   *
   * @param args args(0) = target partition date `ds`
   */
  def main(args: Array[String]): Unit = {

    val ds: String = args.head
    // ds is spliced into SQL text below; reject values that could break out of the
    // quoted literal (basic injection guard, transparent for legitimate dates).
    require(ds.nonEmpty && !ds.contains("'"), s"invalid ds partition value: $ds")

    // 1. Spark session (Hive support is required for the dwd/dws tables).
    val spark: SparkSession = SparkSession
      .builder()
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Union on-site and off-site violations. `$$` is the s-interpolator escape for a
    // literal `$` in the regexes, so the runtime SQL text is unchanged.
    val unionDF: DataFrame = spark.sql(
      s"""
        |select
        |    id,
        |    date_format(wfsj, 'yyyy-MM-dd') as wfsj,
        |    regexp_replace(wfxw, '[A-Z]$$', '') as wfxw
        |from
        |    (
        |        -- 现场违法表
        |        select
        |            id,
        |            wfsj,
        |            wfxw
        |        from
        |            (
        |                select
        |                    pzbh as id,
        |                    wfsj,
        |                    explode(array(wfxw1, wfxw2, wfxw3, wfxw4, wfxw5)) as wfxw -- 一行转多行
        |                from
        |                    dwd.base_vio_force
        |                where ds='$ds'
        |            ) as a
        |        where
        |            wfxw is not null
        |    )
        |union all (
        |    -- 非现场违法表
        |    select
        |        wfbh as id,
        |        date_format(wfsj, 'yyyy-MM-dd') as wfsj,
        |        regexp_replace(wfxw, '[A-Z]$$', '') as wfxw
        |    from
        |        dwd.base_vio_surveil
        |    where
        |        wfxw is not null
        |        and wfbh is not null
        |        and wfbh != ''
        |        and ds='$ds'
        |)
        |""".stripMargin
    )

    // Metric keys. "zd" (the overall total) is counted as distinct violation ids;
    // every other key is counted by matching wfxw against its code list from IndexKJUTIL.
    val indexs: List[String] = List("zd", "zjjs", "yjjs", "zdhp", "wzbz", "tpjp", "wdtk", "jspz", "wxjs", "cy", "cz")

    // count(when(cond, 1)) counts only the rows where cond holds: when() without
    // otherwise() yields null for non-matching rows and count() ignores nulls.
    def wfsCount(name: String) =
      count(when($"wfxw".isInCollection(IndexKJUTIL.getIndexKJ(name)), 1)) as s"dr_${name}_wfs"

    // Daily counts per violation date, one dr_<key>_wfs column per metric key.
    val dailyDF: DataFrame = unionDF
      .where($"wfsj".isNotNull)
      .groupBy($"wfsj")
      .agg(
        countDistinct($"id") as "dr_zd_wfs",
        indexs.tail.map(wfsCount): _*
      )

    // For each metric add:
    //  - jn_<key>_wfs: sum over Window.partitionBy(year(wfsj)) with no frame/orderBy,
    //    i.e. the total for the whole calendar year of that row's wfsj;
    //  - <key>_tb: daily count divided by lag(..., 1) minus 1, defaulting to 1.0 when
    //    there is no previous row.
    // NOTE(review): the lag(1) over a global orderBy(wfsj) compares against the
    // PREVIOUS ROW (previous date), not the same date last year — confirm this matches
    // the intended 同比 (year-over-year) definition; the unpartitioned window also
    // collapses the data onto a single partition.
    val indexDF: DataFrame = indexs.foldLeft(dailyDF) { (df, name) =>
      df
        .withColumn(s"jn_${name}_wfs", sum(s"dr_${name}_wfs") over Window.partitionBy(year($"wfsj")))
        .withColumn(s"${name}_tb", coalesce(round(($"dr_${name}_wfs" / (lag($"dr_${name}_wfs", 1) over Window.orderBy($"wfsj"))) - 1, 4), expr("1.0")))
    }

    // Column order must match the target table schema: wfsj, then (dr, jn, tb) per key.
    val resultCols = $"wfsj" +: indexs.flatMap(n => Seq(col(s"dr_${n}_wfs"), col(s"jn_${n}_wfs"), col(s"${n}_tb")))
    val resultDf: DataFrame = indexDF.select(resultCols: _*)

    // Overwrite the target partition for this ds.
    resultDf.createTempView("tmp")
    spark.sql(
      s"""
         |insert overwrite table dws.dws_app_vio_zqzf partition (ds='$ds')
         |select * from tmp
         |""".stripMargin)
  }

}
