package com.shujia.batch

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Batch job building the DWS-layer law-enforcement ("执法") violation metrics.
 *
 * Reads the day's on-site and non-on-site violation records from the DWD layer,
 * computes per-day counts for each metric, then year-to-date totals and
 * year-over-year comparisons, and writes the result as a Hive partition.
 */
object DwsAppVioZqzf {

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("DwsAppVioZqzf")
      .config("spark.sql.shuffle.partitions", 1)
      .enableHiveSupport() // enable Hive metastore support
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Partition (date) argument; the job fails fast if it is missing.
    val ds: String = args.head

    // Non-on-site (surveillance/camera) violation records for this partition.
    val baseSurveil: DataFrame = spark
      .table("dwd.dwd_base_vio_surveil")
      .where($"ds" === ds)

    // On-site (officer-issued) violation records for this partition.
    val baseVioForce: DataFrame = spark
      .table("dwd.dwd_base_vio_force")
      .where($"ds" === ds)

    /**
     * One on-site record may carry up to five violation codes (wfxw1..wfxw5);
     * explode them into one row per code.
     *
     * NOTE(review): explode(array(...)) keeps null elements, so a record with
     * fewer than five codes yields rows with a null wfxw that are still
     * counted in the total (dr_zd_wfs) — confirm whether such rows should be
     * filtered out first.
     */
    val forceDF: DataFrame = baseVioForce.select(
      // violation date, truncated to day granularity
      date_format($"wfsj", "yyyy-MM-dd") as "wfsj",
      // violation code
      explode(array($"wfxw1", $"wfxw2", $"wfxw3", $"wfxw4", $"wfxw5")) as "wfxw"
    )

    // Non-on-site records already carry a single violation code per row.
    val surveilDf: DataFrame = baseSurveil.select(
      date_format($"wfsj", "yyyy-MM-dd") as "wfsj",
      $"wfxw"
    )

    // Union of both sources; union is positional, and both sides expose
    // exactly (wfsj, wfxw) in that order.
    val unionDF: DataFrame = forceDF.union(surveilDf)

    // Metric name -> list of violation codes that belong to that metric.
    val indexMap: Map[String, List[String]] = ZqZfIndexUtils.getIndexMap

    // All metric names. "zd" (the grand total) is a plain row count; every
    // other metric counts rows whose code falls in its indexMap code list.
    val indexList = List("zd", "zjjs", "yjjs", "zdhp", "wzbz", "tpjp", "wdtk", "jspz", "wxjs", "cy", "cz")

    // Per-day aggregation columns, generated from indexList instead of
    // spelling out eleven near-identical sum(when(...)) expressions.
    val dailyAggs = count($"wfsj").as("dr_zd_wfs") +:
      indexList.tail.map { name =>
        sum(when($"wfxw".isInCollection(indexMap(name)), 1).otherwise(0)).as(s"dr_${name}_wfs")
      }

    val dailyDF: DataFrame = unionDF
      // strip a trailing capital-letter suffix from the violation code so it
      // matches the plain codes used in indexMap
      .withColumn("wfxw", regexp_replace($"wfxw", "[A-Z]$", ""))
      .groupBy($"wfsj")
      .agg(dailyAggs.head, dailyAggs.tail: _*)

    // Derive year-to-date and year-over-year columns for every metric.
    val baseDF: DataFrame =
      indexList.foldLeft(dailyDF)((df, indexName) => comIndex(df, spark, indexName))

    // Final output: date followed by the four derived columns per metric,
    // generated in the same order the original hand-written list used.
    val resultCols = "wfsj" +: indexList.flatMap { name =>
      Seq(s"dr_${name}_wfs", s"jn_${name}_wfs", s"${name}_tb", s"${name}_tbbj")
    }
    val resultDF: DataFrame = baseDF.select(resultCols.head, resultCols.tail: _*)

    // Write the partition's data as '^'-separated CSV directly under the
    // table's partition directory.
    resultDF.write
      .format("csv")
      .option("sep", "^")
      .mode(SaveMode.Overwrite)
      .save(s"/daas/motl/dws/dws_app_vio_zqzf/ds=$ds")

    // Register the partition with the Hive metastore (idempotent).
    spark.sql(
      s"""
        |alter table dws.dws_app_vio_zqzf add IF NOT EXISTS  partition  (ds='$ds')
        |
        |""".stripMargin)

  }

  /**
   * For one metric, derive from its daily count column `dr_<name>_wfs`:
   *
   *  - `jn_<name>_wfs`    : total over the calendar year of `wfsj`
   *  - `qn_dr_<name>_wfs` : same calendar day (MM-dd) of the previous year,
   *                         defaulting to 0 when there is no previous year
   *  - `<name>_tb`        : year-over-year change in percent, rounded to two
   *                         decimals (null when last year's count is 0, since
   *                         non-ANSI Spark division by zero yields null)
   *  - `<name>_tbbj`      : "上升" (up) when today's count exceeds last
   *                         year's same-day count, otherwise "下降" (down;
   *                         equal counts are flagged as down)
   *
   * @param df        DataFrame containing `wfsj` and `dr_<indexName>_wfs`
   * @param spark     active session, used for implicits
   * @param indexName metric name used to build the column names
   * @return input DataFrame with the four derived columns appended
   */
  def comIndex(df: DataFrame, spark: SparkSession, indexName: String): DataFrame = {

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // column references reused across the derived expressions
    val dr = $"dr_${indexName}_wfs"
    val qn = $"qn_dr_${indexName}_wfs"

    df
      // year-to-date total: sum of daily counts within the same year
      .withColumn(s"jn_${indexName}_wfs",
        sum(dr) over Window.partitionBy(date_format($"wfsj", "yyyy")))
      // previous year's same day: lag by one year within the same MM-dd slot
      .withColumn(s"qn_dr_${indexName}_wfs",
        lag(dr, 1, 0) over Window.partitionBy(date_format($"wfsj", "MM-dd")).orderBy(date_format($"wfsj", "yyyy")))
      // year-over-year percentage change
      .withColumn(s"${indexName}_tb", round((dr - qn) / qn * 100, 2))
      // year-over-year direction flag
      .withColumn(s"${indexName}_tbbj", when(dr > qn, "上升").otherwise("下降"))

  }

}
