package com.shujia.dws

import com.shujia.common.{DateUtil, SparkTool}
import com.shujia.util.Geography
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.storage.StorageLevel

object DwsSpacetimeCompanionMskDay extends SparkTool {

  /**
   * Daily space-time-companion job: joins the day's merged location records
   * against the trajectories of confirmed cases and flags every record that
   * was within one hour AND within 500 of a confirmed record, then writes
   * the detail pairs and the distinct companion MDNs as partitioned CSV.
   */
  override def run(spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // UDF: time difference between two timestamp strings, in seconds
    // (delegates to DateUtil).
    // NOTE(review): assumes DateUtil.diff_date returns a non-negative value;
    // if it can be negative, the `<= 3600` filter below would also accept
    // pairs that are arbitrarily far apart in one direction — confirm.
    val diffSeconds: UserDefinedFunction =
      udf((a: String, b: String) => DateUtil.diff_date(a, b))

    // UDF: distance between two lon/lat points so it can be used in the DSL.
    // NOTE(review): presumably metres, to match the 500 threshold — confirm
    // against Geography.calculateLength. Also assumes the coordinate columns
    // are always parseable doubles (toDouble would throw otherwise).
    val pointDistance: UserDefinedFunction =
      udf((lon1: String, lat1: String, lon2: String, lat2: String) =>
        Geography.calculateLength(lon1.toDouble, lat1.toDouble, lon2.toDouble, lat2.toDouble))

    // Load the confirmed-case roster from MySQL.
    // NOTE(review): credentials are hard-coded in source; consider moving
    // them to configuration.
    val confirmedDF: DataFrame = spark.read
      .format("jdbc")
      .options(Map(
        "url" -> "jdbc:mysql://master:3306/crm",
        "dbtable" -> "confirmed",
        "user" -> "root",
        "password" -> "123456"
      ))
      .load()

    // Collect the confirmed MDNs to the driver (small set) and broadcast
    // them so every executor can do a cheap local membership test.
    val confirmedMdns: Set[String] = confirmedDF.select("mdn").as[String].collect().toSet
    val confirmedMdnsBc: Broadcast[Set[String]] = spark.sparkContext.broadcast(confirmedMdns)

    // Merged location records for the target day (~4M rows); one known-bad
    // MDN is excluded up front.
    val locations: DataFrame = spark
      .table("dwi.dwi_res_regn_mergelocation_msk_d")
      .where($"day_id" === day_id && $"mdn" =!= "9740853D6AF8DB38FC15E82FDD97DD10")

    // The location set is scanned twice below (confirmed tracks + candidate
    // side of the join), so keep it cached.
    locations.persist(StorageLevel.MEMORY_AND_DISK_SER)

    // Trajectories of confirmed cases (~400 rows); columns are renamed with
    // a c_ prefix so they remain distinguishable after the self-join.
    val confirmedTracks: DataFrame = locations
      .filter(r => confirmedMdnsBc.value.contains(r.getAs[String]("mdn")))
      .select(
        $"mdn" as "c_mdn",
        $"start_date" as "c_start_date",
        $"longi" as "c_longi",
        $"lati" as "c_lati",
        $"county_id" as "c_county_id"
      )

    /**
     * Space-time companions: a non-confirmed record qualifies when it is
     *   1) within one hour (3600 s) of a confirmed record, and
     *   2) within 500 of it.
     * The join is restricted to the same county to shrink the comparison
     * space, and the tiny confirmed-track side is broadcast.
     */
    val companions: DataFrame = locations
      .filter(r => !confirmedMdnsBc.value.contains(r.getAs[String]("mdn")))
      .join(broadcast(confirmedTracks), $"county_id" === $"c_county_id")
      // time gap first: it is the cheaper filter and prunes most pairs
      .withColumn("diff_time", diffSeconds($"start_date", $"c_start_date"))
      .filter($"diff_time" <= 3600)
      // then the geographic distance on the survivors
      .withColumn("distance", pointDistance($"longi", $"lati", $"c_longi", $"c_lati"))
      .filter($"distance" <= 500)
      .select(
        $"mdn", $"start_date", $"end_date", $"county_id",
        $"longi", $"lati", $"bsid", $"grid_id",
        $"c_mdn", $"c_start_date", $"c_longi", $"c_lati"
      )

    // The result is written out twice (detail + distinct MDNs): cache once.
    companions.cache()

    // Detail output: one row per (candidate record, confirmed record) pair.
    companions.write
      .mode(SaveMode.Overwrite)
      .format("csv")
      .option("sep", ",")
      .save(s"/daas/motl/dws/dws_spacetime_companion_msk_d/day_id=$day_id")

    // Register the freshly written partition with the Hive metastore.
    spark.sql(
      s"""
         |alter table dws.dws_spacetime_companion_msk_d add if not exists partition(day_id='$day_id')
         |""".stripMargin)

    // MDN-level output: the distinct companion phone numbers.
    companions
      .select("mdn")
      .distinct()
      .write
      .mode(SaveMode.Overwrite)
      .format("csv")
      .option("sep", ",")
      .save(s"/daas/motl/dws/dws_spacetime_companion_mdn_msk_d/day_id=$day_id")

    spark.sql(
      s"""
         |alter table dws.dws_spacetime_companion_mdn_msk_d add if not exists partition(day_id='$day_id')
         |""".stripMargin)

    // All outputs are materialized: release both caches.
    locations.unpersist()
    companions.unpersist()
  }
}
