package com.whoami.idmagic.id

import java.util.regex.Pattern

import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

/**
  * 1. 采用连续出现时间段，最后取频次最高的id作为最终匹配id,看precise和recall
  * 2. 专for 长安福特
  * 3. 按ip切分，并且只取 频次最高，且在最高频次下只出现 1 次的pair_udid
  * 4. 使用点位id
  * 5. by ip 出最高频,
  * 6. 且统计ip重叠次数，即候选id: pair_udid在每个sip段出现的次数,相同的ts会被统计
  */
object idLink13 {

  /**
    * Job entry point: restricts the ad log to residential ("住宅用户") IPs, then
    * for every test device (md5_imei on the configured spot ids) finds candidate
    * pair udids seen on the same IP within +/- `hours` of the same timestamp,
    * and scores candidates by frequency and IP-overlap ratio. The final output
    * keeps only the single highest-frequency candidate per (hour, spot, udid).
    *
    * Required -D arguments: log, babel, output, ipgeo, spots, hours, lap_ratio.
    * Intermediate and final results are written under ${output}/4 .. ${output}/7.
    */
  def main(args: Array[String]): Unit = {
    val argsmap: Map[String, String] = parseArgs(args)
    val admlog = argsmap("log")
    val babel = argsmap("babel")
    val output = argsmap("output")
    val ipgeo = argsmap("ipgeo")
    val spots = argsmap("spots")
    val hourStr = argsmap("hours")
    val lap_ratio = argsmap("lap_ratio")

    // Time-window sizes in hours to evaluate, e.g. "1,2,6".
    val hours = hourStr.split(",").map(_.toInt)

    val spark = SparkSession
      .builder
      .appName(this.getClass.getSimpleName)
      .getOrCreate()
    // Bug fix: required for the Dataset.map / toDF calls below — the original
    // omitted this import, so the encoder for Dataset[String] was unresolved
    // and the file did not compile.
    import spark.implicits._

    val sceneName = "住宅用户" // residential-user scene filter
    spark.read.orc(ipgeo).createOrReplaceTempView("ipgeo")
    // The ip column is numeric; render it as dotted-quad text so it can be
    // joined against the raw_ip strings in the ad log.
    // NOTE(review): getString(0).toLong assumes the column is a string holding
    // a number — confirm against the ipgeo schema.
    val homeIpDf = spark.sql(s"select distinct ip from ipgeo where scene='${sceneName}'")
    homeIpDf.map(row => longToIp(row.getString(0).toLong)).toDF("ip")
      .persist(StorageLevel.MEMORY_AND_DISK_SER)
      .createOrReplaceTempView("homeip")

    // NOTE(review): the "babel" view is registered but never queried in this
    // job — confirm whether it is still needed.
    val babeldf = spark.read.orc(babel)
    babeldf.createOrReplaceTempView("babel")

    // Ad-log rows on residential IPs, keyed once by md5_imei and once by
    // raw_idfa (SQL UNION also de-duplicates).
    spark.read.orc(admlog.split(","): _*).select("md5_imei", "raw_idfa", "media_uid", "mz_spot_id", "raw_ip", "timestamp").createOrReplaceTempView("a")
    val admdf = spark.sql("(SELECT a.md5_imei as udid, a.mz_spot_id, a.raw_ip as ip, a.`timestamp` as ts, a.md5_imei " +
      "FROM a " +
      "JOIN homeip as c " +
      "ON a.raw_ip=c.ip) UNION ( " +
      "SELECT a.raw_idfa as udid, a.mz_spot_id, a.raw_ip as ip, a.`timestamp` as ts, a.md5_imei " +
      "FROM a " +
      "JOIN homeip as c " +
      "ON a.raw_ip=c.ip " +
      ")")
    admdf.createOrReplaceTempView("adm")

    // Test set: md5_imei is the id to be matched, restricted to the spot ids
    // passed via -Dspots (using spot ids, per changelog item 4).
    val mediasDf = spark.sql(s"SELECT a.md5_imei as udid, a.ip, a.ts, a.mz_spot_id as spid " +
      s"FROM adm as a " +
      s"WHERE a.md5_imei!='' " +
      s"AND a.mz_spot_id in ('${spots.split(",").mkString("','")}')")
      .persist(StorageLevel.MEMORY_AND_DISK_SER)

    //        mediasDf.write.orc(s"${output}/1")
    //        spark.read.orc(s"${output}/1")
    mediasDf.createOrReplaceTempView("t_1")

    // No sampling here — keep all rows. (This intentionally re-registers view
    // "a"; the raw ad-log view of the same name is no longer needed.)
    spark.sql("SELECT DISTINCT udid, spid, ip, ts as ts2 from t_1").cache().createOrReplaceTempView("a")
    // One copy of the candidate rows per window size, tagged with `hour`.
    // Bug fix: repartition() returns a new Dataset; the original discarded the
    // result on a separate line, so the repartition(373) never took effect.
    val hoursDf = hours
      .map(hour => spark.sql(s"SELECT udid,spid,ip,ts2,${hour} as hour FROM a"))
      .reduce(_ union _)
      .repartition(373)
    //        hoursDf.write.orc(s"${output}/3")
    hoursDf.createOrReplaceTempView("t_3")

    // Candidate pairs: any other spot's non-empty udid seen on the same IP
    // within the +/- hour window around the test row's timestamp.
    val foundIdsDf = spark.sql(s"" +
      s"SELECT c.hour, a.mz_spot_id as pair_spid, a.ip as pair_ip, a.ts as pair_ts, a.udid as pair_udid, c.spid as sspid, c.ip as sip, c.udid as sudid, c.ts2 as sts " +
      s"FROM adm as a " +
      s"JOIN t_3 as c " +
      s"ON a.ip=c.ip " +
      s"AND a.udid!='' " +
      s"AND a.ts >= (c.ts2 - c.hour*3600) AND a.ts < (c.ts2 + c.hour*3600) " +
      s"AND a.mz_spot_id!=c.spid " +
      s"").persist(StorageLevel.MEMORY_AND_DISK_SER)
    foundIdsDf.write.orc(s"${output}/4") //*
    foundIdsDf.createOrReplaceTempView("t_4")

    // Step 4_1: per candidate, count the distinct test-set IPs it overlaps on
    // (lap_ip_cnt) versus all distinct test-set IPs of the test id
    // (all_sip_cnt); lap_ratio is their quotient (changelog item 6).
    val lapStatsSql = "SELECT b.hour, b.sspid, b.sudid, b.sip, b.sts, b.pair_udid, b.pair_spid, b.pair_ip, b.pair_ts, a.lap_ip_cnt, c.all_sip_cnt, a.lap_ip_cnt/c.all_sip_cnt as lap_ratio " +
      "FROM t_4 as b " +
      "JOIN ( " +
      "SELECT hour,sspid,sudid,pair_udid,count(sip) as lap_ip_cnt " +
      "FROM ( " +
      "SELECT DISTINCT hour,sspid,sudid,sip,pair_udid " +
      "FROM t_4 " +
      ") " +
      "GROUP BY hour,sspid,sudid,pair_udid " +
      ") as a " +
      "ON a.hour=b.hour AND a.sspid=b.sspid AND a.sudid=b.sudid AND a.pair_udid=b.pair_udid " +
      "JOIN ( " +
      "SELECT hour,sspid,sudid,count(sip) as all_sip_cnt " +
      "FROM ( " +
      "SELECT DISTINCT hour,sspid,sudid,sip " +
      "FROM t_4 " +
      ") " +
      "GROUP BY hour,sspid,sudid " +
      ") as c " +
      "ON c.hour=b.hour AND c.sspid=b.sspid AND c.sudid=b.sudid "
    val t41df = spark.sql(lapStatsSql)
    t41df.cache()
    t41df.write.orc(s"${output}/4_1")
    t41df.createOrReplaceTempView("t_4_1")

    // Step 5: candidate frequency per (hour, sspid, sudid), keeping only
    // candidates whose overlap ratio meets the -Dlap_ratio threshold.
    val freqSql = s"SELECT hour, sspid, sudid, pair_udid, freq " +
      "FROM ( " +
      "SELECT pair_udid, count(pair_udid) as freq, sspid, hour, sudid " +
      "FROM ( " +
      "SELECT hour,sspid,sudid,pair_udid " +
      "FROM t_4_1 " +
      s"WHERE lap_ratio>=${lap_ratio} " +
      ") " +
      "GROUP BY hour, sspid, sudid, pair_udid " +
      ")"
    val t5Df = spark.sql(freqSql)
    t5Df.cache()
    t5Df.write.orc(s"${output}/5") //*
    t5Df.createOrReplaceTempView("t_5")

    // Step 5-1: keep only the candidates that reach the maximum frequency for
    // their (hour, sspid, sudid) group.
    val maxFreqSql = s"SELECT a.hour, a.sspid, a.sudid, a.pair_udid, a.freq as max_freq " +
      s"FROM t_5 as a " +
      s"WHERE a.freq=( " +
      s"SELECT max(freq) " +
      s"FROM t_5 as b " +
      s"WHERE b.sudid=a.sudid " +
      s"AND b.sspid=a.sspid AND b.hour=a.hour " +
      s") "
    val pairedDf = spark.sql(maxFreqSql).cache()
    pairedDf.write.orc(s"${output}/6") //*
    pairedDf.createOrReplaceTempView("t_6")

    // Final step: keep a match only when exactly one candidate attains the
    // maximum frequency (cnt=1), i.e. the winner is unambiguous.
    val uniqueWinnerSql = s"SELECT c.sudid,c.sspid,c.hour,c.pair_udid,c.max_freq,b.cnt " +
      s"FROM t_6 as c " +
      s"JOIN ( " +
      s"SELECT a.hour,a.sspid,a.sudid,a.max_freq,a.cnt " +
      s"FROM ( " +
      s"SELECT hour, sspid, sudid, max_freq, count(pair_udid) as cnt " +
      s"FROM t_6 " +
      s"GROUP BY hour, sspid, sudid, max_freq " +
      s") as a where a.cnt=1 " +
      s") as b " +
      s"ON b.sudid=c.sudid " +
      s"AND b.sspid=c.sspid " +
      s"AND b.hour=c.hour " +
      s"AND b.max_freq=c.max_freq "
    val t7df = spark.sql(uniqueWinnerSql)
    t7df.write.orc(s"${output}/7") //*
  }

  /**
    * Parses `-Dkey=value` style command-line arguments into a map.
    *
    * Arguments not starting with "-D" are ignored. Surrounding double quotes
    * around the value are stripped; an argument with no '=' maps to "".
    *
    * Bug fix: the original split on every '=' and re-joined the pieces with
    * `mkString("")`, so a value containing '=' was corrupted (e.g. `-Durl=a=b`
    * yielded "ab"). Splitting on the first '=' only preserves such values.
    *
    * @param args raw program arguments
    * @return key -> value map with the "-D" prefix removed from keys
    */
  def parseArgs(args: Array[String]): Map[String, String] = {
    args.filter(_.startsWith("-D")).map { arg =>
      // Limit 2: split at the first '=' only, keeping the rest of the value intact.
      val kv = arg.split("=", 2)
      val key = kv(0).substring(2)
      val value = kv.lift(1).getOrElse("")
        .stripPrefix("\"").stripSuffix("\"")
      (key, value)
    }.toMap
  }

  /**
    * Converts a dotted-quad IPv4 string (e.g. "1.2.3.4") into its numeric form
    * (big-endian: the first octet is the most significant). Inverse of
    * [[longToIp]] for valid addresses. No validation is performed; a
    * non-numeric segment throws NumberFormatException.
    */
  def ipToLong(ip: String): Long = {
    var acc = 0L
    for (segment <- ip.split(Pattern.quote("."))) {
      acc = acc * 256 + segment.toLong
    }
    acc
  }

  /**
    * Renders a numeric IPv4 value as dotted-quad text (e.g. 16909060 ->
    * "1.2.3.4"), emitting the most significant octet first. Inverse of
    * [[ipToLong]] for values in the unsigned 32-bit range.
    */
  def longToIp(num: Long): String = {
    val octets = Seq(24, 16, 8, 0).map(shift => (num >> shift) & 0xFF)
    octets.mkString(".")
  }

}
