package com.whoami.idmagic.id

import java.util.regex.Pattern

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.lit
import org.apache.spark.storage.StorageLevel

/**
  * 1. Split the time range by day; within each day pick one random record from the
  *    consecutive time windows in which the id appears.
  * 2. Take the most frequent candidate id as the final matched id, then evaluate
  *    precision and recall.
  * 3. Built specifically for the Estée Lauder campaign.
  */
object idLink8 {

  /**
    * Job entry point. Pipeline (each step persists an intermediate ORC dataset under `output`):
    *
    *   1. Restrict monitor logs to residential IPs and the given campaign ids (`output/1`).
    *   2. Keep udids active on at least `day_limit` distinct days per campaign (`output/2`).
    *   3. Sample one random event per (campaign, udid, day) (`output/3-0`, `output/3`).
    *   4. For each (campaign, day, hour-window), collect udids from OTHER campaigns seen on
    *      the same IP within +/- `hours` hours of the sampled event (`output/4`).
    *   5. Count pair frequencies, then keep one random pair among the max-frequency
    *      candidates per source udid (`output/5`, `output/5_2`).
    *   6. Emit match counts per (campaign, hour) and test-set sizes (`output/6`, `output/7`).
    *
    * Required -D arguments: log, babel, output, hours, ipgeo, caids, day_limit.
    */
  def main(args: Array[String]): Unit = {
    val argsmap: Map[String, String] = parseArgs(args)
    val admlog = argsmap("log")         // comma-separated daily log paths; last path segment is the day
    val babel = argsmap("babel")        // spot-id -> campaign-id mapping table
    val output = argsmap("output")      // base output directory
    val hourStr = argsmap("hours")      // comma-separated hour-window sizes
    val ipgeo = argsmap("ipgeo")        // ip -> scene table (ip stored as a decimal number in a string)
    val caids = argsmap("caids")        // comma-separated campaign ids
    val dayLimit = argsmap("day_limit") // minimum number of distinct active days per udid

    val hours = hourStr.split(",").map(x => x.toInt)
    val days = admlog.split(",").map(p => p.split("/").last)

    val spark = SparkSession
      .builder
      .appName(s"${this.getClass.getSimpleName}")
      .getOrCreate()
    // Required for the Dataset.map / toDF calls below (implicit Encoder derivation).
    import spark.implicits._

    // NOTE(review): caids/dayLimit/sceneName are interpolated directly into SQL text.
    // They come from job arguments rather than end users, but consider validating them.
    val sceneName = "住宅用户"
    spark.read.orc(ipgeo).createOrReplaceTempView("ipgeo")
    // The ipgeo `ip` column is a number; convert it to dotted-quad form so it can be
    // joined against the raw logs' textual IPs.
    val homeIpDf = spark.sql(s"select distinct ip from ipgeo where scene='${sceneName}'")
    homeIpDf.map(row => longToIp(row.getString(0).toLong))
      .toDF("ip")
      .persist(StorageLevel.MEMORY_AND_DISK_SER)
      .createOrReplaceTempView("homeip")

    // NOTE(review): a homeIpDf.filter(...) checking lbs-style values ("0.000000x...")
    // previously sat here, but its result was discarded (DataFrame transformations are
    // pure), so it had no effect; it was removed as dead code.

    val babeldf = spark.read.orc(babel)
    babeldf.createOrReplaceTempView("babel")

    // Per-day monitor logs restricted to residential IPs; the union exposes both the
    // md5_imei and raw_idfa id spaces under the single `udid` column.
    val admdf = admlog.split(",").map(log => {
      val day = log.split("/").last
      spark.read.orc(log).select("md5_imei", "raw_idfa", "mz_spot_id", "raw_ip", "timestamp")
        .withColumn("day", lit(day)).createOrReplaceTempView("a")
      val tmpdf = spark.sql(
        "(SELECT a.md5_imei as udid, a.mz_spot_id, a.raw_ip as ip, a.`timestamp` as ts, a.day " +
          "from a " +
          "join homeip as c " +
          "on a.raw_ip=c.ip) union ( " +
          "SELECT a.raw_idfa as udid, a.mz_spot_id, a.raw_ip as ip, a.`timestamp` as ts, a.day " +
          "from a " +
          "join homeip as c " +
          "on a.raw_ip=c.ip " +
          ")")
      tmpdf
    }).reduce((a, b) => a.union(b)).repartition(373).persist(StorageLevel.MEMORY_AND_DISK_SER)
    admdf.createOrReplaceTempView("adm")
    // All monitor logs in the date range, under residential IPs: imei/idfa, spid, ip, ts

    val dayMediasDf = spark.sql(s"SELECT a.udid, a.ip, a.ts, a.day, b.campaign_id as caid " +
      s"FROM adm as a " +
      s"JOIN babel as b " +
      s"ON a.mz_spot_id=cast(b.spots_id as bigint) " +
      s"AND a.udid!='' " +
      s"AND b.campaign_id in ('${caids.split(",").mkString("','")}')").persist(StorageLevel.MEMORY_AND_DISK_SER)

    dayMediasDf.write.orc(s"${output}/1")
    spark.read.orc(s"${output}/1").cache().createOrReplaceTempView("day_media")
    // Same logs restricted to the requested campaign ids: imei/idfa, spid, ip, ts, day, caid

    spark.sql("select distinct udid, day, caid from day_media").createOrReplaceTempView("b")
    spark.sql("select udid, count(udid) as cnt, caid from b group by caid, udid").createOrReplaceTempView("c")
    // Threshold is configurable via day_limit; an alternative would be cnt > days.length / 2.
    val joined3daysDf = spark.sql(s"select udid, caid from c where cnt>=${dayLimit}")

    joined3daysDf.write.orc(s"${output}/2")
    spark.read.orc(s"${output}/2").createOrReplaceTempView("d")
    // udids under residential IPs and the requested campaigns, seen on >= day_limit days

    val availIds = caids.split(",").map(name => {
      val tmpdf = spark.sql(s"select distinct udid from d where caid='${name}'")
      tmpdf.createOrReplaceTempView("e")
      val count = tmpdf.count()
      println(s"[stats][caid] ${name}, [count] ${count}")
      // Test set "e" for this campaign

      val iptsDf = spark.sql("select a.udid, a.ip, a.ts, a.caid, a.day " +
        "from day_media as a " +
        "join e " +
        "on e.udid=a.udid")
      iptsDf
    }).reduce((a, b) => a.union(b))
    availIds.createOrReplaceTempView("sample_ids")
    availIds.write.orc(s"${output}/3-0")
    // Full event set of the test-set udids

    // One random event per (caid, udid, day), picked with row_number over rand().
    val oneIdsDf = spark.sql("select a.udid, a.ip, a.ts as ts2, a.caid, a.day as day2 from (" +
      "select udid, ip, ts, caid, day, " +
      "row_number() over (partition by caid,udid,day order by rand()) as rn " +
      "from sample_ids " +
      ") as a where a.rn = 1 ").persist(StorageLevel.MEMORY_AND_DISK_SER)
    oneIdsDf.write.orc(s"${output}/3")

    spark.read.orc(s"${output}/3").createOrReplaceTempView("t_3")
    // Per-day single-event sample of the test set

    // Step 4: generate candidate events from OTHER campaigns.
    val cartesianPair = for {
      a <- caids.split(",")
      b <- days
      c <- hours
    } yield (a, b, c)

    val foundIdsDf = cartesianPair.map { case (acaid, day, h) => {
      val sec = h * 3600 // window half-width in seconds; ts is presumably epoch seconds — TODO confirm
      val notMediasDf = spark.sql(
        s"SELECT a.udid as pair_udid, b.campaign_id as pair_caid, c.caid as scaid, c.udid as sudid, " +
          s"c.ip as sip, c.ts2 as sts, a.day " +
          s"FROM adm as a " +
          s"JOIN babel as b " +
          s"ON a.mz_spot_id=cast(b.spots_id as bigint) " +
          s"AND a.day='${day}' " +
          s"AND a.udid!='' " +
          s"AND b.campaign_id!='${acaid}' " +
          s"JOIN t_3 as c " +
          s"ON a.ip=c.ip AND a.ts >= (c.ts2 - ${sec}) AND a.ts < (c.ts2 + ${sec}) " +
          s"AND c.day2='${day}' " +
          s"AND c.caid='${acaid}' " +
          s"").withColumn("hour", lit(h)).persist(StorageLevel.MEMORY_AND_DISK_SER)
      notMediasDf.repartition(37)
    }
    }.reduce((a, b) => a.union(b))
    foundIdsDf.repartition(373).write.orc(s"${output}/4")
    // Candidate events from other campaigns: same IP, same day, within the hour window

    // Step 5
    spark.read.orc(s"${output}/4").createOrReplaceTempView("t_4")

    // Step 5: no match-rate computation here — instead sample randomly among the
    // max-frequency candidates. `isEq` marks whether the pair is a trivial self-match.
    var sql = "select a.sudid, a.scaid, a.hour, a.freq, a.pair_udid, b.isEq " +
      "from (select pair_udid, count(pair_udid) as freq, scaid, hour, sudid " +
      "from (select pair_udid,scaid,hour,sudid from t_4) " +
      "group by pair_udid,scaid,hour,sudid) as a join (" +
      "select pair_udid, sudid, case when (pair_udid==sudid) then 1 else 0 end as isEq from t_4 group by pair_udid,sudid " +
      ") as b on b.pair_udid=a.pair_udid and b.sudid=a.sudid "
    val t5Df = spark.sql(sql)
    t5Df.write.orc(s"${output}/5")
    t5Df.cache().createOrReplaceTempView("t_5")

    // Step 5-2: keep one random pair among the highest-frequency candidates per
    // (sudid, scaid, hour); rand(37) makes the tie-break seeded.
    sql = s"select c.sudid,c.scaid,c.hour,c.max_freq,c.pair_udid " +
      s"from (select a.sudid,a.scaid,a.hour,b.max_freq,a.pair_udid, " +
      s"row_number() over(partition by a.sudid,a.scaid,a.hour,b.max_freq order by rand(37)) as rn " +
      s"from t_5 as a " +
      s"join (select sudid,scaid,hour,max(freq) as max_freq " +
      s"from t_5 group by sudid,scaid,hour) as b " +
      s"on a.sudid=b.sudid and a.scaid=b.scaid and a.hour=b.hour) as c " +
      s"where c.rn=1"
    val pairedDf = spark.sql(sql)
    pairedDf.write.orc(s"${output}/5_2")

    // Step 6: count, per (campaign, hour), how many paired udids also belong to the test set.
    spark.sql("select a.pair_udid, a.hour, a.scaid " +
      "from t_5 as a " +
      "join t_3 as b " +
      "on a.pair_udid=b.udid").createOrReplaceTempView("tmp")
    spark.sql("select count(pair_udid) as cnt,scaid,hour " +
      "from tmp " +
      "group by scaid,hour")
      .write.orc(s"${output}/6")

    // Step 7: test-set size per campaign, for precision/recall denominators.
    spark.sql("select count(udid) as cnt, caid " +
      "from t_3 " +
      "group by caid")
      .write.orc(s"${output}/7")

  }

  /**
    * Parses `-Dkey=value` style program arguments into a Map.
    *
    * Only arguments starting with "-D" are considered; everything else is ignored.
    * The FIRST '=' separates key from value, so values may themselves contain '='
    * (the previous implementation split on every '=' and re-joined with "", silently
    * dropping those characters). One surrounding pair of double quotes is stripped
    * from the value.
    *
    * @param args raw command-line arguments
    * @return map of key -> value; a "-Dkey" argument without '=' maps to ""
    */
  def parseArgs(args: Array[String]): Map[String, String] = {
    args.filter(_.startsWith("-D")).map { arg =>
      val kv = arg.substring(2).split("=", 2) // limit 2 keeps '=' inside the value intact
      val key = kv(0)
      val value =
        if (kv.length > 1) kv(1).stripPrefix("\"").stripSuffix("\"")
        else ""
      key -> value
    }.toMap
  }

  /**
    * Converts a dotted-quad IPv4 string (e.g. "1.2.3.4") to its numeric value,
    * treating the leftmost octet as most significant.
    */
  def ipToLong(ip: String): Long = {
    ip.split(Pattern.quote("."))
      .reverse
      .zipWithIndex
      .map { case (octet, idx) => octet.toLong << (8 * idx) }
      .sum
  }

  /**
    * Converts a 32-bit numeric IPv4 value back to dotted-quad notation
    * (inverse of [[ipToLong]]).
    */
  def longToIp(num: Long): String = {
    Seq(24, 16, 8, 0)
      .map(shift => (num >> shift) & 0xFF)
      .mkString(".")
  }

}
