package com.whoami.idmagic.id

import java.util.regex.Pattern

import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel

/**
  * 1. Use contiguous time windows of activity, then take the most frequent id as the
  *    final matched id; evaluate precision and recall.
  * 2. Built specifically for the Changan Ford campaign.
  * 3. Split by ip; keep only the pair_udid with the highest frequency, and only when
  *    that highest frequency is reached by exactly one pair_udid.
  * 4. Uses spot ids (mz_spot_id).
  */
object idLink10 {

    /**
      * Entry point. Reads the ad log, babel and ip-geo tables, pairs device ids
      * that share a residential ip within a configurable time window, and writes
      * the intermediate and final match tables under `output`.
      *
      * Required `-D` arguments: `log`, `babel`, `output`, `ipgeo`, `spots`, `hours`.
      */
    def main(args: Array[String]): Unit = {
        val argsmap: Map[String, String] = parseArgs(args)
        val admlog = argsmap("log")
        val babel = argsmap("babel")
        val output = argsmap("output")
        val ipgeo = argsmap("ipgeo")
        val spots = argsmap("spots")
        val hourStr = argsmap("hours")

        // Time-window sizes (in hours) to evaluate when pairing ids.
        val hours = hourStr.split(",").map(_.toInt)

        val spark = SparkSession
                .builder
                .appName(s"${this.getClass.getSimpleName}")
                .getOrCreate()
        // Required for Dataset.map / .toDF below (implicit Encoder derivation);
        // the original code would not compile without it.
        import spark.implicits._

        // Scene filter value is data used in the SQL predicate ("residential
        // users") — must stay byte-identical to the values stored in ipgeo.
        val sceneName = "住宅用户"
        spark.read.orc(ipgeo).createOrReplaceTempView("ipgeo")
        // ips in ipgeo are numeric; convert to dotted-quad strings so they join
        // against the ad log's raw_ip column.
        // NOTE(review): getString(0).toLong assumes the ip column is a string of
        // digits — getString throws if the column is a numeric type; confirm schema.
        val homeIpDf = spark.sql(s"select distinct ip from ipgeo where scene='${sceneName}'")
        homeIpDf.map(row => longToIp(row.getString(0).toLong)).toDF("ip").persist(StorageLevel.MEMORY_AND_DISK_SER)
                .createOrReplaceTempView("homeip")

        val babeldf = spark.read.orc(babel)
        babeldf.createOrReplaceTempView("babel")

        spark.read.orc(admlog.split(","): _*).select("md5_imei", "raw_idfa", "media_uid", "mz_spot_id", "raw_ip", "timestamp").createOrReplaceTempView("a")
        // One log row intentionally becomes two rows: one keyed by md5_imei and
        // one by raw_idfa, both restricted to residential ips.
        val admdf = spark.sql("(" +
                "SELECT a.md5_imei as udid, a.mz_spot_id, a.raw_ip as ip, a.`timestamp` as ts, a.md5_imei " +
                "FROM a " +
                "JOIN homeip as c " +
                "ON a.raw_ip=c.ip) UNION ( " +
                "SELECT a.raw_idfa as udid, a.mz_spot_id, a.raw_ip as ip, a.`timestamp` as ts, a.md5_imei " +
                "FROM a " +
                "JOIN homeip as c " +
                "ON a.raw_ip=c.ip " +
                ")")
        admdf.createOrReplaceTempView("adm")

        // md5_imei is the id we try to re-identify ("udid" here); keep only the
        // configured spot ids.
        val mediasDf = spark.sql(s"SELECT a.md5_imei as udid, a.ip, a.ts, a.mz_spot_id as spid " +
                s"FROM adm as a " +
                s"WHERE a.md5_imei!='' " +
                s"AND a.mz_spot_id in ('${spots.split(",").mkString("','")}')")
                .persist(StorageLevel.MEMORY_AND_DISK_SER)

        mediasDf.createOrReplaceTempView("t_1")

        // No sampling here — take the full data set, replicated once per window size.
        spark.sql("SELECT DISTINCT udid, spid, ip, ts as ts2 from t_1").cache().createOrReplaceTempView("a")
        val hoursDf = hours.map(hour => {
            spark.sql(s"SELECT udid,spid,ip,ts2,${hour} as hour FROM a")
        }).reduce((a, b) => a.union(b))
        // repartition returns a NEW Dataset; the original code discarded the
        // result, making the call a no-op. Bind it and use it for the write/view.
        val hoursRepartitioned = hoursDf.repartition(373)
        hoursRepartitioned.write.orc(s"${output}/3")
        hoursRepartitioned.createOrReplaceTempView("t_3")
        // t_3 is the behavior set of the test population.

        // Candidate pairs: any id from a different spot seen on the same ip
        // within +- hour*3600 seconds of the source event.
        val foundIdsDf = spark.sql(s"" +
                s"SELECT a.udid as pair_udid, a.mz_spot_id as pair_spid, a.ip as pair_ip, a.ts as pair_ts, c.spid as sspid, c.udid as sudid, c.ip as sip, c.ts2 as sts, c.hour " +
                s"FROM adm as a " +
                s"JOIN t_3 as c " +
                s"ON a.ip=c.ip " +
                s"AND a.udid!='' " +
                s"AND a.ts >= (c.ts2 - c.hour*3600) AND a.ts < (c.ts2 + c.hour*3600) " +
                s"AND a.mz_spot_id!=c.spid " +
                s"").persist(StorageLevel.MEMORY_AND_DISK_SER)
        foundIdsDf.write.orc(s"${output}/4")
        foundIdsDf.createOrReplaceTempView("t_4")

        // step 5: frequency of every candidate pair per (sudid, sspid, hour, sip)
        // key, plus an isEq flag marking exact matches (pair_udid == sudid).
        var sql = s"" +
                s"SELECT sudid, sspid, hour, sip, freq, pair_udid, isEq " +
                "FROM ( " +
                    "SELECT pair_udid, count(pair_udid) as freq, sspid, hour, sudid, sip, case when (pair_udid==sudid) then 1 else 0 end as isEq  " +
                    "FROM ( " +
                        "SELECT pair_udid,sspid,hour,sudid,sip " +
                        "FROM t_4 " +
                    ") " +
                    "GROUP BY pair_udid,sspid,hour,sudid,sip " +
                ")"
        val t5Df = spark.sql(sql)
        t5Df.cache()
        t5Df.write.orc(s"${output}/5")
        t5Df.createOrReplaceTempView("t_5")

        // step 5-1: keep only rows whose frequency equals the per-key maximum.
        sql = s"SELECT sudid,sspid,hour,sip,max_freq,pair_udid " +
                s"FROM ( " +
                    s"SELECT a.sudid,a.sspid,a.hour,a.sip,a.freq as max_freq,a.pair_udid " +
                    s"FROM t_5 as a " +
                    s"WHERE a.freq=( " +
                        s"SELECT max(freq) " +
                        s"FROM t_5 as b " +
                        s"WHERE b.sudid=a.sudid " +
                        s"AND b.sspid=a.sspid AND b.hour=a.hour AND b.sip=a.sip " +
                    s") " +
                s") "
        // TODO(review): confirm b.sip=a.sip belongs in the correlation key.
        val pairedDf = spark.sql(sql).cache()
        pairedDf.write.orc(s"${output}/6")
        pairedDf.createOrReplaceTempView("t_6")

        // step 6: keep only keys where exactly ONE pair_udid reached the maximum
        // frequency (cnt = 1), i.e. the winner is unambiguous.
        sql = s"SELECT c.sudid,c.sspid,c.hour,c.sip,c.max_freq,c.pair_udid,b.cnt " +
                s"FROM t_6 as c " +
                s"JOIN ( " +
                    s"SELECT a.sudid,a.sspid,a.hour,a.sip,a.max_freq,a.cnt " +
                    s"FROM ( " +
                        s"SELECT sudid,sspid,hour,sip,max_freq,count(pair_udid) as cnt " +
                        s"FROM t_6 " +
                        s"GROUP BY sudid,sspid,hour,sip,max_freq " +
                    s") as a where a.cnt=1 " +
                s") as b " +
                s"ON b.sudid=c.sudid " +
                s"AND b.sspid=c.sspid " +
                s"AND b.hour=c.hour " +
                s"AND b.sip=c.sip " +
                s"AND b.max_freq=c.max_freq "
        val t7df = spark.sql(sql)
        t7df.write.orc(s"${output}/7")

        spark.stop()
    }

    /**
      * Parses `-Dkey=value` style arguments into a Map. Surrounding double
      * quotes on the value are stripped. Arguments not starting with `-D`
      * are ignored; a bare `-Dkey` maps to the empty string.
      */
    def parseArgs(args: Array[String]): Map[String, String] = {
        args.filter(_.startsWith("-D")).map(_.split("=", 2)).map { kv =>
            val key = kv(0).substring(2)
            // split limit = 2 keeps '=' characters inside the value intact
            // (the original mkString("") turned "-Durl=a=b" into "ab").
            val value = kv.lift(1).getOrElse("")
                    .stripPrefix("\"").stripSuffix("\"")
            (key, value)
        }.toMap
    }

    /** Converts a dotted-quad ip string (e.g. "192.168.1.1") to its Long value. */
    def ipToLong(ip: String): Long = {
        ip.split(Pattern.quote(".")).foldLeft(0L)((acc, b) => acc * 256 + b.toLong)
    }

    /** Converts a Long ip value back to its dotted-quad string form. */
    def longToIp(num: Long): String = {
        (0 to 3).map(i => (num >> 8 * i) & 0xFF).reverse.mkString(".")
    }

}
