package profile.tagcombine

import java.io

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StructField, StructType}
import org.apache.spark.sql.{DataFrame, Encoders, Row, SparkSession}

import scala.collection.mutable

/**
  * Created by hunter.coder 涛哥  
  * 2019/4/23 16:26
  * 交流qq:657270652
  * Version: 1.0
  * 更多学习资料：https://blog.csdn.net/coderblack/
  * Description: 聚合多模块标签
  **/
object TagCombiner {

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    // Default local test paths; overridden when all three CLI args are supplied.
    var cmTagPath = "G:\\data_shark\\testdata\\usertags\\out-cmlogtags"
    var dspTagPath = "G:\\data_shark\\testdata\\usertags\\out-dsptags"
    var idmpPath = "G:\\data_shark\\testdata\\usertags\\idmapping"
    // BUG FIX: the original destructured args into NEW local vals inside the if-block
    // (`val Array(cmTagPath, ...) = args`), which shadowed the outer vars and never
    // updated them — CLI arguments were silently ignored. Assign to the outer vars.
    if (args.length >= 3) {
      cmTagPath = args(0)
      dspTagPath = args(1)
      idmpPath = args(2)
    }

    val spark = SparkSession.builder().appName("TagCombiner").master("local").getOrCreate()
    import spark.implicits._

    val cmTags: DataFrame = spark.read.parquet(cmTagPath)
    cmTags.show(10, false)
    val dspTags: DataFrame = spark.read.parquet(dspTagPath)
    dspTags.show(10, false)

    /**
      * Load the idmapping data (one line per guid: `guid\001id1,id2,...`)
      * and collect it to the driver as a map of guid -> ids.
      */
    val idmp = spark.read.textFile(idmpPath)
    val idmpMap: collection.Map[String, List[String]] = idmp.map(line => {
      val split = line.split("\001")
      val guid = split(0)
      val ids = split(1).split(",")
      (guid, ids.toList)
    }).rdd.collectAsMap()

    // Invert guid -> ids into id -> guid for direct lookup by device id.
    val idmap = new mutable.HashMap[String, String]()
    for ((guid, ids) <- idmpMap; id <- ids) {
      idmap.put(id, guid)
    }

    // Broadcast the inverted mapping so every executor can resolve ids locally.
    val bc = spark.sparkContext.broadcast(idmap)

    // Attach the unified graph id (gid) to each cm-tag record.
    val cmTagsWithGid = cmTags.rdd
      .map(row => {
        val phone = row.getAs[String]("phone")
        val idsTags = row.getAs[Map[String, Seq[Row]]]("idsTags")
        // NOTE(review): columns 2 and 3 are read positionally — assumes the parquet
        // schema is (phone, idsTags, deviceTags, goodsKwTags); confirm against the writer.
        val deviceTags: collection.Map[String, Seq[(String, Double)]] = row.getAs[Map[String, Seq[(String, Double)]]](2)
        val goodsKwTags: collection.Map[String, Seq[(String, Double)]] = row.getAs[Map[String, Seq[(String, Double)]]](3)

        /**
          * Resolve the unified graph id (gid) via the record's imei hashes;
          * fall back to the phone number when no imei matches the mapping.
          */
        // BUG FIX: the original used getOrElse("imei", null) and then mapped over the
        // result, which throws NPE for records without an "imei" entry. Default to an
        // empty Seq instead. (Also removed an unused `imeiLst` buffer.)
        val imeiSet = idsTags.getOrElse("imei", Seq.empty[Row])
          .map(r => r.getAs[String](0).hashCode.toString)
          .toSet

        val idmap = bc.value
        val intersectId = idmap.keySet.intersect(imeiSet)
        // headOption avoids the partial toList(0) access on an empty intersection.
        val gid = intersectId.headOption.flatMap(idmap.get).getOrElse(phone)

        (gid, idsTags, deviceTags, goodsKwTags)
      })

    // Convert the dsp-tag DataFrame into a keyed RDD; dsp records already carry a guid.
    val dspTagsWithGid = dspTags.rdd.map(row => {
      val gid = row.getAs[String]("guid")
      val idsTags = row.getAs[Map[String, Seq[(String, Double)]]]("idsTags")
      val dspKwTags = row.getAs[Map[String, Seq[(String, Double)]]]("dspKwTags")
      (gid, idsTags, dspKwTags)
    })

    // Preview the two RDDs that are about to be merged.
    cmTagsWithGid.take(10).foreach(println)
    println("--------------------------------------")
    dspTagsWithGid.take(10).foreach(println)
    println("--------------------------------------")

    // Full outer join on gid so records from either side survive.
    val joinRdd = cmTagsWithGid.map(tp => (tp._1, tp)).fullOuterJoin(dspTagsWithGid.map(tp => (tp._1, tp)))

    // cm-tag records with no matching dsp record.
    val cmOnlyRdd = joinRdd.filter(_._2._2.isEmpty).map(_._2._1)

    // dsp-tag records with no matching cm record.
    val dspOnlyRdd = joinRdd.filter(_._2._1.isEmpty).map(_._2._2)

    // Records present on both sides — the ones that need merging.
    val toMergeRdd = joinRdd.filter(tp => tp._2._1.nonEmpty && tp._2._2.nonEmpty)

    cmOnlyRdd.take(10).foreach(println)
    println("--------------------------------------")
    dspOnlyRdd.take(10).foreach(println)
    println("--------------------------------------")
    toMergeRdd.take(10).foreach(println)

    // TODO: merge the matched cm and dsp tags.
    //toMergeRdd.

    spark.close()
  }

}
