package idmp

import idmp.LogDataIDMapping.extract
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.graphx.{Edge, Graph, VertexId, VertexRDD}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import util.SparkUtil

/**
 * ID-mapping job (V2) that merges today's identifier graph with the previous
 * day's idmp dictionary, so that each user keeps a stable GUID across days.
 */
object LogDataIDMappingV2 {
  /**
   * Entry point. Pipeline:
   *   1. Parse today's logs into identifier groups; build a vertex/edge graph.
   *   2. Union with yesterday's dictionary (parsed as vertices + edges).
   *   3. Run connected components; each component id is a candidate GUID.
   *   4. If any member of a component appeared in yesterday's dictionary,
   *      reuse yesterday's GUID so the mapping stays stable.
   *   5. Write today's (identifier-hash, guid) dictionary as parquet.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._

    // Today's raw logs; `extract` turns each line into an array of identifier strings.
    val todayLog: Dataset[String] = spark.read.textFile("E:\\doit项目资料\\DW12-文档资料\\day03\\yiee_logs\\2020-01-12")
    val web_ids: RDD[Array[String]] = extract(todayLog)

    // Vertices: each identifier keyed by its hashCode widened to Long.
    // NOTE(review): hashCode collisions would merge unrelated identifiers into one
    // vertex — acceptable for this exercise, but confirm for production data volumes.
    val vertices: RDD[(Long, String)] = web_ids.flatMap(line => line.map(a => (a.hashCode.toLong, a)))

    // Edges: connect every unordered pair of identifiers co-occurring in one log line.
    val edgesTmp: RDD[Edge[String]] = web_ids.flatMap(line =>
      for (i <- 0 to line.length - 2; j <- i + 1 until line.length)
        yield Edge(line(i).hashCode.toLong, line(j).hashCode.toLong, ""))
    // Keep only edges observed more than twice, filtering out accidental co-occurrence.
    val edges: RDD[Edge[String]] = edgesTmp.map((_, 1)).reduceByKey(_ + _).filter(_._2 > 2).map(_._1)

    // Parse yesterday's idmp dictionary (col 0: identifier hash, "guid": GUID)
    // into point and edge collections.
    val preDayIdmp: DataFrame = spark.read.parquet("data/idmp/2020-01-11")
    val preDayIdmpVertices: RDD[(Long, String)] = preDayIdmp.rdd.map({
      // The GUID column is not needed for the vertex set; only the identifier hash.
      case Row(idFlag: Long, _: Long) =>
        (idFlag, "")
    })
    val preDayIdmpEdges: RDD[Edge[String]] = preDayIdmp.rdd.map(line => {
      val idFlag: Long = line.getAs[Long](0)
      val guid: Long = line.getAs[Long]("guid")
      Edge(idFlag, guid, "")
    })

    // Union today's and yesterday's vertex/edge sets and build the combined graph,
    // then run connected components; the component id is the candidate GUID.
    val graph: Graph[String, String] = Graph(vertices.union(preDayIdmpVertices), edges.union(preDayIdmpEdges))
    val graph2: Graph[VertexId, String] = graph.connectedComponents()
    val res_tuples: VertexRDD[VertexId] = graph2.vertices

    // Compare the result against yesterday's dictionary and adjust GUIDs:
    // 1. Collect yesterday's mapping to the driver and broadcast it, so executors
    //    can look up previous GUIDs without a shuffle join.
    val preIdmpMap: collection.Map[Long, Long] = preDayIdmp.rdd.map(row => {
      (row.getAs[Long](0), row.getAs[Long](1))
    }).collectAsMap()
    val bc: Broadcast[collection.Map[VertexId, VertexId]] = spark.sparkContext.broadcast(preIdmpMap)

    // 2. Group today's component result by candidate GUID; the Iterable of members
    //    holds all identifier hashes that landed in the same component.
    val guidAndSets: RDD[(VertexId, Set[VertexId])] = res_tuples
      .map(tp => (tp._2, tp._1))
      .groupByKey()
      .mapPartitions(iter => {
        val idMap: collection.Map[VertexId, VertexId] = bc.value
        iter.map { case (candidateGuid, members) =>
          // The first member found in yesterday's dictionary decides the GUID
          // (same short-circuit semantics as a find-and-stop loop); fall back to
          // today's component id when no member existed yesterday.
          val guid: VertexId = members
            .collectFirst(Function.unlift(idMap.get))
            .getOrElse(candidateGuid)
          (guid, members.toSet)
        }
      })

    // Flatten (guid -> set of ids) into one (id, guid) row per identifier.
    val todayIdmpResult: RDD[(VertexId, VertexId)] =
      guidAndSets.flatMap(tp => for (elem <- tp._2) yield (elem, tp._1))

    // Persist today's dictionary as a single parquet file for tomorrow's run.
    todayIdmpResult.coalesce(1).toDF("biaoshi_hashcode", "guid").write.parquet("data/idmp/2020-01-12")

    spark.close()
  }
}
