package idmp

import org.apache.commons.lang3.StringUtils
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.graphx.{Edge, Graph, VertexId, VertexRDD}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, SparkSession}
import util.SparkUtil

import scala.collection.mutable.ListBuffer

/**
 * GraphX demo for ID mapping: decide which identifiers belong to the same
 * person by computing connected components over identifiers that co-occur
 * on the same input line.
 *
 * Input: text lines of comma-separated identifiers; identifiers appearing
 * on one line are assumed to belong to one person.
 */
object Demo {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._
    val df: Dataset[String] = spark.read.textFile("data/graphx/input")

    // Vertex RDD: in the GraphX API a vertex is a tuple (uniqueId: Long, attr).
    // FIX: iterate over ALL fields — the original bound `fields.length - 2`
    // silently dropped the last identifier on every line.
    // NOTE(review): String.hashCode as the vertex id can collide, merging
    // unrelated identifiers — acceptable for a demo, not for production.
    val vertices: RDD[(Long, String)] = df.rdd.flatMap(line => {
      val fields: Array[String] = line.split(",")
      for (i <- fields.indices if StringUtils.isNotBlank(fields(i)))
        yield (fields(i).hashCode.toLong, fields(i))
    })

    // Edge RDD: Edge(srcId, dstId, attr). Chain adjacent fields so every
    // identifier on a line falls into one connected component.
    // FIX: the upper bound for pairs (i, i+1) is `fields.length - 2`; the
    // original `fields.length - 3` never linked the last identifier into
    // the graph, so it could not be grouped with the others.
    val edges: RDD[Edge[String]] = df.rdd.flatMap(line => {
      val fields: Array[String] = line.split(",")
      for (i <- 0 to fields.length - 2
           if StringUtils.isNotBlank(fields(i)) && StringUtils.isNotBlank(fields(i + 1)))
        yield Edge(fields(i).hashCode.toLong, fields(i + 1).hashCode.toLong, "")
    })

    // Build the graph from the vertex and edge sets, then run the
    // connected-components algorithm.
    val graph: Graph[String, String] = Graph(vertices, edges)
    val graph2: Graph[VertexId, String] = graph.connectedComponents()

    // Each result vertex is (vertexId, componentId): vertices in the same
    // group share a componentId, which is the minimum vertex id of the group.
    val vertices2: VertexRDD[VertexId] = graph2.vertices

    // Collect the (id -> groupId) mapping to the driver and broadcast it.
    // NOTE(review): assumes the mapping fits in driver memory — fine for a demo.
    val idMap: collection.Map[VertexId, VertexId] = vertices2.collectAsMap()
    val bc: Broadcast[collection.Map[VertexId, VertexId]] = spark.sparkContext.broadcast(idMap)

    // Enrich the original data: prefix each line with its group id (guid),
    // looked up via the first non-blank identifier on the line.
    // NOTE(review): a line with no non-blank field would still throw here,
    // as in the original — confirm the input guarantees at least one field.
    val res: Dataset[String] = df.map(line => {
      val bcMap: collection.Map[VertexId, VertexId] = bc.value
      val name: String = line.split(",").filter(StringUtils.isNotBlank(_))(0)
      val guid: VertexId = bcMap(name.hashCode.toLong)
      guid + "," + line
    })
    res.show(10, truncate = false)
    spark.close()
  }
}
