package aggregate

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.annotation.tailrec

object Demo2 {

  /**
    * Sends the source vertex's value + 1 to the destination vertex.
    *
    * The destination's current value is carried along in the tuple's second
    * slot so the caller can later compare "new" vs "previous" per vertex and
    * detect convergence.
    *
    * @param ec edge context with vertex attributes (current, previous) and a String edge attribute
    */
  def sendMsg(ec: EdgeContext[(Int, Int), String, (Int, Int)]): Unit = {
    ec.sendToDst((ec.srcAttr._1 + 1, ec.dstAttr._1))
  }

  /**
    * Merges two messages arriving at the same vertex by taking the
    * component-wise maximum.
    *
    * The first component keeps the longest path length seen so far. The second
    * component is the receiving vertex's previous value and is identical in
    * every message sent to one vertex, so `max` is a harmless no-op there.
    */
  def mergeMsg(a: (Int, Int), b: (Int, Int)): (Int, Int) = {
    (math.max(a._1, b._1), math.max(a._2, b._2))
  }

  /**
    * Iteratively propagates the longest incoming-path length to every vertex.
    *
    * Each round aggregates messages and rebuilds the graph from them; vertices
    * that received no message (no in-edges) fall back to the default (0, 0).
    * `check` sums (new - previous) over all vertices: a positive sum means at
    * least one vertex grew this round, so another round is required.
    *
    * NOTE(review): this only terminates on acyclic graphs — a cycle would make
    * values grow forever. The demo input below is a DAG.
    *
    * @param g graph whose vertex attribute is (current value, value from the previous round)
    * @return the converged graph
    */
  @tailrec
  def sumEdgeCount(g: Graph[(Int, Int), String]): Graph[(Int, Int), String] = {
    val verts = g.aggregateMessages[(Int, Int)](sendMsg, mergeMsg)

    // Cache each intermediate graph: without this, the recursive lineage grows
    // every round and the `reduce` action below recomputes the whole chain of
    // previous rounds from scratch each time.
    val g2 = Graph(verts, g.edges, (0, 0)).cache()

    val check: Int = g2.vertices.map(x => x._2._1 - x._2._2).reduce(_ + _)

    if (check > 0)
      sumEdgeCount(g2)
    else
      g
  }

  def main(args: Array[String]): Unit = {

    // Build the runtime environment once: SparkSession owns the SparkContext,
    // so there is no need to construct a SparkContext by hand first.
    val conf = new SparkConf().setAppName("SimpleGraphX").setMaster("local")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    import spark.implicits._
    val sc = spark.sparkContext
    sc.setLogLevel("WARN")

    // Demo graph: four people and one boss, connected by a DAG of edges.
    val myVertices = sc.parallelize(Array(
      (1L, "张三"),
      (2L, "李四"),
      (3L, "王五"),
      (4L, "钱六"),
      (5L, "领导")))
    val myEdges = sc.makeRDD(Array(
      Edge(1L, 2L, "朋友"),
      Edge(2L, 3L, "朋友"),
      Edge(3L, 4L, "朋友"),
      Edge(4L, 5L, "上下级"),
      Edge(3L, 5L, "上下级")
    ))

    val myGraph = Graph(myVertices, myEdges)

    /**
      * Initialize every vertex attribute to (0, 0): the first element holds
      * the vertex's current value, the second the value from the previous
      * round (used by sumEdgeCount to detect convergence).
      */
    val initGraph: Graph[(Int, Int), String] = myGraph.mapVertices((_, _) => (0, 0))

    // Keep only the converged value (first tuple element) per vertex.
    val verticesRDD: RDD[(VertexId, Int)] = sumEdgeCount(initGraph).vertices.map(x => (x._1, x._2._1))
    println("--------------------结果如下：-------------------------")
    verticesRDD.sortByKey().toDF("vertex","max_distance").show()

    spark.stop()
  }

}
