package aggregate

import org.apache.spark.graphx.{Edge, Graph, VertexId, VertexRDD}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

object Demo1 {

  /**
    * GraphX demo: for each vertex, count the "fans" pointing at it
    * (neighbors on an in-edge whose age attribute is smaller than its own)
    * and print the average age of those fans.
    */
  def main(args: Array[String]): Unit = {

    // Set up the runtime environment. Build the SparkSession once and derive
    // the SparkContext from it, instead of constructing a separate
    // SparkConf/SparkContext first — two independent entry points into the
    // same underlying context is a known source of confusion and conflicts.
    val spark: SparkSession = SparkSession.builder()
      .appName("SimpleGraphX")
      .master("local")
      .getOrCreate()
    import spark.implicits._
    val sc = spark.sparkContext
    sc.setLogLevel("WARN")

    // Build the graph: vertices are (vertexId, age).
    val myVertices: RDD[(VertexId, Double)] = sc.parallelize(Array(
      (1L, 20.0),
      (2L, 30.0),
      (3L, 25.0),
      (4L, 32.0),
      (5L, 21.0),
      (6L, 28.0)))

    // Define the relationship edges (src -> dst); the String attribute is
    // just a label and is not used by the aggregation below.
    val myEdges: RDD[Edge[String]] = sc.parallelize(Array(
      Edge(2L, 1L, "1"),
      Edge(2L, 4L, "2"),
      Edge(3L, 2L, "3"),
      Edge(3L, 6L, "4"),
      Edge(4L, 1L, "5"),
      Edge(5L, 2L, "6"),
      Edge(5L, 3L, "7"),
      Edge(5L, 6L, "8")))

    val myGraph = Graph(myVertices, myEdges)

    /**
      * Aggregate per-vertex fan statistics. `triplet` is an
      * EdgeContext[VD, ED, A]: if the source vertex's age is smaller than
      * the destination's, the source counts as a fan of the destination,
      * so send the tuple A = (count = 1, fanAge) to the destination.
      * The merge function sums counts and ages pairwise.
      */
    val vertices: VertexRDD[(Int, Double)] = myGraph.aggregateMessages[(Int, Double)](
      triplet => {
        if (triplet.srcAttr < triplet.dstAttr) {
          triplet.sendToDst((1, triplet.srcAttr))
        }
      },
      (a, b) => (a._1 + b._1, a._2 + b._2)
    )

    // (fanCount, totalFanAge) -> average fan age per vertex.
    val avgage: VertexRDD[Double] = vertices.mapValues((id, value) =>
      value match {
        case (count, totalage) => totalage / count
      })

    println("--------------------他的粉丝的平均年纪为：-------------------------")
    avgage.sortByKey().toDF("id", "avg_age").show()
    spark.stop()

  }

}
