package com.wtw.graph

import org.apache.spark.graphx.{Edge, Graph, VertexRDD}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * GraphX `aggregateMessages` demo: for each vertex, compute the average age
 * of its followers that are younger than the vertex itself.
 *
 * A follower is the source of a directed edge (src follows dst). Only
 * followers strictly younger than the followed vertex are counted.
 */
object aggregateMessageDemo1 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("SimpleGraphX").setMaster("local")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // Vertices: (id, (name, age))
    val vertexArray: Array[(Long, (String, Int))] = Array(
      (1L, ("Alice", 28)),
      (2L, ("Bob", 27)),
      (3L, ("Charlie", 65)),
      (4L, ("David", 42)),
      (5L, ("Ed", 55)),
      (6L, ("Fran", 50))
    )

    // Edges: src follows dst; the Int attribute is unused by this demo.
    // "true age" marks edges whose follower (src) is younger than the dst,
    // i.e. the edges that contribute to the average below.
    val edgeArray = Array(
      Edge(2L, 1L, 7), // true 27
      Edge(2L, 4L, 2), // true 27
      Edge(3L, 2L, 4), // false
      Edge(3L, 6L, 3), // false
      Edge(4L, 1L, 1), // false
      Edge(5L, 2L, 2), // false
      Edge(5L, 3L, 8), // true 55
//      Edge(6L, 3L, 8), // true 55 + 50
      Edge(5L, 6L, 3)  // false
    )

    val vertexs: RDD[(Long, (String, Int))] = sc.parallelize(vertexArray)
    val edges = sc.parallelize(edgeArray)
    val graph = Graph(vertexs, edges)

    // (2) Average age of the (younger) followers of each vertex.
    //
    // The type parameter of aggregateMessages is the message type, which is
    // also the parameter type of the merge function. Each qualifying edge
    // sends (followerAge, 1) to its destination; the merge function sums
    // ages and counts per vertex.
    //
    // Fix vs. the original: edges whose follower is NOT younger used to send
    // a useless (0, 0) message, which (a) wasted shuffle traffic and
    // (b) made vertices with no qualifying follower show up with a bogus
    // average of 0. Sending nothing excludes them naturally, so the count
    // can never be zero and no division-by-zero guard is needed.
    val res: VertexRDD[(Int, Int)] = graph.aggregateMessages[(Int, Int)](
      triplet =>
        if (triplet.srcAttr._2 < triplet.dstAttr._2)
          triplet.sendToDst((triplet.srcAttr._2, 1)),
      (a, b) => (a._1 + b._1, a._2 + b._2)
    )

//    res.collect().foreach(println(_))
//    println("----------------follower count per vertex-----------------")

    // Use floating-point division: the original's integer division silently
    // truncated the fractional part of the average age.
    res.map { case (_, (ageSum, followerCount)) => ageSum.toDouble / followerCount }
      .collect()
      .foreach(println)

    // Release the SparkContext (was leaked in the original).
    sc.stop()
  }
}
