package cn.dmp.graphx

import org.apache.spark.graphx.{Edge, Graph, VertexId}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * GraphX demo: "friend recommendation" via connected components.
 *
 * Builds a small social graph, labels every vertex with the smallest
 * vertex id of its connected component, then groups member names by
 * component and prints each friend circle.
 */
object GraphxDemo {

    def main(args: Array[String]): Unit = {

        // Job configuration — run locally using all available cores.
        val conf = new SparkConf()
          .setAppName(s"${this.getClass.getSimpleName}")
          .setMaster("local[*]")

        val sc = new SparkContext(conf)

        // Vertices: (id, (name, age)). GraphX requires Long (VertexId) ids.
        val vertices: RDD[(VertexId, (String, Int))] = sc.parallelize(Seq(
            (1L, ("涛涛", 18)),
            (2L, ("大鹏", 28)),
            (9L, ("林冲", 38)),
            (6L, ("静静", 18)),
            (133L, ("翔哥", 24)),

            (16L, ("帅帅", 16)),
            (21L, ("徐峰", 48)),
            (44L, ("腾云", 25)),
            (138L, ("田田", 19)),

            (5L, ("猛哥", 40)),
            (7L, ("刘泽", 38)),
            (158L, ("四海", 27))
        ))

        // Edges: srcId -> dstId with a dummy attribute (0); three clusters
        // centered on vertices 133, 138 and 158.
        val edges: RDD[Edge[Int]] = sc.parallelize(Seq(
            Edge(1, 133, 0),
            Edge(2, 133, 0),
            Edge(6, 133, 0),
            Edge(9, 133, 0),

            Edge(16, 138, 0),
            Edge(6, 138, 0),
            Edge(21, 138, 0),
            Edge(44, 138, 0),

            Edge(5, 158, 0),
            Edge(7, 158, 0)
        ))

        // Assemble the property graph from the vertex and edge RDDs.
        val graph = Graph(vertices, edges)

        // Connected components: each vertex is paired with the minimum
        // vertex id found in its component -> (vertexId, componentMinId).
        val components = graph.connectedComponents().vertices

        // Attach names via a join on vertex id, then collect the member
        // names of every component and print each friend group.
        components
          .join(vertices)
          .map { case (_, (componentId, (name, _))) => (componentId, List(name)) }
          .reduceByKey(_ ++ _)
          .foreach(println)

        sc.stop()
    }

}
