package com.yanduo.graphx

import org.apache.spark.graphx.{Edge, Graph, VertexId}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Common-friend recommendation demo using GraphX: vertices in the same
  * connected component share a mutual friend (the hub vertex).
  *
  * @author Gerry chan
  * 2020/5/30 15:58
  * @version 1.0
  */
object ComFriends {

  /**
    * Builds a small social graph, computes connected components, and prints
    * each component's members as a common-friend group.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")

    val sc = new SparkContext(sparkConf)

    // Guard with try/finally so the SparkContext is always released,
    // even when a job fails mid-flight.
    try {
      // Vertices: (vertexId, (name, age)).
      val uv: RDD[(VertexId, (String, Int))] = sc.parallelize(Seq(
        (0L, ("zhangsan", 23)),
        (2L, ("lisi", 25)),
        (6L, ("王五", 26)),
        (9L, ("万鑫", 40)),
        (133L, ("严峻", 31)),

        (16L, ("崔斌", 34)),
        (21L, ("张贤", 15)),
        (44L, ("高俊涛", 20)),
        (138L, ("chengfang", 19)),

        (5L, ("章现已", 26)),
        (7L, ("杨雅国", 19)),
        (158L, ("张晓东", 26))
      ))

      // Edges: the third argument is the edge attribute; 0 here because this
      // demo treats friendship as undirected/unweighted.
      val ue: RDD[Edge[Int]] = sc.parallelize(Seq(
        Edge(0L, 133L, 0),
        Edge(2L, 133L, 0),
        Edge(9L, 133L, 0),
        Edge(6L, 133L, 0),

        Edge(6L, 138L, 0),
        Edge(16L, 138L, 0),
        Edge(44L, 138L, 0),
        Edge(21L, 138L, 0),

        Edge(5L, 158L, 0),
        Edge(7L, 158L, 0)
      ))

      // Assemble the property graph.
      val graph = Graph(uv, ue)

      // connectedComponents() labels every vertex with the smallest
      // vertexId in its component: (userId, componentId).
      val commonV = graph.connectedComponents().vertices

      /**
        * Join vertex attributes with component labels, then group members
        * per component:
        *   uv      --> (userId, (name, age))
        *   commonV --> (userId, componentId)
        * reduceByKey(_ ++ _) concatenates the singleton lists, yielding e.g.
        *   (0, List(("zhangsan", 23), ("王五", 26), ("严峻", 31), ...))
        *   (5, List(("章现已", 26), ("杨雅国", 19), ...))
        */
      val friendGroups = uv.join(commonV)
        .map { case (_, ((name, age), cmId)) => (cmId, List((name, age))) }
        .reduceByKey(_ ++ _)

      // BUG FIX: the original code never invoked an action, so the lazy RDD
      // pipeline above was dead code and nothing was ever computed.
      // Materialize and print each common-friend group.
      friendGroups.collect().foreach { case (cmId, members) =>
        println(s"component $cmId -> ${members.mkString(", ")}")
      }
    } finally {
      sc.stop()
    }
  }

}
