package cn.dmp.report

import org.apache.spark.graphx.{Edge, Graph}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * GraphX demo: find "friend communities" by computing the connected
 * components of a small social graph, then print each community's
 * members grouped by component id.
 */
object CommFriends {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setAppName("cn.xiao.graphx.CommFriends")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(conf)
    try {
      // Vertices: (userId, (name, age)). Vertex ids must be Long (VertexId).
      val uv: RDD[(Long, (String, Int))] = sc.parallelize(Seq(
        (1L, ("张飞", 23)),
        (2L, ("关羽", 25)),
        (6L, ("刘备", 30)),
        (9L, ("黄忠", 40)),
        (133L, ("马超", 32)),

        (16L, ("吕布", 23)),
        (21L, ("董卓", 25)),
        (44L, ("高俅", 31)),
        (138L, ("貂蝉", 20)),

        (5L, ("曹操", 30)),
        (7L, ("小乔", 19)),
        (158L, ("凤雏", 21))
      ))

      // Edges: each Edge(srcId, dstId, attr) links two vertices.
      // The Int attribute is unused here (always 0); it could carry a
      // relationship label, e.g. Edge(1, 133, "cousin").
      val ue: RDD[Edge[Int]] = sc.parallelize(Seq(
        Edge(1L, 133L, 0),
        Edge(2L, 133L, 0),
        Edge(9L, 133L, 0),
        Edge(6L, 133L, 0),

        Edge(6L, 138L, 0),
        Edge(16L, 138L, 0),
        Edge(44L, 138L, 0),
        Edge(21L, 138L, 0),

        Edge(5L, 158L, 0),
        Edge(7L, 158L, 0)
      ))

      // Build the graph and run connected components: every vertex is
      // labeled with the smallest vertex id in its component, e.g.
      //   (7,5) (16,1) (44,1) (21,1) (133,1) (1,1) (9,1) (5,5)
      val graph = Graph(uv, ue)
      val commonV = graph.connectedComponents().vertices

      // Join component labels back to the user attributes and group the
      // (name, age) pairs of each community under its component id.
      // Note: grouping (name, age) as a tuple keeps the element type
      // List[(String, Int)] instead of the widened List[Any] that
      // List(name, age) would produce.
      // foreach(println) is the action that actually triggers the job.
      uv.join(commonV)
        .map { case (_, ((name, age), cmId)) => (cmId, List((name, age))) }
        .reduceByKey(_ ++ _)
        .foreach(println)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
