package xubo.wangcaifeng.love

import org.apache.log4j.{Level, Logger}
import org.apache.spark.graphx.{Edge, Graph}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object CommonFirendsPlus {
  Logger.getLogger("org").setLevel(Level.WARN)

  /**
    * Groups people into "friend circles" using GraphX connected components.
    *
    * Input: `data/people.txt`, one tab-separated line per person — column 0 is the
    * person, the remaining columns are that person's friends. Anyone reachable
    * through a chain of friendships ends up in the same component; each component
    * is printed as `(componentId, Set(names...))`.
    */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName("共同好友")
      .setMaster("local[*]")
    val sc = new SparkContext(sparkConf)
    try {
      val rawdata: RDD[Array[String]] = sc.textFile("data/people.txt")
        .map(_.split("\t"))

      // Vertices: one (id, name) pair per mention; GraphX deduplicates ids on
      // graph construction. NOTE(review): vertex ids are String.hashCode, so two
      // distinct names with colliding hash codes would be merged into one vertex —
      // confirm this is acceptable for the data set, or derive ids via
      // zipWithUniqueId instead.
      val uv: RDD[(Long, String)] = rawdata.flatMap(arr =>
        arr.map(name => (name.hashCode.toLong, name)))

      // Edges: owner (column 0) -> each friend. Map over arr.tail, not arr, so we
      // don't emit a redundant self-loop arr(0) -> arr(0) for every input line
      // (self-loops don't change connected components but bloat the edge set).
      val ue: RDD[Edge[Int]] = rawdata.flatMap(arr =>
        arr.tail.map(name => Edge(arr(0).hashCode.toLong, name.hashCode.toLong, 0)))

      // Build the graph; vertices listed in uv but absent from ue still appear
      // as isolated single-person components.
      val graph: Graph[String, Int] = Graph(uv, ue)

      // connectedComponents labels every vertex with the smallest vertex id
      // reachable from it, so all related people share one component id.
      val cc = graph.connectedComponents().vertices

      // Join against graph.vertices (already deduplicated) rather than the raw uv
      // RDD, so names appearing on several input lines don't fan out extra join
      // rows. Sets make the final name lists duplicate-free; collect() brings the
      // result to the driver so println output appears there even when the job is
      // not running in local mode.
      cc.join(graph.vertices)
        .map { case (_, (componentId, name)) => (componentId, Set(name)) }
        .reduceByKey(_ ++ _)
        .collect()
        .foreach(println)
    } finally {
      // Always release the SparkContext, even if the job throws.
      sc.stop()
    }
  }
}
