package cn.dmp.graphx

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.graphx.{Edge, Graph}
import org.apache.spark.rdd.RDD


/**
 * Merges user tags across identities using GraphX connected components.
 *
 * Input (args(0)): text file, one record per line, fields tab-separated.
 * A field containing ':' is a tag ("tagName:weight"); any other field is a
 * user name. All names on one line belong to the same person; the tags of
 * the line are attached to the first name only, so that weights are not
 * double-counted when components are merged.
 *
 * Output: prints, for each connected component (i.e. each real person),
 * the set of all their names and their aggregated (tag, totalWeight) list.
 *
 * NOTE: the original version wrapped all of this in a nested `def main`
 * that was never invoked, so the job silently did nothing — fixed here.
 */
object UserTagsMergeDemo {

  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    require(args.length >= 1, "Usage: UserTagsMergeDemo <inputPath>")

    val conf: SparkConf = new SparkConf()
      .setAppName("cn.xiao.graphx.CommFriends")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(conf)

    try {
      val data: RDD[Array[String]] = sc.textFile(args(0)).map(_.split("\t"))

      // Vertex set: (hash of name, (name, tags)).
      // Only the FIRST name of each line carries the parsed tag list; the
      // remaining names get an empty list so tags are counted exactly once
      // per input line when components are reduced below.
      val uv: RDD[(Long, (String, List[(String, Int)]))] = data.flatMap { arr =>
        // Split the record into names (no ':') and "tag:weight" fields.
        val userNames = arr.filter(_.indexOf(":") == -1)
        val userTags = arr.filter(_.indexOf(":") != -1).map { kvs =>
          val kv = kvs.split(":")
          (kv(0), kv(1).toInt)
        }.toList

        // Positional check (index == 0) rather than value equality, so a
        // duplicate of the first name later on the line does not receive a
        // second copy of the tag list.
        userNames.zipWithIndex.map {
          case (name, 0) => (name.hashCode.toLong, (name, userTags))
          case (name, _) => (name.hashCode.toLong, (name, List.empty[(String, Int)]))
        }
      }

      // Edge set: connect the first name of each line to every name on the
      // line (including a self-loop, which is harmless for connectivity).
      val ue: RDD[Edge[Int]] = data.flatMap { arr =>
        val userNames = arr.filter(_.indexOf(":") == -1)
        userNames.map(name => Edge(userNames(0).hashCode.toLong, name.hashCode.toLong, 0))
      }

      val graph = Graph(uv, ue)

      // connectedComponents labels every vertex with the lowest vertex id in
      // its component; all identities of one person share that label.
      val cc = graph.connectedComponents().vertices

      cc.join(uv).map {
        case (_, (cmId, (name, tags))) => (cmId, (Seq(name), tags))
      }.reduceByKey { (left, right) =>
        val mergedNames = left._1 ++ right._1
        // Concatenate both tag lists and sum the weights per tag key.
        val mergedTags = (left._2 ++ right._2)
          .groupBy(_._1)
          .mapValues(_.foldLeft(0)(_ + _._2))
          .toList
        (mergedNames, mergedTags)
      }
        .map(t => (t._2._1.toSet, t._2._2))
        .foreach(println) // local[*] master, so executor println is visible

    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
