package cn.sheep.dolphin.graphxlearning

import cn.sheep.dolphin.common.DolphinAppComm
import org.apache.spark.graphx.{Edge, Graph}
import org.apache.spark.rdd.RDD

/**
  * author: old sheep
  * QQ: 64341393 
  * Created 2018/12/3
  */
object GoodFriendsPlusV2 {

	/**
	  * Merges user tags across "same person" groups.
	  *
	  * Each input line looks like: "nameA\tnameB\t...,tag1:cnt1,tag2:cnt2,...".
	  * All names on a line belong to the same user; connected components over
	  * lines that share a name discover the full identity groups, whose tag
	  * counts are then summed and printed.
	  *
	  * @param args optional: args(0) overrides the input path (defaults to the
	  *             original hard-coded sample file for backward compatibility).
	  */
	def main(args: Array[String]): Unit = {

		val sc = DolphinAppComm.createSparkContext("用户标签合并")

		// Input path: first CLI arg if supplied, otherwise the original default.
		val inputPath = if (args.nonEmpty) args(0) else "F:\\20180715\\DAY14\\simple.data"

		// Read the raw data file.
		val simpleData = sc.textFile(inputPath)

		// Build the vertex RDD[(VertexId, (name, tags))].
		// Only the FIRST name of each line carries the line's tag array; the other
		// names get an empty array so tags are not double-counted when the groups
		// are merged below.
		// NOTE(review): name.hashCode.toLong is used as the vertex id — distinct
		// names can collide on hashCode. Acceptable for a demo; use a proper id
		// assignment (e.g. zipWithUniqueId) in production.
		val vertices: RDD[(Long, (String, Array[(String, Int)]))] = simpleData.flatMap(line => {
			val fields = line.split(",")
			val names = fields.head.split("\t")
			// Parse the "tag:count" pairs into (tag, count) tuples.
			val tags: Array[(String, Int)] = fields.tail.map(kv => {
				val keyValue = kv.split(":")
				(keyValue.head, keyValue.tail.head.toInt)
			})

			names.map(name => {
				if (name.equals(names.head)) { // is this the first person on the line?
					(name.hashCode.toLong, (name, tags))
				} else (name.hashCode.toLong, (name, Array.empty[(String, Int)]))
			})
		})

		// Build the edge RDD: connect the first person of each line to every OTHER
		// person on the line. (The original code also emitted a redundant self-loop
		// firstPerson -> firstPerson; it is skipped here — the connected-components
		// result is unaffected because Graph() keeps every vertex from `vertices`.)
		val edges: RDD[Edge[String]] = simpleData.flatMap(line => {
			val names = line.split(",").head.split("\t")
			val firstPerson = names.head
			names.tail.map(name => Edge(firstPerson.hashCode().toLong, name.hashCode().toLong, ""))
		})

		// Assemble the graph object.
		val graph = Graph(vertices, edges)

		// Connected components: each vertex is labelled with the smallest vertex id
		// of its component, which becomes the grouping key below.
		val cc = graph.connectedComponents().vertices

		// Join each vertex's (name, tags) with its component id, then merge per
		// component: union the names and sum the counts of identical tags.
		vertices.join(cc).map{
			case (_, ((name, tags), minId)) => (minId, (Set(name), tags))
		}.reduceByKey((a, b) => {
			val mergedNames = a._1 ++ b._1
			// Sum the counts of identical tags across the two partial results.
			val mergedTags = (a._2 ++ b._2).groupBy(_._1).mapValues(_.map(_._2).sum).toArray
			(mergedNames, mergedTags)
		}).foreach(tp => println(tp._2._1.mkString(",") + "\t" + tp._2._2.mkString("$$$")))
		// NOTE(review): foreach(println) runs on the executors — on a cluster the
		// output lands in executor logs, not the driver console.

		sc.stop()
	}

}
