package cn.doitedu.graphx

import cn.doitedu.commons.utils.SparkUtil
import org.apache.spark.graphx.{Edge, Graph, VertexId}
import org.apache.spark.sql.Dataset

import scala.collection.mutable.ListBuffer

object Demo {

  /**
   * GraphX demo: reads lines of the form "session,id1,id2,fee", builds a graph
   * whose vertices are the two identifier columns, runs connected components to
   * assign each identifier a global unique id (guid = smallest VertexId in its
   * component), then tags the original records with that guid.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkUtil.getSparkSession("图计算demo")
    import spark.implicits._

    val ds: Dataset[String] = spark.read.textFile("testdata/graphx/x.txt")

    // Parse each CSV line into (session, id1, id2, fee).
    val ds2 = ds.map(line => {
      val split = line.split(",")
      (split(0), split(1), split(2), split(3))
    })

    // Build the vertex set from the raw data: one vertex per identifier.
    // NOTE(review): String.hashCode is only 32 bits — distinct identifiers can
    // collide on the same VertexId and be wrongly merged; a 64-bit hash would
    // be safer for real data. Widening to Long is made explicit here (the
    // original relied on silent Int -> Long conversion).
    val vertexRdd = ds2.rdd.flatMap(tp =>
      Seq(
        (tp._2.hashCode.toLong, tp._2),
        (tp._3.hashCode.toLong, tp._3)
      )
    )
    vertexRdd.foreach(println)

    println("---------------------------------")

    // Build the edge set: one edge per record linking its two identifiers.
    val edgesRdd = ds2.rdd.map(tp =>
      Edge(tp._2.hashCode.toLong, tp._3.hashCode.toLong, "")
    )

    // Assemble the vertex and edge sets into a GraphX Graph object.
    val graph = Graph(vertexRdd, edgesRdd)

    // Run the connected-components algorithm: each vertex is labelled with the
    // smallest VertexId in its component, which serves as the guid.
    val resultGraph: Graph[VertexId, String] = graph.connectedComponents()

    // The result vertices form an (id -> guid) dictionary.
    val vertices = resultGraph.vertices
    vertices.foreach(println)

    vertices.map(tp => tp._1 + "," + tp._2).saveAsTextFile("testdata/graph_out/day01")

    // Use the dictionary to tag the original records with their guid.
    // The map is small enough to broadcast to all executors.
    val idToId: collection.Map[Long, Long] = vertices.collectAsMap()
    val bc = spark.sparkContext.broadcast(idToId)

    ds2.map(tp => {
      val dict: collection.Map[VertexId, VertexId] = bc.value
      // Every id1 was inserted as a vertex, so a lookup miss should be
      // impossible; fall back to the vertex's own id instead of crashing
      // with Option.get (the original threw NoSuchElementException here).
      val key = tp._2.hashCode.toLong
      val guid = dict.getOrElse(key, key)
      (tp._1, tp._2, tp._3, tp._4, guid)
    }).toDF("场次", "标识1", "标识2", "出场费", "guid")
      .show(100, false)

    spark.close()

  }

}
