package org.example

import org.apache.spark.graphx.{Edge, Graph, VertexId}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.SparkSession.builder


/**
 * Small GraphX demo: builds a social graph of five people, prints the
 * degree of every vertex, and exports vertices/edges as CSV with headers.
 */
object tuxing {
  def main(args: Array[String]): Unit = {
    // Create the Spark session (local mode, all cores).
    val spark = builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // 1. Vertices: (vertexId, name). Single partition keeps output deterministic.
    val users: RDD[(VertexId, String)] = sc.parallelize(Seq(
      (1L, "张三"),
      (2L, "李四"),
      (3L, "王五"),
      (4L, "小六"),
      (5L, "阿七")
    ), 1)

    // 2. Edges: (srcId, dstId, relationship label).
    val relationships: RDD[Edge[String]] = sc.parallelize(Seq(
      Edge(1L, 2L, "friend"),
      Edge(1L, 3L, "colleague"),
      Edge(2L, 3L, "friend"),
      Edge(3L, 4L, "client"),
      Edge(4L, 5L, "boss"),
      Edge(5L, 3L, "employee")
    ), 1)

    // 3. Assemble the property graph.
    val socialGraph = Graph(users, relationships)

    // 4. Compute and print per-vertex degree (in-degree + out-degree).
    val degrees = socialGraph.degrees.collect().mkString(", ")
    println(s"节点度数：$degrees")

    // 5. Export vertices and edges as CSV.
    import spark.implicits._
    val verticesDF = socialGraph.vertices.map { case (id, name) => (id, name) }
      .toDF("id", "name")
    val edgesDF = socialGraph.edges.map(e => (e.srcId, e.dstId, e.attr))
      .toDF("src", "dst", "relationship")
    verticesDF.write.option("header", "true").csv("output/vertices")
    // Fixed: option key was misspelled "herder", which Spark silently ignored,
    // so the edges CSV was previously written without a header row.
    edgesDF.write.option("header", "true").csv("output/edges")

    // spark.stop() shuts down the session and its underlying SparkContext.
    spark.stop()
  }
}
