package cn.lagou.sparkhw

import org.apache.log4j.{Level, Logger}
import org.apache.spark.graphx.{Edge, Graph, VertexId}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Graphx {
  /**
   * Small GraphX demo: builds a 3-vertex / 3-edge flight graph and prints
   * its vertices, edges, triplets, counts, and a filtered+sorted edge list.
   */
  def main(args: Array[String]): Unit = {
    // Silence Spark's verbose INFO logging so demo output is readable.
    Logger.getLogger("org").setLevel(Level.ERROR)
    val conf: SparkConf = new SparkConf().setAppName(this.getClass.getCanonicalName)
      .setMaster("local[*]")

    val sc = new SparkContext(conf)

    // Define vertices: (VertexId, airport code)
    val vertexArray: Array[(VertexId, String)] = Array((1L, "SFO"), (2L, "ORD"), (3L, "DFW"))
    val vertexRDD: RDD[(VertexId, String)] = sc.makeRDD(vertexArray)

    // Define edges: (srcId, dstId, attr) — attr is the distance between airports.
    val edgeArray: Array[Edge[Int]] = Array(Edge(2L, 1L, 1800), Edge(2L, 3L, 800), Edge(3L, 1L, 1400))
    val edgeRDD: RDD[Edge[Int]] = sc.makeRDD(edgeArray)

    // Build the graph from the vertex and edge RDDs.
    val graph: Graph[String, Int] = Graph(vertexRDD, edgeRDD)

    // Property operations
    println("******************所有的顶点************************")
    graph.vertices.foreach(println)

    // FIX: banner previously said "所有的顶点" (vertices) but this prints edges.
    println("******************所有的边************************")
    graph.edges.foreach(println)

    println("******************所有的triplets************************")
    graph.triplets.foreach(println)

    println(s"顶点数：${graph.numVertices}")
    println(s"边数：${graph.numEdges}")

    println("******************距离大于1000,并按边降序排列************************")
    // FIX: was sortBy(+_.attr), which sorts ascending (unary + is a no-op);
    // negating the key yields the descending order the banner promises.
    graph.edges.filter(_.attr > 1000).sortBy(-_.attr).foreach(println)

    sc.stop()
  }
}
