package main.test.GraphFramesAPI

import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.graphframes.GraphFrame

/**
 * Demonstrates the suite of standard graph algorithms shipped with
 * GraphFrames (BFS, connected components, strongly connected components,
 * label propagation, PageRank) on a small toy social graph.
 *
 * GraphFrames provides the same algorithms as GraphX plus some new ones; see
 * https://graphframes.github.io/graphframes/docs/_site/api/scala/index.html#org.graphframes.lib.package
 * for details. Some algorithms are currently wrappers around GraphX
 * implementations, so they may not be more scalable than GraphX; more will be
 * migrated to native GraphFrames implementations in the future.
 */
object GraphAlgorithms {
  def main(args: Array[String]): Unit = {
    // Build a local Spark context; local[*] uses all available cores.
    val sparkConf = new SparkConf()
    sparkConf.setAppName("createGraph").setMaster("local[*]")
    val sc = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sc)

    // Ensure the context is always stopped, even if an algorithm throws,
    // so the local cluster resources are released.
    try {
      // Vertex DataFrame: one row per person (id, name, age).
      val v = sqlContext.createDataFrame(List(
        ("a", "Alice", 34),
        ("b", "Bob", 36),
        ("c", "Charlie", 30),
        ("d", "David", 29),
        ("e", "Esther", 32),
        ("f", "Fanny", 36)
//        ("g", "Gabby", 60)
      )).toDF("id", "name", "age")

      // Edge DataFrame: directed relationships (src, dst, relationship).
      val e = sqlContext.createDataFrame(List(
        ("a", "b", "friend"),
        ("b", "c", "follow"),
        ("c", "b", "follow"),
        ("f", "c", "follow"),
        ("e", "f", "follow"),
        ("e", "d", "friend"),
        ("d", "a", "friend"),
        ("a", "e", "friend")
      )).toDF("src", "dst", "relationship")

      // Create a GraphFrame from the vertex and edge DataFrames.
      val g = GraphFrame(v, e)

      // ---------------------------------------------------------------
      // Breadth-first search (BFS)
      //
      // Finds the shortest path(s) from one vertex (or set of vertices) to
      // another vertex (or set of vertices). The beginning and end vertices
      // are specified as Spark DataFrame expressions.
      // ---------------------------------------------------------------

      // Search from "Esther" for users of age < 32.
      val paths = g.bfs.fromExpr("name = 'Esther'").toExpr("age < 32").run()
      paths.show()

      // BFS again, with an edge filter and a maximum path length.
      val paths2: DataFrame = g.bfs.fromExpr("name = 'Esther'").toExpr("age < 32")
          .edgeFilter("relationship != 'friend'")
          .maxPathLength(3).run()
      paths2.show()

      // ---------------------------------------------------------------
      // Connected components
      //
      // Computes the connected-component membership of each vertex and
      // returns a graph with each vertex assigned a component ID.
      //
      // NOTE: With GraphFrames 0.3.0 and later, the default connected
      // components algorithm requires a Spark checkpoint directory. Users
      // can revert to the old algorithm via
      // connectedComponents.setAlgorithm("graphx").
      // ---------------------------------------------------------------
      sc.setCheckpointDir("./ckpt/")
      val result = g.connectedComponents.run()
      result.select("id", "component").orderBy("component").show()

      // ---------------------------------------------------------------
      // Strongly connected components (SCC)
      //
      // An SCC of a directed graph is a maximal set of vertices that are
      // mutually reachable (together with the connecting edges). Computes
      // the SCC of each vertex and returns a graph with each vertex
      // assigned to the SCC containing it.
      // ---------------------------------------------------------------
      val sccResult = g.stronglyConnectedComponents.maxIter(10).run()
      sccResult.select("id", "component").orderBy("component").show()

      // ---------------------------------------------------------------
      // Label Propagation Algorithm (LPA)
      //
      // Runs static label propagation for detecting communities. Each node
      // starts in its own community; at every superstep, nodes send their
      // community affiliation to all neighbors and adopt the mode (most
      // frequent) affiliation of incoming messages.
      //
      // LPA is a standard community-detection algorithm: computationally
      // cheap, although (1) convergence is not guaranteed and (2) one can
      // end up with trivial solutions (all nodes in a single community).
      //
      // Question (review): how does GraphFrames implement LPA internally,
      // and how should a result like the following be interpreted?
      // +---+-------------+
      // | id|        label|
      // +---+-------------+
      // |  f| 670014898176|
      // |  a| 670014898176|
      // |  e|1460288880640|
      // |  d| 670014898176|
      // |  c|1382979469312|
      // |  g| 146028888064|
      // |  b|1047972020224|
      // +---+-------------+
      // ---------------------------------------------------------------
      val lpaResult: DataFrame = g.labelPropagation.maxIter(5).run()
      lpaResult.select("id", "label").show()

      // ---------------------------------------------------------------
      // PageRank
      //
      // Two implementations are available:
      // 1. The org.apache.spark.graphx.Graph interface with
      //    aggregateMessages, running PageRank for a fixed number of
      //    iterations — selected by setting maxIter.
      // 2. The org.apache.spark.graphx.Pregel interface, running PageRank
      //    until convergence — selected by setting tol.
      //
      // Both support non-personalized and personalized PageRank, where
      // setting a sourceId personalizes the results for that vertex.
      // ---------------------------------------------------------------

      // Run PageRank until convergence to tolerance "tol".
//      val pagerankRes = g.pageRank.resetProbability(0.15).tol(0.01).run()

      // Display resulting pagerank and final edge weights.
      // Note that the displayed pagerank may be truncated, e.g. missing the
      // E notation. In Spark 1.5+, use show(truncate = false) to avoid it.
//      pagerankRes.vertices.select("id", "pagerank").show(truncate = false)
//      pagerankRes.edges.select("src","dst","weight").show(truncate = false)

      // Run PageRank for a fixed number of iterations.
      // NOTE(review): the values computed this way can differ from GraphX's,
      // because a GraphFrame may contain isolated vertices; this method also
      // assigns a PageRank to isolated vertices, which shifts the other
      // vertices' values to keep relative magnitudes stable.
      val pagres2: GraphFrame = g.pageRank.resetProbability(0.15).maxIter(1).run()
      pagres2.edges.show()
      pagres2.vertices.show()

      // Run PageRank personalized for vertex "a".
      // When a sourceId is set, only that vertex's initial value is 1 and
      // all others are 0 before the PageRank iterations begin.
      val pagres3: GraphFrame = g.pageRank.resetProbability(0.15).maxIter(1).sourceId("a").run()
      pagres3.edges.show()
      pagres3.vertices.show()
    } finally {
      // Fix: the original never stopped the SparkContext, leaking the
      // local cluster's resources on every run.
      sc.stop()
    }
  }

}
