package SparkGraphXInAction

import org.apache.spark._
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.graphx._
import org.apache.spark.graphx.Graph._
import org.apache.spark.rdd.RDD
import org.apache.spark.graphx.util.GraphGenerators

import scala.reflect.ClassTag

/**
  * Created by Administrator on 2017/5/7 0007.
  */
object TestQuanJuJuLeiXiShu {

  /**
    * Computes the global clustering coefficient of a graph:
    *
    *   3 * (number of triangles) / (number of connected triples, i.e. "wedges")
    *
    * Edge direction is ignored: each edge contributes both endpoints to the
    * other's neighbour set.
    *
    * @param g the input graph; vertex/edge attributes are not used
    * @return the global clustering coefficient, or 0.0 for a graph with no wedges
    */
  def clusteringCoefficient[VD: ClassTag, ED: ClassTag](g: Graph[VD, ED]): Double = {
    // aggregateMessages builds a VertexRDD where each vertex carries the set
    // of its neighbours' IDs (sent in both directions, so the graph is treated
    // as undirected).
    val numWedges =
      g.aggregateMessages[Set[VertexId]](
        et => {
          et.sendToSrc(Set(et.dstId))
          et.sendToDst(Set(et.srcId))
        },
        (a, b) => a ++ b)
      .map { case (vid, nbrs) =>
        // Drop a potential self-loop (vid appearing in its own neighbour set),
        // then count the wedges centred at this vertex: C(degree, 2).
        // Accumulate in Long to avoid Int overflow on high-degree vertices.
        val s = (nbrs - vid).size
        s.toLong * (s - 1) / 2
      }
      // fold instead of reduce: reduce throws on an empty RDD (edgeless graph).
      .fold(0L)(_ + _)

    if (numWedges == 0L) 0.0
    else {
      // triangleCount records each triangle once at each of its three vertices,
      // so the vertex-wise sum equals 3 * (number of triangles) — exactly the
      // numerator of the global clustering coefficient formula.
      val triangleSum = g.triangleCount().vertices.map(_._2.toLong).fold(0L)(_ + _)
      triangleSum / numWedges.toDouble
    }
  }

  def main(args: Array[String]): Unit = {
    // Silence noisy framework logging.
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // Local single-threaded Spark — this is a demo/driver program.
    val conf = new SparkConf().setAppName("SimpleGraphX").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      // Facebook ego-network edge list (SNAP format: "src dst" per line).
      val g = GraphLoader.edgeListFile(sc, "data/facebook/0.edges")

      // Feature file: column 0 is the vertex id, column 78 a binary attribute.
      // Split each line once instead of twice.
      val feat = sc.textFile("data/facebook/0.feat").map { line =>
        val cols = line.split(" ")
        (cols(0).toLong, cols(78).toInt == 1)
      }

      // Attach the boolean attribute to every vertex. NOTE(review): u.get
      // throws if a vertex from the edge list has no row in the feature file —
      // original behaviour kept; consider u.getOrElse(false) if that can occur.
      // Cached because three separate jobs below read it.
      val g2 = g.outerJoinVertices(feat)((_, _, u) => u.get).cache()

      // Clustering coefficient of the full graph and of each attribute class.
      val cg2 = clusteringCoefficient(g2)
      println(cg2)

      val cg2_0 = clusteringCoefficient(g2.subgraph(_ => true, (_, vd) => vd))
      println(cg2_0)

      val cg2_1 = clusteringCoefficient(g2.subgraph(_ => true, (_, vd) => !vd))
      println(cg2_1)
    } finally {
      sc.stop() // release Spark resources even if a stage fails
    }
  }
}
