package SparkGraphXInAction

import org.apache.spark._
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.graphx._
import org.apache.spark.graphx.Graph._
import org.apache.spark.rdd.RDD
import org.apache.spark.graphx.util.GraphGenerators

/**
  * Created by Administrator on 2017/5/3 0003.
  */
object TestReadRDF {

  /**
    * Reads a tab-separated RDF file and builds a property graph from it.
    *
    * Expected columns per line (0-based): column 1 = subject name,
    * column 2 = predicate, column 3 = object name — TODO confirm against
    * the actual yagoTypes.tsv layout; column 0 is ignored here.
    *
    * @param sc       active SparkContext used to read the file
    * @param filename path to the tab-separated RDF file
    * @return a Graph whose vertices carry the entity name (String) and
    *         whose edges carry the predicate (String)
    */
  def readRdf(sc: SparkContext, filename: String): Graph[String, String] = {
    // Guard against malformed/short lines: without the filter, indexing
    // x(3) below would throw ArrayIndexOutOfBoundsException.
    val r = sc.textFile(filename).map(_.split("\t")).filter(_.length >= 4)
    // Build a dictionary of distinct vertex names -> unique Long ids
    // (same technique as a mergeGraphs-style helper): take the union of
    // subjects and objects, de-duplicate, and number them.
    val v = r.map(_(1)).union(r.map(_(3))).distinct.zipWithIndex
    // Swap (name, id) -> (id, name) to form the vertex RDD, then join the
    // triples against the dictionary twice to translate subject and object
    // names into vertex ids, yielding Edge(subjectId, objectId, predicate).
    Graph(v.map(_.swap),
            r.map(x => (x(1), (x(2), x(3))))               // key by subject name
              .join(v)                                      // attach subject id
              .map(x => (x._2._1._2, (x._2._2, x._2._1._1))) // re-key by object name
              .join(v)                                      // attach object id
              .map(x => Edge(x._2._1._1, x._2._2, x._2._1._2)))
  }

  /**
    * Entry point: builds the graph from data/yagoTypes.tsv and prints the
    * number of vertices.
    */
  def main(args: Array[String]): Unit = {
    // Silence verbose Spark / Jetty logging.
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // Local-mode configuration. NOTE(review): spark.driver.memory normally
    // must be set before the driver JVM starts (e.g. via spark-submit);
    // setting it programmatically here only takes effect in some local
    // setups — confirm it is actually honored.
    val conf = new SparkConf().setAppName("SimpleGraphX").setMaster("local")
    conf.set("spark.driver.memory", "4g")
    conf.set("spark.driver.cores", "8")
    val sc = new SparkContext(conf)
    try {
      val gTypes = readRdf(sc, "data/yagoTypes.tsv")
      println(gTypes.numVertices)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
