package com.gin.graphx

import com.facebook.thrift.protocol.TCompactProtocol
import com.vesoft.nebula.connector.connector.NebulaDataFrameReader
import com.vesoft.nebula.connector.ssl.SSLSignType
import com.vesoft.nebula.connector.{NebulaConnectionConfig, ReadNebulaConfig}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.slf4j.LoggerFactory

object NebulaToGraphxReadDemo {

  private val LOG = LoggerFactory.getLogger(this.getClass)

  // NebulaGraph meta-server host; the meta port (9559) is appended where the
  // connection config is built.
  val nebula_ip: String = "172.16.41.143"

  /**
   * Demo entry point: builds a local Spark session with Kryo serialization
   * (the Nebula thrift client's `TCompactProtocol` must be registered with
   * Kryo), then reads vertices and edges from NebulaGraph into GraphX RDDs.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf
    sparkConf
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array[Class[_]](classOf[TCompactProtocol]))
    val spark = SparkSession
      .builder()
      .master("local")
      .config(sparkConf)
      .getOrCreate()

    // Print error-level logs only. NOTE: this is why the read methods below
    // deliberately log their results at ERROR level — anything lower would be
    // suppressed.
    spark.sparkContext.setLogLevel("ERROR")

    /*val vidOrderByList = spark.read
      .option("header", true)
      .option("delimiter", ",")
      .csv("spark-graphx/src/main/resources/vid_order_by_list.csv")
    vidOrderByList.show()*/

    readVertexGraph(spark)
    readEdgeGraph(spark)

    spark.close()
    sys.exit()

  }

  /**
   * Shared Nebula connection settings (meta address, timeout, retry count),
   * previously duplicated in both read methods.
   *
   * NOTE: `withConenctionRetry` is the connector's actual (misspelled) API
   * method name — do not "fix" the spelling.
   */
  private def connectionConfig(): NebulaConnectionConfig =
    NebulaConnectionConfig
      .builder()
      .withMetaAddress(nebula_ip + ":9559")
      .withTimeout(6000)
      .withConenctionRetry(2)
      .build()

  /**
   * Reads up to 10 `t_es_enterprise` vertices (columns tid/tname/uniscid)
   * from the `xwqy_element_shareholding_graph` space into a GraphX vertex RDD
   * and logs a sample record plus the count.
   */
  def readVertexGraph(spark: SparkSession): Unit = {
    LOG.info("start to read graphx vertex")
    val nebulaReadVertexConfig: ReadNebulaConfig = ReadNebulaConfig
      .builder()
      .withSpace("xwqy_element_shareholding_graph")
      .withLabel("t_es_enterprise")
      .withNoColumn(false)
      .withReturnCols(List("tid", "tname", "uniscid"))
      .withLimit(10)
      .withPartitionNum(2)
      .build()

    val vertexRDD =
      spark.read.nebula(connectionConfig(), nebulaReadVertexConfig).loadVerticesToGraphx()
    // Logged at ERROR so the output survives the ERROR-only log level set in
    // main(); parameterized form keeps the style consistent with the count log.
    LOG.error("vertex rdd first record: {}", vertexRDD.first())
    LOG.error("vertex rdd count: {}", vertexRDD.count())
  }

  /**
   * Reads up to 10 `e_es_shareholder_to_enterprise` edges (columns
   * invtype/invtype_en/conprop/condate/updated) from the
   * `xwqy_element_shareholding_graph` space into a GraphX edge RDD and logs a
   * sample record plus the count.
   */
  def readEdgeGraph(spark: SparkSession): Unit = {
    LOG.info("start to read graphx edge")
    val nebulaReadEdgeConfig: ReadNebulaConfig = ReadNebulaConfig
      .builder()
      .withSpace("xwqy_element_shareholding_graph")
      .withLabel("e_es_shareholder_to_enterprise")
      .withNoColumn(false)
      .withReturnCols(List("invtype", "invtype_en", "conprop", "condate", "updated"))
      .withLimit(10)
      .withPartitionNum(10)
      .build()
    val edgeRDD =
      spark.read.nebula(connectionConfig(), nebulaReadEdgeConfig).loadEdgesToGraphx()
    // ERROR level keeps the output visible under setLogLevel("ERROR"); the
    // missing space after the colon in the original message is fixed here for
    // consistency with the vertex log.
    LOG.error("edge rdd first record: {}", edgeRDD.first())
    LOG.error("edge rdd count: {}", edgeRDD.count())
  }

}
