package com.edata.bigdata.nebula

import com.edata.bigdata.annotations.Edata_Loader
import com.edata.bigdata.basic.Loader
import com.vesoft.nebula.connector.ReadNebulaConfig
import com.vesoft.nebula.connector.connector.NebulaDataFrameReader
import org.apache.spark.sql.{DataFrame, SparkSession}

@Edata_Loader(target = "NELOADER")
@Edata_Loader(target = "NELOADER")
class NeLoader extends Loader with NeConnector {
  override var sourceType: String = _
  override var session: SparkSession = _

  // Accumulates read options (space, label, columns, limit, partitions) set
  // via the setReader* methods; find() finishes it with credentials and builds.
  private var readConfigBuilder = ReadNebulaConfig.builder()

  /**
   * Reads vertices or edges from Nebula into a Spark DataFrame, using the
   * connection config from [[NeConnector]] and the options accumulated via
   * the setReader* methods.
   *
   * @param args args(0) selects the schema type: "vertex" or "edge"
   *             (case-insensitive).
   * @return a DataFrame of the requested vertices or edges.
   * @throws IllegalArgumentException if no schema type is supplied, or it is
   *                                  neither "vertex" nor "edge".
   */
  override def find(args: String*): DataFrame = {
    // args(0) on an empty varargs list would throw IndexOutOfBoundsException;
    // fail fast with a message that names the expected argument instead.
    require(args.nonEmpty, "find() requires a schema type argument: \"vertex\" or \"edge\"")

    val connConfig = createNebulaConnConfig()
    // Both branches need the same credentials, so finish the builder once here.
    val readConfig = readConfigBuilder
      .withUser(USER)
      .withPasswd(PASSWORD)
      .build()

    args(0).toLowerCase match {
      case "vertex" =>
        session.read.nebula(connConfig, readConfig).loadVerticesToDF()
      case "edge" =>
        session.read.nebula(connConfig, readConfig).loadEdgesToDF()
      case other =>
        // Previously an unmatched value raised an opaque scala.MatchError.
        throw new IllegalArgumentException(
          s"Unknown schema type '$other'; expected \"vertex\" or \"edge\"")
    }
  }

  /** Sets the Nebula graph space to read from. */
  def setReaderSpace(space_name: String): Unit = {
    readConfigBuilder = readConfigBuilder.withSpace(space_name)
  }

  /** Sets the tag (vertex) or edge-type name to read. */
  def setReaderLabel(label: String): Unit = {
    readConfigBuilder = readConfigBuilder.withLabel(label)
  }

  /** When true, reads only IDs and no property columns. */
  def setReaderNoColumn(flag: Boolean): Unit = {
    readConfigBuilder = readConfigBuilder.withNoColumn(flag)
  }

  /** Restricts the read to the given property columns. */
  def setReaderReturnCols(cols: List[String]): Unit = {
    readConfigBuilder = readConfigBuilder.withReturnCols(cols)
  }

  /** Sets the per-scan record limit for the read. */
  def setReaderLimit(num: Int): Unit = {
    readConfigBuilder = readConfigBuilder.withLimit(num)
  }

  /** Sets the number of Spark partitions used for the read. */
  def setReaderPartitionNum(num: Int): Unit = {
    readConfigBuilder = readConfigBuilder.withPartitionNum(num)
  }
}
