package com.gin.nebula

import com.facebook.thrift.protocol.TCompactProtocol
import com.vesoft.nebula.connector.connector.NebulaDataFrameReader
import com.vesoft.nebula.connector.{NebulaConnectionConfig, ReadNebulaConfig}
import org.apache.log4j.BasicConfigurator
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.slf4j.LoggerFactory
import com.gin.utils.{Argument, CompareConfigs, NebulaClientUdf, NebulaDatabaseEntry, TagConfigEntry}

object NebulaReadHiveSinkCompare {

  private val LOG = LoggerFactory.getLogger(this.getClass)

  BasicConfigurator.configure()

  /**
   * Entry point. For each configured tag, reads vertices from Nebula and the
   * corresponding rows from Hive, optionally persists the Nebula snapshot to
   * Hive, and optionally persists the set difference (vertex ids present in
   * Nebula but absent from Hive) to Hive.
   *
   * Example arguments:
   *   -c "E:\code\learn\gin\spark-nebula-compare\src\main\resources\nebula_compare.conf" --partDt "2023-05-20"
   */
  def main(args: Array[String]): Unit = {

    val PROGRAM_NAME = "NebulaCompareExecutor"

    // Fail fast on missing arguments instead of logging and continuing into
    // an inevitably failing parse. (Original logged but did not exit.)
    if (args.isEmpty) {
      LOG.error("args is empty")
      sys.exit(-1)
    }

    val sparkConf = new SparkConf
    sparkConf
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array[Class[_]](classOf[TCompactProtocol]))
      .setAppName(PROGRAM_NAME)

    // NOTE(review): the hard-coded "local" master overrides any master passed
    // via spark-submit; kept for backward compatibility — confirm before
    // running this job on a cluster.
    val sparkSession: SparkSession = SparkSession
      .builder()
      .master("local")
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    // Spark log level: DEBUG / INFO / ERROR
    //sparkSession.sparkContext.setLogLevel("INFO")

    // Ensure the Spark session is released even when the comparison fails —
    // the original leaked it on any exception before close().
    try {
      // Custom argument parsing
      val options = CompareConfigs.parser(args, PROGRAM_NAME)
      val udfArgs: Argument = options match {
        case Some(config) => config
        case _ =>
          LOG.error("Argument parse failed")
          sys.exit(-1)
      }
      val configs = CompareConfigs.parse(udfArgs.filePath)
      val partDt = udfArgs.partDt
      LOG.info(s"Configs ${configs} \n partDt ${partDt}")
      val nebulaConfig = configs.nebulaConfig

      // Graph connection for executing statements (currently unused)
      //val graphProvider = new NebulaClientUdf(nebulaConfig.graphAddress, nebulaConfig.timeout)

      // Process each tag configuration. (A plain for-loop is already a no-op
      // on an empty collection, so no nonEmpty guard is needed.)
      for (tagConfig <- configs.tagsConfig) {

        // Hive side of the comparison
        val tagHiveDf = sparkSession.sql(tagConfig.hiveReadExec)
        tagHiveDf.show(numRows = 10, truncate = false)

        // Nebula side of the comparison: read this tag's vertices
        val (connConfig, readConfig) = getNebulaReadNebulaConfig(nebulaConfig, tagConfig)
        val tagNebulaDf = sparkSession.read.nebula(connConfig, readConfig).loadVerticesToDF()
        tagNebulaDf.printSchema()
        tagNebulaDf.show(numRows = 10, truncate = false)

        // Optionally persist the Nebula snapshot into a Hive table
        if (tagConfig.nebulaSinkHiveExec.nonEmpty) {
          val tagTempView = s"${tagConfig.tagName}_temp_view"
          tagNebulaDf.createOrReplaceTempView(tagTempView)
          //sparkSession.sql(s"SELECT * FROM $tagTempView").show(numRows = 10, truncate = false)

          sparkSession.sql(tagConfig.nebulaSinkHiveExec)
        }

        // Optionally persist vertex ids present in Nebula but missing in Hive
        if (tagConfig.exceptSinkHiveExec.nonEmpty) {
          val diffDF = tagNebulaDf.select("_vertexId").except(tagHiveDf.select(tagConfig.vertex))
          diffDF.show(numRows = 10, truncate = false)

          val tagExceptTempView = s"${tagConfig.tagName}_except_temp_view"
          diffDF.createOrReplaceTempView(tagExceptTempView)
          //sparkSession.sql(s"SELECT * FROM $tagExceptTempView").show(numRows = 10, truncate = false)

          sparkSession.sql(tagConfig.exceptSinkHiveExec)
        }
      }
    } finally {
      sparkSession.close()
    }
  }

  /**
   * Builds the Nebula connection config and vertex-read config for one tag.
   *
   * @param nebulaDatabaseEntry cluster-level settings: meta address, timeout,
   *                            retry counts, and the graph space to read from
   * @param tagConfigEntry      tag-level settings: label name, column
   *                            selection, row limit, and read partition count
   * @return the connection config paired with the vertex read config
   */
  def getNebulaReadNebulaConfig(nebulaDatabaseEntry: NebulaDatabaseEntry, tagConfigEntry: TagConfigEntry): (NebulaConnectionConfig, ReadNebulaConfig) = {
    val config =
      NebulaConnectionConfig
        .builder()
        .withMetaAddress(nebulaDatabaseEntry.metaAddress)
        .withTimeout(nebulaDatabaseEntry.timeout)
        .withConenctionRetry(nebulaDatabaseEntry.connectionRetry)
        .withExecuteRetry(nebulaDatabaseEntry.executeRetry)
        .build()

    val nebulaReadVertexConfig: ReadNebulaConfig = ReadNebulaConfig
      .builder()
      .withSpace(nebulaDatabaseEntry.space)
      .withLabel(tagConfigEntry.tagName)
      .withNoColumn(tagConfigEntry.noColumn)
      .withReturnCols(tagConfigEntry.returnCols)
      .withLimit(tagConfigEntry.limit)
      .withPartitionNum(tagConfigEntry.partition)
      .build()
    (config, nebulaReadVertexConfig)
  }

}
