package com.gin.utils

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FileSystem, Path}
import org.apache.log4j.Logger
import java.io.{File, InputStreamReader}
import java.nio.file.Files
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._
import scala.util.control.Breaks.break


object CompareConfigs {
  private[this] val LOG = Logger.getLogger(this.getClass)

  /**
   * Builds a scopt parser and parses the command-line arguments.
   *
   * @param args        raw command-line arguments
   * @param programName program name used in the generated usage text
   * @return Some(Argument) on success, None when parsing fails (scopt reports the error)
   */
  def parser(args: Array[String], programName: String): Option[Argument] = {
    val parser = new scopt.OptionParser[Argument](programName) {
      head(programName, "1.0.0")

      opt[String]('c', "config")
        .required()
        .valueName("fileName")
        .action((x, c) => c.copy(filePath = x))
        .text("com.gin.nebula.NebulaClientExecutor fileName")

      opt[String]("partDt")
        .valueName("partDt Val")
        .action((x, c) => c.copy(partDt = x))
        .text("partDt for executing sql")

    }
    parser.parse(args, Argument())
  }

  /**
   * Loads the comparison configuration from a local path or HDFS and
   * materializes it into a [[CompareConfigs]].
   *
   * @param configPath config file location; paths starting with "hdfs://" are
   *                   read through the Hadoop FileSystem, anything else is
   *                   treated as a local file
   * @return the parsed configuration
   * @throws IllegalArgumentException if the local file does not exist, or a
   *                                  tag entry is missing a required key
   */
  def parse(configPath: String): CompareConfigs = {
    // `config` is a val initialized from an if-expression (no mutable state).
    val config: Config =
      if (configPath.startsWith("hdfs://")) {
        val hadoopConfig: Configuration = new Configuration()
        val fs: FileSystem = FileSystem.get(hadoopConfig)
        val in: FSDataInputStream = fs.open(new Path(configPath))
        val reader = new InputStreamReader(in)
        try {
          ConfigFactory.parseReader(reader)
        } finally {
          // Fix: the original never closed the HDFS stream/reader (leak).
          reader.close()
        }
      } else {
        val file = new File(configPath)
        if (!Files.exists(file.toPath)) {
          throw new IllegalArgumentException(s"${configPath} not exist")
        }
        ConfigFactory.parseFile(file)
      }

    // nebula connection settings
    val nebulaConf = config.getConfig("nebula")
    val graphAddress = getOrElse(nebulaConf, "address.graph", "")
    val metaAddress = nebulaConf.getString("address.meta")
    val timeout = getOrElse(nebulaConf, "timeout", 6000)
    val connectionRetry = getOrElse(nebulaConf, "connectionRetry", 1)
    val executeRetry = getOrElse(nebulaConf, "executeRetry", 1)
    val user = nebulaConf.getString("user")
    val pwd = nebulaConf.getString("pwd")
    val space = nebulaConf.getString("space")

    val nebulaDatabaseEntry = NebulaDatabaseEntry(graphAddress, metaAddress,
      timeout, connectionRetry, executeRetry, user, pwd, space)

    // tag (vertex) settings — the "tags" section is optional
    val tags = mutable.ListBuffer[TagConfigEntry]()
    val tagConfigs = getConfigsOrNone(config, "tags")
    for (tagList <- tagConfigs; tagConfig <- tagList.asScala) {
      if (!tagConfig.hasPath("name") ||
        !tagConfig.hasPath("noColumn") ||
        !tagConfig.hasPath("hiveReadExec")) {
        LOG.error("The `name`, `noColumn` and `hiveReadExec` must be specified")
        // Fix: the original called break() without an enclosing `breakable`,
        // which throws an uncaught BreakControl. Fail fast with a clear
        // error instead of leaking a control-flow throwable to the caller.
        throw new IllegalArgumentException(
          "The `name`, `noColumn` and `hiveReadExec` must be specified")
      }

      val tagName = tagConfig.getString("name")
      val noColumn = getOrElse(tagConfig, "noColumn", true)
      val returnCols = tagConfig.getStringList("returnCols").asScala.toList
      val vertex = tagConfig.getString("vertex")

      LOG.info(s"name ${tagName}  noColumn ${noColumn}  returnCols ${returnCols}")

      val limit = getOrElse(tagConfig, "limit", 10000)
      val partition = getOrElse(tagConfig, "partition", 3)

      LOG.info(s"limit ${limit}  partition ${partition}")

      val hiveReadExec = getOrElse(tagConfig, "hiveReadExec", "")
      val nebulaSinkHiveExec = getOrElse(tagConfig, "nebulaSinkHiveExec", "")
      // Consistency: read the optional key the same way as its siblings above.
      val exceptSinkHiveExec = getOrElse(tagConfig, "exceptSinkHiveExec", "")

      LOG.info(s"hiveReadExec ${hiveReadExec}")
      LOG.info(s"nebulaSinkHiveExec ${nebulaSinkHiveExec}")
      LOG.info(s"exceptSinkHiveExec ${exceptSinkHiveExec}")

      val entry = TagConfigEntry(tagName,
        noColumn,
        returnCols,
        vertex,
        limit,
        partition,
        hiveReadExec,
        nebulaSinkHiveExec,
        exceptSinkHiveExec)
      LOG.info(s"Tag Config: ${entry}")
      tags += entry
    }

    LOG.info(s"config ${config}")
    CompareConfigs(nebulaDatabaseEntry, tags)
  }

  /**
   * Reads the list of sub-configs at `path`.
   *
   * @param config the root config
   * @param path   the path of the config list
   * @return Some(list) when the path exists, None otherwise
   */
  private[this] def getConfigsOrNone(config: Config,
                                     path: String): Option[java.util.List[_ <: Config]] = {
    if (config.hasPath(path)) {
      Some(config.getConfigList(path))
    } else {
      None
    }
  }

  /**
   * Reads the value at `path`, falling back to `defaultValue` when absent.
   * NOTE(review): the cast is erased/unchecked — a value of the wrong type
   * surfaces as a ClassCastException at the use site (same as the original).
   *
   * @param config       the config to read from
   * @param path         the path of the value
   * @param defaultValue the value returned when `path` is missing
   * @return the configured value, or the default
   */
  private[this] def getOrElse[T](config: Config, path: String, defaultValue: T): T = {
    if (config.hasPath(path)) {
      config.getAnyRef(path).asInstanceOf[T]
    } else {
      defaultValue
    }
  }

}

/**
 * Command-line argument holder.
 *
 * @param filePath path to the configuration file
 * @param partDt   partition date used when executing SQL
 */
/**
 * Parsed command-line arguments.
 *
 * @param filePath path to the configuration file
 * @param partDt   partition date used when executing SQL
 */
final case class Argument(filePath: String = "nebula_compare.conf",
                          partDt: String = ""
                         ) {
  // Single interpolated string; renders identically to the original
  // concatenated form.
  override def toString: String =
    s"Argument:{filePath=$filePath, partDt=$partDt}"
}

/**
 * Configs 配置对象
 */
/**
 * Top-level comparison configuration: the nebula connection settings plus
 * the per-tag comparison settings.
 *
 * @param nebulaConfig nebula database connection settings
 * @param tagsConfig   configuration entries for every tag to compare
 */
case class CompareConfigs(nebulaConfig: NebulaDatabaseEntry,
                          tagsConfig: ListBuffer[TagConfigEntry]) {

  // Renders identically to the original concatenated form.
  override def toString: String =
    s"CompareConfigs:{nebulaConfig=$nebulaConfig, tagsConfig=$tagsConfig}"
}

/**
 * nebula
 *
 * @param graphAddress graphd 地址
 * @param user         用户名
 * @param pwd          密码
 * @param space        图空间
 */
/**
 * Nebula connection settings.
 *
 * @param graphAddress    graphd address
 * @param metaAddress     metad address
 * @param timeout         connection timeout — presumably milliseconds; confirm against client docs
 * @param connectionRetry number of connection retries (must be positive)
 * @param executeRetry    number of execution retries (must be positive)
 * @param user            user name
 * @param pwd             password
 * @param space           graph space
 */
case class NebulaDatabaseEntry(graphAddress: String,
                               metaAddress: String,
                               timeout: Int,
                               connectionRetry: Int,
                               executeRetry: Int,
                               user: String,
                               pwd: String,
                               space: String) {
  require(metaAddress.trim.nonEmpty, "nebula.address.meta cannot be empty")
  require(timeout > 0, "nebula.timeout cannot be negative number")
  // Fix: the retry messages wrongly reported "nebula.timeout".
  require(connectionRetry > 0, "nebula.connectionRetry cannot be negative number")
  require(executeRetry > 0, "nebula.executeRetry cannot be negative number")
  require(user.trim.nonEmpty, "nebula.user cannot be empty")
  // Fix: the pwd/space messages were swapped in the original.
  require(pwd.trim.nonEmpty, "nebula.pwd cannot be empty")
  require(space.trim.nonEmpty, "nebula.space cannot be empty")

  override def toString: String =
    s"NebulaDatabaseEntry:{" +
      s"graphAddress=$graphAddress, " +
      // Fix: this segment was mislabeled "graphAddress=" in the original.
      s"metaAddress=$metaAddress, " +
      s"timeout=$timeout, " +
      s"connectionRetry=$connectionRetry, " +
      s"executeRetry=$executeRetry, " +
      s"user=$user, " +
      s"pwd=$pwd, " +
      s"space=$space}"

}


/**
 * Per-tag comparison configuration.
 *
 * @param tagName tag (vertex label) name
 */
/**
 * Configuration for comparing one tag.
 *
 * @param tagName            tag (vertex label) name; must be non-blank
 * @param noColumn           whether to skip reading property columns
 * @param returnCols         property columns to return
 * @param vertex             vertex id column/expression; must be non-blank
 * @param limit              read limit
 * @param partition          number of partitions
 * @param hiveReadExec       SQL used to read from Hive
 * @param nebulaSinkHiveExec SQL used to sink nebula data into Hive
 * @param exceptSinkHiveExec SQL used to sink the difference into Hive
 */
case class TagConfigEntry(tagName: String,
                          noColumn: Boolean = true,
                          returnCols: List[String],
                          vertex: String,
                          limit: Int,
                          partition: Int,
                          hiveReadExec: String,
                          nebulaSinkHiveExec: String,
                          exceptSinkHiveExec: String) {
  require(tagName.trim.nonEmpty, "nebula.tags.name cannot be empty")
  require(vertex.trim.nonEmpty, "nebula.tags.vertex cannot be empty")

  // Built via mkString; renders identically to the original concatenation
  // (prefix "Tag ", ", "-separated fields, trailing ".").
  override def toString: String = {
    val fields = Seq(
      s"tagName: $tagName",
      s"noColumn: $noColumn",
      s"returnCols: $returnCols",
      s"vertex: $vertex",
      s"limit: $limit",
      s"partition: $partition",
      s"hiveReadExec: $hiveReadExec",
      s"nebulaSinkHiveExec: $nebulaSinkHiveExec",
      s"exceptSinkHiveExec: $exceptSinkHiveExec"
    )
    fields.mkString("Tag ", ", ", ".")
  }
}

/**
 * nebula
 *
 * @param nGql 查询语句-可选
 */
/**
 * One nebula statement execution entry.
 *
 * @param nGql     query statement — optional per the original comment
 * @param timeWait wait time before/between executions — unit not shown here; TODO confirm
 */
case class NebulaExecutorEntry(nGql: String,
                               timeWait: Long) {
  // Fix: the original dropped the trailing `+`, which made the nGql segment a
  // dead standalone expression — toString returned an unterminated string
  // ("NebulaExecutorEntry:{waitTime=...,") without the nGql field.
  override def toString: String =
    s"NebulaExecutorEntry:{" +
      s"waitTime=$timeWait," +
      s"nGql=$nGql}"
}

