package com.kingsoft.dc.khaos.module.spark.source

import java.sql.{Date, Timestamp}
import java.text.SimpleDateFormat
import java.time.Duration
import java.util
import java.util.Properties
import com.alibaba.fastjson.JSON
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.extender.meta.model.table.DmTable
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants._
import com.kingsoft.dc.khaos.module.spark.metadata.source._
import com.kingsoft.dc.khaos.module.spark.util.{FileUtils, MetaUtils}
import com.kingsoft.dc.khaos.util.{KhaosConstants, Logging}
import org.apache.kafka.clients.consumer.{KafkaConsumer, OffsetAndTimestamp}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession}
import org.apache.spark.util.LongAccumulator
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.{compact, parse, render}

import scala.collection.JavaConversions._
import scala.collection.immutable.HashMap
import scala.collection.{immutable, mutable}
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import com.alibaba.druid.filter.config.ConfigTools
import com.kingsoft.dc.khaos.extender.meta.model.ds.KafkaConnect
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.spark.SparkFiles

import java.io.{FileWriter, IOException}


/**
 * create by yansu on 2020/07/02 15:08
 */
class KafkaSource extends SourceStrategy with Logging with Serializable {
  // Parsed module configuration (set in source())
  private var _kafkaConfig: KafkaSourceConfig = null
  // Topic/table metadata entity (set in getKafkaConnection())
  private var tableEntiy: DmTable = null
  // Kafka consumer properties (built by initKafkaConf())
  private var properties: Properties = null
  // Kafka cluster connection info (set in getKafkaConnection())
  private var kafkaConnect: KafkaConnect = null

  // Number of records each task consumes
  private var taskRunNumber: Long = 10000
  private var moduleId = ""

  // Number of tasks
  private var taskNumber: Int = 0

  // Number of topic partitions
  private var topicPartitionSize: Int = 0
  // Topic data format ("csv" by default)
  private var topicType: String = "csv"
  // Field delimiter for CSV-formatted data
  private var topicChar: String = ","
  // Accumulator counting malformed records (shared via the values channel)
  private var abnormalAccumulator: LongAccumulator = _
  private var session: SparkSession = _

  // Defaults for the MySQL offset store below; overridden by loadProperties().
  // SECURITY NOTE(review): hard-coded host and credentials should be moved to
  // configuration / a secret store rather than living in source code.
  private var _host = "mysql-share.internal-bigdata.com"
  private var _port = "13306"
  private var _username = "di"
  private var _password = "Kingsoft.com123"

  // True when the datasource is the platform-managed (default) one
  private var defaultDs = true
  // Kerberos principal / keytab / krb5.conf paths (filled when krbs is enabled)
  private var principal = ""
  private var keytab = ""
  private var krb5Conf = ""
  private var _dbname = "di_manage"
  private var _tblname = "di_offset_test"
  private var _session_timout = "300000"
  private var _deafult_api_timeout = 600000
  private var _poll_timeout = 1000
  private var _retry_times = 3
  private var _jdbc_driver = ""
  private var _jdbc_url_param = ""
  private var _common_public_kde_realm = "HADOOP.COM"

  /**
   * Extracts data from Kafka into a DataFrame.
   *
   * Parses the JSON module config, checks read authorization on the topic,
   * builds the consumer properties, then runs the partitioned consumption.
   * Sets several instance fields (_kafkaConfig, moduleId, session,
   * abnormalAccumulator, properties) that later methods rely on.
   *
   * @param kc         runtime context (Spark session, config, values channel)
   * @param module_id  id of this module instance
   * @param config     JSON configuration deserialized into KafkaSourceConfig
   * @param dependence upstream dependency (not used by this source)
   * @return DataFrame of the consumed records
   */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {

    implicit val formats = DefaultFormats
    val kafkaConfig = parse(config, true).extract[KafkaSourceConfig]
    this._kafkaConfig = kafkaConfig
    this.moduleId = module_id
    session = kc.sparkSession
    abnormalAccumulator = kc._valuesChannel.getValues[LongAccumulator](KhaosConstants.KHAOS_ABNORMALACCUMULATOR)
    // Kafka read-authorization check (throws if the job may not read the topic)
    MetaUtils.checkReadAuth(kc,
      kafkaConfig.db_name,
      kafkaConfig.table_name,
      kafkaConfig.extender.auth.clazz,
      compact(render(kafkaConfig.extender.auth.params)))
    // Build the Kafka consumer configuration (also resolves cluster metadata)
    properties = initKafkaConf(kc)
    kafkaConsumers(kc)
  }

  /**
   * Builds the Kafka consumer Properties for this job.
   *
   * Side effects: loads module config (loadProperties), resolves the cluster
   * address and topic metadata (getKafkaConnection), and — when the datasource
   * has Kerberos enabled — ships keytab/krb5 files to the executors and sets
   * JVM-global krb5 system properties.
   *
   * @return Properties ready to construct a KafkaConsumer
   */
  def initKafkaConf(kc: KhaosContext): Properties = {
    loadProperties(kc)
    val hosts = getKafkaConnection(kc)
    val groupID = kc.conf.getString(SchedulerConstants.DATA_DEV_ID)
    val props = new Properties

    // Null-safe Kerberos flag: null or empty getUseKrbs means "disabled".
    // (The previous `!getUseKrbs.isEmpty && getUseKrbs.toBoolean` threw an NPE
    // when the metadata returned null.)
    val useKrbs = Option(kafkaConnect.getUseKrbs).exists(v => v.nonEmpty && v.toBoolean)
    if (useKrbs) {
      // Proxy user; referenced again when writing the JAAS config
      principal = kc.conf.getString("proxy.user")
      if (defaultDs) {
        // Platform-internal Kerberos: keytab already lives in the staging dir
        log.info("kafka 开启kerberos认证")
        keytab = principal + ".keytab"
        val keyTabAllPath = System.getenv("SPARK_YARN_STAGING_DIR") + "/" + keytab
        log.info("keyTabAllPath===>" + keyTabAllPath)
        kc.sparkSession.sparkContext.addFile(keyTabAllPath)
        System.setProperty(KafkaConstants.JAVA_SECURITY_KRB5_CONF, kc.conf.getString("proxy.krb5.conf"))
      } else {
        // External Kerberos: decode base64 keytab/krb5 from metadata into the
        // staging dir, then distribute the keytab to executors
        log.info("kafka 开启外部kerberos认证")
        val keytabFile = kafkaConnect.getKeytabFile
        val krb5File = kafkaConnect.getKrb5File

        keytab = System.getenv("SPARK_YARN_STAGING_DIR") + "/kafka_source_" + moduleId + ".keytab"
        krb5Conf = System.getenv("SPARK_YARN_STAGING_DIR") + "/kafka_source_krb5_" + moduleId + ".conf"
        log.info("keyTabPath===>" + keytab)
        log.info("krb5Path===>" + krb5Conf)
        FileUtils.decoderBase64File(keytabFile, keytab, FileSystem.newInstance(new Configuration()))
        FileUtils.decoderBase64File(krb5File, krb5Conf, FileSystem.newInstance(new Configuration()))
        kc.sparkSession.sparkContext.addFile(keytab)
        System.setProperty(KafkaConstants.JAVA_SECURITY_KRB5_CONF, krb5Conf)
      }

      props.put(KafkaConstants.SECURITY_PROTOCOL, "SASL_PLAINTEXT")
      props.put(KafkaConstants.SASL_MECHANISM, "GSSAPI")
      props.put(KafkaConstants.SASL_KERBEROS_SERVICE_NAME, "kafka")
      props.put(KafkaConstants.REQUEST_TIMEOUT_MS, s"${_deafult_api_timeout}")
    }

    props.put(KafkaConstants.BOOTSTRAP_SERVER, hosts)
    props.put(KafkaConstants.GROUP_ID, groupID)
    props.put(KafkaConstants.SESSION_TIMEOUT_MS, _session_timout)
    props.put(KafkaConstants.AUTO_OFFSET_RESET, KafkaConstants.EARLIEST)
    props.put(KafkaConstants.KEY_DESERIALIZER, classOf[StringDeserializer].getName)
    props.put(KafkaConstants.VALUE_DESERIALIZER, classOf[StringDeserializer].getName)
    props
  }


  /**
   * Writes a JAAS config file for Kafka GSSAPI login and points the JVM at it
   * via java.security.auth.login.config.
   *
   * @param role "driver" (file placed next to the localized keytab) or
   *             anything else for executors (file placed in the container
   *             working dir, $PWD)
   * @return the path of the JAAS file that was written
   * @throws IOException if the JAAS file cannot be written
   */
  def initJAASConf(role: String): String = {
    // Resolve keytab location and principal depending on the datasource kind
    val (keytabPath, principalRealm) =
      if (defaultDs)
        (SparkFiles.get(keytab), principal + "@" + _common_public_kde_realm)
      else
        (SparkFiles.get(keytab.split("/").last), kafkaConnect.getPrincipal)

    log.info("keytabPath===>" + keytabPath)
    log.info("principalRealm===>" + principalRealm)
    log.info("role===>" + role)
    val jaasPath =
      if (role == "driver")
        keytabPath.substring(0, keytabPath.indexOf(s"${principal}")) + principal + "/kafka_source_jaas.conf"
      else
        System.getenv("PWD") + "/kafka_source_jaas.conf"
    log.info("jaasPath===>" + jaasPath)

    val jaasconf =
      s"""|KafkaClient {
          |  com.sun.security.auth.module.Krb5LoginModule required
          |  useTicketCache=false
          |  useKeyTab=true
          |  principal="$principalRealm"
          |  serviceName="kafka"
          |  keyTab="$keytabPath";
          |};
          |""".stripMargin
    log.info("jaasconf===>" + jaasconf)

    var writer: FileWriter = null
    try {
      writer = new FileWriter(jaasPath)
      writer.write(jaasconf)
      writer.flush()
    } catch {
      case e: IOException =>
        log.error(e.getMessage, e)
        throw new IOException(e.getMessage, e)
    } finally {
      if (writer != null) writer.close()
    }
    System.setProperty(KafkaConstants.JAVA_SECURITY_AUTH_LOGIN_CONFIG, jaasPath)
    // Return the JAAS path explicitly. Previously the method's result was the
    // value of System.setProperty (the property's *previous* value, possibly
    // null) even though the signature declares String.
    jaasPath
  }

  /**
   * Resolves the physical Kafka cluster for the configured db/topic and
   * caches its metadata on this instance (connection, table entity, topic
   * format / partition count / CSV delimiter, default-datasource flag).
   *
   * @return the Kafka bootstrap server address
   */
  def getKafkaConnection(kc: KhaosContext) = {
    val kafkaMeta = MetaUtils.getKafkaMeta(kc,
      _kafkaConfig.db_name,
      _kafkaConfig.table_name,
      _kafkaConfig.extender.meta.clazz,
      compact(render(_kafkaConfig.extender.meta.params)),
      this)
    kafkaConnect = kafkaMeta.getDsKafkaConnect
    tableEntiy = kafkaMeta.getTableEntiy
    import scala.collection.JavaConverters._
    // Pick the topic-level settings out of the table's key/value params
    for (param <- tableEntiy.getParams.asScala) {
      val key = param.get("pKey")
      if (KafkaConstants.DATA_FORMAT == key) topicType = param.get("pValue").toString
      else if (KafkaConstants.PARTITION_SIZE == key) topicPartitionSize = param.get("pValue").toString.toInt
      else if (KafkaConstants.CSV_DELIMITER == key) topicChar = param.get("pValue").toString
    }

    defaultDs = kafkaMeta.getDefaultDs

    kafkaConnect.getServerAddress
  }

  /**
   * Loads the "module.kafka.source.*" configuration into the instance fields,
   * falling back to built-in defaults for any missing key. On failure the
   * field defaults remain in effect.
   */
  def loadProperties(kc: KhaosContext): Unit = {
    try {
      val kafkaProperties: Map[String, String] = kc.conf.getAllWithPrefix("module.kafka.source.").toMap
      log.info("KafkaSource Properties")
      kafkaProperties.foreach { case (k, v) => log.info(k + "   " + v) }
      // SECURITY NOTE(review): the fallback credentials below (including the
      // encrypted password blob) are hard-coded; they belong in a secret store.
      _host = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_MYSQL_HOST, "mysql-di.db.sdns.kscbigdata.cloud")
      _port = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_MYSQL_PORT, "13306")
      _username = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_MYSQL_USERNAME, "di_service")
      _password = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_MYSQL_PASSWORD, "kgrFR2aqkompSVKNb2nuR2YUi2VdGBU4SEFMYXAXbk3QAUI8Mear/nKYi4N4M+TnkDAgejysFoNKIkab9hTOUQ==")
      _dbname = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_MYSQL_DBNAME, "di_manage")
      _tblname = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_MYSQL_TBLNAME, "di_offset_test")
      taskRunNumber = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_CONSUMER_PER_NUMS, "10000").toLong
      _session_timout = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_SESSION_TIMEOUT, "300000")
      _deafult_api_timeout = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_DEAFULT_API_TIMEOUT, "600000").toInt
      _poll_timeout = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_POLL_TIMEOUT, "1000").toInt
      _retry_times = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_POLL_RETRY_TIMES, "3").toInt
      _jdbc_driver = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_MYSQL_JDBC_DRIVER, "com.mysql.cj.jdbc.Driver")
      _jdbc_url_param = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_MYSQL_JDBC_URL_PARAM, "")
      _common_public_kde_realm = kafkaProperties.getOrElse(KafkaConstants.MODULE_KAFKA_SOURCE_COMMON_PUBLIC_KDE_REALM, "HADOOP.COM")
    } catch {
      case e: Exception =>
        // Log the cause instead of swallowing it silently; the field defaults
        // declared on the class remain in effect.
        log.error("未读取到Kafka配置! 改用默认配置", e)
    }
  }

  /**
   * Loads the per-partition offsets previously committed by this job from the
   * MySQL offset table.
   *
   * @return map of "&lt;offset key&gt;&lt;partition&gt;" -> last committed offset;
   *         empty when this job has never consumed the topic
   */
  def getTopicPartitionOffset(kc: KhaosContext): HashMap[String, Long] = {
    // SECURITY NOTE(review): the RSA public key used to decrypt the DB
    // password is hard-coded; move it (and the credentials) to configuration.
    val publicKey = "MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKUJNn3fAkJ7FEtgAnwh8IZaV5VOEsP/V/tA2InE4aUtHkzTZKPUxa+kPkcPByx69bhzLrsTRg7t/Om7OcLEiwsCAwEAAQ=="
    val dev = kc.conf.getString(SchedulerConstants.RUN_ENV)
    // NOTE: the configured _tblname is effectively ignored — the table is
    // always derived from the run environment. (The old code initialized a var
    // with _tblname and then unconditionally overwrote it.)
    val tbl_name =
      if (dev.toLowerCase == "test") KafkaConstants.TBL_NAME_TEST
      else KafkaConstants.TBL_NAME_ONLINE
    val decryptPassword = ConfigTools.decrypt(publicKey, _password)
    val url = s"jdbc:mysql://${_host}:${_port}/${_dbname}${_jdbc_url_param}"
    val prop = new Properties
    prop.put("driver", _jdbc_driver)
    prop.put("user", _username)
    prop.put("password", decryptPassword)
    val pattern = getOffsetPattern(kc) + "%"

    // pattern is built from internal scheduler ids only, but interpolating it
    // into SQL should eventually move to a parameterized query.
    val sql = s"(SELECT * FROM ${_dbname}.${tbl_name} WHERE partitions LIKE '${pattern}') tmpTableKafka"
    log.info(s"查询offset sql: $sql")
    val df = kc.sparkSession.read.jdbc(url, sql, prop)
    df.show(100, false)
    // Fold the result rows into an immutable map (empty result => empty map)
    df.collect().foldLeft(HashMap.empty[String, Long]) { (acc, row) =>
      acc + (row.getAs[String](KafkaConstants.PARTITIONS) ->
        row.getAs[String](KafkaConstants.OFFSET).toLong)
    }
  }

  /**
   * Computes the end-of-consumption timestamp: the job's scheduled business
   * time minus the user-configured time offset (minutes + seconds).
   *
   * @return epoch millis of the consumption end point
   */
  def getTimestampFromCT(kc: KhaosContext): Long = {
    val m = _kafkaConfig.time_offset_m.toInt
    val s = _kafkaConfig.time_offset_s.toInt
    // Compute in Long — the old Int expression `(m * 60 + s) * 1000` silently
    // overflowed for offsets beyond ~24 days before being widened to Long.
    val timeOffsetMillis: Long = (m * 60L + s) * 1000L
    // Business date from the scheduler, e.g. "job.biz.date":"20200511"
    val day = kc.conf.getString("job.biz.date")
    // Scheduled run time, e.g. "job.biz.time":"20:13:45"
    val time = day + kc.conf.getString("job.biz.time").replace(":", "")
    val dateFormat = new SimpleDateFormat("yyyyMMddHHmmss")
    val timeInMillis = dateFormat.parse(time).getTime

    timeInMillis - timeOffsetMillis
  }

  /**
   * Computes the consumption plan: the end offset for every topic partition
   * and the per-task start offsets (each task consumes at most taskRunNumber
   * records).
   *
   * Side effects: writes the driver-side JAAS config when Kerberos is on,
   * sets taskNumber, and emits the end offsets on the values channel for the
   * later offset commit.
   *
   * @return (partition -> endOffset,
   *         list of single-entry maps "partitionId" -> taskStartOffset, one
   *         entry per task)
   */
  def getConsumerInfo(kc: KhaosContext): (HashMap[Int, Long], ListBuffer[HashMap[String, Long]]) = {
    val topicName: String = _kafkaConfig.table_name.toLowerCase
    // Offsets previously committed to the MySQL offset table
    val mysqlOffset: Map[String, Long] = getTopicPartitionOffset(kc)
    // End-of-consumption timestamp
    val endTimestamp: Long = getTimestampFromCT(kc)
    log.info("消费结束时间点 ==> " + endTimestamp)

    // Read job config to decide the start offset per partition.
    // reset_offset only applied to legacy jobs; deprecated in v3.3 and kept
    // here for backward compatibility.
    val reset_offset: Boolean = _kafkaConfig.strat_offset.getOrElse(false)
    val resetConf: ResetConf = _kafkaConfig.reset_config.getOrElse(ResetConf(reset = Option("earliest"), timestamp = Option("")))
    var incrSync: Boolean = _kafkaConfig.incr_sync.getOrElse(true)
    //var incrSync: Boolean = kc.conf.getBoolean("incrSync",defaultValue = true)
    val resetType: String = resetConf.reset.getOrElse("earliest")
    val timestamp: String = resetConf.timestamp.getOrElse("")

    if (reset_offset) {
      // Legacy jobs: when offset reset is enabled, incremental sync is forced
      // off (v3.3, 2021-04-13)
      incrSync = false
    }

    log.info(s"jobConsumer reset_offset:$reset_offset incrSync:$incrSync resetType:$resetType 自定义timestamp:$timestamp")

    if (!kafkaConnect.getUseKrbs.isEmpty && kafkaConnect.getUseKrbs.toBoolean) {
      initJAASConf("driver")
    }
    // Driver-side consumer used only for offset lookups
    val consumer = new KafkaConsumer[String, String](properties)
    // Timeout for the offset-lookup requests
    val duration: Duration = Duration.ofMillis(_deafult_api_timeout)
    // Query arguments: every partition mapped to the end timestamp
    val query: util.Map[TopicPartition, java.lang.Long] = new util.HashMap[TopicPartition, java.lang.Long]()
    for (index <- 0 until topicPartitionSize) {
      val partition = new TopicPartition(topicName, index)
      query.put(partition, endTimestamp)
    }
    // Ask Kafka for the offset of the first record at/after the end timestamp
    val metaPartitionAndEndOffset: immutable.Seq[(TopicPartition, OffsetAndTimestamp)] = consumer.offsetsForTimes(query, duration).toList
    // Final offset each partition will be consumed up to
    var partitionAndEndOffset = new HashMap[Int, Long]()

    var partitionAndStartOffsetList = new ListBuffer[HashMap[String, Long]]()

    val offsetInfoMap: mutable.HashMap[String, String] = collection.mutable.HashMap[String, String]()
    log.info("根据时间点获取的offset值 ==> " + metaPartitionAndEndOffset.mkString("|"))
    // Offset key prefix for this job: tenant, project, job id, topic, groupId
    val jobConsumerPattern: String = getOffsetPattern(kc)

    // Task counter
    var task = 0
    for (partitionAndOffset <- metaPartitionAndEndOffset) {
      // Topic partition
      val metaTopicPartition: TopicPartition = partitionAndOffset._1
      // End offset + its timestamp; null when no record exists at/after the
      // end timestamp (handled below by falling back to endOffsets)
      val metaOffsetAndTimestamp: OffsetAndTimestamp = partitionAndOffset._2
      // Start offset for this partition
      var startOffset: Long = 0
      // Per-partition offset key
      val patternPartition: String = jobConsumerPattern + metaTopicPartition.partition()

      // Determine startOffset.
      // NOTE(review): resetType values other than "earliest"/"constum" (sic —
      // spelling matches the stored config value) throw a MatchError here.
      resetType match {
        case "earliest" =>
          if (incrSync && mysqlOffset.contains(patternPartition)) {
            // Incremental sync on and an offset is recorded: this job consumed
            // the topic before — resume from the committed offset
            startOffset = mysqlOffset(patternPartition)
          } else {
            // Otherwise start from the beginning of the partition
            startOffset = 0
          }
        case "constum" =>
          if (incrSync && mysqlOffset.contains(patternPartition)) {
            // Incremental sync on and an offset is recorded: resume from it
            startOffset = mysqlOffset(patternPartition)
          } else {
            // Resolve the user-supplied start timestamp to an offset
            val dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
            val startTimestamp: Long = dateFormat.parse(timestamp).getTime
            log.info("constum开始消费时间点: " + startTimestamp)
            val constumQuery: util.Map[TopicPartition, java.lang.Long] = new util.HashMap[TopicPartition, java.lang.Long]()
            constumQuery.put(metaTopicPartition, startTimestamp)

            val list: List[(TopicPartition, OffsetAndTimestamp)] = consumer.offsetsForTimes(constumQuery, duration).toList
            val value: OffsetAndTimestamp = list.head._2
            if (value != null) {
              startOffset = value.offset()
            } else {
              // null means no record exists at or after the requested time
              throw new Exception("数据中不存在指定时间点对应的起始offset值,请重新选择开始时间点!")
            }
          }
      }

      // Determine the end offset
      var endOffset: Long = 0
      val partitions = new util.ArrayList[TopicPartition]()
      partitions.add(metaTopicPartition)
      if (metaOffsetAndTimestamp != null) {
        endOffset = metaOffsetAndTimestamp.offset()
        // Start beyond end is a configuration error — fail fast with guidance
        if (startOffset > endOffset) {
          if (timestamp != "" && !incrSync) {
            // Custom start time without incremental sync
            throw new Exception(s"分区${metaTopicPartition.partition()}:根据计算,startOffset($startOffset) > endOffset($endOffset), 自定义时间:$timestamp,endTimestamp:$endTimestamp,请合理选择作业消费点(消费开始点位为自定义时间,消费结束点位为具体业务时间减去作业配置的前推时间)")
          } else {
            throw new Exception(s"分区${metaTopicPartition.partition()}:根据计算,startOffset($startOffset) > endOffset($endOffset), endTimestamp:$endTimestamp,请合理选择作业消费点(消费开始点位为offset表中记录的,消费结束点位为具体业务时间减去作业配置的前推时间)")
          }
        }
      } else {
        // No record at/after the end timestamp: consume up to the current end
        endOffset = consumer.endOffsets(partitions, duration).get(metaTopicPartition)
      }

      var tmpOffset: Long = startOffset
      val count: Long = endOffset - startOffset
      if (count >= taskRunNumber) {
        // Split this partition across several tasks.
        // NOTE(review): with `<=` the last task can start exactly at endOffset
        // and consume nothing when the range is a multiple of taskRunNumber.
        val taskStartOffset: ListBuffer[Long] = new ListBuffer[Long]()
        while (tmpOffset <= endOffset) {
          var partitionAndStartOffsetMap = new HashMap[String, Long]
          // Each task consumes a fixed batch of taskRunNumber records
          taskStartOffset += tmpOffset
          partitionAndStartOffsetMap += (metaTopicPartition.partition().toString -> tmpOffset)
          partitionAndStartOffsetList += partitionAndStartOffsetMap
          tmpOffset += taskRunNumber
          task += 1
        }
      } else {
        var partitionAndStartOffsetMap = new HashMap[String, Long]
        partitionAndStartOffsetMap += (metaTopicPartition.partition().toString -> tmpOffset)
        partitionAndStartOffsetList += partitionAndStartOffsetMap
        task += 1
      }
      // list ((0,1),(0,2),(1,1))
      partitionAndEndOffset += (metaTopicPartition.partition() -> endOffset)
      log.info(s"${metaTopicPartition.partition()} startOffset ==> " + startOffset)
      log.info(s"${metaTopicPartition.partition()} endOffset ==> " + endOffset)
      offsetInfoMap.put(patternPartition, endOffset.toString)
    }
    log.info("消费者个数: " + task)
    taskNumber = task
    consumer.close()
    log.info("partitionAndStartOffsetList ==> " + partitionAndStartOffsetList.mkString("|"))
    kc._valuesChannel.emit(KafkaConstants.PARTITIONS_OFFSET, offsetInfoMap)

    /**
     * partitionAndEndOffset — end offset per partition, e.g.
     * 0 100
     * 1 200
     * 3 205
     * partitionAndStartOffsetList — each partition split by taskRunNumber;
     * each task reads its own startOffset..endOffset, e.g.
     * 0   0
     * 1   0
     * 1   100
     * 2   0
     * 2   100
     * 2   200
     * */
    (partitionAndEndOffset, partitionAndStartOffsetList)
  }

  /**
   * Legacy consumption-plan computation kept for backward compatibility;
   * getConsumerInfo is the current implementation. Same output shape:
   * (partition -> endOffset, list of "partitionId" -> taskStartOffset maps).
   */
  def getConsumerInfoOld(kc: KhaosContext) = {
    val tpOffsets = getTopicPartitionOffset(kc)
    val timeOffset: Long = getTimestampFromCT(kc)
    log.info("消费结束时间点 ==> " + timeOffset)
    val query: util.Map[TopicPartition, java.lang.Long] = new util.HashMap[TopicPartition, java.lang.Long]()
    for (index <- 0 to topicPartitionSize - 1) {
      val partition = new TopicPartition(_kafkaConfig.table_name.toLowerCase, index)
      query.put(partition, timeOffset)
    }
    val consumer = new KafkaConsumer[String, String](properties)
    // Task counter; each task consumes a fixed batch
    var task = 0
    // Timeout for the offset lookups
    val duration = Duration.ofMillis(_deafult_api_timeout)
    val partitionAndEndOffset = consumer.offsetsForTimes(query, duration).toList
    // Final offset each task will consume up to
    var partitionAndTaskEndOffset = new HashMap[Int, Long]()

    var partitionAndStartOffsetList = new ListBuffer[HashMap[String, Long]]()

    val offsetInfoMap: mutable.HashMap[String, String] = collection.mutable.HashMap[String, String]()
    log.info("根据时间点获取的offset值 ==> " + partitionAndEndOffset.mkString("|"))
    // Offset key: tenant id, project id, job.id, topic, groupId, partition, offset
    val pattern = getOffsetPattern(kc)
    for (partitionAndOffset <- partitionAndEndOffset) {

      val taskStartOffset = new ListBuffer[Long]()
      val topicPartition = partitionAndOffset._1
      val offsetAndTimestamp = partitionAndOffset._2
      // First read of the topic starts at offset 0
      var startOffset = 0l
      val reset_offset = _kafkaConfig.strat_offset.getOrElse(false)
      val patternPartition = pattern + topicPartition.partition()
      if (tpOffsets.contains(pattern + topicPartition.partition())) {
        // Offset-reset switch: when enabled, ignore the recorded offset
        if (reset_offset) {
          val resetConf: ResetConf = _kafkaConfig.reset_config.getOrElse(ResetConf(reset = Option("earliest"), timestamp = Option("")))
          val resetType = resetConf.reset.getOrElse("earliest")
          val timestamp = resetConf.timestamp.getOrElse("")
          resetType match {
            case "earliest" => {
              startOffset = 0l
            }
            case "constum" => {
              // Resolve the user-supplied start timestamp to an offset
              val dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
              val startTimestamp = dateFormat.parse(timestamp).getTime
              val query1: util.Map[TopicPartition, java.lang.Long] = new util.HashMap[TopicPartition, java.lang.Long]()
              query1.put(topicPartition, startTimestamp)
              try {
                startOffset = consumer.offsetsForTimes(query1, duration).toList.head._2.offset()
              } catch {
                // NPE here means Kafka found no record at/after the timestamp
                case e: NullPointerException => {
                  throw new Exception("数据中不存在指定时间点对应的起始offset值,请重新选择开始时间点!")
                }
              }
            }
          }
        } else {
          // Resume from the offset recorded in the database
          startOffset = tpOffsets(patternPartition)
        }
      } else {
        // No recorded offset; the reset switch may still pick a custom start
        if (reset_offset) {
          val resetConf: ResetConf = _kafkaConfig.reset_config.getOrElse(ResetConf(reset = Option("earliest"), timestamp = Option("")))
          val resetType = resetConf.reset.getOrElse("earliest")
          val timestamp = resetConf.timestamp.getOrElse("")
          resetType match {
            case "earliest" => {
              startOffset = 0l
            }
            case "constum" => {
              val dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
              val startTimestamp = dateFormat.parse(timestamp).getTime
              log.info("开始消费时间点: " + startTimestamp)
              val query1: util.Map[TopicPartition, java.lang.Long] = new util.HashMap[TopicPartition, java.lang.Long]()
              query1.put(topicPartition, startTimestamp)
              try {
                startOffset = consumer.offsetsForTimes(query1, duration).toList.head._2.offset()
              } catch {
                // NPE here means Kafka found no record at/after the timestamp
                case e: NullPointerException => {
                  throw new Exception("数据中不存在指定时间点对应的起始offset值,请重新选择开始时间点!")
                }
              }
            }
          }
        }
      }
      var endOffset = 0l
      try {
        endOffset = offsetAndTimestamp.offset()
        val partitions = new util.ArrayList[TopicPartition]()
        partitions.add(topicPartition)
        // End offset 0 or start beyond end: fall back to the partition's
        // current end offset
        if (endOffset == 0 || startOffset > endOffset) {
          endOffset = consumer.endOffsets(partitions, duration).get(topicPartition)
        }
      } catch {
        // offsetAndTimestamp is null when no record exists at/after the end
        // timestamp — fall back to the current end offset
        case e: NullPointerException => {
          val partitions = new util.ArrayList[TopicPartition]()
          partitions.add(topicPartition)
          endOffset = consumer.endOffsets(partitions, duration).get(topicPartition)
        }
      }
      var tmpOffset = startOffset
      val count = endOffset - startOffset
      if (count >= taskRunNumber) {
        while (tmpOffset <= endOffset) {
          var partitionAndStartOffsetMap = new HashMap[String, Long]
          // Each task consumes a fixed batch of taskRunNumber records
          taskStartOffset += tmpOffset
          partitionAndStartOffsetMap += (topicPartition.partition().toString -> tmpOffset)
          partitionAndStartOffsetList += partitionAndStartOffsetMap
          tmpOffset += taskRunNumber
          task += 1
        }
      } else {
        var partitionAndStartOffsetMap = new HashMap[String, Long]
        partitionAndStartOffsetMap += (topicPartition.partition().toString -> tmpOffset)
        partitionAndStartOffsetList += partitionAndStartOffsetMap
        task += 1
      }
      // list ((0,1),(0,2),(1,1))
      partitionAndTaskEndOffset += (topicPartition.partition() -> endOffset)
      log.info("startOffset ==> " + startOffset)
      log.info("endOffset ==> " + endOffset)
      offsetInfoMap.put(pattern + topicPartition.partition(), endOffset.toString)
    }
    log.info("消费者个数: " + task)
    taskNumber = task
    consumer.close()
    log.info("partitionAndStartOffsetList ==> " + partitionAndStartOffsetList.mkString("|"))
    kc._valuesChannel.emit(KafkaConstants.PARTITIONS_OFFSET, offsetInfoMap)
    (partitionAndTaskEndOffset, partitionAndStartOffsetList)
  }

  /**
   * Builds the key prefix under which this job's partition offsets are stored
   * in the MySQL offset table:
   * "&lt;tenant&gt;_&lt;project&gt;_&lt;dbName&gt;_&lt;topic&gt;_&lt;groupId&gt;_"
   * (the caller appends the partition number).
   *
   * dsId was dropped from the key on 2021-02-03 because of the
   * multi-environment ("three-state") issue.
   */
  def getOffsetPattern(kc: KhaosContext): String = {
    val keyParts = Seq(
      kc.conf.getString(SchedulerConstants.TENANT_ID),
      kc.conf.getString(SchedulerConstants.PROJECT_ID),
      _kafkaConfig.db_name,
      _kafkaConfig.table_name.toLowerCase,
      kc.conf.getString(SchedulerConstants.DATA_DEV_ID)
    )
    // Trailing "_" kept so partition numbers concatenate cleanly
    keyParts.mkString("", "_", "_")
  }

  /**
   * Runs the distributed consumption: one Spark task per planned batch, each
   * task consuming its [startOffset, endOffset) slice of a topic partition,
   * then builds the result DataFrame.
   *
   * @return DataFrame of the raw record values, parsed per the topic format
   */
  def kafkaConsumers(kc: KhaosContext): DataFrame = {

    // Each RDD partition index selects which (topic partition, start offset)
    // slice it consumes; parameters needed on executors go out as broadcasts.
    val consumerInfo = getConsumerInfo(kc)
    log.info("consumerInfo._1 ... ")
    consumerInfo._1.foreach(tp => {
      log.info(tp._1 + " : " + tp._2)
    })
    log.info("consumerInfo._2 ... ")
    consumerInfo._2.foreach(map => {
      map.foreach(tp => {
        log.info(tp._1 + " : " + tp._2)
      })
    })


    // Seed RDD with one element per task so mapPartitionsWithIndex yields one
    // partition (= one consumer) per planned batch
    val arrRDD = new Array[Int](taskNumber)
    var partitionNum = 0
    for (i <- 0 to taskNumber - 1) {
      arrRDD(i) = partitionNum
      partitionNum += 1
    }
    val tmpRDD = kc.sparkSession.sparkContext.makeRDD(arrRDD).repartition(taskNumber)

    // Kafka connection configuration
    val propertiesBroadcast = kc.sparkSession.sparkContext.broadcast(properties)
    // Per-task start offsets (one single-entry map per task)
    val partitionAndStartOffsetBroadcast = kc.sparkSession.sparkContext.broadcast(consumerInfo._2)

    // End offset for each topic partition
    val taskEndOffsetMapBroadcast = kc.sparkSession.sparkContext.broadcast(consumerInfo._1)
    // Topic name
    val topicBroadcast = kc.sparkSession.sparkContext.broadcast(_kafkaConfig.table_name.toLowerCase)
    // Records consumed per task
    val taskRunNumberBroadcast = kc.sparkSession.sparkContext.broadcast(taskRunNumber)

    // NOTE(review): this closure captures `this` (kafkaConnect, _poll_timeout,
    // _retry_times, initJAASConf) — relies on the class being Serializable.
    val value: RDD[String] = tmpRDD.mapPartitionsWithIndex((index, iter) => {
      log.info("消费者: " + index)
      println("消费者: " + index)
      // Look up this task's (partition, start offset) pair
      val paeob = partitionAndStartOffsetBroadcast.value(index).head
      val partitions = paeob._1.toInt
      val taskStartOffset = paeob._2

      if (!kafkaConnect.getUseKrbs.isEmpty && kafkaConnect.getUseKrbs.toBoolean) {
        initJAASConf("executor")
      }

      val consumer = new KafkaConsumer[String, String](propertiesBroadcast.value)

      // End offset from the end timestamp; consumption stops there (exclusive)
      val endOffset = taskEndOffsetMapBroadcast.value(partitions)

      log.info("开始offset: " + taskStartOffset)
      log.info("结束offset: " + endOffset)
      log.info("partitions: " + partitions)

      val topicPartition = new TopicPartition(topicBroadcast.value, partitions)
      consumer.assign(util.Arrays.asList(topicPartition))
      consumer.seek(topicPartition, taskStartOffset)
      var flag = true
      // Current retry count
      var currentRetryTimes: Long = 0
      // Offset of the last record consumed
      var currentOffset: Long = taskStartOffset
      // Current poll timeout (doubled on each empty poll)
      var currentPollTimeOut: Long = _poll_timeout

      val arrayBuffer = new ArrayBuffer[String]()
      try {
        // Total records consumed by this task
        var tmp = 0
        while (flag) {
          val msgList = consumer.poll(Duration.ofMillis(currentPollTimeOut))
          val count = msgList.count
          if (null != msgList && count > 0) {
            for (record <- msgList if flag) {
              if (record.offset >= endOffset || tmp == taskRunNumberBroadcast.value) {
                flag = false
              } else {
                // Collect the record value
                arrayBuffer += record.value()
                // The end offset is derived from the user's time range
                tmp += 1
                currentOffset = record.offset
              }
            }
          } else {
            // The record AT endOffset is not consumed: range is [start, end)
            if ((currentOffset + 1) >= endOffset || tmp == taskRunNumberBroadcast.value) {
              // Planned amount reached: set flag = false to end the task
              flag = false
            } else {
              // Retry count + 1
              currentRetryTimes += 1
              // Double the poll timeout
              currentPollTimeOut = currentPollTimeOut * 2

              // Planned amount not reached and Kafka returned nothing: retry
              if (currentRetryTimes > _retry_times) {
                // Max retries hit with data still missing — fail the task
                val errormessage: String = s"kafka source 消费者: $index,重试${currentRetryTimes - 1}次消费后, kafka server端没有返回足够的数据,起始offset:$taskStartOffset,当前offset:$currentOffset,预计JobEndOffset:${endOffset},请检查kafka状态是否正常,如果kafka负载太高,建议调整作业超时参数"
                log.error(errormessage)
                throw new Exception(errormessage)
              }
              log.warn(s"消费者: $index, 进行第${currentRetryTimes}次重试消费,pollTimeout:$currentPollTimeOut,起始offset:$taskStartOffset,当前offset:$currentOffset,预计JobEndOffset:${endOffset} ")
            }
          }
        }
      } catch {
        case e: Exception => {
          throw e
        }
      } finally {
        consumer.close()
      }
      log.info(s"消费者: $index 消费到的数据: " + arrayBuffer.size)
      println("消费到的数据: " + arrayBuffer.size)
      arrayBuffer.toIterator
    })
    val df = structDataFrame(value, kc)
    df
  }

  /**
   * Build a typed DataFrame from the raw Kafka message RDD according to the
   * configured topic format (CSV or JSON) and the configured extract fields.
   *
   * @param value raw message payloads, one Kafka record value per element
   * @param kc    khaos context providing the SparkSession
   * @return DataFrame projected to exactly the configured extract fields,
   *         with the optional row filter applied
   */
  def structDataFrame(value: RDD[String], kc: KhaosContext): DataFrame = {
    val columnArray = new ArrayBuffer[Column]()
    // Build the StructType and the final projection list from the extract fields.
    val schemaArr = new ArrayBuffer[StructField]()
    for (elem <- _kafkaConfig.extract_fields) {
      schemaArr.append(StructField(elem.field, getDataType(elem.data_type), true))
      columnArray += col(elem.field)
    }
    val schema: StructType = StructType(schemaArr.toArray)

    var frame: DataFrame = null
    topicType match {
      case KafkaConstants.CSV => {
        // Escape the delimiter so String.split (regex-based) treats it literally.
        val char = "\\" + topicChar
        // Lines that do not split into exactly this many fields are malformed.
        val size = _kafkaConfig.extract_fields.size
        // Advanced option: drop malformed rows instead of failing the job.
        val ignore_abnormal = _kafkaConfig.advanced_options.ignore_abnormal.getOrElse(IgnoreAbnormal(on_off = Option(false))).on_off.getOrElse(false)
        var dataRDD: RDD[Row] = null
        if (ignore_abnormal) {
          // Lenient mode: malformed rows become null (filtered below) and are
          // counted in the global abnormal accumulator.
          dataRDD = value.mapPartitions(iter => {
            // Cap noisy stdout logging at errorNumber+1 bad lines per partition.
            // FIX: these counters used to be re-declared inside the per-record
            // closure, so the cap never took effect.
            val errorNumber = 9
            var tmp = 0
            iter.map(line => {
              var resRow: Row = Row()
              val arr = line.split(char, -1)
              if (arr.length != size) {
                if (tmp <= errorNumber) {
                  println("错误数据: " + line)
                  tmp += 1
                }
                // Count globally visible abnormal rows.
                abnormalAccumulator.add(1)
                resRow = null
              } else {
                try {
                  resRow = rdd2Row(schema, arr)
                } catch {
                  case e: Exception => {
                    if (tmp <= errorNumber) {
                      println("错误数据: " + line)
                      tmp += 1
                    }
                    abnormalAccumulator.add(1)
                    resRow = null
                  }
                }
              }
              resRow
            })
          })
        } else {
          // Strict mode: any malformed line aborts the job.
          dataRDD = value.map(line => {
            val attributes: Array[String] = line.split(char, -1)
            if (attributes.length != size) {
              throw new Exception(s"数据格式不正确: ${line}")
            }
            try {
              rdd2Row(schema, attributes)
            } catch {
              case e: Exception =>
                throw new Exception("数据类型不匹配! line==>" + line)
            }
          })
        }
        // Drop the null placeholders produced by lenient mode.
        dataRDD = dataRDD.filter(_ != null)
        frame = kc.sparkSession.createDataFrame(dataRDD, schema)
        frame = convertDataType(_kafkaConfig.extract_fields, frame)
      }
      case KafkaConstants.JSON => {
        // Unique column name to avoid clashing with user field names.
        val json_field = "json_field_" + System.currentTimeMillis()
        val spark = kc.sparkSession
        import org.apache.spark.sql.functions._
        import spark.implicits._
        val ignore_abnormal = _kafkaConfig.advanced_options.ignore_abnormal.getOrElse(IgnoreAbnormal(on_off = Option(false))).on_off.getOrElse(false)
        var rdd = value.mapPartitions(iter => {
          // Per-task abnormal counter, used only to rate-limit error logging.
          var abnormalCount: Long = 0
          val returnMap = iter.map(lineData => {
            var res = lineData
            try {
              // Each line must be a valid JSON object.
              if (!JSON.isValid(lineData) || !JSON.isValidObject(lineData))
                throw new Exception(s"数据JSON格式错误 data==>[$lineData]")
            } catch {
              case e: Exception =>
                if (!ignore_abnormal) {
                  // Strict mode: fail the job on the first bad line.
                  throw new Exception(s"kafka read 解析json数据失败, line==>[$lineData]", e)
                } else {
                  // Lenient mode: log (at most 10 per task), blank out the line
                  // and count it in the global accumulator.
                  if (abnormalCount < 10)
                    logError(s"kafka read 解析json数据失败,忽略异常开启,异常数据==>[$lineData]")
                  abnormalCount += 1
                  res = ""
                  abnormalAccumulator.add(1)
                }
            }
            res
          })
          returnMap
        })

        // Drop the blanked-out bad lines from lenient mode.
        rdd = rdd.filter(_ != "")
        frame = rdd.toDF(json_field)
        frame = frame.withColumn(json_field, to_json(struct(col(json_field))))
        frame = frame.withColumn(json_field, json_tuple(col(json_field), json_field))
        for (elem <- _kafkaConfig.extract_fields) {
          val field_name = elem.field
          val data_type = elem.data_type
          if (data_type.equalsIgnoreCase("object")) {
            // Nested object: field name is "outer:inner"; extract the inner key.
            frame = frame.withColumn(field_name, json_tuple(col(json_field), field_name))
            val object_name_arr = field_name.split(":", -1)
            val field_name_s = object_name_arr(1)
            frame = frame.withColumn(field_name, to_json(struct(col(field_name))))
            frame = frame.withColumn(field_name, json_tuple(col(field_name), field_name))
            frame = frame.withColumn(field_name_s, json_tuple(col(field_name), field_name_s))
          } else if (data_type.equalsIgnoreCase("array")) {
            frame = frame.withColumn(field_name, struct(field_name))
          } else {
            frame = frame.withColumn(field_name, json_tuple(col(json_field), field_name))
          }
        }
      }
      case _ => {
      }
    }
    // FIX: filter is an Option; calling .get threw NoSuchElementException
    // whenever no filter was configured. Absence now simply means "no filter".
    val filter = _kafkaConfig.filter.getOrElse("")
    if (null != filter && !filter.equals("")) {
      frame = frame.where(filter)
    }
    frame.select(columnArray: _*)
  }

  /**
   * Convert one split CSV record into a Spark Row, following the field order
   * and data types of `schema`. For the typed cases (String/Long/Date/
   * Timestamp/Float/Double) the sentinel string MetaDataConstants.NULL maps to
   * a null cell; NullType and unknown types pass the raw string through
   * unchanged (matching the original behavior — no NULL sentinel handling).
   *
   * @param schema     expected column order and types (same arity as attributes)
   * @param attributes raw string cells, one per schema field
   * @return a Row whose cells are converted per the schema
   * @throws NumberFormatException / IllegalArgumentException on unparseable cells
   */
  def rdd2Row(schema: StructType, attributes: Array[String]): Row = {
    val fields = schema.fields
    // Collect converted cells once and build the Row in one shot; the previous
    // chained Row.merge plus fields.toList(i) was O(n^2) per record.
    val cells = new ArrayBuffer[Any](fields.length)
    for (i <- 0 until fields.length) {
      val raw = attributes(i)
      val cell: Any = fields(i).dataType match {
        // Typed columns: the literal NULL sentinel becomes a null cell.
        case StringType | LongType | DateType | TimestampType | FloatType | DoubleType
          if MetaDataConstants.NULL == raw => null
        case StringType => raw
        case LongType => raw.toLong
        case DateType => Date.valueOf(raw)
        case TimestampType =>
          // FIX: the original tested contains("\\+") — a literal backslash-plus —
          // so the timezone-stripping branch was dead and values such as
          // "2020-01-01 00:00:00+08" crashed Timestamp.valueOf. Test for a
          // literal '+' and keep only the part before it.
          if (raw.contains("+")) {
            val colArr = raw.split("\\+", -1)
            if (colArr.length > 1) Timestamp.valueOf(colArr(0))
            else Timestamp.valueOf(raw)
          } else {
            Timestamp.valueOf(raw)
          }
        case FloatType => raw.toFloat
        case DoubleType => raw.toDouble
        // NullType and any unrecognized type: pass the raw string through.
        case NullType => raw
        case _ => raw
      }
      cells += cell
    }
    Row.fromSeq(cells)
  }

  /**
   * Cast every configured extract field of `data` to its target Spark type.
   *
   * @param sinkSchema configured fields with their khaos type names
   * @param data       frame whose columns are cast in place (by name)
   * @return the frame with all listed columns cast via getDataType
   */
  def convertDataType(sinkSchema: List[ExtractFieldInfo], data: DataFrame): DataFrame = {
    // foldLeft replaces the original var-reassignment loop; behavior identical.
    sinkSchema.foldLeft(data) { (df, ef) =>
      df.withColumn(ef.field, df.col(ef.field).cast(getDataType(ef.data_type)))
    }
  }

  /**
   * Map a khaos column-type name to the corresponding Spark SQL DataType.
   * Unknown names (and TIME, which has no native Spark equivalent here)
   * default to StringType.
   */
  def getDataType(dataType: String): DataType = dataType match {
    // A match is an expression; the original `var value = null` + reassignment
    // was redundant.
    case ColumnType.STRING => DataTypes.StringType
    case ColumnType.NUMBER => DataTypes.LongType
    case ColumnType.DATE => DataTypes.DateType
    case ColumnType.DECIMAL => DataTypes.DoubleType
    case ColumnType.TIME => DataTypes.StringType
    case ColumnType.DATETIME => DataTypes.TimestampType
    case _ => DataTypes.StringType
  }


  /**
   * Resolve the upstream schema: parse the source config JSON and expose each
   * configured extract field as a KhaosStructField (name + type name).
   */
  override def schema(dc: KhaosContext, config: String, dependence: Dependency) = {
    implicit val formats = DefaultFormats
    val kafkaInfo = parse(config, true).extract[KafkaSourceConfig]
    // Direct map replaces the original mutable-buffer accumulation.
    kafkaInfo.extract_fields.map(ef => KhaosStructField(ef.field, ef.data_type))
  }
}
