package com.zt.bigdata.template.spark

import java.{lang, util}

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.zt.bigdata.spark.common.dto.{BasicStreamingParameter, StreamRecord}
import com.zt.bigdata.template.hdfs.HDFSTemplate
import com.zt.bigdata.template.hdfs.HDFSTemplate.FileFormat
import com.zt.bigdata.template.kafka.KafkaTemplate
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.ConsumerStrategies.{Assign, Subscribe}
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategy, HasOffsetRanges, OffsetRange}
import org.slf4j.Logger

import scala.collection.JavaConversions._

trait BasicStreamingTemplate[P <: BasicStreamingParameter] extends BasicTemplate[P] {

  implicit val logger: Logger = log

  implicit class ImplicitsStream(stream: DStream[_]) extends Serializable {
    // NOTE(review): appears to be leftover sample code — no caller is visible in this file; confirm before removing.
    def add(a: Int, b: Int) = a + b

    /**
      * Implicitly adds an `each` method to the DStream: consumes Kafka records
      * partition-by-partition and asynchronously commits only the offset ranges
      * that were processed without error.
      *
      * @param category                   label passed to `errorMonitor` (defined elsewhere in this template) for error tracking
      * @param processFun                 invoked per record with (record, prepareArgs, indexInPartition, partitionId, currentOffset);
      *                                   must return RESULT.SUCCESS for the committed offset to advance past this record
      * @param preparePartitionProcessFun invoked once per partition before records are processed; its result is passed
      *                                   to every `processFun` call as `prepareArgs`. Defaults to an empty Seq.
      * @param postPartitionProcessFun    invoked once per partition after record processing. NOTE(review): the default
      *                                   body `Unit` evaluates to the `Unit` companion object (then value-discarded);
      *                                   `()` would be the idiomatic literal.
      * @param processErrorFun            callback handed to `errorMonitor` when a non-zero error code is observed
      */
    def each(category: String,
             processFun: (ConsumerRecord[_, _], Seq[_], Int, Int, Long) => Long,
             preparePartitionProcessFun: (RDD[ConsumerRecord[_, _]], Boolean, Int) => Seq[_] = (_, _, _) => Seq(),
             postPartitionProcessFun: (Iterator[Any], Seq[_], Int) => Unit = (_, _, _) => Unit,
             processErrorFun: Long => Unit = _ => Unit)(implicit logger: Logger): Unit = {

      // errorCode acts as a batch-wide error flag; once non-zero, processing
      // short-circuits (semantics of errorMonitor are defined elsewhere — TODO confirm).
      val errorCode = errorMonitor(category, stream.context.sparkContext, processErrorFun)

      stream
        .foreachRDD(rdd => {
          if (errorCode.isZero) {
            // Kafka offset ranges for this batch; the array is indexed by partition id below.
            val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
            val processedOffsetRanges: RDD[OffsetRange] = rdd.mapPartitions(records => {
              val partitionId = TaskContext.get.partitionId
              val offsetRange: OffsetRange = offsetRanges(TaskContext.get.partitionId)
              // Highest offset successfully processed so far; starts at the range's beginning
              // and only advances on RESULT.SUCCESS, so failed records are not committed.
              var processedOffset: Long = offsetRange.fromOffset
              // NOTE(review): the enclosing `rdd` is captured inside its own mapPartitions
              // closure and handed to the executor-side callback — confirm this is
              // intentional and serialization-safe.
              val prepareArgs = preparePartitionProcessFun(rdd.asInstanceOf[RDD[ConsumerRecord[_, _]]], records.isEmpty, partitionId)
              records.zipWithIndex.foreach(r => {
                val record = r._1.asInstanceOf[ConsumerRecord[_, _]]
                if (errorCode.isZero) {
                  try {
                    val result = processFun(record, prepareArgs, r._2, partitionId, processedOffset)
                    if (result != RESULT.SUCCESS) {
                      errorCode.add(result)
                    } else {
                      processedOffset += 1
                    }
                  } catch {
                    // NOTE(review): catching Throwable also swallows fatal errors
                    // (OOM, interrupts); consider scala.util.control.NonFatal.
                    case e: Throwable =>
                      logger.error(s"process cause error: partitionId:$partitionId offset:$processedOffset record:${record.value()}", e)
                      errorCode.add(RESULT.INTERNAL_SERVER_ERROR)
                  }
                }
              })
              // NOTE(review): `records` was fully consumed by zipWithIndex.foreach above,
              // so this receives an exhausted iterator — confirm callers expect that.
              postPartitionProcessFun(records, prepareArgs, partitionId)
              // Emit one OffsetRange covering only the successfully processed span
              // (fromOffset until processedOffset) as this partition's output.
              Iterator("").filter(_ => offsetRange != null).map(_ =>
                OffsetRange(offsetRange.topic, offsetRange.partition, offsetRange.fromOffset, processedOffset))
            })
            // collect() is what actually triggers the partition processing above;
            // only the successfully processed ranges are then committed.
            stream.asInstanceOf[CanCommitOffsets].commitAsync(processedOffsetRanges.collect())
          }
        })
    }
  }


  /**
    * Enriches [[SparkSession]] with streaming-source helpers: checkpoint
    * configuration and three Kafka source variants (typed, string, raw).
    *
    * @param spark the session being extended
    */
  implicit class ImplicitsSS(spark: SparkSession) extends Serializable {

    /**
      * Optionally enables structured-streaming checkpointing on this session.
      *
      * @param enabledCheckpoint  whether to set the checkpoint location at all
      * @param checkpointLocation directory used for checkpoint state
      * @return the same session, for call chaining
      */
    def streaming(enabledCheckpoint: Boolean = true, checkpointLocation: String = "/tmp/checkpoint/"): SparkSession = {
      if (enabledCheckpoint) spark.conf.set("spark.sql.streaming.checkpointLocation", checkpointLocation)
      spark
    }

    /**
      * Subscribes to Kafka and wraps each record into a typed [[StreamRecord]]
      * (schema derived from `E`). Delegates to [[KafkaTemplate]].
      *
      * @param kafkaServers    bootstrap servers
      * @param topics          topic(s) to subscribe to
      * @param startingOffsets where to start when no committed offset exists
      * @param pollTimeoutMs   poll timeout in milliseconds
      * @param numRetries      retry count on failure
      * @param retryIntervalMs pause between retries in milliseconds
      * @tparam E payload type the record value is decoded into
      * @return a Dataset of typed stream records
      */
    def sourceKafkaStream[E <: Product](kafkaServers: String, topics: String,
                                        startingOffsets: String = "latest",
                                        pollTimeoutMs: Long = 1024,
                                        numRetries: Int = 1,
                                        retryIntervalMs: Long = 10)(implicit m: Manifest[E]): Dataset[StreamRecord[E]] =
      KafkaTemplate.sourceKafkaStream[E](spark, kafkaServers, topics, startingOffsets, pollTimeoutMs, numRetries, retryIntervalMs)(m)

    /**
      * Subscribes to Kafka keeping the record value as a plain String (no
      * schema). Delegates to [[KafkaTemplate]].
      *
      * @param kafkaServers    bootstrap servers
      * @param topics          topic(s) to subscribe to
      * @param startingOffsets where to start when no committed offset exists
      * @param pollTimeoutMs   poll timeout in milliseconds
      * @param numRetries      retry count on failure
      * @param retryIntervalMs pause between retries in milliseconds
      * @return a Dataset of string stream records
      */
    def sourceKafkaStringStream(kafkaServers: String, topics: String,
                                startingOffsets: String = "latest",
                                pollTimeoutMs: Long = 512,
                                numRetries: Int = 3,
                                retryIntervalMs: Long = 10): Dataset[StreamRecord[String]] =
      KafkaTemplate.sourceKafkaStringStream(spark, kafkaServers, topics, startingOffsets, pollTimeoutMs, numRetries, retryIntervalMs)

    /**
      * Subscribes to Kafka and exposes the raw source DataFrame (key, value,
      * topic, partition, offset, ...). Delegates to [[KafkaTemplate]].
      *
      * @param kafkaServers    bootstrap servers
      * @param topics          topic(s) to subscribe to
      * @param startingOffsets where to start when no committed offset exists
      * @param pollTimeoutMs   poll timeout in milliseconds
      * @param numRetries      retry count on failure
      * @param retryIntervalMs pause between retries in milliseconds
      * @return the raw Kafka source DataFrame
      */
    def sourceKafkaRawStream(kafkaServers: String, topics: String,
                             startingOffsets: String = "latest",
                             pollTimeoutMs: Long = 512,
                             numRetries: Int = 3,
                             retryIntervalMs: Long = 10): DataFrame =
      KafkaTemplate.sourceKafkaRawStream(spark, kafkaServers, topics, startingOffsets, pollTimeoutMs, numRetries, retryIntervalMs)

  }

  /**
    * Enriches [[Dataset]] with streaming-sink helpers. Both sinks block the
    * calling thread until any active streaming query on the session terminates.
    *
    * @param ds the dataset being extended
    */
  implicit class ImplicitsDS(ds: Dataset[_]) extends Serializable {

    /**
      * Publishes the dataset to the given Kafka topic, then blocks awaiting
      * query termination.
      *
      * @param kafkaServers bootstrap servers
      * @param topics       destination topic(s)
      */
    def sinkKafkaStream(kafkaServers: String, topics: String): Unit = {
      val session = ds.sparkSession
      KafkaTemplate.sinkKafkaStream(ds, kafkaServers, topics)
      session.streams.awaitAnyTermination()
    }

    /**
      * Persists the dataset to HDFS, then blocks awaiting query termination.
      *
      * @param path                    target directory on HDFS
      * @param fileFormat              output file format (defaults to text)
      * @param triggerIntervalMilliSec micro-batch trigger interval in milliseconds
      * @param outputMode              streaming output mode (defaults to append)
      */
    def sinkHDFSStream(path: String, fileFormat: String = FileFormat.TEXT, triggerIntervalMilliSec: Long = 5000,
                       outputMode: OutputMode = OutputMode.Append): Unit = {
      val session = ds.sparkSession
      HDFSTemplate.sinkHDFSStream(ds, path, fileFormat, triggerIntervalMilliSec, outputMode)
      session.streams.awaitAnyTermination()
    }
  }

  /**
    * Validates that an item emitted by the gateway carries the mandatory
    * `_id` / `_ts` / `_op` fields before any further processing.
    *
    * @param id   the item's `_id` field (null when missing)
    * @param ts   the item's `_ts` timestamp (null when missing, 0 treated as unset)
    * @param op   the item's `_op` operation field (null when missing)
    * @param item the raw item text, used only in the error log message
    * @return true when all mandatory fields are present
    */
  def formatCheck(id: String, ts: java.lang.Long, op: String, item: String): Boolean = {
    // Null checks first: the original tested `ts == 0` before `ts == null`,
    // which only worked because Scala's == is null-safe; null-first is the
    // correct and portable ordering. Result is unchanged for all inputs.
    if (id == null || op == null || ts == null || ts == 0L) {
      log.error(s"Formatted Item [$item] NOT exist [/_id_/_ts_/_op_] field(s).")
      false
    } else {
      true
    }
  }

  /**
    * Builds the Kafka consumer strategy for a topic: an explicit partition/offset
    * assignment when `assignOffset` is enabled and the offset message covers the
    * topic, otherwise a plain subscription.
    *
    * @param assignOffsetMessage JSON of shape {"topicA":{"0":23,"1":-1},"topicB":{"0":-2}},
    *                            i.e. topic -> (partition -> offset)
    * @param assignOffset        whether explicit assignment is enabled at all
    * @param kafkaParams         consumer configuration passed through to the strategy
    * @param topic               the topic this job consumes
    * @return an Assign strategy when offsets were provided for `topic`, else Subscribe
    */
  def assignOffsets(assignOffsetMessage: String, assignOffset: Boolean, kafkaParams: Map[String, Object], topic: String): ConsumerStrategy[String, String] = {
    // assignOffsetMessage: {"topicA":{"0":23,"1":-1},"topicB":{"0":-2}}  {"topic":{"partition:offset"}}
    // NOTE(review): the null test runs after `!= ""`; safe only because Scala's
    // == / != are null-safe — null-first would read better.
    if (assignOffset && assignOffsetMessage != "" && assignOffsetMessage != null) {
      val mapper = new ObjectMapper
      mapper.registerModule(DefaultScalaModule)
      // NOTE(review): classOf[Map[String, Any]] is type-erased, so Jackson
      // materializes untyped values here; the round-trip through
      // writeValueAsString below re-parses just this topic's sub-map.
      val parameterMap = mapper.readValue(assignOffsetMessage, classOf[Map[String, Any]])
      if (parameterMap.contains(topic)) {
        val str = mapper.writeValueAsString(parameterMap(topic))
        // NOTE(review): also erased — the declared Int values may actually be
        // boxed numbers of another width; the .toInt/.toLong below normalize them.
        val partitionAndOffsetStr = mapper.readValue(str, classOf[Map[String, Int]])
        val topicAndPartitionMessage: util.Map[TopicPartition, lang.Long] = new util.HashMap[TopicPartition, lang.Long]()
        partitionAndOffsetStr.foreach(item => topicAndPartitionMessage.put(new TopicPartition(topic, item._1.toInt), item._2.toLong))
        // NOTE(review): `.keys.toList` on a java.util.Map relies on the deprecated
        // JavaConversions implicit imported at the top of the file; prefer
        // JavaConverters/CollectionConverters .asScala when touching this.
        Assign[String, String](topicAndPartitionMessage.keys.toList, kafkaParams, topicAndPartitionMessage)
      } else {
        // Offset message present but does not mention this topic: fall back to subscribe.
        Subscribe[String, String](List(topic), kafkaParams)
      }
    } else {
      // Assignment disabled or no offset message: plain subscription.
      Subscribe[String, String](List(topic), kafkaParams)
    }
  }
}

/**
  * HTTP-status-style result codes returned by record-processing callbacks.
  * Any value other than [[RESULT.SUCCESS]] halts offset advancement for the
  * record that produced it. Ordered by numeric code.
  */
object RESULT {

  /** Record processed successfully; the committed offset may advance. */
  val SUCCESS = 0L

  // Client-side failures (4xx).
  val BAD_REQUEST = 400L
  val UNAUTHORIZED = 401L
  val FORBIDDEN = 403L
  val NOT_FOUND = 404L
  val CONFLICT = 409L
  val UNSUPPORTED_MEDIA_TYPE = 415L
  val LOCKED = 423L

  // Server-side failures (5xx).
  val INTERNAL_SERVER_ERROR = 500L
  val NOT_IMPLEMENTED = 501L
  val SERVICE_UNAVAILABLE = 503L

}